openlit 1.34.19.tar.gz → 1.34.22.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (155)
  1. {openlit-1.34.19 → openlit-1.34.22}/PKG-INFO +1 -1
  2. {openlit-1.34.19 → openlit-1.34.22}/pyproject.toml +1 -1
  3. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/__helpers.py +40 -0
  4. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/bedrock/__init__.py +19 -14
  5. openlit-1.34.22/src/openlit/instrumentation/bedrock/bedrock.py +211 -0
  6. openlit-1.34.22/src/openlit/instrumentation/bedrock/utils.py +223 -0
  7. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/litellm/async_litellm.py +2 -2
  8. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/openai/__init__.py +63 -68
  9. openlit-1.34.22/src/openlit/instrumentation/openai/async_openai.py +501 -0
  10. openlit-1.34.22/src/openlit/instrumentation/openai/openai.py +501 -0
  11. openlit-1.34.22/src/openlit/instrumentation/openai/utils.py +794 -0
  12. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/vertexai/__init__.py +18 -23
  13. openlit-1.34.22/src/openlit/instrumentation/vertexai/async_vertexai.py +141 -0
  14. openlit-1.34.22/src/openlit/instrumentation/vertexai/utils.py +204 -0
  15. openlit-1.34.22/src/openlit/instrumentation/vertexai/vertexai.py +141 -0
  16. openlit-1.34.19/src/openlit/instrumentation/bedrock/bedrock.py +0 -77
  17. openlit-1.34.19/src/openlit/instrumentation/bedrock/utils.py +0 -252
  18. openlit-1.34.19/src/openlit/instrumentation/openai/async_openai.py +0 -1575
  19. openlit-1.34.19/src/openlit/instrumentation/openai/openai.py +0 -1575
  20. openlit-1.34.19/src/openlit/instrumentation/vertexai/async_vertexai.py +0 -459
  21. openlit-1.34.19/src/openlit/instrumentation/vertexai/vertexai.py +0 -459
  22. {openlit-1.34.19 → openlit-1.34.22}/LICENSE +0 -0
  23. {openlit-1.34.19 → openlit-1.34.22}/README.md +0 -0
  24. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/__init__.py +0 -0
  25. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/evals/__init__.py +0 -0
  26. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/evals/all.py +0 -0
  27. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/evals/bias_detection.py +0 -0
  28. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/evals/hallucination.py +0 -0
  29. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/evals/toxicity.py +0 -0
  30. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/evals/utils.py +0 -0
  31. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/guard/__init__.py +0 -0
  32. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/guard/all.py +0 -0
  33. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/guard/prompt_injection.py +0 -0
  34. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/guard/restrict_topic.py +0 -0
  35. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/guard/sensitive_topic.py +0 -0
  36. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/guard/utils.py +0 -0
  37. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/ag2/__init__.py +0 -0
  38. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/ag2/ag2.py +0 -0
  39. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/ai21/__init__.py +0 -0
  40. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/ai21/ai21.py +0 -0
  41. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/ai21/async_ai21.py +0 -0
  42. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/ai21/utils.py +0 -0
  43. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/anthropic/__init__.py +0 -0
  44. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/anthropic/anthropic.py +0 -0
  45. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/anthropic/async_anthropic.py +0 -0
  46. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/anthropic/utils.py +0 -0
  47. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/assemblyai/__init__.py +0 -0
  48. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/assemblyai/assemblyai.py +0 -0
  49. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/assemblyai/utils.py +0 -0
  50. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/astra/__init__.py +0 -0
  51. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/astra/astra.py +0 -0
  52. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/astra/async_astra.py +0 -0
  53. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/astra/utils.py +0 -0
  54. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/azure_ai_inference/__init__.py +0 -0
  55. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +0 -0
  56. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +0 -0
  57. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/azure_ai_inference/utils.py +0 -0
  58. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/chroma/__init__.py +0 -0
  59. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/chroma/chroma.py +0 -0
  60. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/cohere/__init__.py +0 -0
  61. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/cohere/async_cohere.py +0 -0
  62. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/cohere/cohere.py +0 -0
  63. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/cohere/utils.py +0 -0
  64. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/controlflow/__init__.py +0 -0
  65. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/controlflow/controlflow.py +0 -0
  66. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/crawl4ai/__init__.py +0 -0
  67. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/crawl4ai/async_crawl4ai.py +0 -0
  68. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/crawl4ai/crawl4ai.py +0 -0
  69. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/crewai/__init__.py +0 -0
  70. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/crewai/crewai.py +0 -0
  71. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/dynamiq/__init__.py +0 -0
  72. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/dynamiq/dynamiq.py +0 -0
  73. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/elevenlabs/__init__.py +0 -0
  74. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/elevenlabs/async_elevenlabs.py +0 -0
  75. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/elevenlabs/elevenlabs.py +0 -0
  76. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/elevenlabs/utils.py +0 -0
  77. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/embedchain/__init__.py +0 -0
  78. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/embedchain/embedchain.py +0 -0
  79. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/firecrawl/__init__.py +0 -0
  80. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/firecrawl/firecrawl.py +0 -0
  81. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/google_ai_studio/__init__.py +0 -0
  82. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +0 -0
  83. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py +0 -0
  84. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/google_ai_studio/utils.py +0 -0
  85. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/gpt4all/__init__.py +0 -0
  86. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/gpt4all/gpt4all.py +0 -0
  87. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/gpt4all/utils.py +0 -0
  88. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/gpu/__init__.py +0 -0
  89. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/groq/__init__.py +0 -0
  90. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/groq/async_groq.py +0 -0
  91. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/groq/groq.py +0 -0
  92. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/groq/utils.py +0 -0
  93. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/haystack/__init__.py +0 -0
  94. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/haystack/haystack.py +0 -0
  95. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/julep/__init__.py +0 -0
  96. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/julep/async_julep.py +0 -0
  97. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/julep/julep.py +0 -0
  98. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/langchain/__init__.py +0 -0
  99. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/langchain/async_langchain.py +0 -0
  100. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/langchain/langchain.py +0 -0
  101. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/letta/__init__.py +0 -0
  102. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/letta/letta.py +0 -0
  103. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/litellm/__init__.py +0 -0
  104. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/litellm/litellm.py +0 -0
  105. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/litellm/utils.py +0 -0
  106. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/llamaindex/__init__.py +0 -0
  107. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/llamaindex/llamaindex.py +0 -0
  108. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/mem0/__init__.py +0 -0
  109. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/mem0/mem0.py +0 -0
  110. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/milvus/__init__.py +0 -0
  111. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/milvus/milvus.py +0 -0
  112. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/mistral/__init__.py +0 -0
  113. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/mistral/async_mistral.py +0 -0
  114. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/mistral/mistral.py +0 -0
  115. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/mistral/utils.py +0 -0
  116. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/multion/__init__.py +0 -0
  117. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/multion/async_multion.py +0 -0
  118. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/multion/multion.py +0 -0
  119. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/ollama/__init__.py +0 -0
  120. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/ollama/async_ollama.py +0 -0
  121. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/ollama/ollama.py +0 -0
  122. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/ollama/utils.py +0 -0
  123. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/openai_agents/__init__.py +0 -0
  124. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/openai_agents/openai_agents.py +0 -0
  125. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/phidata/__init__.py +0 -0
  126. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/phidata/phidata.py +0 -0
  127. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/pinecone/__init__.py +0 -0
  128. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/pinecone/pinecone.py +0 -0
  129. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/premai/__init__.py +0 -0
  130. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/premai/premai.py +0 -0
  131. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/premai/utils.py +0 -0
  132. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/pydantic_ai/__init__.py +0 -0
  133. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/pydantic_ai/pydantic_ai.py +0 -0
  134. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/pydantic_ai/utils.py +0 -0
  135. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/qdrant/__init__.py +0 -0
  136. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/qdrant/async_qdrant.py +0 -0
  137. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/qdrant/qdrant.py +0 -0
  138. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/reka/__init__.py +0 -0
  139. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/reka/async_reka.py +0 -0
  140. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/reka/reka.py +0 -0
  141. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/reka/utils.py +0 -0
  142. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/together/__init__.py +0 -0
  143. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/together/async_together.py +0 -0
  144. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/together/together.py +0 -0
  145. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/together/utils.py +0 -0
  146. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/transformers/__init__.py +0 -0
  147. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/transformers/transformers.py +0 -0
  148. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/transformers/utils.py +0 -0
  149. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/vllm/__init__.py +0 -0
  150. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/vllm/utils.py +0 -0
  151. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/vllm/vllm.py +0 -0
  152. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/otel/events.py +0 -0
  153. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/otel/metrics.py +0 -0
  154. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/otel/tracing.py +0 -0
  155. {openlit-1.34.19 → openlit-1.34.22}/src/openlit/semcov/__init__.py +0 -0
{openlit-1.34.19 → openlit-1.34.22}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: openlit
- Version: 1.34.19
+ Version: 1.34.22
  Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
  License: Apache-2.0
  Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
{openlit-1.34.19 → openlit-1.34.22}/pyproject.toml
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "openlit"
- version = "1.34.19"
+ version = "1.34.22"
  description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects"
  authors = ["OpenLIT"]
  license = "Apache-2.0"
{openlit-1.34.19 → openlit-1.34.22}/src/openlit/__helpers.py
@@ -402,3 +402,43 @@ def record_embedding_metrics(metrics, gen_ai_operation, gen_ai_system, server_ad
      metrics["genai_requests"].add(1, attributes)
      metrics["genai_prompt_tokens"].add(input_tokens, attributes)
      metrics["genai_cost"].record(cost, attributes)
+
+ def record_audio_metrics(metrics, gen_ai_operation, gen_ai_system, server_address, server_port,
+     request_model, response_model, environment, application_name, start_time, end_time, cost):
+     """
+     Record audio-specific metrics for the operation.
+     """
+
+     attributes = create_metrics_attributes(
+         operation=gen_ai_operation,
+         system=gen_ai_system,
+         server_address=server_address,
+         server_port=server_port,
+         request_model=request_model,
+         response_model=response_model,
+         service_name=application_name,
+         deployment_environment=environment,
+     )
+     metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
+     metrics["genai_requests"].add(1, attributes)
+     metrics["genai_cost"].record(cost, attributes)
+
+ def record_image_metrics(metrics, gen_ai_operation, gen_ai_system, server_address, server_port,
+     request_model, response_model, environment, application_name, start_time, end_time, cost):
+     """
+     Record image-specific metrics for the operation.
+     """
+
+     attributes = create_metrics_attributes(
+         operation=gen_ai_operation,
+         system=gen_ai_system,
+         server_address=server_address,
+         server_port=server_port,
+         request_model=request_model,
+         response_model=response_model,
+         service_name=application_name,
+         deployment_environment=environment,
+     )
+     metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
+     metrics["genai_requests"].add(1, attributes)
+     metrics["genai_cost"].record(cost, attributes)
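For context, a minimal sketch of how one of these new helpers might be invoked from an image-generation instrumentation path. The "metrics" dict is assumed to be the instrument dictionary openlit builds during metric setup, and the SemanticConvention constant names, endpoint, model ids, and cost value are illustrative assumptions, not taken from this diff.

import time
from openlit.__helpers import record_image_metrics
from openlit.semcov import SemanticConvention

start_time = time.time()
# ... the provider's image-generation call would happen here ...
end_time = time.time()

# "metrics" is assumed to be openlit's dict of OTel instruments;
# the operation/system constants below are illustrative assumptions.
record_image_metrics(
    metrics,
    SemanticConvention.GEN_AI_OPERATION_TYPE_IMAGE,
    SemanticConvention.GEN_AI_SYSTEM_OPENAI,
    "api.openai.com", 443,      # server_address, server_port
    "dall-e-3", "dall-e-3",     # request_model, response_model
    "production", "my-app",     # environment, application_name
    start_time, end_time,
    0.04,                       # precomputed per-image cost
)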
{openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/bedrock/__init__.py
@@ -1,4 +1,3 @@
- # pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
  """Initializer of Auto Instrumentation of AWS Bedrock Functions"""

  from typing import Collection
@@ -6,37 +5,43 @@ import importlib.metadata
  from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
  from wrapt import wrap_function_wrapper

- from openlit.instrumentation.bedrock.bedrock import converse
+ from openlit.instrumentation.bedrock.bedrock import converse, converse_stream

  _instruments = ("boto3 >= 1.34.138",)

  class BedrockInstrumentor(BaseInstrumentor):
      """
-     An instrumentor for AWS Bedrock's client library.
+     An instrumentor for AWS Bedrock client library.
      """

      def instrumentation_dependencies(self) -> Collection[str]:
          return _instruments

      def _instrument(self, **kwargs):
-         application_name = kwargs.get("application_name", "default_application")
-         environment = kwargs.get("environment", "default_environment")
+         version = importlib.metadata.version("boto3")
+         environment = kwargs.get("environment", "default")
+         application_name = kwargs.get("application_name", "default")
          tracer = kwargs.get("tracer")
-         event_provider = kwargs.get('event_provider')
-         metrics = kwargs.get("metrics_dict")
          pricing_info = kwargs.get("pricing_info", {})
          capture_message_content = kwargs.get("capture_message_content", False)
+         metrics = kwargs.get("metrics_dict")
          disable_metrics = kwargs.get("disable_metrics")
-         version = importlib.metadata.version("boto3")

-         #sync
+         # sync
+         wrap_function_wrapper(
+             "botocore.client",
+             "ClientCreator.create_client",
+             converse(version, environment, application_name, tracer, pricing_info,
+                 capture_message_content, metrics, disable_metrics),
+         )
+
+         # streaming
          wrap_function_wrapper(
-             "botocore.client",
-             "ClientCreator.create_client",
-             converse(version, environment, application_name,
-                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
+             "botocore.client",
+             "ClientCreator.create_client",
+             converse_stream(version, environment, application_name, tracer, pricing_info,
+                 capture_message_content, metrics, disable_metrics),
          )

      def _uninstrument(self, **kwargs):
-         # Proper uninstrumentation logic to revert patched methods
          pass
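Taken together, the reworked initializer means Bedrock calls are traced as soon as openlit is initialized. A minimal usage sketch, assuming standard AWS credentials and placeholder region, application, and model values:

import boto3
import openlit

# Wires BedrockInstrumentor (among others) with the kwargs seen above.
openlit.init(application_name="bedrock-demo", environment="dev")

# ClientCreator.create_client is patched, so clients for the
# "bedrock-runtime" service come back with instrumented
# converse / converse_stream methods.
client = boto3.client("bedrock-runtime", region_name="us-east-1")

response = client.converse(
    modelId="amazon.titan-text-express-v1",
    messages=[{"role": "user", "content": [{"text": "Hello!"}]}],
)
print(response["output"]["message"]["content"][0]["text"])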
openlit-1.34.22/src/openlit/instrumentation/bedrock/bedrock.py
@@ -0,0 +1,211 @@
+ """
+ Module for monitoring Amazon Bedrock API calls.
+ """
+
+ import time
+ from opentelemetry.trace import SpanKind
+ from openlit.__helpers import (
+     handle_exception,
+     set_server_address_and_port
+ )
+ from openlit.instrumentation.bedrock.utils import (
+     process_chunk,
+     process_chat_response,
+     process_streaming_chat_response,
+ )
+ from openlit.semcov import SemanticConvention
+
+ def converse(version, environment, application_name, tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for AWS Bedrock converse calls.
+     """
+
+     def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the ClientCreator.create_client call.
+         """
+
+         def converse_wrapper(original_method, *method_args, **method_kwargs):
+             """
+             Wraps the individual converse method call.
+             """
+
+             server_address, server_port = set_server_address_and_port(instance, "aws.amazon.com", 443)
+             request_model = method_kwargs.get("modelId", "amazon.titan-text-express-v1")
+
+             span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+             with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+                 start_time = time.time()
+                 response = original_method(*method_args, **method_kwargs)
+                 llm_config = method_kwargs.get("inferenceConfig", {})
+
+                 try:
+                     response = process_chat_response(
+                         response=response,
+                         request_model=request_model,
+                         pricing_info=pricing_info,
+                         server_port=server_port,
+                         server_address=server_address,
+                         environment=environment,
+                         application_name=application_name,
+                         metrics=metrics,
+                         start_time=start_time,
+                         span=span,
+                         capture_message_content=capture_message_content,
+                         disable_metrics=disable_metrics,
+                         version=version,
+                         llm_config=llm_config,
+                         **method_kwargs
+                     )
+
+                 except Exception as e:
+                     handle_exception(span, e)
+
+                 return response
+
+         # Get the original client instance from the wrapper
+         client = wrapped(*args, **kwargs)
+
+         # Replace the original method with the instrumented one
+         if kwargs.get("service_name") == "bedrock-runtime":
+             original_invoke_model = client.converse
+             client.converse = lambda *args, **kwargs: converse_wrapper(original_invoke_model, *args, **kwargs)
+
+         return client
+
+     return wrapper
+
+ def converse_stream(version, environment, application_name, tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for AWS Bedrock converse_stream calls.
+     """
+
+     class TracedSyncStream:
+         """
+         Wrapper for streaming responses to collect telemetry.
+         """
+
+         def __init__(
+             self,
+             wrapped_response,
+             span,
+             span_name,
+             kwargs,
+             server_address,
+             server_port,
+             **args,
+         ):
+             self.__wrapped_response = wrapped_response
+             # Extract the actual stream iterator from the response
+             if isinstance(wrapped_response, dict) and "stream" in wrapped_response:
+                 self.__wrapped_stream = iter(wrapped_response["stream"])
+             else:
+                 self.__wrapped_stream = iter(wrapped_response)
+
+             self._span = span
+             self._span_name = span_name
+             self._llmresponse = ""
+             self._response_id = ""
+             self._response_model = ""
+             self._finish_reason = ""
+             self._tools = None
+             self._input_tokens = 0
+             self._output_tokens = 0
+
+             self._args = args
+             self._kwargs = kwargs
+             self._start_time = time.time()
+             self._end_time = None
+             self._timestamps = []
+             self._ttft = 0
+             self._tbt = 0
+             self._server_address = server_address
+             self._server_port = server_port
+
+         def __enter__(self):
+             if hasattr(self.__wrapped_stream, "__enter__"):
+                 self.__wrapped_stream.__enter__()
+             return self
+
+         def __exit__(self, exc_type, exc_value, traceback):
+             if hasattr(self.__wrapped_stream, "__exit__"):
+                 self.__wrapped_stream.__exit__(exc_type, exc_value, traceback)
+
+         def __iter__(self):
+             return self
+
+         def __getattr__(self, name):
+             """Delegate attribute access to the wrapped response."""
+             return getattr(self.__wrapped_response, name)
+
+         def get(self, key, default=None):
+             """Delegate get method to the wrapped response if it's a dict."""
+             if isinstance(self.__wrapped_response, dict):
+                 return self.__wrapped_response.get(key, default)
+             return getattr(self.__wrapped_response, key, default)
+
+         def __getitem__(self, key):
+             """Delegate item access to the wrapped response if it's a dict."""
+             if isinstance(self.__wrapped_response, dict):
+                 return self.__wrapped_response[key]
+             return getattr(self.__wrapped_response, key)
+
+         def __next__(self):
+             try:
+                 chunk = next(self.__wrapped_stream)
+                 process_chunk(self, chunk)
+                 return chunk
+             except StopIteration:
+                 try:
+                     llm_config = self._kwargs.get("inferenceConfig", {})
+                     with tracer.start_as_current_span(self._span_name, kind=SpanKind.CLIENT) as self._span:
+                         process_streaming_chat_response(
+                             self,
+                             pricing_info=pricing_info,
+                             environment=environment,
+                             application_name=application_name,
+                             metrics=metrics,
+                             capture_message_content=capture_message_content,
+                             disable_metrics=disable_metrics,
+                             version=version,
+                             llm_config=llm_config
+                         )
+
+                 except Exception as e:
+                     handle_exception(self._span, e)
+
+                 raise
+
+     def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the ClientCreator.create_client call.
+         """
+
+         def converse_stream_wrapper(original_method, *method_args, **method_kwargs):
+             """
+             Wraps the individual converse_stream method call.
+             """
+
+             server_address, server_port = set_server_address_and_port(instance, "aws.amazon.com", 443)
+             request_model = method_kwargs.get("modelId", "amazon.titan-text-express-v1")
+
+             span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+             # Get the streaming response
+             stream_response = original_method(*method_args, **method_kwargs)
+             span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
+
+             return TracedSyncStream(stream_response, span, span_name, method_kwargs, server_address, server_port)
+
+         # Get the original client instance from the wrapper
+         client = wrapped(*args, **kwargs)
+
+         # Replace the original method with the instrumented one
+         if kwargs.get("service_name") == "bedrock-runtime":
+             original_stream_model = client.converse_stream
+             client.converse_stream = lambda *args, **kwargs: converse_stream_wrapper(original_stream_model, *args, **kwargs)
+
+         return client
+
+     return wrapper
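One way to consume the instrumented stream, as a sketch: the TracedSyncStream above is itself iterable over the Bedrock event stream, so per-chunk telemetry is collected transparently and the span is finalized when the underlying stream is exhausted. The client and prompt below are assumptions carried over from the previous example.

response = client.converse_stream(
    modelId="amazon.titan-text-express-v1",
    messages=[{"role": "user", "content": [{"text": "Write a haiku."}]}],
)

# Iterating the wrapper drives process_chunk() for every event;
# StopIteration triggers process_streaming_chat_response().
for event in response:
    delta = event.get("contentBlockDelta", {}).get("delta", {})
    if "text" in delta:
        print(delta["text"], end="", flush=True)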
openlit-1.34.22/src/openlit/instrumentation/bedrock/utils.py
@@ -0,0 +1,223 @@
+ """
+ AWS Bedrock OpenTelemetry instrumentation utility functions
+ """
+ import time
+
+ from opentelemetry.trace import Status, StatusCode
+
+ from openlit.__helpers import (
+     calculate_ttft,
+     response_as_dict,
+     calculate_tbt,
+     get_chat_model_cost,
+     record_completion_metrics,
+     common_span_attributes,
+     handle_exception
+ )
+ from openlit.semcov import SemanticConvention
+
+ def format_content(messages):
+     """
+     Format the messages into a string for span events.
+     """
+
+     if not messages:
+         return ""
+
+     formatted_messages = []
+     for message in messages:
+         if isinstance(message, dict):
+             role = message.get("role", "user")
+             content = message.get("content", "")
+         else:
+             # Handle Bedrock object format
+             role = getattr(message, "role", "user")
+             content = getattr(message, "content", "")
+
+         if isinstance(content, list):
+             # Handle structured content (e.g., text + images)
+             text_parts = []
+             for part in content:
+                 if isinstance(part, dict):
+                     # Bedrock format: {"text": "content"} or generic format: {"type": "text", "text": "content"}
+                     if "text" in part:
+                         text_parts.append(part.get("text", ""))
+                     elif part.get("type") == "text":
+                         text_parts.append(part.get("text", ""))
+             content = " ".join(text_parts)
+         elif not isinstance(content, str):
+             content = str(content)
+
+         formatted_messages.append(f"{role}: {content}")
+
+     return "\n".join(formatted_messages)
+
+ def process_chunk(self, chunk):
+     """
+     Process a chunk of response data and update state.
+     """
+
+     end_time = time.time()
+     # Record the timestamp for the current chunk
+     self._timestamps.append(end_time)
+
+     if len(self._timestamps) == 1:
+         # Calculate time to first chunk
+         self._ttft = calculate_ttft(self._timestamps, self._start_time)
+
+     chunked = response_as_dict(chunk)
+
+     # Handle Bedrock messageStart event
+     if "messageStart" in chunked:
+         message_start = chunked.get("messageStart", {})
+         self._response_role = message_start.get("role", "assistant")
+
+     # Handle Bedrock contentBlockDelta event
+     if "contentBlockDelta" in chunked:
+         content_delta = chunked.get("contentBlockDelta", {})
+         delta = content_delta.get("delta", {})
+         if "text" in delta:
+             self._llmresponse += delta.get("text", "")
+
+     # Handle Bedrock messageStop event
+     if "messageStop" in chunked:
+         message_stop = chunked.get("messageStop", {})
+         self._finish_reason = message_stop.get("stopReason", "")
+
+     # Handle Bedrock metadata event (final event with usage info)
+     if "metadata" in chunked:
+         metadata = chunked.get("metadata", {})
+         usage = metadata.get("usage", {})
+         self._input_tokens = usage.get("inputTokens", 0)
+         self._output_tokens = usage.get("outputTokens", 0)
+         self._end_time = end_time
+
+ def common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+     capture_message_content, disable_metrics, version, llm_config, is_stream):
+     """
+     Process chat request and generate Telemetry
+     """
+
+     scope._end_time = time.time()
+     if len(scope._timestamps) > 1:
+         scope._tbt = calculate_tbt(scope._timestamps)
+
+     formatted_messages = format_content(scope._kwargs.get("messages", []))
+     request_model = scope._kwargs.get("modelId", "amazon.titan-text-express-v1")
+
+     cost = get_chat_model_cost(request_model, pricing_info, scope._input_tokens, scope._output_tokens)
+
+     # Common Span Attributes
+     common_span_attributes(scope,
+         SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_AWS_BEDROCK,
+         scope._server_address, scope._server_port, request_model, scope._response_model,
+         environment, application_name, is_stream, scope._tbt, scope._ttft, version)
+
+     # Bedrock-specific attributes from llm_config
+     bedrock_attributes = [
+         (SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY, "frequencyPenalty"),
+         (SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, "maxTokens"),
+         (SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY, "presencePenalty"),
+         (SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, "stopSequences"),
+         (SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, "temperature"),
+         (SemanticConvention.GEN_AI_REQUEST_TOP_P, "topP"),
+         (SemanticConvention.GEN_AI_REQUEST_TOP_K, "topK"),
+     ]
+
+     # Set each bedrock-specific attribute if the corresponding value exists and is not None
+     for attribute, key in bedrock_attributes:
+         value = llm_config.get(key)
+         if value is not None:
+             scope._span.set_attribute(attribute, value)
+
+     # Span Attributes for Response parameters
+     scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
+     scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
+     scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, "text" if isinstance(scope._llmresponse, str) else "json")
+
+     # Span Attributes for Cost and Tokens
+     scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
+     scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
+     scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
+     scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+
+     # Span Attributes for Content
+     if capture_message_content:
+         scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, formatted_messages)
+         scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, scope._llmresponse)
+
+         # To be removed once the change to span_attributes (from span events) is complete
+         scope._span.add_event(
+             name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
+             attributes={
+                 SemanticConvention.GEN_AI_CONTENT_PROMPT: formatted_messages,
+             },
+         )
+         scope._span.add_event(
+             name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
+             attributes={
+                 SemanticConvention.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
+             },
+         )
+
+     scope._span.set_status(Status(StatusCode.OK))
+
+     # Record metrics
+     if not disable_metrics:
+         record_completion_metrics(metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_AWS_BEDROCK,
+             scope._server_address, scope._server_port, request_model, scope._response_model, environment,
+             application_name, scope._start_time, scope._end_time, scope._input_tokens, scope._output_tokens,
+             cost, scope._tbt, scope._ttft)
+
+ def process_streaming_chat_response(scope, pricing_info, environment, application_name, metrics,
+     capture_message_content=False, disable_metrics=False, version="", llm_config=None):
+     """
+     Process streaming chat response and generate telemetry.
+     """
+
+     try:
+         if llm_config is None:
+             llm_config = {}
+
+         common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+             capture_message_content, disable_metrics, version, llm_config, is_stream=True)
+     except Exception as e:
+         handle_exception(scope._span, e)
+         raise
+
+ def process_chat_response(response, request_model, pricing_info, server_port, server_address, environment,
+     application_name, metrics, start_time, span, capture_message_content=False,
+     disable_metrics=False, version="1.0.0", llm_config=None, **kwargs):
+     """
+     Process non-streaming chat response and generate telemetry.
+     """
+
+     try:
+         if llm_config is None:
+             llm_config = {}
+
+         scope = type("GenericScope", (), {})()
+         response_dict = response_as_dict(response)
+
+         scope._start_time = start_time
+         scope._end_time = time.time()
+         scope._span = span
+         scope._llmresponse = response_dict.get("output", {}).get("message", {}).get("content", [{}])[0].get("text", "")
+         scope._response_role = response_dict.get("output", {}).get("message", {}).get("role", "assistant")
+         scope._input_tokens = response_dict.get("usage", {}).get("inputTokens", 0)
+         scope._output_tokens = response_dict.get("usage", {}).get("outputTokens", 0)
+         scope._response_model = request_model
+         scope._finish_reason = response_dict.get("stopReason", "")
+         scope._response_id = response_dict.get("RequestId", "")
+         scope._timestamps = []
+         scope._ttft, scope._tbt = scope._end_time - scope._start_time, 0
+         scope._server_address, scope._server_port = server_address, server_port
+         scope._kwargs = kwargs
+
+         common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+             capture_message_content, disable_metrics, version, llm_config, is_stream=False)
+
+         return response
+     except Exception as e:
+         handle_exception(span, e)
+         raise
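To make the chunk handling above concrete, here is a runnable sketch of the converse_stream event shapes that process_chunk() folds into the scope state. The events are hand-written stand-ins for what the event stream emits, and the SimpleNamespace mimics the attributes a TracedSyncStream carries mid-iteration.

import time
from types import SimpleNamespace
from openlit.instrumentation.bedrock.utils import process_chunk

# Stand-in for a TracedSyncStream's internal state.
stream = SimpleNamespace(
    _timestamps=[], _start_time=time.time(), _ttft=0,
    _llmresponse="", _response_role="", _finish_reason="",
    _input_tokens=0, _output_tokens=0, _end_time=None,
)

events = [
    {"messageStart": {"role": "assistant"}},
    {"contentBlockDelta": {"delta": {"text": "Hello"}}},
    {"contentBlockDelta": {"delta": {"text": ", world"}}},
    {"messageStop": {"stopReason": "end_turn"}},
    {"metadata": {"usage": {"inputTokens": 12, "outputTokens": 4}}},
]

for event in events:
    process_chunk(stream, event)

# Afterwards: stream._llmresponse == "Hello, world",
# stream._finish_reason == "end_turn", stream._input_tokens == 12.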
{openlit-1.34.19 → openlit-1.34.22}/src/openlit/instrumentation/litellm/async_litellm.py
@@ -68,9 +68,9 @@ def acompletion(version, environment, application_name, tracer, pricing_info,
          def __aiter__(self):
              return self

-         def __getattr__(self, name):
+         async def __getattr__(self, name):
              """Delegate attribute access to the wrapped object."""
-             return getattr(self.__wrapped__, name)
+             return getattr(await self.__wrapped__, name)

          async def __anext__(self):
              try:
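A consequence of this change, sketched under the assumption that user code reads delegated attributes off the traced stream: because __getattr__ is now a coroutine that first awaits the wrapped object, such attribute access must itself be awaited.

async def read_model_name(traced_stream):
    # Hypothetical caller; "traced_stream" stands in for the wrapper
    # returned by the instrumented litellm acompletion call. Awaiting
    # resolves the wrapped awaitable, then reads .model off the result.
    return await traced_stream.model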