openlit 1.33.10__py3-none-any.whl → 1.33.11__py3-none-any.whl

This diff covers the published contents of two package versions released to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
Files changed (112)
  1. openlit/__helpers.py +73 -0
  2. openlit/__init__.py +38 -11
  3. openlit/instrumentation/ag2/__init__.py +9 -10
  4. openlit/instrumentation/ag2/ag2.py +133 -68
  5. openlit/instrumentation/ai21/__init__.py +6 -5
  6. openlit/instrumentation/ai21/ai21.py +71 -534
  7. openlit/instrumentation/ai21/async_ai21.py +71 -534
  8. openlit/instrumentation/ai21/utils.py +407 -0
  9. openlit/instrumentation/anthropic/__init__.py +3 -3
  10. openlit/instrumentation/anthropic/anthropic.py +4 -4
  11. openlit/instrumentation/anthropic/async_anthropic.py +4 -4
  12. openlit/instrumentation/assemblyai/__init__.py +2 -2
  13. openlit/instrumentation/assemblyai/assemblyai.py +3 -3
  14. openlit/instrumentation/astra/__init__.py +25 -25
  15. openlit/instrumentation/astra/astra.py +2 -2
  16. openlit/instrumentation/astra/async_astra.py +2 -2
  17. openlit/instrumentation/azure_ai_inference/__init__.py +5 -5
  18. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +8 -8
  19. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +8 -8
  20. openlit/instrumentation/bedrock/__init__.py +2 -2
  21. openlit/instrumentation/bedrock/bedrock.py +3 -3
  22. openlit/instrumentation/chroma/__init__.py +9 -9
  23. openlit/instrumentation/chroma/chroma.py +2 -2
  24. openlit/instrumentation/cohere/__init__.py +7 -7
  25. openlit/instrumentation/cohere/async_cohere.py +9 -9
  26. openlit/instrumentation/cohere/cohere.py +9 -9
  27. openlit/instrumentation/controlflow/__init__.py +4 -4
  28. openlit/instrumentation/controlflow/controlflow.py +2 -2
  29. openlit/instrumentation/crawl4ai/__init__.py +3 -3
  30. openlit/instrumentation/crawl4ai/async_crawl4ai.py +2 -2
  31. openlit/instrumentation/crawl4ai/crawl4ai.py +2 -2
  32. openlit/instrumentation/crewai/__init__.py +3 -3
  33. openlit/instrumentation/crewai/crewai.py +2 -2
  34. openlit/instrumentation/dynamiq/__init__.py +5 -5
  35. openlit/instrumentation/dynamiq/dynamiq.py +2 -2
  36. openlit/instrumentation/elevenlabs/__init__.py +5 -5
  37. openlit/instrumentation/elevenlabs/async_elevenlabs.py +3 -3
  38. openlit/instrumentation/elevenlabs/elevenlabs.py +3 -3
  39. openlit/instrumentation/embedchain/__init__.py +2 -2
  40. openlit/instrumentation/embedchain/embedchain.py +4 -4
  41. openlit/instrumentation/firecrawl/__init__.py +3 -3
  42. openlit/instrumentation/firecrawl/firecrawl.py +2 -2
  43. openlit/instrumentation/google_ai_studio/__init__.py +3 -3
  44. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +3 -3
  45. openlit/instrumentation/google_ai_studio/google_ai_studio.py +3 -3
  46. openlit/instrumentation/gpt4all/__init__.py +3 -3
  47. openlit/instrumentation/gpt4all/gpt4all.py +7 -7
  48. openlit/instrumentation/groq/__init__.py +3 -3
  49. openlit/instrumentation/groq/async_groq.py +5 -5
  50. openlit/instrumentation/groq/groq.py +5 -5
  51. openlit/instrumentation/haystack/__init__.py +2 -2
  52. openlit/instrumentation/haystack/haystack.py +2 -2
  53. openlit/instrumentation/julep/__init__.py +7 -7
  54. openlit/instrumentation/julep/async_julep.py +3 -3
  55. openlit/instrumentation/julep/julep.py +3 -3
  56. openlit/instrumentation/langchain/__init__.py +2 -2
  57. openlit/instrumentation/langchain/async_langchain.py +13 -9
  58. openlit/instrumentation/langchain/langchain.py +13 -8
  59. openlit/instrumentation/letta/__init__.py +7 -7
  60. openlit/instrumentation/letta/letta.py +5 -5
  61. openlit/instrumentation/litellm/__init__.py +5 -5
  62. openlit/instrumentation/litellm/async_litellm.py +8 -8
  63. openlit/instrumentation/litellm/litellm.py +8 -8
  64. openlit/instrumentation/llamaindex/__init__.py +2 -2
  65. openlit/instrumentation/llamaindex/llamaindex.py +2 -2
  66. openlit/instrumentation/mem0/__init__.py +2 -2
  67. openlit/instrumentation/mem0/mem0.py +2 -2
  68. openlit/instrumentation/milvus/__init__.py +2 -2
  69. openlit/instrumentation/milvus/milvus.py +2 -2
  70. openlit/instrumentation/mistral/__init__.py +7 -7
  71. openlit/instrumentation/mistral/async_mistral.py +10 -10
  72. openlit/instrumentation/mistral/mistral.py +10 -10
  73. openlit/instrumentation/multion/__init__.py +7 -7
  74. openlit/instrumentation/multion/async_multion.py +5 -5
  75. openlit/instrumentation/multion/multion.py +5 -5
  76. openlit/instrumentation/ollama/__init__.py +11 -9
  77. openlit/instrumentation/ollama/async_ollama.py +71 -465
  78. openlit/instrumentation/ollama/ollama.py +71 -465
  79. openlit/instrumentation/ollama/utils.py +333 -0
  80. openlit/instrumentation/openai/__init__.py +11 -11
  81. openlit/instrumentation/openai/async_openai.py +18 -18
  82. openlit/instrumentation/openai/openai.py +18 -18
  83. openlit/instrumentation/phidata/__init__.py +2 -2
  84. openlit/instrumentation/phidata/phidata.py +2 -2
  85. openlit/instrumentation/pinecone/__init__.py +6 -6
  86. openlit/instrumentation/pinecone/pinecone.py +2 -2
  87. openlit/instrumentation/premai/__init__.py +3 -3
  88. openlit/instrumentation/premai/premai.py +7 -7
  89. openlit/instrumentation/qdrant/__init__.py +2 -2
  90. openlit/instrumentation/qdrant/async_qdrant.py +2 -2
  91. openlit/instrumentation/qdrant/qdrant.py +2 -2
  92. openlit/instrumentation/reka/__init__.py +3 -3
  93. openlit/instrumentation/reka/async_reka.py +3 -3
  94. openlit/instrumentation/reka/reka.py +3 -3
  95. openlit/instrumentation/together/__init__.py +5 -5
  96. openlit/instrumentation/together/async_together.py +8 -8
  97. openlit/instrumentation/together/together.py +8 -8
  98. openlit/instrumentation/transformers/__init__.py +2 -2
  99. openlit/instrumentation/transformers/transformers.py +4 -4
  100. openlit/instrumentation/vertexai/__init__.py +9 -9
  101. openlit/instrumentation/vertexai/async_vertexai.py +4 -4
  102. openlit/instrumentation/vertexai/vertexai.py +4 -4
  103. openlit/instrumentation/vllm/__init__.py +2 -2
  104. openlit/instrumentation/vllm/vllm.py +3 -3
  105. openlit/otel/events.py +85 -0
  106. openlit/otel/tracing.py +3 -13
  107. openlit/semcov/__init__.py +13 -1
  108. {openlit-1.33.10.dist-info → openlit-1.33.11.dist-info}/METADATA +2 -2
  109. openlit-1.33.11.dist-info/RECORD +125 -0
  110. openlit-1.33.10.dist-info/RECORD +0 -122
  111. {openlit-1.33.10.dist-info → openlit-1.33.11.dist-info}/LICENSE +0 -0
  112. {openlit-1.33.10.dist-info → openlit-1.33.11.dist-info}/WHEEL +0 -0
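Every hunk that follows makes the same rename: the flag that gates content capture, passed from each instrumentor into its telemetry wrapper factories, changes from trace_content to capture_message_content. A minimal sketch of that wrapper-factory pattern, with simplified span and attribute names (illustrative only, not the actual openlit code):

from opentelemetry import trace

tracer = trace.get_tracer(__name__)

def chat_wrapper(version, application_name, capture_message_content):
    # The real factories also receive pricing_info, metrics and disable_metrics.
    def wrapper(wrapped, instance, args, kwargs):
        # wrapt-style signature: `wrapped` is the original method being traced.
        with tracer.start_as_current_span("gen_ai.chat") as span:
            response = wrapped(*args, **kwargs)
            span.set_attribute("gen_ai.sdk.version", version)
            if capture_message_content:
                # Content capture is opt-in: prompts are only attached to the
                # span as an event when the flag is enabled.
                span.add_event(
                    name="gen_ai.content.prompt",
                    attributes={"gen_ai.prompt": str(kwargs.get("messages", ""))},
                )
            return response
    return wrapper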
openlit/instrumentation/cohere/async_cohere.py
@@ -22,7 +22,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def async_embed(version, environment, application_name, tracer,
- pricing_info, trace_content, metrics, disable_metrics):
+ pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Generates a telemetry wrapper for embeddings to collect metrics.

@@ -32,7 +32,7 @@ def async_embed(version, environment, application_name, tracer,
  application_name: Name of the application using the Cohere API.
  tracer: OpenTelemetry tracer for creating spans.
  pricing_info: Information used for calculating the cost of Cohere usage.
- trace_content: Flag indicating whether to trace the actual content.
+ capture_message_content: Flag indicating whether to trace the actual content.

  Returns:
  A function that wraps the embeddings method to add telemetry.
@@ -105,7 +105,7 @@ def async_embed(version, environment, application_name, tracer,
  span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
  version)

- if trace_content:
+ if capture_message_content:
  span.add_event(
  name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
  attributes={
@@ -149,7 +149,7 @@ def async_embed(version, environment, application_name, tracer,
  return wrapper

  def async_chat(version, environment, application_name, tracer,
- pricing_info, trace_content, metrics, disable_metrics):
+ pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Generates a telemetry wrapper for chat to collect metrics.

@@ -159,7 +159,7 @@ def async_chat(version, environment, application_name, tracer,
  application_name: Name of the application using the Cohere API.
  tracer: OpenTelemetry tracer for creating spans.
  pricing_info: Information used for calculating the cost of Cohere usage.
- trace_content: Flag indicating whether to trace the actual content.
+ capture_message_content: Flag indicating whether to trace the actual content.

  Returns:
  A function that wraps the chat method to add telemetry.
@@ -281,7 +281,7 @@ def async_chat(version, environment, application_name, tracer,
  span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
  version)

- if trace_content:
+ if capture_message_content:
  span.add_event(
  name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
  attributes={
@@ -336,7 +336,7 @@ def async_chat(version, environment, application_name, tracer,
  return wrapper

  def async_chat_stream(version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics):
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Generates a telemetry wrapper for chat_stream to collect metrics.

@@ -346,7 +346,7 @@ def async_chat_stream(version, environment, application_name,
  application_name: Name of the application using the Cohere API.
  tracer: OpenTelemetry tracer for creating spans.
  pricing_info: Information used for calculating the cost of Cohere usage.
- trace_content: Flag indicating whether to trace the actual content.
+ capture_message_content: Flag indicating whether to trace the actual content.

  Returns:
  A function that wraps the chat method to add telemetry.
@@ -547,7 +547,7 @@ def async_chat_stream(version, environment, application_name,
  self._ttft)
  self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
  version)
- if trace_content:
+ if capture_message_content:
  self._span.add_event(
  name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
  attributes={
openlit/instrumentation/cohere/cohere.py
@@ -22,7 +22,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def embed(version, environment, application_name, tracer,
- pricing_info, trace_content, metrics, disable_metrics):
+ pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Generates a telemetry wrapper for embeddings to collect metrics.

@@ -32,7 +32,7 @@ def embed(version, environment, application_name, tracer,
  application_name: Name of the application using the Cohere API.
  tracer: OpenTelemetry tracer for creating spans.
  pricing_info: Information used for calculating the cost of Cohere usage.
- trace_content: Flag indicating whether to trace the actual content.
+ capture_message_content: Flag indicating whether to trace the actual content.

  Returns:
  A function that wraps the embeddings method to add telemetry.
@@ -105,7 +105,7 @@ def embed(version, environment, application_name, tracer,
  span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
  version)

- if trace_content:
+ if capture_message_content:
  span.add_event(
  name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
  attributes={
@@ -149,7 +149,7 @@ def embed(version, environment, application_name, tracer,
  return wrapper

  def chat(version, environment, application_name, tracer,
- pricing_info, trace_content, metrics, disable_metrics):
+ pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Generates a telemetry wrapper for chat to collect metrics.

@@ -159,7 +159,7 @@ def chat(version, environment, application_name, tracer,
  application_name: Name of the application using the Cohere API.
  tracer: OpenTelemetry tracer for creating spans.
  pricing_info: Information used for calculating the cost of Cohere usage.
- trace_content: Flag indicating whether to trace the actual content.
+ capture_message_content: Flag indicating whether to trace the actual content.

  Returns:
  A function that wraps the chat method to add telemetry.
@@ -281,7 +281,7 @@ def chat(version, environment, application_name, tracer,
  span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
  version)

- if trace_content:
+ if capture_message_content:
  span.add_event(
  name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
  attributes={
@@ -336,7 +336,7 @@ def chat(version, environment, application_name, tracer,
  return wrapper

  def chat_stream(version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics):
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Generates a telemetry wrapper for chat_stream to collect metrics.

@@ -346,7 +346,7 @@ def chat_stream(version, environment, application_name,
  application_name: Name of the application using the Cohere API.
  tracer: OpenTelemetry tracer for creating spans.
  pricing_info: Information used for calculating the cost of Cohere usage.
- trace_content: Flag indicating whether to trace the actual content.
+ capture_message_content: Flag indicating whether to trace the actual content.

  Returns:
  A function that wraps the chat method to add telemetry.
@@ -547,7 +547,7 @@ def chat_stream(version, environment, application_name,
  self._ttft)
  self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
  version)
- if trace_content:
+ if capture_message_content:
  self._span.add_event(
  name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
  attributes={
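The chat_stream hunks above change only the flag name, but they show the shape of the streaming path: a wrapper object holds the span (self._span), tracks time-to-first-token (self._ttft), and finalizes the span, optionally attaching message content, once the stream is exhausted. A rough sketch of that pattern under those assumptions (attribute names and bookkeeping are illustrative, not the exact openlit implementation):

import time

class TracedStream:
    """Wraps a chunk iterator and finalizes an already-started span when it ends."""

    def __init__(self, wrapped_stream, span, capture_message_content):
        self._stream = wrapped_stream
        self._span = span                  # an OpenTelemetry span started by the caller
        self._capture = capture_message_content
        self._start = time.monotonic()
        self._ttft = None
        self._chunks = []

    def __iter__(self):
        return self

    def __next__(self):
        try:
            chunk = next(self._stream)
        except StopIteration:
            # Stream finished: record timing and, if enabled, the accumulated content.
            self._span.set_attribute("gen_ai.server.time_to_first_token", self._ttft or 0.0)
            if self._capture:
                self._span.add_event(
                    name="gen_ai.content.completion",
                    attributes={"gen_ai.completion": "".join(self._chunks)},
                )
            self._span.end()
            raise
        if self._ttft is None:
            self._ttft = time.monotonic() - self._start
        self._chunks.append(str(chunk))
        return chunk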
openlit/instrumentation/controlflow/__init__.py
@@ -26,7 +26,7 @@ class ControlFlowInstrumentor(BaseInstrumentor):
  tracer = kwargs.get("tracer")
  metrics = kwargs.get("metrics_dict")
  pricing_info = kwargs.get("pricing_info", {})
- trace_content = kwargs.get("trace_content", False)
+ capture_message_content = kwargs.get("capture_message_content", False)
  disable_metrics = kwargs.get("disable_metrics")
  version = importlib.metadata.version("controlflow")

@@ -34,21 +34,21 @@ class ControlFlowInstrumentor(BaseInstrumentor):
  "controlflow.agents.agent",
  "Agent.__init__",
  wrap_controlflow("controlflow.create_agent", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
  "controlflow.tasks.task",
  "Task.__init__",
  wrap_controlflow("controlflow.create_task", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
  "controlflow",
  "run",
  wrap_controlflow("controlflow.run", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  def _uninstrument(self, **kwargs):
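The instrumentor hunk above shows the other half of the wiring: _instrument() reads the renamed capture_message_content kwarg (defaulting to False) and threads it into each wrapper via wrapt's wrap_function_wrapper. A condensed, self-contained sketch of that wiring; the wrapper factory here is a placeholder standing in for wrap_controlflow:

import importlib.metadata

from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from wrapt import wrap_function_wrapper

def example_wrap(gen_ai_endpoint, version, environment, application_name,
                 tracer, pricing_info, capture_message_content,
                 metrics, disable_metrics):
    # Placeholder factory: returns a wrapt-style wrapper that just calls through.
    def wrapper(wrapped, instance, args, kwargs):
        return wrapped(*args, **kwargs)
    return wrapper

class ExampleInstrumentor(BaseInstrumentor):
    def instrumentation_dependencies(self):
        return ["controlflow"]

    def _instrument(self, **kwargs):
        application_name = kwargs.get("application_name")
        environment = kwargs.get("environment")
        tracer = kwargs.get("tracer")
        metrics = kwargs.get("metrics_dict")
        pricing_info = kwargs.get("pricing_info", {})
        # Renamed flag; defaults to False so message content stays off spans.
        capture_message_content = kwargs.get("capture_message_content", False)
        disable_metrics = kwargs.get("disable_metrics")
        version = importlib.metadata.version("controlflow")

        wrap_function_wrapper(
            "controlflow.agents.agent",
            "Agent.__init__",
            example_wrap("controlflow.create_agent", version, environment, application_name,
                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
        )

    def _uninstrument(self, **kwargs):
        pass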
openlit/instrumentation/controlflow/controlflow.py
@@ -13,7 +13,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def wrap_controlflow(gen_ai_endpoint, version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics):
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Creates a wrapper around a function call to trace and log its execution metrics.

@@ -27,7 +27,7 @@ def wrap_controlflow(gen_ai_endpoint, version, environment, application_name,
  - application_name (str): Name of the Langchain application.
  - tracer (opentelemetry.trace.Tracer): The tracer object used for OpenTelemetry tracing.
  - pricing_info (dict): Information about the pricing for internal metrics (currently not used).
- - trace_content (bool): Flag indicating whether to trace the content of the response.
+ - capture_message_content (bool): Flag indicating whether to trace the content of the response.

  Returns:
  - function: A higher-order function that takes a function 'wrapped' and returns
openlit/instrumentation/crawl4ai/__init__.py
@@ -29,7 +29,7 @@ class Crawl4AIInstrumentor(BaseInstrumentor):
  tracer = kwargs.get("tracer")
  metrics = kwargs.get("metrics_dict")
  pricing_info = kwargs.get("pricing_info", {})
- trace_content = kwargs.get("trace_content", False)
+ capture_message_content = kwargs.get("capture_message_content", False)
  disable_metrics = kwargs.get("disable_metrics")
  version = importlib.metadata.version("crawl4ai")

@@ -37,14 +37,14 @@ class Crawl4AIInstrumentor(BaseInstrumentor):
  "crawl4ai.web_crawler",
  "WebCrawler.run",
  wrap_crawl("crawl4ai.web_crawl", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
  "crawl4ai.async_webcrawler",
  "AsyncWebCrawler.arun",
  async_wrap_crawl("crawl4ai.web_crawl", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  def _uninstrument(self, **kwargs):
openlit/instrumentation/crawl4ai/async_crawl4ai.py
@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def async_wrap_crawl(gen_ai_endpoint, version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics):
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Generates a telemetry wrapper for chat completions to collect metrics.

@@ -26,7 +26,7 @@ def async_wrap_crawl(gen_ai_endpoint, version, environment, application_name,
  application_name: Name of the application using the Crawl4AI Agent.
  tracer: OpenTelemetry tracer for creating spans.
  pricing_info: Information used for calculating the cost of Crawl4AI usage.
- trace_content: Flag indicating whether to trace the actual content.
+ capture_message_content: Flag indicating whether to trace the actual content.

  Returns:
  A function that wraps the chat completions method to add telemetry.
openlit/instrumentation/crawl4ai/crawl4ai.py
@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def wrap_crawl(gen_ai_endpoint, version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics):
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Generates a telemetry wrapper for chat completions to collect metrics.

@@ -26,7 +26,7 @@ def wrap_crawl(gen_ai_endpoint, version, environment, application_name,
  application_name: Name of the application using the Crawl4AI Agent.
  tracer: OpenTelemetry tracer for creating spans.
  pricing_info: Information used for calculating the cost of Crawl4AI usage.
- trace_content: Flag indicating whether to trace the actual content.
+ capture_message_content: Flag indicating whether to trace the actual content.

  Returns:
  A function that wraps the chat completions method to add telemetry.
openlit/instrumentation/crewai/__init__.py
@@ -26,7 +26,7 @@ class CrewAIInstrumentor(BaseInstrumentor):
  tracer = kwargs.get("tracer")
  metrics = kwargs.get("metrics_dict")
  pricing_info = kwargs.get("pricing_info", {})
- trace_content = kwargs.get("trace_content", False)
+ capture_message_content = kwargs.get("capture_message_content", False)
  disable_metrics = kwargs.get("disable_metrics")
  version = importlib.metadata.version("crewai")

@@ -34,14 +34,14 @@ class CrewAIInstrumentor(BaseInstrumentor):
  "crewai.agent",
  "Agent.execute_task",
  crew_wrap("crewai.agent_execute_task", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
  "crewai.task",
  "Task._execute_core",
  crew_wrap("crewai.task_execute_core", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

openlit/instrumentation/crewai/crewai.py
@@ -28,7 +28,7 @@ def _parse_tools(tools):
  return json.dumps(result)

  def crew_wrap(gen_ai_endpoint, version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics):
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Generates a telemetry wrapper for chat completions to collect metrics.

@@ -39,7 +39,7 @@ def crew_wrap(gen_ai_endpoint, version, environment, application_name,
  application_name: Name of the application using the CrewAI Agent.
  tracer: OpenTelemetry tracer for creating spans.
  pricing_info: Information used for calculating the cost of CrewAI usage.
- trace_content: Flag indicating whether to trace the actual content.
+ capture_message_content: Flag indicating whether to trace the actual content.

  Returns:
  A function that wraps the chat completions method to add telemetry.
openlit/instrumentation/dynamiq/__init__.py
@@ -26,7 +26,7 @@ class DynamiqInstrumentor(BaseInstrumentor):
  tracer = kwargs.get("tracer")
  metrics = kwargs.get("metrics_dict")
  pricing_info = kwargs.get("pricing_info", {})
- trace_content = kwargs.get("trace_content", False)
+ capture_message_content = kwargs.get("capture_message_content", False)
  disable_metrics = kwargs.get("disable_metrics")
  version = importlib.metadata.version("dynamiq")

@@ -34,28 +34,28 @@ class DynamiqInstrumentor(BaseInstrumentor):
  "dynamiq.nodes.agents.base",
  "Agent.run",
  dynamiq_wrap("dynamiq.agent_run", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
  "dynamiq",
  "Workflow.run",
  dynamiq_wrap("dynamiq.workflow_run", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
  "dynamiq.memory",
  "Memory.add",
  dynamiq_wrap("dynamiq.memory_add", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
  "dynamiq.memory",
  "Memory.search",
  dynamiq_wrap("dynamiq.memory_search", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

openlit/instrumentation/dynamiq/dynamiq.py
@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def dynamiq_wrap(gen_ai_endpoint, version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics):
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Generates a telemetry wrapper for chat completions to collect metrics.

@@ -26,7 +26,7 @@ def dynamiq_wrap(gen_ai_endpoint, version, environment, application_name,
  application_name: Name of the application using the dynamiq Agent.
  tracer: OpenTelemetry tracer for creating spans.
  pricing_info: Information used for calculating the cost of dynamiq usage.
- trace_content: Flag indicating whether to trace the actual content.
+ capture_message_content: Flag indicating whether to trace the actual content.

  Returns:
  A function that wraps the chat completions method to add telemetry.
openlit/instrumentation/elevenlabs/__init__.py
@@ -29,7 +29,7 @@ class ElevenLabsInstrumentor(BaseInstrumentor):
  tracer = kwargs.get("tracer")
  metrics = kwargs.get("metrics_dict")
  pricing_info = kwargs.get("pricing_info", {})
- trace_content = kwargs.get("trace_content", False)
+ capture_message_content = kwargs.get("capture_message_content", False)
  disable_metrics = kwargs.get("disable_metrics")
  version = importlib.metadata.version("elevenlabs")

@@ -38,7 +38,7 @@ class ElevenLabsInstrumentor(BaseInstrumentor):
  "elevenlabs.client",
  "ElevenLabs.generate",
  generate("elevenlabs.generate", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  # sync text_to_speech.convert
@@ -46,7 +46,7 @@ class ElevenLabsInstrumentor(BaseInstrumentor):
  "elevenlabs.text_to_speech.client",
  "TextToSpeechClient.convert",
  generate("elevenlabs.text_to_speech", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  # async generate
@@ -54,7 +54,7 @@ class ElevenLabsInstrumentor(BaseInstrumentor):
  "elevenlabs.client",
  "AsyncElevenLabs.generate",
  async_generate("elevenlabs.generate", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  # sync text_to_speech.convert
@@ -62,7 +62,7 @@ class ElevenLabsInstrumentor(BaseInstrumentor):
  "elevenlabs.text_to_speech.client",
  "AsyncTextToSpeechClient.convert",
  generate("elevenlabs.text_to_speech", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  def _uninstrument(self, **kwargs):
openlit/instrumentation/elevenlabs/async_elevenlabs.py
@@ -18,7 +18,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def async_generate(gen_ai_endpoint, version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics):
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Generates a telemetry wrapper for creating speech audio to collect metrics.

@@ -28,7 +28,7 @@ def async_generate(gen_ai_endpoint, version, environment, application_name,
  application_name: Name of the application using the ElevenLabs API.
  tracer: OpenTelemetry tracer for creating spans.
  pricing_info: Information used for calculating the cost of generating speech audio.
- trace_content: Flag indicating whether to trace the input text and generated audio.
+ capture_message_content: Flag indicating whether to trace the input text and generated audio.

  Returns:
  A function that wraps the speech audio creation method to add telemetry.
@@ -105,7 +105,7 @@ def async_generate(gen_ai_endpoint, version, environment, application_name,
  cost)
  span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
  version)
- if trace_content:
+ if capture_message_content:
  span.add_event(
  name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
  attributes={
openlit/instrumentation/elevenlabs/elevenlabs.py
@@ -18,7 +18,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def generate(gen_ai_endpoint, version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics):
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Generates a telemetry wrapper for creating speech audio to collect metrics.

@@ -28,7 +28,7 @@ def generate(gen_ai_endpoint, version, environment, application_name,
  application_name: Name of the application using the ElevenLabs API.
  tracer: OpenTelemetry tracer for creating spans.
  pricing_info: Information used for calculating the cost of generating speech audio.
- trace_content: Flag indicating whether to trace the input text and generated audio.
+ capture_message_content: Flag indicating whether to trace the input text and generated audio.

  Returns:
  A function that wraps the speech audio creation method to add telemetry.
@@ -106,7 +106,7 @@ def generate(gen_ai_endpoint, version, environment, application_name,
  cost)
  span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
  version)
- if trace_content:
+ if capture_message_content:
  span.add_event(
  name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
  attributes={
openlit/instrumentation/embedchain/__init__.py
@@ -35,7 +35,7 @@ class EmbedChainInstrumentor(BaseInstrumentor):
  environment = kwargs.get("environment")
  tracer = kwargs.get("tracer")
  pricing_info = kwargs.get("pricing_info")
- trace_content = kwargs.get("trace_content")
+ capture_message_content = kwargs.get("capture_message_content")
  version = importlib.metadata.version("embedchain")

  for wrapped_method in WRAPPED_METHODS:
@@ -47,7 +47,7 @@ class EmbedChainInstrumentor(BaseInstrumentor):
  wrap_package,
  wrap_object,
  wrapper(gen_ai_endpoint, version, environment, application_name,
- tracer, pricing_info, trace_content),
+ tracer, pricing_info, capture_message_content),
  )

  @staticmethod
openlit/instrumentation/embedchain/embedchain.py
@@ -13,7 +13,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def evaluate(gen_ai_endpoint, version, environment, application_name,
- tracer, pricing_info, trace_content):
+ tracer, pricing_info, capture_message_content):
  """
  Creates a wrapper around a function call to trace and log its execution metrics.

@@ -27,7 +27,7 @@ def evaluate(gen_ai_endpoint, version, environment, application_name,
  - application_name (str): Name of the EmbedChain application.
  - tracer (opentelemetry.trace.Tracer): The tracer object used for OpenTelemetry tracing.
  - pricing_info (dict): Information about the pricing for internal metrics (currently not used).
- - trace_content (bool): Flag indicating whether to trace the content of the response.
+ - capture_message_content (bool): Flag indicating whether to trace the content of the response.

  Returns:
  - function: A higher-order function that takes a function 'wrapped' and returns
@@ -91,7 +91,7 @@ def evaluate(gen_ai_endpoint, version, environment, application_name,
  return wrapper

  def get_data_sources(gen_ai_endpoint, version, environment, application_name,
- tracer, pricing_info, trace_content):
+ tracer, pricing_info, capture_message_content):
  """
  Creates a wrapper around a function call to trace and log its execution metrics.

@@ -105,7 +105,7 @@ def get_data_sources(gen_ai_endpoint, version, environment, application_name,
  - application_name (str): Name of the EmbedChain application.
  - tracer (opentelemetry.trace.Tracer): The tracer object used for OpenTelemetry tracing.
  - pricing_info (dict): Information about the pricing for internal metrics (currently not used).
- - trace_content (bool): Flag indicating whether to trace the content of the response.
+ - capture_message_content (bool): Flag indicating whether to trace the content of the response.

  Returns:
  - function: A higher-order function that takes a function 'wrapped' and returns
openlit/instrumentation/firecrawl/__init__.py
@@ -26,7 +26,7 @@ class FireCrawlInstrumentor(BaseInstrumentor):
  tracer = kwargs.get("tracer")
  metrics = kwargs.get("metrics_dict")
  pricing_info = kwargs.get("pricing_info", {})
- trace_content = kwargs.get("trace_content", False)
+ capture_message_content = kwargs.get("capture_message_content", False)
  disable_metrics = kwargs.get("disable_metrics")
  version = importlib.metadata.version("firecrawl-py")

@@ -34,14 +34,14 @@ class FireCrawlInstrumentor(BaseInstrumentor):
  "firecrawl.firecrawl",
  "FirecrawlApp.scrape_url",
  wrap_crawl("firecrawl.scrape_url", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
  "firecrawl.firecrawl",
  "FirecrawlApp.crawl_url",
  wrap_crawl("firecrawl.crawl_url", version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  def _uninstrument(self, **kwargs):
openlit/instrumentation/firecrawl/firecrawl.py
@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def wrap_crawl(gen_ai_endpoint, version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics):
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Generates a telemetry wrapper for chat completions to collect metrics.

@@ -26,7 +26,7 @@ def wrap_crawl(gen_ai_endpoint, version, environment, application_name,
  application_name: Name of the application using the FireCrawl Agent.
  tracer: OpenTelemetry tracer for creating spans.
  pricing_info: Information used for calculating the cost of FireCrawl usage.
- trace_content: Flag indicating whether to trace the actual content.
+ capture_message_content: Flag indicating whether to trace the actual content.

  Returns:
  A function that wraps the chat completions method to add telemetry.
openlit/instrumentation/google_ai_studio/__init__.py
@@ -30,7 +30,7 @@ class GoogleAIStudioInstrumentor(BaseInstrumentor):
  tracer = kwargs.get("tracer")
  metrics = kwargs.get("metrics_dict")
  pricing_info = kwargs.get("pricing_info", {})
- trace_content = kwargs.get("trace_content", False)
+ capture_message_content = kwargs.get("capture_message_content", False)
  disable_metrics = kwargs.get("disable_metrics")
  version = importlib.metadata.version("google-genai")

@@ -39,7 +39,7 @@ class GoogleAIStudioInstrumentor(BaseInstrumentor):
  "google.genai.models",
  "Models.generate_content",
  generate(version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics),
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  # async generate
@@ -47,7 +47,7 @@ class GoogleAIStudioInstrumentor(BaseInstrumentor):
  "google.genai.models",
  "AsyncModels.generate_content",
  async_generate(version, environment,
- application_name, tracer, pricing_info, trace_content, metrics,
+ application_name, tracer, pricing_info, capture_message_content, metrics,
  disable_metrics),
  )

openlit/instrumentation/google_ai_studio/async_google_ai_studio.py
@@ -19,7 +19,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def async_generate(version, environment, application_name,
- tracer, pricing_info, trace_content, metrics, disable_metrics):
+ tracer, pricing_info, capture_message_content, metrics, disable_metrics):
  """
  Generates a telemetry wrapper for chat to collect metrics.

@@ -30,7 +30,7 @@ def async_generate(version, environment, application_name,
  application_name: Name of the application using the Google AI Studio API.
  tracer: OpenTelemetry tracer for creating spans.
  pricing_info: Information used for calculating the cost of Google AI Studio usage.
- trace_content: Flag indicating whether to trace the actual content.
+ capture_message_content: Flag indicating whether to trace the actual content.

  Returns:
  A function that wraps the chat method to add telemetry.
@@ -165,7 +165,7 @@ def async_generate(version, environment, application_name,
  end_time - start_time)
  span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
  version)
- if trace_content:
+ if capture_message_content:
  span.add_event(
  name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
  attributes={