openlit 1.33.9__py3-none-any.whl → 1.33.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. openlit/__helpers.py +78 -0
  2. openlit/__init__.py +41 -13
  3. openlit/instrumentation/ag2/__init__.py +9 -10
  4. openlit/instrumentation/ag2/ag2.py +134 -69
  5. openlit/instrumentation/ai21/__init__.py +6 -5
  6. openlit/instrumentation/ai21/ai21.py +71 -534
  7. openlit/instrumentation/ai21/async_ai21.py +71 -534
  8. openlit/instrumentation/ai21/utils.py +407 -0
  9. openlit/instrumentation/anthropic/__init__.py +3 -3
  10. openlit/instrumentation/anthropic/anthropic.py +5 -5
  11. openlit/instrumentation/anthropic/async_anthropic.py +5 -5
  12. openlit/instrumentation/assemblyai/__init__.py +2 -2
  13. openlit/instrumentation/assemblyai/assemblyai.py +3 -3
  14. openlit/instrumentation/astra/__init__.py +25 -25
  15. openlit/instrumentation/astra/astra.py +7 -7
  16. openlit/instrumentation/astra/async_astra.py +7 -7
  17. openlit/instrumentation/azure_ai_inference/__init__.py +5 -5
  18. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +11 -11
  19. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +11 -11
  20. openlit/instrumentation/bedrock/__init__.py +2 -2
  21. openlit/instrumentation/bedrock/bedrock.py +3 -3
  22. openlit/instrumentation/chroma/__init__.py +9 -9
  23. openlit/instrumentation/chroma/chroma.py +7 -7
  24. openlit/instrumentation/cohere/__init__.py +7 -7
  25. openlit/instrumentation/cohere/async_cohere.py +10 -10
  26. openlit/instrumentation/cohere/cohere.py +11 -11
  27. openlit/instrumentation/controlflow/__init__.py +4 -4
  28. openlit/instrumentation/controlflow/controlflow.py +5 -5
  29. openlit/instrumentation/crawl4ai/__init__.py +3 -3
  30. openlit/instrumentation/crawl4ai/async_crawl4ai.py +5 -5
  31. openlit/instrumentation/crawl4ai/crawl4ai.py +5 -5
  32. openlit/instrumentation/crewai/__init__.py +3 -3
  33. openlit/instrumentation/crewai/crewai.py +6 -4
  34. openlit/instrumentation/dynamiq/__init__.py +5 -5
  35. openlit/instrumentation/dynamiq/dynamiq.py +5 -5
  36. openlit/instrumentation/elevenlabs/__init__.py +5 -5
  37. openlit/instrumentation/elevenlabs/async_elevenlabs.py +4 -5
  38. openlit/instrumentation/elevenlabs/elevenlabs.py +4 -5
  39. openlit/instrumentation/embedchain/__init__.py +2 -2
  40. openlit/instrumentation/embedchain/embedchain.py +9 -9
  41. openlit/instrumentation/firecrawl/__init__.py +3 -3
  42. openlit/instrumentation/firecrawl/firecrawl.py +5 -5
  43. openlit/instrumentation/google_ai_studio/__init__.py +3 -3
  44. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +3 -3
  45. openlit/instrumentation/google_ai_studio/google_ai_studio.py +3 -3
  46. openlit/instrumentation/gpt4all/__init__.py +5 -5
  47. openlit/instrumentation/gpt4all/gpt4all.py +350 -225
  48. openlit/instrumentation/gpu/__init__.py +5 -5
  49. openlit/instrumentation/groq/__init__.py +5 -5
  50. openlit/instrumentation/groq/async_groq.py +359 -243
  51. openlit/instrumentation/groq/groq.py +359 -243
  52. openlit/instrumentation/haystack/__init__.py +2 -2
  53. openlit/instrumentation/haystack/haystack.py +5 -5
  54. openlit/instrumentation/julep/__init__.py +7 -7
  55. openlit/instrumentation/julep/async_julep.py +6 -6
  56. openlit/instrumentation/julep/julep.py +6 -6
  57. openlit/instrumentation/langchain/__init__.py +15 -9
  58. openlit/instrumentation/langchain/async_langchain.py +388 -0
  59. openlit/instrumentation/langchain/langchain.py +110 -497
  60. openlit/instrumentation/letta/__init__.py +7 -7
  61. openlit/instrumentation/letta/letta.py +10 -8
  62. openlit/instrumentation/litellm/__init__.py +9 -10
  63. openlit/instrumentation/litellm/async_litellm.py +321 -250
  64. openlit/instrumentation/litellm/litellm.py +319 -248
  65. openlit/instrumentation/llamaindex/__init__.py +2 -2
  66. openlit/instrumentation/llamaindex/llamaindex.py +5 -5
  67. openlit/instrumentation/mem0/__init__.py +2 -2
  68. openlit/instrumentation/mem0/mem0.py +5 -5
  69. openlit/instrumentation/milvus/__init__.py +2 -2
  70. openlit/instrumentation/milvus/milvus.py +7 -7
  71. openlit/instrumentation/mistral/__init__.py +13 -13
  72. openlit/instrumentation/mistral/async_mistral.py +426 -253
  73. openlit/instrumentation/mistral/mistral.py +424 -250
  74. openlit/instrumentation/multion/__init__.py +7 -7
  75. openlit/instrumentation/multion/async_multion.py +9 -7
  76. openlit/instrumentation/multion/multion.py +9 -7
  77. openlit/instrumentation/ollama/__init__.py +19 -39
  78. openlit/instrumentation/ollama/async_ollama.py +137 -563
  79. openlit/instrumentation/ollama/ollama.py +136 -563
  80. openlit/instrumentation/ollama/utils.py +333 -0
  81. openlit/instrumentation/openai/__init__.py +11 -11
  82. openlit/instrumentation/openai/async_openai.py +25 -27
  83. openlit/instrumentation/openai/openai.py +25 -27
  84. openlit/instrumentation/phidata/__init__.py +2 -2
  85. openlit/instrumentation/phidata/phidata.py +6 -4
  86. openlit/instrumentation/pinecone/__init__.py +6 -6
  87. openlit/instrumentation/pinecone/pinecone.py +7 -7
  88. openlit/instrumentation/premai/__init__.py +5 -5
  89. openlit/instrumentation/premai/premai.py +268 -219
  90. openlit/instrumentation/qdrant/__init__.py +2 -2
  91. openlit/instrumentation/qdrant/async_qdrant.py +7 -7
  92. openlit/instrumentation/qdrant/qdrant.py +7 -7
  93. openlit/instrumentation/reka/__init__.py +5 -5
  94. openlit/instrumentation/reka/async_reka.py +93 -55
  95. openlit/instrumentation/reka/reka.py +93 -55
  96. openlit/instrumentation/together/__init__.py +9 -9
  97. openlit/instrumentation/together/async_together.py +284 -242
  98. openlit/instrumentation/together/together.py +284 -242
  99. openlit/instrumentation/transformers/__init__.py +3 -3
  100. openlit/instrumentation/transformers/transformers.py +79 -48
  101. openlit/instrumentation/vertexai/__init__.py +19 -69
  102. openlit/instrumentation/vertexai/async_vertexai.py +333 -990
  103. openlit/instrumentation/vertexai/vertexai.py +333 -990
  104. openlit/instrumentation/vllm/__init__.py +3 -3
  105. openlit/instrumentation/vllm/vllm.py +65 -35
  106. openlit/otel/events.py +85 -0
  107. openlit/otel/tracing.py +3 -13
  108. openlit/semcov/__init__.py +16 -4
  109. {openlit-1.33.9.dist-info → openlit-1.33.11.dist-info}/METADATA +2 -2
  110. openlit-1.33.11.dist-info/RECORD +125 -0
  111. openlit-1.33.9.dist-info/RECORD +0 -121
  112. {openlit-1.33.9.dist-info → openlit-1.33.11.dist-info}/LICENSE +0 -0
  113. {openlit-1.33.9.dist-info → openlit-1.33.11.dist-info}/WHEEL +0 -0
openlit/instrumentation/chroma/chroma.py
@@ -5,7 +5,7 @@ Module for monitoring ChromaDB.

  import logging
  from opentelemetry.trace import SpanKind, Status, StatusCode
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
  from openlit.__helpers import handle_exception
  from openlit.semcov import SemanticConvetion

@@ -25,7 +25,7 @@ def object_count(obj):
      return cnt

  def general_wrap(gen_ai_endpoint, version, environment, application_name,
-                  tracer, pricing_info, trace_content, metrics, disable_metrics):
+                  tracer, pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Creates a wrapper around a function call to trace and log its execution metrics.

@@ -39,7 +39,7 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
      - application_name (str): Name of the Langchain application.
      - tracer (opentelemetry.trace.Tracer): The tracer object used for OpenTelemetry tracing.
      - pricing_info (dict): Information about the pricing for internal metrics (currently not used).
-     - trace_content (bool): Flag indicating whether to trace the content of the response.
+     - capture_message_content (bool): Flag indicating whether to trace the content of the response.

      Returns:
      - function: A higher-order function that takes a function 'wrapped' and returns
@@ -73,9 +73,9 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
      span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
      span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                         gen_ai_endpoint)
-     span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
+     span.set_attribute(DEPLOYMENT_ENVIRONMENT,
                         environment)
-     span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+     span.set_attribute(SERVICE_NAME,
                         application_name)
      span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
                         SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB)
@@ -173,11 +173,11 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
      attributes = {
          TELEMETRY_SDK_NAME:
              "openlit",
-         SemanticConvetion.GEN_AI_APPLICATION_NAME:
+         SERVICE_NAME:
              application_name,
          SemanticConvetion.DB_SYSTEM:
              SemanticConvetion.DB_SYSTEM_CHROMA,
-         SemanticConvetion.GEN_AI_ENVIRONMENT:
+         DEPLOYMENT_ENVIRONMENT:
              environment,
          SemanticConvetion.GEN_AI_OPERATION:
              SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB,
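Note: the recurring pattern in these hunks is a switch from OpenLIT's custom SemanticConvetion keys (GEN_AI_APPLICATION_NAME, GEN_AI_ENVIRONMENT) to the standard OpenTelemetry resource-attribute constants. Those constants are plain attribute-key strings, so only the key names on the spans change. A minimal sketch of the resulting keys; the span name "chroma.add" and the attribute values here are illustrative, not taken from the package:

from opentelemetry import trace
from opentelemetry.sdk.resources import (
    SERVICE_NAME,            # "service.name"           (replaces GEN_AI_APPLICATION_NAME)
    DEPLOYMENT_ENVIRONMENT,  # "deployment.environment" (replaces GEN_AI_ENVIRONMENT)
    TELEMETRY_SDK_NAME,      # "telemetry.sdk.name"
)

tracer = trace.get_tracer(__name__)

# Hypothetical span showing which attribute keys the new code emits.
with tracer.start_as_current_span("chroma.add") as span:
    span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
    span.set_attribute(SERVICE_NAME, "my-app")             # was SemanticConvetion.GEN_AI_APPLICATION_NAME
    span.set_attribute(DEPLOYMENT_ENVIRONMENT, "staging")  # was SemanticConvetion.GEN_AI_ENVIRONMENT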
openlit/instrumentation/cohere/__init__.py
@@ -23,7 +23,7 @@ class CohereInstrumentor(BaseInstrumentor):
      tracer = kwargs.get("tracer")
      metrics = kwargs.get("metrics_dict")
      pricing_info = kwargs.get("pricing_info")
-     trace_content = kwargs.get("trace_content")
+     capture_message_content = kwargs.get("capture_message_content")
      disable_metrics = kwargs.get("disable_metrics")
      version = importlib.metadata.version("cohere")

@@ -32,19 +32,19 @@ class CohereInstrumentor(BaseInstrumentor):
      "cohere.client_v2",
      "ClientV2.chat",
      chat(version, environment, application_name,
-          tracer, pricing_info, trace_content, metrics, disable_metrics),
+          tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )
  wrap_function_wrapper(
      "cohere.client_v2",
      "ClientV2.chat_stream",
      chat_stream(version, environment, application_name,
-          tracer, pricing_info, trace_content, metrics, disable_metrics),
+          tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )
  wrap_function_wrapper(
      "cohere.client_v2",
      "ClientV2.embed",
      embed(version, environment, application_name,
-          tracer, pricing_info, trace_content, metrics, disable_metrics),
+          tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  # Async Client
@@ -52,21 +52,21 @@ class CohereInstrumentor(BaseInstrumentor):
      "cohere.client_v2",
      "AsyncClientV2.chat",
      async_chat(version, environment, application_name,
-          tracer, pricing_info, trace_content, metrics, disable_metrics),
+          tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
      "cohere.client_v2",
      "AsyncClientV2.chat_stream",
      async_chat_stream(version, environment, application_name,
-          tracer, pricing_info, trace_content, metrics, disable_metrics),
+          tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
      "cohere.client_v2",
      "AsyncClientV2.embed",
      async_embed(version, environment, application_name,
-          tracer, pricing_info, trace_content, metrics, disable_metrics),
+          tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  @staticmethod
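For context on how these __init__ changes are exercised: each wrapper factory is handed the full configuration, including the renamed capture_message_content flag, and the result is registered with wrapt's wrap_function_wrapper. The sketch below follows that shape under those assumptions; the factory body, event and attribute names are illustrative, not OpenLIT's actual implementation, and the registration requires the cohere package to be importable.

from opentelemetry import trace
from wrapt import wrap_function_wrapper

def chat(version, environment, application_name, tracer,
         pricing_info, capture_message_content, metrics, disable_metrics):
    # Factory: close over the configuration and return a wrapt-style wrapper.
    def wrapper(wrapped, instance, args, kwargs):
        with tracer.start_as_current_span("cohere.chat") as span:
            response = wrapped(*args, **kwargs)
            if capture_message_content:
                # Prompt/completion payloads are only recorded when the flag is on.
                span.add_event("gen_ai.content.prompt",
                               attributes={"gen_ai.prompt": str(kwargs.get("messages"))})
            return response
    return wrapper

# Registration mirroring the diff above (values are placeholders).
wrap_function_wrapper(
    "cohere.client_v2",
    "ClientV2.chat",
    chat("5.x", "production", "demo-app", trace.get_tracer(__name__),
         {}, capture_message_content=True, metrics=None, disable_metrics=True),
)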
openlit/instrumentation/cohere/async_cohere.py
@@ -22,7 +22,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def async_embed(version, environment, application_name, tracer,
-                 pricing_info, trace_content, metrics, disable_metrics):
+                 pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for embeddings to collect metrics.

@@ -32,7 +32,7 @@ def async_embed(version, environment, application_name, tracer,
      application_name: Name of the application using the Cohere API.
      tracer: OpenTelemetry tracer for creating spans.
      pricing_info: Information used for calculating the cost of Cohere usage.
-     trace_content: Flag indicating whether to trace the actual content.
+     capture_message_content: Flag indicating whether to trace the actual content.

      Returns:
      A function that wraps the embeddings method to add telemetry.
@@ -105,7 +105,7 @@ def async_embed(version, environment, application_name, tracer,
      span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                         version)

-     if trace_content:
+     if capture_message_content:
          span.add_event(
              name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
              attributes={
@@ -149,7 +149,7 @@ def async_embed(version, environment, application_name, tracer,
      return wrapper

  def async_chat(version, environment, application_name, tracer,
-                pricing_info, trace_content, metrics, disable_metrics):
+                pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for chat to collect metrics.

@@ -159,7 +159,7 @@ def async_chat(version, environment, application_name, tracer,
      application_name: Name of the application using the Cohere API.
      tracer: OpenTelemetry tracer for creating spans.
      pricing_info: Information used for calculating the cost of Cohere usage.
-     trace_content: Flag indicating whether to trace the actual content.
+     capture_message_content: Flag indicating whether to trace the actual content.

      Returns:
      A function that wraps the chat method to add telemetry.
@@ -281,7 +281,7 @@ def async_chat(version, environment, application_name, tracer,
      span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                         version)

-     if trace_content:
+     if capture_message_content:
          span.add_event(
              name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
              attributes={
@@ -336,7 +336,7 @@ def async_chat(version, environment, application_name, tracer,
      return wrapper

  def async_chat_stream(version, environment, application_name,
-                       tracer, pricing_info, trace_content, metrics, disable_metrics):
+                       tracer, pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for chat_stream to collect metrics.

@@ -346,7 +346,7 @@ def async_chat_stream(version, environment, application_name,
      application_name: Name of the application using the Cohere API.
      tracer: OpenTelemetry tracer for creating spans.
      pricing_info: Information used for calculating the cost of Cohere usage.
-     trace_content: Flag indicating whether to trace the actual content.
+     capture_message_content: Flag indicating whether to trace the actual content.

      Returns:
      A function that wraps the chat method to add telemetry.
@@ -372,7 +372,7 @@ def async_chat_stream(version, environment, application_name,
      class TracedAsyncStream:
          """
          Wrapper for streaming responses to collect metrics and trace data.
-         Wraps the 'cohere.AsyncStream' response to collect message IDs and aggregated response.
+         Wraps the response to collect message IDs and aggregated response.

          This class implements the '__aiter__' and '__anext__' methods that
          handle asynchronous streaming responses.
@@ -547,7 +547,7 @@ def async_chat_stream(version, environment, application_name,
                         self._ttft)
      self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                               version)
-     if trace_content:
+     if capture_message_content:
          self._span.add_event(
              name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
              attributes={
openlit/instrumentation/cohere/cohere.py
@@ -22,7 +22,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def embed(version, environment, application_name, tracer,
-           pricing_info, trace_content, metrics, disable_metrics):
+           pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for embeddings to collect metrics.

@@ -32,7 +32,7 @@ def embed(version, environment, application_name, tracer,
      application_name: Name of the application using the Cohere API.
      tracer: OpenTelemetry tracer for creating spans.
      pricing_info: Information used for calculating the cost of Cohere usage.
-     trace_content: Flag indicating whether to trace the actual content.
+     capture_message_content: Flag indicating whether to trace the actual content.

      Returns:
      A function that wraps the embeddings method to add telemetry.
@@ -105,7 +105,7 @@ def embed(version, environment, application_name, tracer,
      span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                         version)

-     if trace_content:
+     if capture_message_content:
          span.add_event(
              name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
              attributes={
@@ -149,7 +149,7 @@ def embed(version, environment, application_name, tracer,
      return wrapper

  def chat(version, environment, application_name, tracer,
-          pricing_info, trace_content, metrics, disable_metrics):
+          pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for chat to collect metrics.

@@ -159,7 +159,7 @@ def chat(version, environment, application_name, tracer,
      application_name: Name of the application using the Cohere API.
      tracer: OpenTelemetry tracer for creating spans.
      pricing_info: Information used for calculating the cost of Cohere usage.
-     trace_content: Flag indicating whether to trace the actual content.
+     capture_message_content: Flag indicating whether to trace the actual content.

      Returns:
      A function that wraps the chat method to add telemetry.
@@ -222,7 +222,7 @@ def chat(version, environment, application_name, tracer,

      llm_response = response_dict.get('message').get('content')[0].get('text')

-     # Set base span attribues (OTel Semconv)
+     # Set base span attribues (OTel Semconv)
      span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
      span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
                         SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
@@ -281,7 +281,7 @@ def chat(version, environment, application_name, tracer,
      span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                         version)

-     if trace_content:
+     if capture_message_content:
          span.add_event(
              name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
              attributes={
@@ -336,7 +336,7 @@ def chat(version, environment, application_name, tracer,
      return wrapper

  def chat_stream(version, environment, application_name,
-                 tracer, pricing_info, trace_content, metrics, disable_metrics):
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for chat_stream to collect metrics.

@@ -346,7 +346,7 @@ def chat_stream(version, environment, application_name,
      application_name: Name of the application using the Cohere API.
      tracer: OpenTelemetry tracer for creating spans.
      pricing_info: Information used for calculating the cost of Cohere usage.
-     trace_content: Flag indicating whether to trace the actual content.
+     capture_message_content: Flag indicating whether to trace the actual content.

      Returns:
      A function that wraps the chat method to add telemetry.
@@ -372,7 +372,7 @@ def chat_stream(version, environment, application_name,
      class TracedSyncStream:
          """
          Wrapper for streaming responses to collect metrics and trace data.
-         Wraps the 'cohere.AsyncStream' response to collect message IDs and aggregated response.
+         Wraps the response to collect message IDs and aggregated response.

          This class implements the '__aiter__' and '__anext__' methods that
          handle asynchronous streaming responses.
@@ -547,7 +547,7 @@ def chat_stream(version, environment, application_name,
                         self._ttft)
      self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                               version)
-     if trace_content:
+     if capture_message_content:
          self._span.add_event(
              name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
              attributes={
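The TracedSyncStream/TracedAsyncStream hunks above only touch the docstring and the renamed flag, but the surrounding pattern is worth spelling out: the provider's stream object is wrapped so chunks can be aggregated and the span finalized only once the stream is exhausted. A rough sketch of that pattern, assuming an async iterator of chunks with a text field; the class shape, event name, and chunk attributes are assumptions, not OpenLIT's exact code.

class TracedAsyncStream:
    # Wraps an async stream, aggregating streamed text until exhaustion.
    def __init__(self, wrapped, span, capture_message_content):
        self._wrapped = wrapped
        self._span = span
        self._capture_message_content = capture_message_content
        self._llmresponse = ""

    def __aiter__(self):
        return self

    async def __anext__(self):
        try:
            chunk = await self._wrapped.__anext__()
        except StopAsyncIteration:
            # Stream finished: record the aggregated output, then close the span.
            if self._capture_message_content:
                self._span.add_event(
                    name="gen_ai.content.completion",
                    attributes={"gen_ai.completion": self._llmresponse},
                )
            self._span.end()
            raise
        self._llmresponse += getattr(chunk, "text", "") or ""
        return chunk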
openlit/instrumentation/controlflow/__init__.py
@@ -26,7 +26,7 @@ class ControlFlowInstrumentor(BaseInstrumentor):
      tracer = kwargs.get("tracer")
      metrics = kwargs.get("metrics_dict")
      pricing_info = kwargs.get("pricing_info", {})
-     trace_content = kwargs.get("trace_content", False)
+     capture_message_content = kwargs.get("capture_message_content", False)
      disable_metrics = kwargs.get("disable_metrics")
      version = importlib.metadata.version("controlflow")

@@ -34,21 +34,21 @@ class ControlFlowInstrumentor(BaseInstrumentor):
      "controlflow.agents.agent",
      "Agent.__init__",
      wrap_controlflow("controlflow.create_agent", version, environment, application_name,
-                      tracer, pricing_info, trace_content, metrics, disable_metrics),
+                      tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
      "controlflow.tasks.task",
      "Task.__init__",
      wrap_controlflow("controlflow.create_task", version, environment, application_name,
-                      tracer, pricing_info, trace_content, metrics, disable_metrics),
+                      tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
      "controlflow",
      "run",
      wrap_controlflow("controlflow.run", version, environment, application_name,
-                      tracer, pricing_info, trace_content, metrics, disable_metrics),
+                      tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  def _uninstrument(self, **kwargs):
openlit/instrumentation/controlflow/controlflow.py
@@ -5,7 +5,7 @@ Module for monitoring controlflow.

  import logging
  from opentelemetry.trace import SpanKind, Status, StatusCode
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
  from openlit.__helpers import handle_exception
  from openlit.semcov import SemanticConvetion

@@ -13,7 +13,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def wrap_controlflow(gen_ai_endpoint, version, environment, application_name,
-                      tracer, pricing_info, trace_content, metrics, disable_metrics):
+                      tracer, pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Creates a wrapper around a function call to trace and log its execution metrics.

@@ -27,7 +27,7 @@ def wrap_controlflow(gen_ai_endpoint, version, environment, application_name,
      - application_name (str): Name of the Langchain application.
      - tracer (opentelemetry.trace.Tracer): The tracer object used for OpenTelemetry tracing.
      - pricing_info (dict): Information about the pricing for internal metrics (currently not used).
-     - trace_content (bool): Flag indicating whether to trace the content of the response.
+     - capture_message_content (bool): Flag indicating whether to trace the content of the response.

      Returns:
      - function: A higher-order function that takes a function 'wrapped' and returns
@@ -64,9 +64,9 @@ def wrap_controlflow(gen_ai_endpoint, version, environment, application_name,
                         gen_ai_endpoint)
      span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
                         SemanticConvetion.GEN_AI_SYSTEM_CONTROLFLOW)
-     span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
+     span.set_attribute(DEPLOYMENT_ENVIRONMENT,
                         environment)
-     span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+     span.set_attribute(SERVICE_NAME,
                         application_name)
      span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
                         SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
openlit/instrumentation/crawl4ai/__init__.py
@@ -29,7 +29,7 @@ class Crawl4AIInstrumentor(BaseInstrumentor):
      tracer = kwargs.get("tracer")
      metrics = kwargs.get("metrics_dict")
      pricing_info = kwargs.get("pricing_info", {})
-     trace_content = kwargs.get("trace_content", False)
+     capture_message_content = kwargs.get("capture_message_content", False)
      disable_metrics = kwargs.get("disable_metrics")
      version = importlib.metadata.version("crawl4ai")

@@ -37,14 +37,14 @@ class Crawl4AIInstrumentor(BaseInstrumentor):
      "crawl4ai.web_crawler",
      "WebCrawler.run",
      wrap_crawl("crawl4ai.web_crawl", version, environment, application_name,
-                tracer, pricing_info, trace_content, metrics, disable_metrics),
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
      "crawl4ai.async_webcrawler",
      "AsyncWebCrawler.arun",
      async_wrap_crawl("crawl4ai.web_crawl", version, environment, application_name,
-                tracer, pricing_info, trace_content, metrics, disable_metrics),
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  def _uninstrument(self, **kwargs):
openlit/instrumentation/crawl4ai/async_crawl4ai.py
@@ -5,7 +5,7 @@ Module for monitoring Crawl4AI calls.

  import logging
  from opentelemetry.trace import SpanKind, Status, StatusCode
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
  from openlit.__helpers import (
      handle_exception,
  )
@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def async_wrap_crawl(gen_ai_endpoint, version, environment, application_name,
-                      tracer, pricing_info, trace_content, metrics, disable_metrics):
+                      tracer, pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for chat completions to collect metrics.

@@ -26,7 +26,7 @@ def async_wrap_crawl(gen_ai_endpoint, version, environment, application_name,
      application_name: Name of the application using the Crawl4AI Agent.
      tracer: OpenTelemetry tracer for creating spans.
      pricing_info: Information used for calculating the cost of Crawl4AI usage.
-     trace_content: Flag indicating whether to trace the actual content.
+     capture_message_content: Flag indicating whether to trace the actual content.

      Returns:
      A function that wraps the chat completions method to add telemetry.
@@ -62,9 +62,9 @@ def async_wrap_crawl(gen_ai_endpoint, version, environment, application_name,
                         SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
      span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                         gen_ai_endpoint)
-     span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+     span.set_attribute(SERVICE_NAME,
                         application_name)
-     span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
+     span.set_attribute(DEPLOYMENT_ENVIRONMENT,
                         environment)
      span.set_attribute(SemanticConvetion.GEN_AI_AGENT_TYPE,
                         SemanticConvetion.GEN_AI_AGENT_TYPE_BROWSER)
openlit/instrumentation/crawl4ai/crawl4ai.py
@@ -5,7 +5,7 @@ Module for monitoring Crawl4AI calls.

  import logging
  from opentelemetry.trace import SpanKind, Status, StatusCode
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
  from openlit.__helpers import (
      handle_exception,
  )
@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def wrap_crawl(gen_ai_endpoint, version, environment, application_name,
-                tracer, pricing_info, trace_content, metrics, disable_metrics):
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for chat completions to collect metrics.

@@ -26,7 +26,7 @@ def wrap_crawl(gen_ai_endpoint, version, environment, application_name,
      application_name: Name of the application using the Crawl4AI Agent.
      tracer: OpenTelemetry tracer for creating spans.
      pricing_info: Information used for calculating the cost of Crawl4AI usage.
-     trace_content: Flag indicating whether to trace the actual content.
+     capture_message_content: Flag indicating whether to trace the actual content.

      Returns:
      A function that wraps the chat completions method to add telemetry.
@@ -62,9 +62,9 @@ def wrap_crawl(gen_ai_endpoint, version, environment, application_name,
                         SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
      span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                         gen_ai_endpoint)
-     span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+     span.set_attribute(SERVICE_NAME,
                         application_name)
-     span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
+     span.set_attribute(DEPLOYMENT_ENVIRONMENT,
                         environment)
      span.set_attribute(SemanticConvetion.GEN_AI_AGENT_TYPE,
                         SemanticConvetion.GEN_AI_AGENT_TYPE_BROWSER)
openlit/instrumentation/crewai/__init__.py
@@ -26,7 +26,7 @@ class CrewAIInstrumentor(BaseInstrumentor):
      tracer = kwargs.get("tracer")
      metrics = kwargs.get("metrics_dict")
      pricing_info = kwargs.get("pricing_info", {})
-     trace_content = kwargs.get("trace_content", False)
+     capture_message_content = kwargs.get("capture_message_content", False)
      disable_metrics = kwargs.get("disable_metrics")
      version = importlib.metadata.version("crewai")

@@ -34,14 +34,14 @@ class CrewAIInstrumentor(BaseInstrumentor):
      "crewai.agent",
      "Agent.execute_task",
      crew_wrap("crewai.agent_execute_task", version, environment, application_name,
-               tracer, pricing_info, trace_content, metrics, disable_metrics),
+               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
      "crewai.task",
      "Task._execute_core",
      crew_wrap("crewai.task_execute_core", version, environment, application_name,
-               tracer, pricing_info, trace_content, metrics, disable_metrics),
+               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

openlit/instrumentation/crewai/crewai.py
@@ -6,7 +6,7 @@ Module for monitoring LiteLLM calls.
  import logging
  import json
  from opentelemetry.trace import SpanKind, Status, StatusCode
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
  from openlit.__helpers import (
      handle_exception,
  )
@@ -28,7 +28,7 @@ def _parse_tools(tools):
      return json.dumps(result)

  def crew_wrap(gen_ai_endpoint, version, environment, application_name,
-               tracer, pricing_info, trace_content, metrics, disable_metrics):
+               tracer, pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for chat completions to collect metrics.

@@ -39,7 +39,7 @@ def crew_wrap(gen_ai_endpoint, version, environment, application_name,
      application_name: Name of the application using the CrewAI Agent.
      tracer: OpenTelemetry tracer for creating spans.
      pricing_info: Information used for calculating the cost of CrewAI usage.
-     trace_content: Flag indicating whether to trace the actual content.
+     capture_message_content: Flag indicating whether to trace the actual content.

      Returns:
      A function that wraps the chat completions method to add telemetry.
@@ -75,8 +75,10 @@ def crew_wrap(gen_ai_endpoint, version, environment, application_name,
                         SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
      span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                         gen_ai_endpoint)
-     span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+     span.set_attribute(SERVICE_NAME,
                         application_name)
+     span.set_attribute(DEPLOYMENT_ENVIRONMENT,
+                        environment)

      instance_class = instance.__class__.__name__

openlit/instrumentation/dynamiq/__init__.py
@@ -26,7 +26,7 @@ class DynamiqInstrumentor(BaseInstrumentor):
      tracer = kwargs.get("tracer")
      metrics = kwargs.get("metrics_dict")
      pricing_info = kwargs.get("pricing_info", {})
-     trace_content = kwargs.get("trace_content", False)
+     capture_message_content = kwargs.get("capture_message_content", False)
      disable_metrics = kwargs.get("disable_metrics")
      version = importlib.metadata.version("dynamiq")

@@ -34,28 +34,28 @@ class DynamiqInstrumentor(BaseInstrumentor):
      "dynamiq.nodes.agents.base",
      "Agent.run",
      dynamiq_wrap("dynamiq.agent_run", version, environment, application_name,
-                  tracer, pricing_info, trace_content, metrics, disable_metrics),
+                  tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
      "dynamiq",
      "Workflow.run",
      dynamiq_wrap("dynamiq.workflow_run", version, environment, application_name,
-                  tracer, pricing_info, trace_content, metrics, disable_metrics),
+                  tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
      "dynamiq.memory",
      "Memory.add",
      dynamiq_wrap("dynamiq.memory_add", version, environment, application_name,
-                  tracer, pricing_info, trace_content, metrics, disable_metrics),
+                  tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

  wrap_function_wrapper(
      "dynamiq.memory",
      "Memory.search",
      dynamiq_wrap("dynamiq.memory_search", version, environment, application_name,
-                  tracer, pricing_info, trace_content, metrics, disable_metrics),
+                  tracer, pricing_info, capture_message_content, metrics, disable_metrics),
  )

openlit/instrumentation/dynamiq/dynamiq.py
@@ -5,7 +5,7 @@ Module for monitoring Dynamiq calls.

  import logging
  from opentelemetry.trace import SpanKind, Status, StatusCode
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
  from openlit.__helpers import (
      handle_exception,
  )
@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def dynamiq_wrap(gen_ai_endpoint, version, environment, application_name,
-                  tracer, pricing_info, trace_content, metrics, disable_metrics):
+                  tracer, pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for chat completions to collect metrics.

@@ -26,7 +26,7 @@ def dynamiq_wrap(gen_ai_endpoint, version, environment, application_name,
      application_name: Name of the application using the dynamiq Agent.
      tracer: OpenTelemetry tracer for creating spans.
      pricing_info: Information used for calculating the cost of dynamiq usage.
-     trace_content: Flag indicating whether to trace the actual content.
+     capture_message_content: Flag indicating whether to trace the actual content.

      Returns:
      A function that wraps the chat completions method to add telemetry.
@@ -62,9 +62,9 @@ def dynamiq_wrap(gen_ai_endpoint, version, environment, application_name,
                         SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
      span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                         gen_ai_endpoint)
-     span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+     span.set_attribute(SERVICE_NAME,
                         application_name)
-     span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
+     span.set_attribute(DEPLOYMENT_ENVIRONMENT,
                         environment)

      if gen_ai_endpoint == "dynamiq.agent_run":
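Across the agent-framework instrumentors shown here (controlflow, crawl4ai, crewai, dynamiq), the _instrument hook follows the same shape: read the renamed capture_message_content flag from kwargs (defaulting to False) and thread it into every wrapper factory. A compact sketch of that shape, using a hypothetical instrumentor and wrapper factory rather than OpenLIT's real classes:

import importlib.metadata
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from wrapt import wrap_function_wrapper

def example_wrap(gen_ai_endpoint, version, environment, application_name,
                 tracer, pricing_info, capture_message_content, metrics, disable_metrics):
    # Hypothetical factory with the same signature the diffs above use.
    def wrapper(wrapped, instance, args, kwargs):
        return wrapped(*args, **kwargs)
    return wrapper

class ExampleInstrumentor(BaseInstrumentor):
    # Hypothetical instrumentor showing only the kwargs plumbing.
    def instrumentation_dependencies(self):
        return ["dynamiq >= 0.1"]

    def _instrument(self, **kwargs):
        capture_message_content = kwargs.get("capture_message_content", False)
        wrap_function_wrapper(
            "dynamiq.nodes.agents.base",
            "Agent.run",
            example_wrap("dynamiq.agent_run",
                         importlib.metadata.version("dynamiq"),
                         kwargs.get("environment"),
                         kwargs.get("application_name"),
                         kwargs.get("tracer"),
                         kwargs.get("pricing_info", {}),
                         capture_message_content,
                         kwargs.get("metrics_dict"),
                         kwargs.get("disable_metrics")),
        )

    def _uninstrument(self, **kwargs):
        pass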