openlit 1.33.10__py3-none-any.whl → 1.33.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. openlit/__helpers.py +125 -88
  2. openlit/__init__.py +38 -11
  3. openlit/instrumentation/ag2/__init__.py +19 -20
  4. openlit/instrumentation/ag2/ag2.py +134 -69
  5. openlit/instrumentation/ai21/__init__.py +22 -21
  6. openlit/instrumentation/ai21/ai21.py +82 -546
  7. openlit/instrumentation/ai21/async_ai21.py +82 -546
  8. openlit/instrumentation/ai21/utils.py +409 -0
  9. openlit/instrumentation/anthropic/__init__.py +16 -16
  10. openlit/instrumentation/anthropic/anthropic.py +61 -353
  11. openlit/instrumentation/anthropic/async_anthropic.py +62 -354
  12. openlit/instrumentation/anthropic/utils.py +251 -0
  13. openlit/instrumentation/assemblyai/__init__.py +2 -2
  14. openlit/instrumentation/assemblyai/assemblyai.py +3 -3
  15. openlit/instrumentation/astra/__init__.py +25 -25
  16. openlit/instrumentation/astra/astra.py +2 -2
  17. openlit/instrumentation/astra/async_astra.py +2 -2
  18. openlit/instrumentation/azure_ai_inference/__init__.py +5 -5
  19. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +8 -8
  20. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +8 -8
  21. openlit/instrumentation/bedrock/__init__.py +2 -2
  22. openlit/instrumentation/bedrock/bedrock.py +3 -3
  23. openlit/instrumentation/chroma/__init__.py +9 -9
  24. openlit/instrumentation/chroma/chroma.py +2 -2
  25. openlit/instrumentation/cohere/__init__.py +7 -7
  26. openlit/instrumentation/cohere/async_cohere.py +9 -9
  27. openlit/instrumentation/cohere/cohere.py +9 -9
  28. openlit/instrumentation/controlflow/__init__.py +4 -4
  29. openlit/instrumentation/controlflow/controlflow.py +2 -2
  30. openlit/instrumentation/crawl4ai/__init__.py +3 -3
  31. openlit/instrumentation/crawl4ai/async_crawl4ai.py +2 -2
  32. openlit/instrumentation/crawl4ai/crawl4ai.py +2 -2
  33. openlit/instrumentation/crewai/__init__.py +3 -3
  34. openlit/instrumentation/crewai/crewai.py +2 -2
  35. openlit/instrumentation/dynamiq/__init__.py +5 -5
  36. openlit/instrumentation/dynamiq/dynamiq.py +2 -2
  37. openlit/instrumentation/elevenlabs/__init__.py +5 -5
  38. openlit/instrumentation/elevenlabs/async_elevenlabs.py +3 -3
  39. openlit/instrumentation/elevenlabs/elevenlabs.py +3 -3
  40. openlit/instrumentation/embedchain/__init__.py +2 -2
  41. openlit/instrumentation/embedchain/embedchain.py +4 -4
  42. openlit/instrumentation/firecrawl/__init__.py +3 -3
  43. openlit/instrumentation/firecrawl/firecrawl.py +2 -2
  44. openlit/instrumentation/google_ai_studio/__init__.py +3 -3
  45. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +3 -3
  46. openlit/instrumentation/google_ai_studio/google_ai_studio.py +3 -3
  47. openlit/instrumentation/gpt4all/__init__.py +3 -3
  48. openlit/instrumentation/gpt4all/gpt4all.py +7 -7
  49. openlit/instrumentation/groq/__init__.py +3 -3
  50. openlit/instrumentation/groq/async_groq.py +5 -5
  51. openlit/instrumentation/groq/groq.py +5 -5
  52. openlit/instrumentation/haystack/__init__.py +2 -2
  53. openlit/instrumentation/haystack/haystack.py +2 -2
  54. openlit/instrumentation/julep/__init__.py +7 -7
  55. openlit/instrumentation/julep/async_julep.py +3 -3
  56. openlit/instrumentation/julep/julep.py +3 -3
  57. openlit/instrumentation/langchain/__init__.py +2 -2
  58. openlit/instrumentation/langchain/async_langchain.py +13 -9
  59. openlit/instrumentation/langchain/langchain.py +13 -8
  60. openlit/instrumentation/letta/__init__.py +7 -7
  61. openlit/instrumentation/letta/letta.py +5 -5
  62. openlit/instrumentation/litellm/__init__.py +5 -5
  63. openlit/instrumentation/litellm/async_litellm.py +8 -8
  64. openlit/instrumentation/litellm/litellm.py +8 -8
  65. openlit/instrumentation/llamaindex/__init__.py +2 -2
  66. openlit/instrumentation/llamaindex/llamaindex.py +2 -2
  67. openlit/instrumentation/mem0/__init__.py +2 -2
  68. openlit/instrumentation/mem0/mem0.py +2 -2
  69. openlit/instrumentation/milvus/__init__.py +2 -2
  70. openlit/instrumentation/milvus/milvus.py +2 -2
  71. openlit/instrumentation/mistral/__init__.py +7 -7
  72. openlit/instrumentation/mistral/async_mistral.py +10 -10
  73. openlit/instrumentation/mistral/mistral.py +10 -10
  74. openlit/instrumentation/multion/__init__.py +7 -7
  75. openlit/instrumentation/multion/async_multion.py +5 -5
  76. openlit/instrumentation/multion/multion.py +5 -5
  77. openlit/instrumentation/ollama/__init__.py +11 -9
  78. openlit/instrumentation/ollama/async_ollama.py +71 -465
  79. openlit/instrumentation/ollama/ollama.py +71 -465
  80. openlit/instrumentation/ollama/utils.py +332 -0
  81. openlit/instrumentation/openai/__init__.py +11 -11
  82. openlit/instrumentation/openai/async_openai.py +18 -18
  83. openlit/instrumentation/openai/openai.py +18 -18
  84. openlit/instrumentation/phidata/__init__.py +2 -2
  85. openlit/instrumentation/phidata/phidata.py +2 -2
  86. openlit/instrumentation/pinecone/__init__.py +6 -6
  87. openlit/instrumentation/pinecone/pinecone.py +2 -2
  88. openlit/instrumentation/premai/__init__.py +3 -3
  89. openlit/instrumentation/premai/premai.py +7 -7
  90. openlit/instrumentation/qdrant/__init__.py +2 -2
  91. openlit/instrumentation/qdrant/async_qdrant.py +2 -2
  92. openlit/instrumentation/qdrant/qdrant.py +2 -2
  93. openlit/instrumentation/reka/__init__.py +3 -3
  94. openlit/instrumentation/reka/async_reka.py +3 -3
  95. openlit/instrumentation/reka/reka.py +3 -3
  96. openlit/instrumentation/together/__init__.py +5 -5
  97. openlit/instrumentation/together/async_together.py +8 -8
  98. openlit/instrumentation/together/together.py +8 -8
  99. openlit/instrumentation/transformers/__init__.py +2 -2
  100. openlit/instrumentation/transformers/transformers.py +4 -4
  101. openlit/instrumentation/vertexai/__init__.py +9 -9
  102. openlit/instrumentation/vertexai/async_vertexai.py +4 -4
  103. openlit/instrumentation/vertexai/vertexai.py +4 -4
  104. openlit/instrumentation/vllm/__init__.py +2 -2
  105. openlit/instrumentation/vllm/vllm.py +3 -3
  106. openlit/otel/events.py +85 -0
  107. openlit/otel/tracing.py +3 -13
  108. openlit/semcov/__init__.py +13 -1
  109. {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/METADATA +2 -2
  110. openlit-1.33.12.dist-info/RECORD +126 -0
  111. openlit-1.33.10.dist-info/RECORD +0 -122
  112. {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/LICENSE +0 -0
  113. {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/WHEEL +0 -0
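
Two changes account for most of the hunks below: the content-capture flag is renamed from trace_content to capture_message_content in every instrumentor and wrapper signature, and the Ollama instrumentor gains an event_provider argument backed by the new openlit/otel/events.py module. If the public openlit.init surface mirrors the instrumentor kwargs (openlit/__init__.py is also touched in this diff, so treat the exact keyword as an assumption to verify against the 1.33.12 release notes), the caller-side migration is a one-line rename:

    # Hedged migration sketch; "capture_message_content" is taken from the
    # kwargs.get(...) calls visible in the hunks below, and the openlit.init
    # keyword is assumed to follow the same rename.
    import openlit

    # 1.33.10 (old name):
    # openlit.init(application_name="my-app", trace_content=True)

    # 1.33.12 (new name):
    openlit.init(application_name="my-app", capture_message_content=True)
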
openlit/instrumentation/mistral/async_mistral.py

@@ -22,7 +22,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)
 
 def async_chat(version, environment, application_name, tracer,
-               pricing_info, trace_content, metrics, disable_metrics):
+               pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat to collect metrics.
 
@@ -32,7 +32,7 @@ def async_chat(version, environment, application_name, tracer,
         application_name: Name of the application using the Mistral API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Mistral usage.
-        trace_content: Flag indicating whether to trace the actual content.
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the chat method to add telemetry.
@@ -144,7 +144,7 @@ def async_chat(version, environment, application_name, tracer,
                     end_time - start_time)
                 span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                     version)
-                if trace_content:
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
@@ -155,7 +155,7 @@ def async_chat(version, environment, application_name, tracer,
                 for i in range(kwargs.get('n',1)):
                     span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
                         [response_dict.get('choices')[i].get('finish_reason')])
-                    if trace_content:
+                    if capture_message_content:
                         span.add_event(
                             name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                             attributes={
@@ -215,7 +215,7 @@ def async_chat(version, environment, application_name, tracer,
     return wrapper
 
 def async_chat_stream(version, environment, application_name,
-                      tracer, pricing_info, trace_content, metrics, disable_metrics):
+                      tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat_stream to collect metrics.
 
@@ -225,7 +225,7 @@ def async_chat_stream(version, environment, application_name,
         application_name: Name of the application using the Mistral API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Mistral usage.
-        trace_content: Flag indicating whether to trace the actual content.
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the chat method to add telemetry.
@@ -422,7 +422,7 @@ def async_chat_stream(version, environment, application_name,
                         self._ttft)
                     self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                         version)
-                    if trace_content:
+                    if capture_message_content:
                         self._span.add_event(
                             name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                             attributes={
@@ -485,7 +485,7 @@ def async_chat_stream(version, environment, application_name,
     return wrapper
 
 def async_embeddings(version, environment, application_name,
-                     tracer, pricing_info, trace_content, metrics, disable_metrics):
+                     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for embeddings to collect metrics.
 
@@ -495,7 +495,7 @@ def async_embeddings(version, environment, application_name,
         application_name: Name of the application using the Mistral API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Mistral usage.
-        trace_content: Flag indicating whether to trace the actual content.
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the embeddings method to add telemetry.
@@ -567,7 +567,7 @@ def async_embeddings(version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                     version)
 
-                if trace_content:
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
openlit/instrumentation/mistral/mistral.py

@@ -22,7 +22,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)
 
 def chat(version, environment, application_name, tracer,
-         pricing_info, trace_content, metrics, disable_metrics):
+         pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat to collect metrics.
 
@@ -32,7 +32,7 @@ def chat(version, environment, application_name, tracer,
         application_name: Name of the application using the Mistral API.
         tracer: OpenTelemetry tracer for creating spans.
        pricing_info: Information used for calculating the cost of Mistral usage.
-        trace_content: Flag indicating whether to trace the actual content.
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the chat method to add telemetry.
@@ -144,7 +144,7 @@ def chat(version, environment, application_name, tracer,
                     end_time - start_time)
                 span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                     version)
-                if trace_content:
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
@@ -155,7 +155,7 @@ def chat(version, environment, application_name, tracer,
                 for i in range(kwargs.get('n',1)):
                     span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
                         [response_dict.get('choices')[i].get('finish_reason')])
-                    if trace_content:
+                    if capture_message_content:
                         span.add_event(
                             name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                             attributes={
@@ -215,7 +215,7 @@ def chat(version, environment, application_name, tracer,
     return wrapper
 
 def chat_stream(version, environment, application_name,
-                tracer, pricing_info, trace_content, metrics, disable_metrics):
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat_stream to collect metrics.
 
@@ -225,7 +225,7 @@ def chat_stream(version, environment, application_name,
         application_name: Name of the application using the Mistral API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Mistral usage.
-        trace_content: Flag indicating whether to trace the actual content.
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the chat method to add telemetry.
@@ -422,7 +422,7 @@ def chat_stream(version, environment, application_name,
                         self._ttft)
                     self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                         version)
-                    if trace_content:
+                    if capture_message_content:
                         self._span.add_event(
                             name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                             attributes={
@@ -485,7 +485,7 @@ def chat_stream(version, environment, application_name,
     return wrapper
 
 def embeddings(version, environment, application_name,
-               tracer, pricing_info, trace_content, metrics, disable_metrics):
+               tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for embeddings to collect metrics.
 
@@ -495,7 +495,7 @@ def embeddings(version, environment, application_name,
         application_name: Name of the application using the Mistral API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Mistral usage.
-        trace_content: Flag indicating whether to trace the actual content.
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the embeddings method to add telemetry.
@@ -567,7 +567,7 @@ def embeddings(version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                     version)
 
-                if trace_content:
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
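
Both Mistral modules change only the flag's name; the behavior is unchanged: prompt and completion payloads are attached as span events only when the flag is set, so message content stays out of telemetry by default. A minimal, self-contained sketch of that gating pattern follows, using generic OpenTelemetry API calls; the literal event-name strings stand in for the SemanticConvetion constants and are illustrative.

    # Sketch of the opt-in content-capture pattern used in the wrappers above.
    from opentelemetry import trace

    tracer = trace.get_tracer("demo")

    def traced_chat(prompt, capture_message_content=False):
        with tracer.start_as_current_span("mistral.chat") as span:
            completion = "hi"  # stand-in for the real model response
            if capture_message_content:
                # Payloads are recorded only on explicit opt-in.
                span.add_event(name="gen_ai.content.prompt",
                               attributes={"gen_ai.prompt": prompt})
                span.add_event(name="gen_ai.content.completion",
                               attributes={"gen_ai.completion": completion})
            return completion
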
openlit/instrumentation/multion/__init__.py

@@ -30,7 +30,7 @@ class MultiOnInstrumentor(BaseInstrumentor):
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
-        trace_content = kwargs.get("trace_content", False)
+        capture_message_content = kwargs.get("capture_message_content", False)
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("multion")
 
@@ -39,19 +39,19 @@ class MultiOnInstrumentor(BaseInstrumentor):
             "multion.client",
             "MultiOn.browse",
             multion_wrap("multion.browse", version, environment, application_name,
-                         tracer, pricing_info, trace_content, metrics, disable_metrics),
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "multion.client",
             "MultiOn.retrieve",
             multion_wrap("multion.retrieve", version, environment, application_name,
-                         tracer, pricing_info, trace_content, metrics, disable_metrics),
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "multion.sessions.client",
             "SessionsClient.create",
             multion_wrap("multion.sessions.create", version, environment, application_name,
-                         tracer, pricing_info, trace_content, metrics, disable_metrics),
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         # Asynchronus
@@ -59,19 +59,19 @@ class MultiOnInstrumentor(BaseInstrumentor):
             "multion.client",
             "AsyncMultiOn.browse",
             async_multion_wrap("multion.browse", version, environment, application_name,
-                               tracer, pricing_info, trace_content, metrics, disable_metrics),
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "multion.client",
             "AsyncMultiOn.retrieve",
             async_multion_wrap("multion.retrieve", version, environment, application_name,
-                               tracer, pricing_info, trace_content, metrics, disable_metrics),
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "multion.sessions.client",
             "AsyncSessionsClient.create",
             async_multion_wrap("multion.sessions.create", version, environment, application_name,
-                               tracer, pricing_info, trace_content, metrics, disable_metrics),
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
 
openlit/instrumentation/multion/async_multion.py

@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)
 
 def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
-                       tracer, pricing_info, trace_content, metrics, disable_metrics):
+                       tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat completions to collect metrics.
 
@@ -26,7 +26,7 @@ def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
         application_name: Name of the application using the multion Agent.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of multion usage.
-        trace_content: Flag indicating whether to trace the actual content.
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the chat completions method to add telemetry.
@@ -75,7 +75,7 @@ def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_RESPONSE_TIME,
                     response.metadata.processing_time)
 
-                if trace_content:
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
@@ -92,7 +92,7 @@ def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_BROWSE_URL,
                     kwargs.get("url", ""))
 
-                if trace_content:
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
@@ -110,7 +110,7 @@ def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_BROWSE_URL,
                     kwargs.get("url", ""))
 
-                if trace_content:
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                         attributes={
openlit/instrumentation/multion/multion.py

@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)
 
 def multion_wrap(gen_ai_endpoint, version, environment, application_name,
-                 tracer, pricing_info, trace_content, metrics, disable_metrics):
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat completions to collect metrics.
 
@@ -26,7 +26,7 @@ def multion_wrap(gen_ai_endpoint, version, environment, application_name,
         application_name: Name of the application using the multion Agent.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of multion usage.
-        trace_content: Flag indicating whether to trace the actual content.
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the chat completions method to add telemetry.
@@ -75,7 +75,7 @@ def multion_wrap(gen_ai_endpoint, version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_RESPONSE_TIME,
                     response.metadata.processing_time)
 
-                if trace_content:
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
@@ -92,7 +92,7 @@ def multion_wrap(gen_ai_endpoint, version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_BROWSE_URL,
                     kwargs.get("url", ""))
 
-                if trace_content:
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
@@ -110,7 +110,7 @@ def multion_wrap(gen_ai_endpoint, version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_BROWSE_URL,
                     kwargs.get("url", ""))
 
-                if trace_content:
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                         attributes={
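
All three MultiOn files follow the same mechanics: the instrumentor builds a wrapper with wrapt's wrap_function_wrapper, and the factory closes over the renamed flag so every wrapped call sees it. A self-contained sketch of that factory pattern follows, with a dummy target function standing in for MultiOn.browse; only wrap_function_wrapper and the (wrapped, instance, args, kwargs) wrapper signature are real wrapt API here, the rest is illustrative.

    # Sketch of the wrapper-factory pattern from the multion hunks above.
    import sys
    import wrapt

    def browse(url):
        return f"page at {url}"

    def multion_wrap(gen_ai_endpoint, capture_message_content):
        # The real factory takes more arguments (tracer, pricing_info, ...);
        # trimmed here to show how the flag reaches the wrapper.
        def wrapper(wrapped, instance, args, kwargs):
            result = wrapped(*args, **kwargs)
            if capture_message_content:
                # Stands in for the span events recorded by openlit.
                print(f"[{gen_ai_endpoint}] content: {result}")
            return result
        return wrapper

    wrapt.wrap_function_wrapper(sys.modules[__name__], "browse",
                                multion_wrap("multion.browse", True))

    print(browse("https://example.com"))
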
openlit/instrumentation/ollama/__init__.py

@@ -1,5 +1,6 @@
-# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
-"""Initializer of Auto Instrumentation of Ollama Functions"""
+"""
+Initializer of Auto Instrumentation of Ollama Functions
+"""
 
 from typing import Collection
 import importlib.metadata
@@ -27,9 +28,10 @@ class OllamaInstrumentor(BaseInstrumentor):
         application_name = kwargs.get("application_name", "default_application")
         environment = kwargs.get("environment", "default_environment")
         tracer = kwargs.get("tracer")
+        event_provider = kwargs.get("event_provider")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
-        trace_content = kwargs.get("trace_content", False)
+        capture_message_content = kwargs.get("capture_message_content", False)
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("ollama")
 
@@ -38,13 +40,13 @@ class OllamaInstrumentor(BaseInstrumentor):
             "ollama",
             "chat",
             chat(version, environment, application_name,
-                 tracer, pricing_info, trace_content, metrics, disable_metrics),
+                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "ollama",
             "Client.chat",
             chat(version, environment, application_name,
-                 tracer, pricing_info, trace_content, metrics, disable_metrics),
+                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         # sync embeddings
@@ -52,13 +54,13 @@ class OllamaInstrumentor(BaseInstrumentor):
             "ollama",
             "embeddings",
             embeddings(version, environment, application_name,
-                 tracer, pricing_info, trace_content, metrics, disable_metrics),
+                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "ollama",
             "Client.embeddings",
             embeddings(version, environment, application_name,
-                 tracer, pricing_info, trace_content, metrics, disable_metrics),
+                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         # async chat
@@ -66,7 +68,7 @@ class OllamaInstrumentor(BaseInstrumentor):
             "ollama",
             "AsyncClient.chat",
             async_chat(version, environment, application_name,
-                 tracer, pricing_info, trace_content, metrics, disable_metrics),
+                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         # async embeddings
@@ -74,7 +76,7 @@ class OllamaInstrumentor(BaseInstrumentor):
             "ollama",
             "AsyncClient.embeddings",
             async_embeddings(version, environment, application_name,
-                 tracer, pricing_info, trace_content, metrics, disable_metrics),
+                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
     def _uninstrument(self, **kwargs):
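
Ollama is the only instrumentor in this diff whose wrapper signatures actually grow: event_provider is threaded in ahead of pricing_info, consistent with the new openlit/otel/events.py module in the file list. A hedged sketch of a direct instrumentor call follows, using only the kwargs read in _instrument above; how a real event provider is constructed is not shown in this diff, and in normal use openlit.init wires these instrumentors up instead.

    # Kwarg names mirror the kwargs.get(...) calls in _instrument above.
    from openlit.instrumentation.ollama import OllamaInstrumentor

    OllamaInstrumentor().instrument(
        application_name="my-app",
        environment="dev",
        event_provider=None,           # new in 1.33.12; placeholder value here
        capture_message_content=True,  # renamed from trace_content
    )
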