openlit 1.33.10__py3-none-any.whl → 1.33.12__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- openlit/__helpers.py +125 -88
- openlit/__init__.py +38 -11
- openlit/instrumentation/ag2/__init__.py +19 -20
- openlit/instrumentation/ag2/ag2.py +134 -69
- openlit/instrumentation/ai21/__init__.py +22 -21
- openlit/instrumentation/ai21/ai21.py +82 -546
- openlit/instrumentation/ai21/async_ai21.py +82 -546
- openlit/instrumentation/ai21/utils.py +409 -0
- openlit/instrumentation/anthropic/__init__.py +16 -16
- openlit/instrumentation/anthropic/anthropic.py +61 -353
- openlit/instrumentation/anthropic/async_anthropic.py +62 -354
- openlit/instrumentation/anthropic/utils.py +251 -0
- openlit/instrumentation/assemblyai/__init__.py +2 -2
- openlit/instrumentation/assemblyai/assemblyai.py +3 -3
- openlit/instrumentation/astra/__init__.py +25 -25
- openlit/instrumentation/astra/astra.py +2 -2
- openlit/instrumentation/astra/async_astra.py +2 -2
- openlit/instrumentation/azure_ai_inference/__init__.py +5 -5
- openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +8 -8
- openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +8 -8
- openlit/instrumentation/bedrock/__init__.py +2 -2
- openlit/instrumentation/bedrock/bedrock.py +3 -3
- openlit/instrumentation/chroma/__init__.py +9 -9
- openlit/instrumentation/chroma/chroma.py +2 -2
- openlit/instrumentation/cohere/__init__.py +7 -7
- openlit/instrumentation/cohere/async_cohere.py +9 -9
- openlit/instrumentation/cohere/cohere.py +9 -9
- openlit/instrumentation/controlflow/__init__.py +4 -4
- openlit/instrumentation/controlflow/controlflow.py +2 -2
- openlit/instrumentation/crawl4ai/__init__.py +3 -3
- openlit/instrumentation/crawl4ai/async_crawl4ai.py +2 -2
- openlit/instrumentation/crawl4ai/crawl4ai.py +2 -2
- openlit/instrumentation/crewai/__init__.py +3 -3
- openlit/instrumentation/crewai/crewai.py +2 -2
- openlit/instrumentation/dynamiq/__init__.py +5 -5
- openlit/instrumentation/dynamiq/dynamiq.py +2 -2
- openlit/instrumentation/elevenlabs/__init__.py +5 -5
- openlit/instrumentation/elevenlabs/async_elevenlabs.py +3 -3
- openlit/instrumentation/elevenlabs/elevenlabs.py +3 -3
- openlit/instrumentation/embedchain/__init__.py +2 -2
- openlit/instrumentation/embedchain/embedchain.py +4 -4
- openlit/instrumentation/firecrawl/__init__.py +3 -3
- openlit/instrumentation/firecrawl/firecrawl.py +2 -2
- openlit/instrumentation/google_ai_studio/__init__.py +3 -3
- openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +3 -3
- openlit/instrumentation/google_ai_studio/google_ai_studio.py +3 -3
- openlit/instrumentation/gpt4all/__init__.py +3 -3
- openlit/instrumentation/gpt4all/gpt4all.py +7 -7
- openlit/instrumentation/groq/__init__.py +3 -3
- openlit/instrumentation/groq/async_groq.py +5 -5
- openlit/instrumentation/groq/groq.py +5 -5
- openlit/instrumentation/haystack/__init__.py +2 -2
- openlit/instrumentation/haystack/haystack.py +2 -2
- openlit/instrumentation/julep/__init__.py +7 -7
- openlit/instrumentation/julep/async_julep.py +3 -3
- openlit/instrumentation/julep/julep.py +3 -3
- openlit/instrumentation/langchain/__init__.py +2 -2
- openlit/instrumentation/langchain/async_langchain.py +13 -9
- openlit/instrumentation/langchain/langchain.py +13 -8
- openlit/instrumentation/letta/__init__.py +7 -7
- openlit/instrumentation/letta/letta.py +5 -5
- openlit/instrumentation/litellm/__init__.py +5 -5
- openlit/instrumentation/litellm/async_litellm.py +8 -8
- openlit/instrumentation/litellm/litellm.py +8 -8
- openlit/instrumentation/llamaindex/__init__.py +2 -2
- openlit/instrumentation/llamaindex/llamaindex.py +2 -2
- openlit/instrumentation/mem0/__init__.py +2 -2
- openlit/instrumentation/mem0/mem0.py +2 -2
- openlit/instrumentation/milvus/__init__.py +2 -2
- openlit/instrumentation/milvus/milvus.py +2 -2
- openlit/instrumentation/mistral/__init__.py +7 -7
- openlit/instrumentation/mistral/async_mistral.py +10 -10
- openlit/instrumentation/mistral/mistral.py +10 -10
- openlit/instrumentation/multion/__init__.py +7 -7
- openlit/instrumentation/multion/async_multion.py +5 -5
- openlit/instrumentation/multion/multion.py +5 -5
- openlit/instrumentation/ollama/__init__.py +11 -9
- openlit/instrumentation/ollama/async_ollama.py +71 -465
- openlit/instrumentation/ollama/ollama.py +71 -465
- openlit/instrumentation/ollama/utils.py +332 -0
- openlit/instrumentation/openai/__init__.py +11 -11
- openlit/instrumentation/openai/async_openai.py +18 -18
- openlit/instrumentation/openai/openai.py +18 -18
- openlit/instrumentation/phidata/__init__.py +2 -2
- openlit/instrumentation/phidata/phidata.py +2 -2
- openlit/instrumentation/pinecone/__init__.py +6 -6
- openlit/instrumentation/pinecone/pinecone.py +2 -2
- openlit/instrumentation/premai/__init__.py +3 -3
- openlit/instrumentation/premai/premai.py +7 -7
- openlit/instrumentation/qdrant/__init__.py +2 -2
- openlit/instrumentation/qdrant/async_qdrant.py +2 -2
- openlit/instrumentation/qdrant/qdrant.py +2 -2
- openlit/instrumentation/reka/__init__.py +3 -3
- openlit/instrumentation/reka/async_reka.py +3 -3
- openlit/instrumentation/reka/reka.py +3 -3
- openlit/instrumentation/together/__init__.py +5 -5
- openlit/instrumentation/together/async_together.py +8 -8
- openlit/instrumentation/together/together.py +8 -8
- openlit/instrumentation/transformers/__init__.py +2 -2
- openlit/instrumentation/transformers/transformers.py +4 -4
- openlit/instrumentation/vertexai/__init__.py +9 -9
- openlit/instrumentation/vertexai/async_vertexai.py +4 -4
- openlit/instrumentation/vertexai/vertexai.py +4 -4
- openlit/instrumentation/vllm/__init__.py +2 -2
- openlit/instrumentation/vllm/vllm.py +3 -3
- openlit/otel/events.py +85 -0
- openlit/otel/tracing.py +3 -13
- openlit/semcov/__init__.py +13 -1
- {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/METADATA +2 -2
- openlit-1.33.12.dist-info/RECORD +126 -0
- openlit-1.33.10.dist-info/RECORD +0 -122
- {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/LICENSE +0 -0
- {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/WHEEL +0 -0
openlit/instrumentation/mistral/async_mistral.py

@@ -22,7 +22,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)
 
 def async_chat(version, environment, application_name, tracer,
-               pricing_info,
+               pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat to collect metrics.
 
@@ -32,7 +32,7 @@ def async_chat(version, environment, application_name, tracer,
         application_name: Name of the application using the Mistral API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Mistral usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the chat method to add telemetry.
@@ -144,7 +144,7 @@ def async_chat(version, environment, application_name, tracer,
                                 end_time - start_time)
             span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                                version)
-            if
+            if capture_message_content:
                 span.add_event(
                     name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                     attributes={
@@ -155,7 +155,7 @@ def async_chat(version, environment, application_name, tracer,
             for i in range(kwargs.get('n',1)):
                 span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
                                    [response_dict.get('choices')[i].get('finish_reason')])
-                if
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                         attributes={
@@ -215,7 +215,7 @@ def async_chat(version, environment, application_name, tracer,
     return wrapper
 
 def async_chat_stream(version, environment, application_name,
-                      tracer, pricing_info,
+                      tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat_stream to collect metrics.
 
@@ -225,7 +225,7 @@ def async_chat_stream(version, environment, application_name,
         application_name: Name of the application using the Mistral API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Mistral usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the chat method to add telemetry.
@@ -422,7 +422,7 @@ def async_chat_stream(version, environment, application_name,
                                      self._ttft)
             self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                                      version)
-            if
+            if capture_message_content:
                 self._span.add_event(
                     name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                     attributes={
@@ -485,7 +485,7 @@ def async_chat_stream(version, environment, application_name,
     return wrapper
 
 def async_embeddings(version, environment, application_name,
-                     tracer, pricing_info,
+                     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for embeddings to collect metrics.
 
@@ -495,7 +495,7 @@ def async_embeddings(version, environment, application_name,
         application_name: Name of the application using the Mistral API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Mistral usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the embeddings method to add telemetry.
@@ -567,7 +567,7 @@ def async_embeddings(version, environment, application_name,
             span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                                version)
 
-            if
+            if capture_message_content:
                 span.add_event(
                     name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                     attributes={
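Every hunk in this file follows the same pattern: each wrapper factory now takes `capture_message_content` (plus `metrics` and `disable_metrics`), and prompt/completion span events are emitted only when the content flag is set. The sketch below illustrates that gating in isolation; the factory name, event name, and attribute keys are illustrative stand-ins, not openlit's actual internals.

```python
# Minimal sketch of the gated-content pattern from the hunks above; the
# factory name, event name, and attribute keys are illustrative only.
import time
from opentelemetry.trace import SpanKind

def make_async_chat_wrapper(tracer, capture_message_content):
    """Build a wrapt-style async wrapper that times the call and,
    only on opt-in, records message content as a span event."""
    async def wrapper(wrapped, instance, args, kwargs):
        with tracer.start_as_current_span("chat", kind=SpanKind.CLIENT) as span:
            start = time.monotonic()
            response = await wrapped(*args, **kwargs)
            span.set_attribute("gen_ai.client.duration", time.monotonic() - start)
            if capture_message_content:  # content stays out of telemetry otherwise
                span.add_event(
                    name="gen_ai.content.prompt",
                    attributes={"gen_ai.prompt": str(kwargs.get("messages", ""))},
                )
            return response
    return wrapper
```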
openlit/instrumentation/mistral/mistral.py

@@ -22,7 +22,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)
 
 def chat(version, environment, application_name, tracer,
-         pricing_info,
+         pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat to collect metrics.
 
@@ -32,7 +32,7 @@ def chat(version, environment, application_name, tracer,
         application_name: Name of the application using the Mistral API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Mistral usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the chat method to add telemetry.
@@ -144,7 +144,7 @@ def chat(version, environment, application_name, tracer,
                                 end_time - start_time)
             span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                                version)
-            if
+            if capture_message_content:
                 span.add_event(
                     name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                     attributes={
@@ -155,7 +155,7 @@ def chat(version, environment, application_name, tracer,
             for i in range(kwargs.get('n',1)):
                 span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
                                    [response_dict.get('choices')[i].get('finish_reason')])
-                if
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                         attributes={
@@ -215,7 +215,7 @@ def chat(version, environment, application_name, tracer,
     return wrapper
 
 def chat_stream(version, environment, application_name,
-                tracer, pricing_info,
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat_stream to collect metrics.
 
@@ -225,7 +225,7 @@ def chat_stream(version, environment, application_name,
         application_name: Name of the application using the Mistral API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Mistral usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the chat method to add telemetry.
@@ -422,7 +422,7 @@ def chat_stream(version, environment, application_name,
                                      self._ttft)
             self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                                      version)
-            if
+            if capture_message_content:
                 self._span.add_event(
                     name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                     attributes={
@@ -485,7 +485,7 @@ def chat_stream(version, environment, application_name,
     return wrapper
 
 def embeddings(version, environment, application_name,
-               tracer, pricing_info,
+               tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for embeddings to collect metrics.
 
@@ -495,7 +495,7 @@ def embeddings(version, environment, application_name,
         application_name: Name of the application using the Mistral API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Mistral usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the embeddings method to add telemetry.
@@ -567,7 +567,7 @@ def embeddings(version, environment, application_name,
             span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                                version)
 
-            if
+            if capture_message_content:
                 span.add_event(
                     name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                     attributes={
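The sync module mirrors the async one hunk for hunk. The `metrics` and `disable_metrics` arguments threaded alongside the content flag gate metric recording the same way. A hedged sketch of that gating; the metric names and dict layout here are assumptions for illustration, not openlit's exact schema.

```python
# Hedged sketch of disable_metrics gating; metric names are assumed.
from opentelemetry.metrics import get_meter

meter = get_meter("demo.instrumentation")
metrics = {
    "genai_requests": meter.create_counter("gen_ai.total.requests"),
    "genai_usage_tokens": meter.create_histogram("gen_ai.usage.total_tokens"),
}

def record_request(metrics, disable_metrics, total_tokens, attributes):
    """Record request metrics unless metrics are disabled."""
    if disable_metrics:  # mirrors the disable_metrics kwarg in the diff
        return
    metrics["genai_requests"].add(1, attributes)
    metrics["genai_usage_tokens"].record(total_tokens, attributes)
```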
openlit/instrumentation/multion/__init__.py

@@ -30,7 +30,7 @@ class MultiOnInstrumentor(BaseInstrumentor):
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
-
+        capture_message_content = kwargs.get("capture_message_content", False)
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("multion")
 
@@ -39,19 +39,19 @@ class MultiOnInstrumentor(BaseInstrumentor):
             "multion.client",
             "MultiOn.browse",
             multion_wrap("multion.browse", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "multion.client",
             "MultiOn.retrieve",
             multion_wrap("multion.retrieve", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "multion.sessions.client",
             "SessionsClient.create",
             multion_wrap("multion.sessions.create", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         # Asynchronus
@@ -59,19 +59,19 @@ class MultiOnInstrumentor(BaseInstrumentor):
             "multion.client",
             "AsyncMultiOn.browse",
             async_multion_wrap("multion.browse", version, environment, application_name,
-                               tracer, pricing_info,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "multion.client",
             "AsyncMultiOn.retrieve",
             async_multion_wrap("multion.retrieve", version, environment, application_name,
-                               tracer, pricing_info,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "multion.sessions.client",
             "AsyncSessionsClient.create",
             async_multion_wrap("multion.sessions.create", version, environment, application_name,
-                               tracer, pricing_info,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
 
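As the hunks show, `_instrument` pulls its configuration out of `kwargs` and bakes it into each wrapper via wrapt's `wrap_function_wrapper(module, name, wrapper)`. A condensed sketch of that shape, with a placeholder SDK in place of MultiOn:

```python
# Condensed sketch of the BaseInstrumentor pattern above; "somesdk" and the
# patched method are placeholders, not a real instrumented package.
import importlib.metadata
from typing import Collection
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from wrapt import wrap_function_wrapper

def demo_wrap(endpoint, version, tracer, capture_message_content):
    def wrapper(wrapped, instance, args, kwargs):
        # a real wrapper opens a span here, as sketched earlier
        return wrapped(*args, **kwargs)
    return wrapper

class DemoInstrumentor(BaseInstrumentor):
    def instrumentation_dependencies(self) -> Collection[str]:
        return ["somesdk >= 1.0.0"]  # assumed version pin

    def _instrument(self, **kwargs):
        tracer = kwargs.get("tracer")
        capture_message_content = kwargs.get("capture_message_content", False)
        version = importlib.metadata.version("somesdk")
        wrap_function_wrapper(
            "somesdk.client",   # module to patch
            "Client.browse",    # dotted attribute inside that module
            demo_wrap("somesdk.browse", version, tracer, capture_message_content),
        )

    def _uninstrument(self, **kwargs):
        pass
```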
openlit/instrumentation/multion/async_multion.py

@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)
 
 def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
-                       tracer, pricing_info,
+                       tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat completions to collect metrics.
 
@@ -26,7 +26,7 @@ def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
         application_name: Name of the application using the multion Agent.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of multion usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the chat completions method to add telemetry.
@@ -75,7 +75,7 @@ def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_RESPONSE_TIME,
                                    response.metadata.processing_time)
 
-                if
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
@@ -92,7 +92,7 @@ def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_BROWSE_URL,
                                    kwargs.get("url", ""))
 
-                if
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
@@ -110,7 +110,7 @@ def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_BROWSE_URL,
                                    kwargs.get("url", ""))
 
-                if
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                         attributes={
openlit/instrumentation/multion/multion.py

@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)
 
 def multion_wrap(gen_ai_endpoint, version, environment, application_name,
-                 tracer, pricing_info,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat completions to collect metrics.
 
@@ -26,7 +26,7 @@ def multion_wrap(gen_ai_endpoint, version, environment, application_name,
         application_name: Name of the application using the multion Agent.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of multion usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the chat completions method to add telemetry.
@@ -75,7 +75,7 @@ def multion_wrap(gen_ai_endpoint, version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_RESPONSE_TIME,
                                    response.metadata.processing_time)
 
-                if
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
@@ -92,7 +92,7 @@ def multion_wrap(gen_ai_endpoint, version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_BROWSE_URL,
                                    kwargs.get("url", ""))
 
-                if
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
@@ -110,7 +110,7 @@ def multion_wrap(gen_ai_endpoint, version, environment, application_name,
                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_BROWSE_URL,
                                    kwargs.get("url", ""))
 
-                if
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                         attributes={
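Both MultiOn modules draw the same line: structural data such as the browse URL and the agent's processing time are always set as span attributes, while message content rides on span events only behind the flag. Shown in isolation below; the attribute and event names approximate, but are not, the `SemanticConvetion` constants from the diff.

```python
# Illustrative split between always-on attributes and opt-in content events;
# the attribute/event names approximate the SemanticConvetion constants.
def annotate_agent_span(span, url, processing_time, prompt, capture_message_content):
    span.set_attribute("gen_ai.agent.browse_url", url)         # structural: always set
    span.set_attribute("gen_ai.agent.response_time", processing_time)
    if capture_message_content:                                 # content: opt-in only
        span.add_event(
            name="gen_ai.content.prompt",
            attributes={"gen_ai.prompt": prompt},
        )
```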
openlit/instrumentation/ollama/__init__.py

@@ -1,5 +1,6 @@
-
-
+"""
+Initializer of Auto Instrumentation of Ollama Functions
+"""
 
 from typing import Collection
 import importlib.metadata
@@ -27,9 +28,10 @@ class OllamaInstrumentor(BaseInstrumentor):
         application_name = kwargs.get("application_name", "default_application")
         environment = kwargs.get("environment", "default_environment")
         tracer = kwargs.get("tracer")
+        event_provider = kwargs.get("event_provider")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
-
+        capture_message_content = kwargs.get("capture_message_content", False)
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("ollama")
 
@@ -38,13 +40,13 @@ class OllamaInstrumentor(BaseInstrumentor):
             "ollama",
             "chat",
             chat(version, environment, application_name,
-                 tracer, pricing_info,
+                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "ollama",
             "Client.chat",
             chat(version, environment, application_name,
-                 tracer, pricing_info,
+                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         # sync embeddings
@@ -52,13 +54,13 @@ class OllamaInstrumentor(BaseInstrumentor):
             "ollama",
             "embeddings",
             embeddings(version, environment, application_name,
-                       tracer, pricing_info,
+                       tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "ollama",
             "Client.embeddings",
             embeddings(version, environment, application_name,
-                       tracer, pricing_info,
+                       tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         # async chat
@@ -66,7 +68,7 @@ class OllamaInstrumentor(BaseInstrumentor):
             "ollama",
             "AsyncClient.chat",
             async_chat(version, environment, application_name,
-                       tracer, pricing_info,
+                       tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         # async embeddings
@@ -74,7 +76,7 @@ class OllamaInstrumentor(BaseInstrumentor):
             "ollama",
             "AsyncClient.embeddings",
             async_embeddings(version, environment, application_name,
-                             tracer, pricing_info,
+                             tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
     def _uninstrument(self, **kwargs):
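The Ollama hunks go one step further than the others: besides the renamed flag, every wrapper now also receives an `event_provider`, matching the new `openlit/otel/events.py` module in the file list. A hedged usage sketch that drives the instrumentor by hand with the exact kwargs `_instrument` reads above; normally `openlit.init()` wires all of this up, and the values passed for `event_provider` and the metrics kwargs are assumptions about what the wrappers tolerate, not documented behavior.

```python
# Hedged usage sketch: kwargs match those read in _instrument above, but
# event_provider=None and the disabled-metrics combination are assumptions.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
from openlit.instrumentation.ollama import OllamaInstrumentor

provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

OllamaInstrumentor().instrument(
    application_name="demo_application",
    environment="dev",
    tracer=trace.get_tracer(__name__),
    event_provider=None,            # openlit.init() normally supplies one (otel/events.py)
    pricing_info={},
    capture_message_content=True,   # opt in to prompt/completion events
    metrics_dict=None,
    disable_metrics=True,           # skip metrics so metrics_dict is never touched
)
```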
|