openlit 1.33.10__py3-none-any.whl → 1.33.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/__helpers.py +125 -88
- openlit/__init__.py +38 -11
- openlit/instrumentation/ag2/__init__.py +19 -20
- openlit/instrumentation/ag2/ag2.py +134 -69
- openlit/instrumentation/ai21/__init__.py +22 -21
- openlit/instrumentation/ai21/ai21.py +82 -546
- openlit/instrumentation/ai21/async_ai21.py +82 -546
- openlit/instrumentation/ai21/utils.py +409 -0
- openlit/instrumentation/anthropic/__init__.py +16 -16
- openlit/instrumentation/anthropic/anthropic.py +61 -353
- openlit/instrumentation/anthropic/async_anthropic.py +62 -354
- openlit/instrumentation/anthropic/utils.py +251 -0
- openlit/instrumentation/assemblyai/__init__.py +2 -2
- openlit/instrumentation/assemblyai/assemblyai.py +3 -3
- openlit/instrumentation/astra/__init__.py +25 -25
- openlit/instrumentation/astra/astra.py +2 -2
- openlit/instrumentation/astra/async_astra.py +2 -2
- openlit/instrumentation/azure_ai_inference/__init__.py +5 -5
- openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +8 -8
- openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +8 -8
- openlit/instrumentation/bedrock/__init__.py +2 -2
- openlit/instrumentation/bedrock/bedrock.py +3 -3
- openlit/instrumentation/chroma/__init__.py +9 -9
- openlit/instrumentation/chroma/chroma.py +2 -2
- openlit/instrumentation/cohere/__init__.py +7 -7
- openlit/instrumentation/cohere/async_cohere.py +9 -9
- openlit/instrumentation/cohere/cohere.py +9 -9
- openlit/instrumentation/controlflow/__init__.py +4 -4
- openlit/instrumentation/controlflow/controlflow.py +2 -2
- openlit/instrumentation/crawl4ai/__init__.py +3 -3
- openlit/instrumentation/crawl4ai/async_crawl4ai.py +2 -2
- openlit/instrumentation/crawl4ai/crawl4ai.py +2 -2
- openlit/instrumentation/crewai/__init__.py +3 -3
- openlit/instrumentation/crewai/crewai.py +2 -2
- openlit/instrumentation/dynamiq/__init__.py +5 -5
- openlit/instrumentation/dynamiq/dynamiq.py +2 -2
- openlit/instrumentation/elevenlabs/__init__.py +5 -5
- openlit/instrumentation/elevenlabs/async_elevenlabs.py +3 -3
- openlit/instrumentation/elevenlabs/elevenlabs.py +3 -3
- openlit/instrumentation/embedchain/__init__.py +2 -2
- openlit/instrumentation/embedchain/embedchain.py +4 -4
- openlit/instrumentation/firecrawl/__init__.py +3 -3
- openlit/instrumentation/firecrawl/firecrawl.py +2 -2
- openlit/instrumentation/google_ai_studio/__init__.py +3 -3
- openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +3 -3
- openlit/instrumentation/google_ai_studio/google_ai_studio.py +3 -3
- openlit/instrumentation/gpt4all/__init__.py +3 -3
- openlit/instrumentation/gpt4all/gpt4all.py +7 -7
- openlit/instrumentation/groq/__init__.py +3 -3
- openlit/instrumentation/groq/async_groq.py +5 -5
- openlit/instrumentation/groq/groq.py +5 -5
- openlit/instrumentation/haystack/__init__.py +2 -2
- openlit/instrumentation/haystack/haystack.py +2 -2
- openlit/instrumentation/julep/__init__.py +7 -7
- openlit/instrumentation/julep/async_julep.py +3 -3
- openlit/instrumentation/julep/julep.py +3 -3
- openlit/instrumentation/langchain/__init__.py +2 -2
- openlit/instrumentation/langchain/async_langchain.py +13 -9
- openlit/instrumentation/langchain/langchain.py +13 -8
- openlit/instrumentation/letta/__init__.py +7 -7
- openlit/instrumentation/letta/letta.py +5 -5
- openlit/instrumentation/litellm/__init__.py +5 -5
- openlit/instrumentation/litellm/async_litellm.py +8 -8
- openlit/instrumentation/litellm/litellm.py +8 -8
- openlit/instrumentation/llamaindex/__init__.py +2 -2
- openlit/instrumentation/llamaindex/llamaindex.py +2 -2
- openlit/instrumentation/mem0/__init__.py +2 -2
- openlit/instrumentation/mem0/mem0.py +2 -2
- openlit/instrumentation/milvus/__init__.py +2 -2
- openlit/instrumentation/milvus/milvus.py +2 -2
- openlit/instrumentation/mistral/__init__.py +7 -7
- openlit/instrumentation/mistral/async_mistral.py +10 -10
- openlit/instrumentation/mistral/mistral.py +10 -10
- openlit/instrumentation/multion/__init__.py +7 -7
- openlit/instrumentation/multion/async_multion.py +5 -5
- openlit/instrumentation/multion/multion.py +5 -5
- openlit/instrumentation/ollama/__init__.py +11 -9
- openlit/instrumentation/ollama/async_ollama.py +71 -465
- openlit/instrumentation/ollama/ollama.py +71 -465
- openlit/instrumentation/ollama/utils.py +332 -0
- openlit/instrumentation/openai/__init__.py +11 -11
- openlit/instrumentation/openai/async_openai.py +18 -18
- openlit/instrumentation/openai/openai.py +18 -18
- openlit/instrumentation/phidata/__init__.py +2 -2
- openlit/instrumentation/phidata/phidata.py +2 -2
- openlit/instrumentation/pinecone/__init__.py +6 -6
- openlit/instrumentation/pinecone/pinecone.py +2 -2
- openlit/instrumentation/premai/__init__.py +3 -3
- openlit/instrumentation/premai/premai.py +7 -7
- openlit/instrumentation/qdrant/__init__.py +2 -2
- openlit/instrumentation/qdrant/async_qdrant.py +2 -2
- openlit/instrumentation/qdrant/qdrant.py +2 -2
- openlit/instrumentation/reka/__init__.py +3 -3
- openlit/instrumentation/reka/async_reka.py +3 -3
- openlit/instrumentation/reka/reka.py +3 -3
- openlit/instrumentation/together/__init__.py +5 -5
- openlit/instrumentation/together/async_together.py +8 -8
- openlit/instrumentation/together/together.py +8 -8
- openlit/instrumentation/transformers/__init__.py +2 -2
- openlit/instrumentation/transformers/transformers.py +4 -4
- openlit/instrumentation/vertexai/__init__.py +9 -9
- openlit/instrumentation/vertexai/async_vertexai.py +4 -4
- openlit/instrumentation/vertexai/vertexai.py +4 -4
- openlit/instrumentation/vllm/__init__.py +2 -2
- openlit/instrumentation/vllm/vllm.py +3 -3
- openlit/otel/events.py +85 -0
- openlit/otel/tracing.py +3 -13
- openlit/semcov/__init__.py +13 -1
- {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/METADATA +2 -2
- openlit-1.33.12.dist-info/RECORD +126 -0
- openlit-1.33.10.dist-info/RECORD +0 -122
- {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/LICENSE +0 -0
- {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/WHEEL +0 -0
@@ -23,7 +23,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)

 def complete(version, environment, application_name,
-             tracer, pricing_info,
+             tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat to collect metrics.


@@ -33,7 +33,7 @@ def complete(version, environment, application_name,
         application_name: Name of the application using the Azure AI Inference API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Azure AI Inference usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.

     Returns:
         A function that wraps the chat method to add telemetry.

@@ -217,7 +217,7 @@ def complete(version, environment, application_name,
                     self._ttft)
                 self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                     version)
-                if
+                if capture_message_content:
                     self._span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={

@@ -388,7 +388,7 @@ def complete(version, environment, application_name,
                     end_time - start_time)
                 span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                     version)
-                if
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={

@@ -399,7 +399,7 @@ def complete(version, environment, application_name,
                 for i in range(kwargs.get('n',1)):
                     span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
                         [response_dict.get('choices')[i].get('finish_reason')])
-                    if
+                    if capture_message_content:
                         span.add_event(
                             name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                             attributes={

@@ -459,7 +459,7 @@ def complete(version, environment, application_name,
     return wrapper

 def embedding(version, environment, application_name,
-              tracer, pricing_info,
+              tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for embeddings to collect metrics.


@@ -469,7 +469,7 @@ def embedding(version, environment, application_name,
         application_name: Name of the application using the Azure Inference API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Azure Inference usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.

     Returns:
         A function that wraps the embeddings method to add telemetry.

@@ -541,7 +541,7 @@ def embedding(version, environment, application_name,
             span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                 version)

-            if
+            if capture_message_content:
                 span.add_event(
                     name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                     attributes={
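Every hunk in this release follows the same shape: the wrapper factories gain a capture_message_content parameter (alongside metrics and disable_metrics), and prompt/completion events are attached to the span only when that flag is set. A minimal sketch of the pattern, using hypothetical attribute/event names rather than openlit's SemanticConvetion constants:

# Illustrative sketch only -- not the openlit source. Shows the factory
# signature and the content guard that the hunks above introduce.
import logging

logger = logging.getLogger(__name__)

def complete(version, environment, application_name,
             tracer, pricing_info, capture_message_content, metrics, disable_metrics):
    """Return a wrapt-style wrapper that traces a chat call."""
    def wrapper(wrapped, instance, args, kwargs):
        with tracer.start_as_current_span("chat") as span:
            response = wrapped(*args, **kwargs)
            span.set_attribute("gen_ai.sdk.version", version)
            if capture_message_content:
                # Prompt text is recorded only when the flag is enabled.
                span.add_event(
                    name="gen_ai.content.prompt",  # hypothetical event name
                    attributes={"gen_ai.prompt": str(kwargs.get("messages", ""))},
                )
            return response
    return wrapper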
@@ -24,7 +24,7 @@ class BedrockInstrumentor(BaseInstrumentor):
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
-
+        capture_message_content = kwargs.get("capture_message_content", False)
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("boto3")


@@ -33,7 +33,7 @@ class BedrockInstrumentor(BaseInstrumentor):
             "botocore.client",
             "ClientCreator.create_client",
             converse(version, environment, application_name,
-                     tracer, pricing_info,
+                     tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

     def _uninstrument(self, **kwargs):

@@ -53,7 +53,7 @@ class CustomStreamWrapper(StreamingBody):
         return data_chunk

 def converse(version, environment, application_name, tracer,
-             pricing_info,
+             pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for messages to collect metrics.


@@ -64,7 +64,7 @@ def converse(version, environment, application_name, tracer,
         application_name: Name of the application using the Bedrock API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information for calculating Bedrock usage cost.
-
+        capture_message_content: Whether to trace the actual content.
         metrics: Metrics collector.
         disable_metrics: Flag to toggle metrics collection.
     Returns:

@@ -194,7 +194,7 @@ def converse(version, environment, application_name, tracer,
                 span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                     version)

-                if
+                if capture_message_content:
                     span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
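On the instrumentor side, the flag arrives through the kwargs passed to _instrument and is simply threaded into each wrapper factory, as the BedrockInstrumentor hunks above show. A rough, self-contained sketch of that wiring (class name and factory body are placeholders, not the actual Bedrock instrumentor):

# Pattern sketch under assumed names; mirrors the _instrument hunks above.
import importlib.metadata
from wrapt import wrap_function_wrapper
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor

def converse(version, environment, application_name,
             tracer, pricing_info, capture_message_content, metrics, disable_metrics):
    # Stand-in factory; the real one builds the telemetry wrapper shown earlier.
    def wrapper(wrapped, instance, args, kwargs):
        return wrapped(*args, **kwargs)
    return wrapper

class ExampleBedrockInstrumentor(BaseInstrumentor):  # hypothetical name
    def instrumentation_dependencies(self):
        return []

    def _instrument(self, **kwargs):
        application_name = kwargs.get("application_name")
        environment = kwargs.get("environment")
        tracer = kwargs.get("tracer")
        metrics = kwargs.get("metrics_dict")
        pricing_info = kwargs.get("pricing_info", {})
        capture_message_content = kwargs.get("capture_message_content", False)
        disable_metrics = kwargs.get("disable_metrics")
        version = importlib.metadata.version("boto3")

        # The new flag travels with the existing telemetry settings.
        wrap_function_wrapper(
            "botocore.client",
            "ClientCreator.create_client",
            converse(version, environment, application_name,
                     tracer, pricing_info, capture_message_content, metrics, disable_metrics),
        )

    def _uninstrument(self, **kwargs):
        pass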
@@ -21,7 +21,7 @@ class ChromaInstrumentor(BaseInstrumentor):
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info")
-
+        capture_message_content = kwargs.get("capture_message_content")
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("chromadb")


@@ -29,55 +29,55 @@ class ChromaInstrumentor(BaseInstrumentor):
             "chromadb.db",
             "DB.create_collection",
             general_wrap("chroma.create_collection", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "chromadb",
             "Collection.add",
             general_wrap("chroma.add", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "chromadb",
             "Collection.get",
             general_wrap("chroma.get", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "chromadb",
             "Collection.peek",
             general_wrap("chroma.peek", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "chromadb",
             "Collection.query",
             general_wrap("chroma.query", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "chromadb",
             "Collection.update",
             general_wrap("chroma.update", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "chromadb",
             "Collection.upsert",
             general_wrap("chroma.upsert", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "chromadb",
             "Collection.delete",
             general_wrap("chroma.delete", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )



@@ -25,7 +25,7 @@ def object_count(obj):
     return cnt

 def general_wrap(gen_ai_endpoint, version, environment, application_name,
-                 tracer, pricing_info,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Creates a wrapper around a function call to trace and log its execution metrics.


@@ -39,7 +39,7 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
         - application_name (str): Name of the Langchain application.
         - tracer (opentelemetry.trace.Tracer): The tracer object used for OpenTelemetry tracing.
         - pricing_info (dict): Information about the pricing for internal metrics (currently not used).
-        -
+        - capture_message_content (bool): Flag indicating whether to trace the content of the response.

     Returns:
         - function: A higher-order function that takes a function 'wrapped' and returns
@@ -23,7 +23,7 @@ class CohereInstrumentor(BaseInstrumentor):
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info")
-
+        capture_message_content = kwargs.get("capture_message_content")
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("cohere")


@@ -32,19 +32,19 @@ class CohereInstrumentor(BaseInstrumentor):
             "cohere.client_v2",
             "ClientV2.chat",
             chat(version, environment, application_name,
-                 tracer, pricing_info,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "cohere.client_v2",
             "ClientV2.chat_stream",
             chat_stream(version, environment, application_name,
-                        tracer, pricing_info,
+                        tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "cohere.client_v2",
             "ClientV2.embed",
             embed(version, environment, application_name,
-                  tracer, pricing_info,
+                  tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         # Async Client

@@ -52,21 +52,21 @@ class CohereInstrumentor(BaseInstrumentor):
             "cohere.client_v2",
             "AsyncClientV2.chat",
             async_chat(version, environment, application_name,
-                       tracer, pricing_info,
+                       tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "cohere.client_v2",
             "AsyncClientV2.chat_stream",
             async_chat_stream(version, environment, application_name,
-                              tracer, pricing_info,
+                              tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "cohere.client_v2",
             "AsyncClientV2.embed",
             async_embed(version, environment, application_name,
-                        tracer, pricing_info,
+                        tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

     @staticmethod
@@ -22,7 +22,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)

 def async_embed(version, environment, application_name, tracer,
-                pricing_info,
+                pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for embeddings to collect metrics.


@@ -32,7 +32,7 @@ def async_embed(version, environment, application_name, tracer,
         application_name: Name of the application using the Cohere API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Cohere usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.

     Returns:
         A function that wraps the embeddings method to add telemetry.

@@ -105,7 +105,7 @@ def async_embed(version, environment, application_name, tracer,
             span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                 version)

-            if
+            if capture_message_content:
                 span.add_event(
                     name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                     attributes={

@@ -149,7 +149,7 @@ def async_embed(version, environment, application_name, tracer,
     return wrapper

 def async_chat(version, environment, application_name, tracer,
-               pricing_info,
+               pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat to collect metrics.


@@ -159,7 +159,7 @@ def async_chat(version, environment, application_name, tracer,
         application_name: Name of the application using the Cohere API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Cohere usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.

     Returns:
         A function that wraps the chat method to add telemetry.

@@ -281,7 +281,7 @@ def async_chat(version, environment, application_name, tracer,
             span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                 version)

-            if
+            if capture_message_content:
                 span.add_event(
                     name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                     attributes={

@@ -336,7 +336,7 @@ def async_chat(version, environment, application_name, tracer,
     return wrapper

 def async_chat_stream(version, environment, application_name,
-                      tracer, pricing_info,
+                      tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat_stream to collect metrics.


@@ -346,7 +346,7 @@ def async_chat_stream(version, environment, application_name,
         application_name: Name of the application using the Cohere API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Cohere usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.

     Returns:
         A function that wraps the chat method to add telemetry.

@@ -547,7 +547,7 @@ def async_chat_stream(version, environment, application_name,
                     self._ttft)
                 self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                     version)
-                if
+                if capture_message_content:
                     self._span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
@@ -22,7 +22,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)

 def embed(version, environment, application_name, tracer,
-          pricing_info,
+          pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for embeddings to collect metrics.


@@ -32,7 +32,7 @@ def embed(version, environment, application_name, tracer,
         application_name: Name of the application using the Cohere API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Cohere usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.

     Returns:
         A function that wraps the embeddings method to add telemetry.

@@ -105,7 +105,7 @@ def embed(version, environment, application_name, tracer,
             span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                 version)

-            if
+            if capture_message_content:
                 span.add_event(
                     name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                     attributes={

@@ -149,7 +149,7 @@ def embed(version, environment, application_name, tracer,
     return wrapper

 def chat(version, environment, application_name, tracer,
-         pricing_info,
+         pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat to collect metrics.


@@ -159,7 +159,7 @@ def chat(version, environment, application_name, tracer,
         application_name: Name of the application using the Cohere API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Cohere usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.

     Returns:
         A function that wraps the chat method to add telemetry.

@@ -281,7 +281,7 @@ def chat(version, environment, application_name, tracer,
             span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                 version)

-            if
+            if capture_message_content:
                 span.add_event(
                     name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                     attributes={

@@ -336,7 +336,7 @@ def chat(version, environment, application_name, tracer,
     return wrapper

 def chat_stream(version, environment, application_name,
-                tracer, pricing_info,
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat_stream to collect metrics.


@@ -346,7 +346,7 @@ def chat_stream(version, environment, application_name,
         application_name: Name of the application using the Cohere API.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Cohere usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.

     Returns:
         A function that wraps the chat method to add telemetry.

@@ -547,7 +547,7 @@ def chat_stream(version, environment, application_name,
                     self._ttft)
                 self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                     version)
-                if
+                if capture_message_content:
                     self._span.add_event(
                         name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                         attributes={
@@ -26,7 +26,7 @@ class ControlFlowInstrumentor(BaseInstrumentor):
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
-
+        capture_message_content = kwargs.get("capture_message_content", False)
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("controlflow")


@@ -34,21 +34,21 @@ class ControlFlowInstrumentor(BaseInstrumentor):
             "controlflow.agents.agent",
             "Agent.__init__",
             wrap_controlflow("controlflow.create_agent", version, environment, application_name,
-                             tracer, pricing_info,
+                             tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "controlflow.tasks.task",
             "Task.__init__",
             wrap_controlflow("controlflow.create_task", version, environment, application_name,
-                             tracer, pricing_info,
+                             tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "controlflow",
             "run",
             wrap_controlflow("controlflow.run", version, environment, application_name,
-                             tracer, pricing_info,
+                             tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

     def _uninstrument(self, **kwargs):

@@ -13,7 +13,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)

 def wrap_controlflow(gen_ai_endpoint, version, environment, application_name,
-                     tracer, pricing_info,
+                     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Creates a wrapper around a function call to trace and log its execution metrics.


@@ -27,7 +27,7 @@ def wrap_controlflow(gen_ai_endpoint, version, environment, application_name,
         - application_name (str): Name of the Langchain application.
         - tracer (opentelemetry.trace.Tracer): The tracer object used for OpenTelemetry tracing.
         - pricing_info (dict): Information about the pricing for internal metrics (currently not used).
-        -
+        - capture_message_content (bool): Flag indicating whether to trace the content of the response.

     Returns:
         - function: A higher-order function that takes a function 'wrapped' and returns
@@ -29,7 +29,7 @@ class Crawl4AIInstrumentor(BaseInstrumentor):
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
-
+        capture_message_content = kwargs.get("capture_message_content", False)
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("crawl4ai")


@@ -37,14 +37,14 @@ class Crawl4AIInstrumentor(BaseInstrumentor):
             "crawl4ai.web_crawler",
             "WebCrawler.run",
             wrap_crawl("crawl4ai.web_crawl", version, environment, application_name,
-                       tracer, pricing_info,
+                       tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "crawl4ai.async_webcrawler",
             "AsyncWebCrawler.arun",
             async_wrap_crawl("crawl4ai.web_crawl", version, environment, application_name,
-                             tracer, pricing_info,
+                             tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

     def _uninstrument(self, **kwargs):

@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)

 def async_wrap_crawl(gen_ai_endpoint, version, environment, application_name,
-                     tracer, pricing_info,
+                     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat completions to collect metrics.


@@ -26,7 +26,7 @@ def async_wrap_crawl(gen_ai_endpoint, version, environment, application_name,
         application_name: Name of the application using the Crawl4AI Agent.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Crawl4AI usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.

     Returns:
         A function that wraps the chat completions method to add telemetry.

@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)

 def wrap_crawl(gen_ai_endpoint, version, environment, application_name,
-               tracer, pricing_info,
+               tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat completions to collect metrics.


@@ -26,7 +26,7 @@ def wrap_crawl(gen_ai_endpoint, version, environment, application_name,
         application_name: Name of the application using the Crawl4AI Agent.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of Crawl4AI usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.

     Returns:
         A function that wraps the chat completions method to add telemetry.
@@ -26,7 +26,7 @@ class CrewAIInstrumentor(BaseInstrumentor):
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
-
+        capture_message_content = kwargs.get("capture_message_content", False)
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("crewai")


@@ -34,14 +34,14 @@ class CrewAIInstrumentor(BaseInstrumentor):
             "crewai.agent",
             "Agent.execute_task",
             crew_wrap("crewai.agent_execute_task", version, environment, application_name,
-                      tracer, pricing_info,
+                      tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "crewai.task",
             "Task._execute_core",
             crew_wrap("crewai.task_execute_core", version, environment, application_name,
-                      tracer, pricing_info,
+                      tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )



@@ -28,7 +28,7 @@ def _parse_tools(tools):
     return json.dumps(result)

 def crew_wrap(gen_ai_endpoint, version, environment, application_name,
-              tracer, pricing_info,
+              tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat completions to collect metrics.


@@ -39,7 +39,7 @@ def crew_wrap(gen_ai_endpoint, version, environment, application_name,
         application_name: Name of the application using the CrewAI Agent.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of CrewAI usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.

     Returns:
         A function that wraps the chat completions method to add telemetry.
@@ -26,7 +26,7 @@ class DynamiqInstrumentor(BaseInstrumentor):
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
-
+        capture_message_content = kwargs.get("capture_message_content", False)
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("dynamiq")


@@ -34,28 +34,28 @@ class DynamiqInstrumentor(BaseInstrumentor):
             "dynamiq.nodes.agents.base",
             "Agent.run",
             dynamiq_wrap("dynamiq.agent_run", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "dynamiq",
             "Workflow.run",
             dynamiq_wrap("dynamiq.workflow_run", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "dynamiq.memory",
             "Memory.add",
             dynamiq_wrap("dynamiq.memory_add", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

         wrap_function_wrapper(
             "dynamiq.memory",
             "Memory.search",
             dynamiq_wrap("dynamiq.memory_search", version, environment, application_name,
-                         tracer, pricing_info,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )



@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)

 def dynamiq_wrap(gen_ai_endpoint, version, environment, application_name,
-                 tracer, pricing_info,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat completions to collect metrics.


@@ -26,7 +26,7 @@ def dynamiq_wrap(gen_ai_endpoint, version, environment, application_name,
         application_name: Name of the application using the dynamiq Agent.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of dynamiq usage.
-
+        capture_message_content: Flag indicating whether to trace the actual content.

     Returns:
         A function that wraps the chat completions method to add telemetry.