openlit 1.33.23__py3-none-any.whl → 1.34.0__py3-none-any.whl
This diff shows the contents of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in the public registry.
- openlit/__init__.py +4 -1
- openlit/instrumentation/openai/async_openai.py +20 -17
- openlit/instrumentation/openai/openai.py +25 -22
- openlit/instrumentation/pydantic_ai/__init__.py +55 -0
- openlit/instrumentation/pydantic_ai/pydantic_ai.py +51 -0
- openlit/instrumentation/pydantic_ai/utils.py +95 -0
- openlit/semcov/__init__.py +1 -0
- {openlit-1.33.23.dist-info → openlit-1.34.0.dist-info}/METADATA +1 -1
- {openlit-1.33.23.dist-info → openlit-1.34.0.dist-info}/RECORD +11 -8
- {openlit-1.33.23.dist-info → openlit-1.34.0.dist-info}/LICENSE +0 -0
- {openlit-1.33.23.dist-info → openlit-1.34.0.dist-info}/WHEEL +0 -0
openlit/__init__.py
CHANGED
@@ -66,6 +66,7 @@ from openlit.instrumentation.crawl4ai import Crawl4AIInstrumentor
 from openlit.instrumentation.firecrawl import FireCrawlInstrumentor
 from openlit.instrumentation.letta import LettaInstrumentor
 from openlit.instrumentation.openai_agents import OpenAIAgentsInstrumentor
+from openlit.instrumentation.pydantic_ai import PydanticAIInstrumentor
 from openlit.instrumentation.gpu import GPUInstrumentor
 import openlit.guard
 import openlit.evals

@@ -294,7 +295,8 @@ def init(
         "firecrawl": "firecrawl",
         "letta": "letta",
         "together": "together",
-        "openai-agents": "agents"
+        "openai-agents": "agents",
+        "pydantic_ai": "pydantic_ai"
     }

     invalid_instrumentors = [

@@ -414,6 +416,7 @@ def init(
         "letta": LettaInstrumentor(),
         "together": TogetherInstrumentor(),
         "openai-agents": OpenAIAgentsInstrumentor(),
+        "pydantic_ai": PydanticAIInstrumentor(),
     }

     # Initialize and instrument only the enabled instrumentors
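With the mapping and instrumentor entries above in place, `openlit.init()` can auto-instrument Pydantic AI whenever the `pydantic_ai` module is importable. A minimal sketch — the endpoint URL and model name are placeholders, not part of this diff:

```python
import openlit
from pydantic_ai import Agent

# Assumes an OTLP collector is listening locally; adjust as needed.
openlit.init(otlp_endpoint="http://127.0.0.1:4318")

# Agent.__init__, Agent.run_sync and Agent.run are now wrapped, so this
# emits an agent-creation span and an agent-run span.
agent = Agent("openai:gpt-4o", system_prompt="Be concise.")
result = agent.run_sync("What is OpenTelemetry?")
print(result.output)
```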
openlit/instrumentation/openai/async_openai.py
CHANGED
@@ -521,7 +521,10 @@ def async_chat_completions(version, environment, application_name,
                     self._llmresponse += content
                 self._response_id = chunked.get('id')
                 self._response_model = chunked.get('model')
-
+                try:
+                    self._finish_reason = chunked.get('choices', [])[0].get('finish_reason')
+                except (IndexError, AttributeError, TypeError):
+                    self._finish_reason = "stop"
                 self._openai_response_service_tier = chunked.get('service_tier') or 'auto'
                 self._openai_system_fingerprint = chunked.get('system_fingerprint')
             return chunk
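The new try/except matters because streamed chunks do not always carry a populated `choices` list — a final usage-only chunk, for instance, arrives with `choices: []`, so indexing `[0]` would raise. A standalone sketch of the guard (the chunk dicts are illustrative, not real API payloads):

```python
# Illustrative chunk shapes; a usage-only chunk has an empty choices list.
content_chunk = {"id": "c1", "choices": [{"finish_reason": "stop"}]}
usage_chunk = {"id": "c1", "choices": [], "usage": {"total_tokens": 42}}

for chunked in (content_chunk, usage_chunk):
    try:
        finish_reason = chunked.get('choices', [])[0].get('finish_reason')
    except (IndexError, AttributeError, TypeError):
        finish_reason = "stop"  # fall back instead of breaking the stream
    print(finish_reason)
```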
@@ -573,21 +576,21 @@ def async_chat_completions(version, environment, application_name,
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
                                      request_model)
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED,
-                                     self._kwargs.get("seed", ""))
+                                     str(self._kwargs.get("seed", "")))
             self._span.set_attribute(SemanticConvention.SERVER_PORT,
                                      self._server_port)
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-                                     self._kwargs.get("frequency_penalty", 0.0))
+                                     str(self._kwargs.get("frequency_penalty", 0.0)))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS,
                                      self._kwargs.get("max_tokens", -1))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY,
-                                     self._kwargs.get("presence_penalty", 0.0))
+                                     str(self._kwargs.get("presence_penalty", 0.0)))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES,
-                                     self._kwargs.get("stop", []))
+                                     str(self._kwargs.get("stop", [])))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE,
-                                     self._kwargs.get("temperature", 1.0))
+                                     str(self._kwargs.get("temperature", 1.0)))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P,
-                                     self._kwargs.get("top_p", 1.0))
+                                     str(self._kwargs.get("top_p", 1.0)))
             self._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON,
                                      [self._finish_reason])
             self._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID,
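The `str()` casts guard against values OpenTelemetry cannot store: span attributes must be `str`/`bool`/`int`/`float` or a homogeneous sequence of those, so a `None` seed or a mixed-type `stop` list would otherwise be dropped with a warning. A minimal sketch of the failure mode and the coercion (attribute keys are illustrative):

```python
from opentelemetry import trace

tracer = trace.get_tracer("demo")
kwargs = {"seed": None, "stop": ["\n", 0]}  # values a caller might pass

with tracer.start_as_current_span("chat demo") as span:
    # str() turns None into "None" and the list into its repr, both valid
    # attribute values; the raw values would be rejected by the SDK.
    span.set_attribute("gen_ai.request.seed", str(kwargs.get("seed", "")))
    span.set_attribute("gen_ai.request.stop_sequences", str(kwargs.get("stop", [])))
```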
@@ -601,7 +604,7 @@ def async_chat_completions(version, environment, application_name,
             self._span.set_attribute(SemanticConvention.SERVER_ADDRESS,
                                      self._server_address)
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SERVICE_TIER,
-                                     self._kwargs.get("service_tier", "auto"))
+                                     str(self._kwargs.get("service_tier", "auto")))
             self._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SERVICE_TIER,
                                      self._openai_response_service_tier)
             self._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,

@@ -619,7 +622,7 @@ def async_chat_completions(version, environment, application_name,
             self._span.set_attribute(SERVICE_NAME,
                                      application_name)
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER,
-                                     self._kwargs.get("user", ""))
+                                     str(self._kwargs.get("user", "")))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM,
                                      True)
             self._span.set_attribute(SemanticConvention.GEN_AI_USAGE_TOTAL_TOKENS,

@@ -760,21 +763,21 @@ def async_chat_completions(version, environment, application_name,
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
                            request_model)
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED,
-                           kwargs.get("seed", ""))
+                           str(kwargs.get("seed", "")))
         span.set_attribute(SemanticConvention.SERVER_PORT,
                            server_port)
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-                           kwargs.get("frequency_penalty", 0.0))
+                           str(kwargs.get("frequency_penalty", 0.0)))
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS,
                            kwargs.get("max_tokens", -1))
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY,
-                           kwargs.get("presence_penalty", 0.0))
+                           str(kwargs.get("presence_penalty", 0.0)))
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES,
-                           kwargs.get("stop", []))
+                           str(kwargs.get("stop", [])))
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE,
-                           kwargs.get("temperature", 1.0))
+                           str(kwargs.get("temperature", 1.0)))
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P,
-                           kwargs.get("top_p", 1.0))
+                           str(kwargs.get("top_p", 1.0)))
         span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID,
                            response_dict.get("id"))
         span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL,

@@ -786,7 +789,7 @@ def async_chat_completions(version, environment, application_name,
         span.set_attribute(SemanticConvention.SERVER_ADDRESS,
                            server_address)
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SERVICE_TIER,
-                           kwargs.get("service_tier", "auto"))
+                           str(kwargs.get("service_tier", "auto")))
         span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SERVICE_TIER,
                            response_dict.get('service_tier', 'auto'))
         span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,

@@ -798,7 +801,7 @@ def async_chat_completions(version, environment, application_name,
         span.set_attribute(SERVICE_NAME,
                            application_name)
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER,
-                           kwargs.get("user", ""))
+                           str(kwargs.get("user", "")))
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM,
                            False)
         span.set_attribute(SemanticConvention.GEN_AI_USAGE_TOTAL_TOKENS,
openlit/instrumentation/openai/openai.py
CHANGED
@@ -148,17 +148,17 @@ def responses(version, environment, application_name,
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
                                      request_model)
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED,
-                                     self._kwargs.get("seed", ""))
+                                     str(self._kwargs.get("seed", "")))
             self._span.set_attribute(SemanticConvention.SERVER_PORT,
                                      self._server_port)
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS,
-                                     self._kwargs.get("max_output_tokens", -1))
+                                     str(self._kwargs.get("max_output_tokens", -1)))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES,
-                                     self._kwargs.get("stop", []))
+                                     str(self._kwargs.get("stop", [])))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE,
-                                     self._kwargs.get("temperature", 1.0))
+                                     str(self._kwargs.get("temperature", 1.0)))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P,
-                                     self._kwargs.get("top_p", 1.0))
+                                     str(self._kwargs.get("top_p", 1.0)))
             self._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON,
                                      [self._finish_reason])
             self._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID,
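Note the parameter name in the hunk above: the Responses API caps output with `max_output_tokens` rather than Chat Completions' `max_tokens`, and the instrumentation reads the former. A small sketch of that mapping (the kwargs are illustrative):

```python
# Responses API request kwargs (illustrative values).
kwargs = {"model": "gpt-4o", "max_output_tokens": 256, "temperature": 0.2}

# The span attribute is filled from max_output_tokens, stringified as above.
max_tokens_attr = str(kwargs.get("max_output_tokens", -1))
print(max_tokens_attr)  # "256"
```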
@@ -521,7 +521,10 @@ def chat_completions(version, environment, application_name,
                     self._llmresponse += content
                 self._response_id = chunked.get('id')
                 self._response_model = chunked.get('model')
-
+                try:
+                    self._finish_reason = chunked.get('choices', [])[0].get('finish_reason')
+                except (IndexError, AttributeError, TypeError):
+                    self._finish_reason = "stop"
                 self._openai_response_service_tier = chunked.get('service_tier') or 'auto'
                 self._openai_system_fingerprint = chunked.get('system_fingerprint')
             return chunk

@@ -573,21 +576,21 @@ def chat_completions(version, environment, application_name,
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
                                      request_model)
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED,
-                                     self._kwargs.get("seed", ""))
+                                     str(self._kwargs.get("seed", "")))
             self._span.set_attribute(SemanticConvention.SERVER_PORT,
                                      self._server_port)
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-                                     self._kwargs.get("frequency_penalty", 0.0))
+                                     str(self._kwargs.get("frequency_penalty", 0.0)))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS,
                                      self._kwargs.get("max_tokens", -1))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY,
-                                     self._kwargs.get("presence_penalty", 0.0))
+                                     str(self._kwargs.get("presence_penalty", 0.0)))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES,
-                                     self._kwargs.get("stop", []))
+                                     str(self._kwargs.get("stop", [])))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE,
-                                     self._kwargs.get("temperature", 1.0))
+                                     str(self._kwargs.get("temperature", 1.0)))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P,
-                                     self._kwargs.get("top_p", 1.0))
+                                     str(self._kwargs.get("top_p", 1.0)))
             self._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON,
                                      [self._finish_reason])
             self._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID,

@@ -601,7 +604,7 @@ def chat_completions(version, environment, application_name,
             self._span.set_attribute(SemanticConvention.SERVER_ADDRESS,
                                      self._server_address)
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SERVICE_TIER,
-                                     self._kwargs.get("service_tier", "auto"))
+                                     str(self._kwargs.get("service_tier", "auto")))
             self._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SERVICE_TIER,
                                      self._openai_response_service_tier)
             self._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,

@@ -619,7 +622,7 @@ def chat_completions(version, environment, application_name,
             self._span.set_attribute(SERVICE_NAME,
                                      application_name)
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER,
-                                     self._kwargs.get("user", ""))
+                                     str(self._kwargs.get("user", "")))
             self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM,
                                      True)
             self._span.set_attribute(SemanticConvention.GEN_AI_USAGE_TOTAL_TOKENS,

@@ -760,21 +763,21 @@ def chat_completions(version, environment, application_name,
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
                            request_model)
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED,
-                           kwargs.get("seed", ""))
+                           str(kwargs.get("seed", "")))
         span.set_attribute(SemanticConvention.SERVER_PORT,
                            server_port)
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-                           kwargs.get("frequency_penalty", 0.0))
+                           str(kwargs.get("frequency_penalty", 0.0)))
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS,
-                           kwargs.get("max_tokens", -1))
+                           str(kwargs.get("max_tokens", -1)))
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY,
-                           kwargs.get("presence_penalty", 0.0))
+                           str(kwargs.get("presence_penalty", 0.0)))
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES,
-                           kwargs.get("stop", []))
+                           str(kwargs.get("stop", [])))
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE,
-                           kwargs.get("temperature", 1.0))
+                           str(kwargs.get("temperature", 1.0)))
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P,
-                           kwargs.get("top_p", 1.0))
+                           str(kwargs.get("top_p", 1.0)))
         span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID,
                            response_dict.get("id"))
         span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL,

@@ -786,7 +789,7 @@ def chat_completions(version, environment, application_name,
         span.set_attribute(SemanticConvention.SERVER_ADDRESS,
                            server_address)
         span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SERVICE_TIER,
-                           kwargs.get("service_tier", "auto"))
+                           str(kwargs.get("service_tier", "auto")))
         span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SERVICE_TIER,
                            response_dict.get('service_tier', 'auto'))
         span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,
openlit/instrumentation/pydantic_ai/__init__.py
ADDED
@@ -0,0 +1,55 @@
+"""Initializer of Auto Instrumentation of Pydantic AI Functions"""
+
+from typing import Collection
+import importlib.metadata
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from wrapt import wrap_function_wrapper
+
+from openlit.instrumentation.pydantic_ai.pydantic_ai import (
+    agent_create, agent_run, async_agent_run
+)
+
+_instruments = ('pydantic-ai >= 0.2.17',)
+
+class PydanticAIInstrumentor(BaseInstrumentor):
+    """
+    An instrumentor for Pydantic AI's client library.
+    """
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        application_name = kwargs.get('application_name', 'default_application')
+        environment = kwargs.get('environment', 'default_environment')
+        tracer = kwargs.get('tracer')
+        metrics = kwargs.get('metrics_dict')
+        pricing_info = kwargs.get('pricing_info', {})
+        capture_message_content = kwargs.get('capture_message_content', False)
+        disable_metrics = kwargs.get('disable_metrics')
+        version = importlib.metadata.version('pydantic-ai')
+
+        wrap_function_wrapper(
+            'pydantic_ai.agent',
+            'Agent.__init__',
+            agent_create(version, environment, application_name,
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            'pydantic_ai.agent',
+            'Agent.run_sync',
+            agent_run(version, environment, application_name,
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            'pydantic_ai.agent',
+            'Agent.run',
+            async_agent_run(version, environment, application_name,
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+    def _uninstrument(self, **kwargs):
+        # Proper uninstrumentation logic to revert patched methods
+        pass
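The instrumentor leans on `wrapt.wrap_function_wrapper`, which replaces a dotted attribute on a module with a proxy that calls your wrapper as `(wrapped, instance, args, kwargs)`. A self-contained sketch of that mechanism, using a stand-in class rather than `pydantic_ai` itself:

```python
from wrapt import wrap_function_wrapper

class Agent:
    def run_sync(self, prompt):
        return f"answer to {prompt!r}"

def traced(wrapped, instance, args, kwargs):
    # wrapped is the original bound method; instance is the Agent object.
    print("span start")
    try:
        return wrapped(*args, **kwargs)
    finally:
        print("span end")

# Patch Agent.run_sync in this module, mirroring the calls above.
wrap_function_wrapper(__name__, 'Agent.run_sync', traced)
print(Agent().run_sync("hi"))
```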
openlit/instrumentation/pydantic_ai/pydantic_ai.py
ADDED
@@ -0,0 +1,51 @@
+"""
+Module for monitoring Pydantic AI API calls.
+"""
+
+from openlit.instrumentation.pydantic_ai.utils import (
+    common_agent_run,
+    common_agent_create
+)
+
+def agent_create(version, environment, application_name,
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+
+    """
+    Generates a telemetry wrapper for GenAI function call
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        response = wrapped(*args, **kwargs)
+        return common_agent_create(wrapped, instance, args, kwargs, tracer,
+            version, environment, application_name,
+            capture_message_content, response=response)
+
+    return wrapper
+
+def agent_run(version, environment, application_name,
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for GenAI function call
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        response = wrapped(*args, **kwargs)
+        return common_agent_run(wrapped, instance, args, kwargs, tracer,
+            version, environment, application_name,
+            capture_message_content, response=response)
+
+    return wrapper
+
+def async_agent_run(version, environment, application_name,
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for GenAI function call
+    """
+
+    async def wrapper(wrapped, instance, args, kwargs):
+        response = await wrapped(*args, **kwargs)
+        return common_agent_run(wrapped, instance, args, kwargs, tracer,
+            version, environment, application_name,
+            capture_message_content, response=response)
+
+    return wrapper
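Each factory above closes over the telemetry configuration and returns a wrapper with the signature wrapt expects; the wrappers invoke the wrapped call first and hand the finished `response` to the common handler, so the span is recorded after the agent run completes. A sketch of wiring one up by hand — the tracer setup and argument values are illustrative:

```python
from opentelemetry import trace
from openlit.instrumentation.pydantic_ai.pydantic_ai import agent_run

wrapper = agent_run(
    version="0.2.17", environment="dev", application_name="demo-app",
    tracer=trace.get_tracer("demo"), pricing_info={},
    capture_message_content=False, metrics=None, disable_metrics=True)
# wrapper(wrapped, instance, args, kwargs) can now be registered with
# wrapt.wrap_function_wrapper, exactly as the package's __init__.py does.
```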
openlit/instrumentation/pydantic_ai/utils.py
ADDED
@@ -0,0 +1,95 @@
+"""
+Pydantic AI OpenTelemetry instrumentation utility functions
+"""
+import logging
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+from opentelemetry.trace import Status, StatusCode, SpanKind
+from openlit.__helpers import (
+    handle_exception
+)
+from openlit.semcov import SemanticConvention
+
+# Initialize logger for logging potential issues and operations
+logger = logging.getLogger(__name__)
+
+def set_span_attributes(span, version, operation_name, environment,
+    application_name, server_address, server_port, request_model, agent_name):
+    """
+    Set common OpenTelemetry span attributes for Pydantic AI operations.
+    """
+
+    # Set Span attributes (OTel Semconv)
+    span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
+    span.set_attribute(SemanticConvention.GEN_AI_OPERATION, operation_name)
+    span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, SemanticConvention.GEN_AI_SYSTEM_PYDANTIC_AI)
+    span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, agent_name)
+    span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
+    span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
+    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
+
+    # Set Span attributes (Extras)
+    span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
+    span.set_attribute(SERVICE_NAME, application_name)
+    span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
+
+def common_agent_run(wrapped, instance, args, kwargs, tracer, version, environment, application_name,
+    capture_message_content, response):
+    """
+    Handle telemetry for Pydantic AI agent run operations.
+    """
+
+    server_address, server_port = instance.model.base_url, 443
+    agent_name = instance.name or "pydantic_agent"
+    request_model = str(instance.model.model_name)
+    span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK} {agent_name}'
+
+    with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+        try:
+            set_span_attributes(span, version, SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK,
+                environment, application_name, server_address, server_port, request_model, agent_name)
+            span.set_attribute(SemanticConvention.GEN_AI_AGENT_DESCRIPTION, str(instance._system_prompts))
+            span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, request_model)
+
+            if capture_message_content:
+                span.add_event(
+                    name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
+                    attributes={
+                        SemanticConvention.GEN_AI_CONTENT_COMPLETION: response.output,
+                    },
+                )
+
+            span.set_status(Status(StatusCode.OK))
+
+            return response
+
+        except Exception as e:
+            handle_exception(span, e)
+            logger.error('Error in trace creation: %s', e)
+            return response
+
+def common_agent_create(wrapped, instance, args, kwargs, tracer, version, environment, application_name,
+    capture_message_content, response):
+    """
+    Handle telemetry for Pydantic AI agent creation operations.
+    """
+
+    server_address, server_port = '127.0.0.1', 80
+    agent_name = kwargs.get("name", "pydantic_agent")
+    span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT} {agent_name}'
+
+    with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+        try:
+            request_model = args[0] or kwargs.get("model", "google-gla:gemini-1.5-flash")
+            set_span_attributes(span, version, SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT,
+                environment, application_name, server_address, server_port, request_model, agent_name)
+            span.set_attribute(SemanticConvention.GEN_AI_AGENT_DESCRIPTION, str(kwargs.get("system_prompt", "")))
+            span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, request_model)
+
+            span.set_status(Status(StatusCode.OK))
+
+            return response
+
+        except Exception as e:
+            handle_exception(span, e)
+            logger.error('Error in trace creation: %s', e)
+            return response
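To see the spans these helpers emit without standing up a collector, the OpenTelemetry SDK's in-memory exporter works well in tests. A test-only sketch — the span name and attribute key below are illustrative stand-ins for what `common_agent_create` produces:

```python
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

exporter = InMemorySpanExporter()
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(exporter))
tracer = provider.get_tracer("pydantic-ai-test")

# Stand-in for a span produced by common_agent_run / common_agent_create.
with tracer.start_as_current_span("create_agent pydantic_agent") as span:
    span.set_attribute("gen_ai.agent.name", "pydantic_agent")

for finished in exporter.get_finished_spans():
    print(finished.name, dict(finished.attributes))
```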
openlit/semcov/__init__.py
CHANGED
@@ -130,6 +130,7 @@ class SemanticConvention:
     GEN_AI_SYSTEM_FIRECRAWL = "firecrawl"
     GEN_AI_SYSTEM_LETTA = "letta"
     GEN_AI_SYSTEM_TOGETHER = "together"
+    GEN_AI_SYSTEM_PYDANTIC_AI = "pydantic_ai"

     # GenAI Request Attributes (Extra)
     GEN_AI_REQUEST_IS_STREAM = "gen_ai.request.is_stream"
{openlit-1.33.23.dist-info → openlit-1.34.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openlit
-Version: 1.33.23
+Version: 1.34.0
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 License: Apache-2.0
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
{openlit-1.33.23.dist-info → openlit-1.34.0.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 openlit/__helpers.py,sha256=sg0EGJGC_OlZePR84cLK77l_lZRBPJwdjWjq_RuaYS0,11444
-openlit/__init__.py,sha256=
+openlit/__init__.py,sha256=ris6-GY0ePSbK_jvawHTXymGClVF7yeKdIT95IRBl18,24086
 openlit/evals/__init__.py,sha256=nJe99nuLo1b5rf7pt9U9BCdSDedzbVi2Fj96cgl7msM,380
 openlit/evals/all.py,sha256=oWrue3PotE-rB5WePG3MRYSA-ro6WivkclSHjYlAqGs,7154
 openlit/evals/bias_detection.py,sha256=mCdsfK7x1vX7S3psC3g641IMlZ-7df3h-V6eiICj5N8,8154

@@ -96,8 +96,8 @@ openlit/instrumentation/ollama/async_ollama.py,sha256=zJPDr2ROh1nvFGoxgdTbe04Zr1
 openlit/instrumentation/ollama/ollama.py,sha256=MNUltiP9XVT4azmO_-E2vjhFaoHQyJ0Z6c-HnB0_jCE,6563
 openlit/instrumentation/ollama/utils.py,sha256=41uvYaYkGwWfRyHYqhOOwrFy6cMzBlG1urJYUat9Q24,14819
 openlit/instrumentation/openai/__init__.py,sha256=FiL4OHDhs957spa3k9sNC_VLt0-txtwbnujQwnevQ5I,5564
-openlit/instrumentation/openai/async_openai.py,sha256=
-openlit/instrumentation/openai/openai.py,sha256=
+openlit/instrumentation/openai/async_openai.py,sha256=gxA9Fs_b0hsOlJh8F55zi0TqgarJUlZA6eK1-ghvy90,71945
+openlit/instrumentation/openai/openai.py,sha256=Gky-NPUhjXhGOG4nWKkuKGTEKWJSgebzHb5dmqJp7fU,71754
 openlit/instrumentation/openai_agents/__init__.py,sha256=tRTSIrUtkXc_lfQnVanXmQLd2Sy9RqBNTHF5FhhZx7o,1530
 openlit/instrumentation/openai_agents/openai_agents.py,sha256=kRWPgjofcOviMi3w7CsRvJO3SCjqPmuq-PM800vIM7g,2678
 openlit/instrumentation/phidata/__init__.py,sha256=tqls5-UI6FzbjxYgq_qqAfALhWJm8dHn2NtgqiQA4f8,1557

@@ -106,6 +106,9 @@ openlit/instrumentation/pinecone/__init__.py,sha256=0guSEPmObaZiOF8yHExpOGY-qW_e
 openlit/instrumentation/pinecone/pinecone.py,sha256=7hVUlC0HOj0yQyvLasfdb6kS46hRJQdoSRzZQ4ixIkk,8850
 openlit/instrumentation/premai/__init__.py,sha256=uyw3sn5iHuO1Clg7iLs6LYpOWg1ahJp_orb2tySshZE,1836
 openlit/instrumentation/premai/premai.py,sha256=cM7FyMvVD0I0SfM826uFm6bcBOBQpkMSM_UBGG-y7BQ,28046
+openlit/instrumentation/pydantic_ai/__init__.py,sha256=mq52QanFI4xDx6JK-qW5yzhFPXwznJqIYsuxRoBA2Xg,2023
+openlit/instrumentation/pydantic_ai/pydantic_ai.py,sha256=2F2hrowGqcPjTDLG9IeLY8OO-lXZKhLSU93XtZ3tt5A,1868
+openlit/instrumentation/pydantic_ai/utils.py,sha256=b0TqhSDnRqkPdM_qsOgMuXT3lwTvHzMYpaBv2qibiVo,4307
 openlit/instrumentation/qdrant/__init__.py,sha256=5prYH46yQt2hSA5zgg7kKM6P_F_7s9OQtfRE_lqsaVc,8970
 openlit/instrumentation/qdrant/async_qdrant.py,sha256=dwMQx8bI4Lp8Tgze87esIdVMOffbQcK80lKNLjxsNOU,15263
 openlit/instrumentation/qdrant/qdrant.py,sha256=pafjlAzMPzYLRYFfTtWXsLKYVQls-grkHVO3YmFuNPg,15689

@@ -126,8 +129,8 @@ openlit/instrumentation/vllm/vllm.py,sha256=FxDIR4WH1VySivi0wop4E1DBo2HXyCr8nZ9x
 openlit/otel/events.py,sha256=VrMjTpvnLtYRBHCiFwJojTQqqNpRCxoD4yJYeQrtPsk,3560
 openlit/otel/metrics.py,sha256=GM2PDloBGRhBTkHHkYaqmOwIAQkY124ZhW4sEqW1Fgk,7086
 openlit/otel/tracing.py,sha256=tjV2bEbEDPUB1Z46gE-UsJsb04sRdFrfbhIDkxViZc0,3103
-openlit/semcov/__init__.py,sha256=
-openlit-1.
-openlit-1.
-openlit-1.
-openlit-1.
+openlit/semcov/__init__.py,sha256=ptyo37PY-FHDx_PShEvbdns71cD4YvvXw15bCRXKCKM,13461
+openlit-1.34.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.34.0.dist-info/METADATA,sha256=RCozi6Py2qG7G6-x9NlC7T9VYy74W1Ochq2rFKl4PgE,23469
+openlit-1.34.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+openlit-1.34.0.dist-info/RECORD,,
{openlit-1.33.23.dist-info → openlit-1.34.0.dist-info}/LICENSE
File without changes

{openlit-1.33.23.dist-info → openlit-1.34.0.dist-info}/WHEEL
File without changes