openlit 1.34.20__py3-none-any.whl → 1.34.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/__helpers.py +40 -0
- openlit/__init__.py +3 -0
- openlit/instrumentation/ag2/__init__.py +22 -18
- openlit/instrumentation/ag2/ag2.py +75 -124
- openlit/instrumentation/ag2/async_ag2.py +114 -0
- openlit/instrumentation/ag2/utils.py +175 -0
- openlit/instrumentation/langchain/__init__.py +11 -35
- openlit/instrumentation/langchain/async_langchain.py +51 -337
- openlit/instrumentation/langchain/langchain.py +50 -310
- openlit/instrumentation/langchain/utils.py +252 -0
- openlit/instrumentation/langchain_community/__init__.py +74 -0
- openlit/instrumentation/langchain_community/async_langchain_community.py +49 -0
- openlit/instrumentation/langchain_community/langchain_community.py +49 -0
- openlit/instrumentation/langchain_community/utils.py +69 -0
- openlit/instrumentation/openai/__init__.py +63 -68
- openlit/instrumentation/openai/async_openai.py +203 -1277
- openlit/instrumentation/openai/openai.py +200 -1274
- openlit/instrumentation/openai/utils.py +794 -0
- openlit/instrumentation/vertexai/__init__.py +18 -23
- openlit/instrumentation/vertexai/async_vertexai.py +46 -364
- openlit/instrumentation/vertexai/utils.py +204 -0
- openlit/instrumentation/vertexai/vertexai.py +46 -364
- {openlit-1.34.20.dist-info → openlit-1.34.23.dist-info}/METADATA +1 -1
- {openlit-1.34.20.dist-info → openlit-1.34.23.dist-info}/RECORD +26 -17
- {openlit-1.34.20.dist-info → openlit-1.34.23.dist-info}/LICENSE +0 -0
- {openlit-1.34.20.dist-info → openlit-1.34.23.dist-info}/WHEEL +0 -0
@@ -0,0 +1,74 @@
|
|
1
|
+
"""Initializer of Auto Instrumentation of LangChain Community Functions"""
|
2
|
+
from typing import Collection
|
3
|
+
import importlib.metadata
|
4
|
+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
5
|
+
from wrapt import wrap_function_wrapper
|
6
|
+
|
7
|
+
from openlit.instrumentation.langchain_community.langchain_community import (
|
8
|
+
general_wrap,
|
9
|
+
)
|
10
|
+
from openlit.instrumentation.langchain_community.async_langchain_community import (
|
11
|
+
async_general_wrap,
|
12
|
+
)
|
13
|
+
|
14
|
+
_instruments = ("langchain-community >= 0.2.0",)

# Registry of LangChain Community entry points to instrument.
# Each entry names the module path ("package"), the dotted attribute to
# patch ("object"), the endpoint label recorded on telemetry spans
# ("endpoint"), and the wrapper factory applied via wrapt ("wrapper").
WRAPPED_METHODS = [
    {
        # Synchronous document loading
        "package": "langchain_community.document_loaders.base",
        "object": "BaseLoader.load",
        "endpoint": "langchain_community.retrieve.load",
        "wrapper": general_wrap,
    },
    {
        # Asynchronous document loading (shares the sync endpoint label)
        "package": "langchain_community.document_loaders.base",
        "object": "BaseLoader.aload",
        "endpoint": "langchain_community.retrieve.load",
        "wrapper": async_general_wrap,
    },
    {
        # NOTE(review): text splitters live in the separate
        # langchain_text_splitters distribution — assumed importable
        # whenever langchain-community is present; verify on install.
        "package": "langchain_text_splitters.base",
        "object": "TextSplitter.split_documents",
        "endpoint": "langchain_community.retrieve.split_documents",
        "wrapper": general_wrap,
    },
    {
        "package": "langchain_text_splitters.base",
        "object": "TextSplitter.create_documents",
        "endpoint": "langchain_community.retrieve.create_documents",
        "wrapper": general_wrap,
    },
]
|
42
|
+
|
43
|
+
class LangChainCommunityInstrumentor(BaseInstrumentor):
    """
    Auto-instrumentor for the LangChain Community client library.

    Walks ``WRAPPED_METHODS`` and installs a telemetry wrapper around
    each registered method via ``wrapt.wrap_function_wrapper``.
    """

    def instrumentation_dependencies(self) -> Collection[str]:
        """Return the package specifiers required for activation."""
        return _instruments

    def _instrument(self, **kwargs):
        """Patch every entry point listed in ``WRAPPED_METHODS``."""
        version = importlib.metadata.version("langchain-community")

        # Telemetry configuration handed down by the caller.
        environment = kwargs.get("environment", "default")
        application_name = kwargs.get("application_name", "default")
        tracer = kwargs.get("tracer")
        pricing_info = kwargs.get("pricing_info", {})
        capture_message_content = kwargs.get("capture_message_content", False)
        metrics = kwargs.get("metrics_dict")
        disable_metrics = kwargs.get("disable_metrics")

        for method_config in WRAPPED_METHODS:
            # Build the telemetry wrapper for this endpoint, then patch it in.
            telemetry_wrapper = method_config.get("wrapper")(
                method_config.get("endpoint"), version, environment,
                application_name, tracer, pricing_info,
                capture_message_content, metrics, disable_metrics,
            )
            wrap_function_wrapper(
                method_config.get("package"),
                method_config.get("object"),
                telemetry_wrapper,
            )

    def _uninstrument(self, **kwargs):
        # Unwrapping is intentionally not implemented.
        pass
|
@@ -0,0 +1,49 @@
|
|
1
|
+
"""
|
2
|
+
Module for monitoring async LangChain Community operations.
|
3
|
+
"""
|
4
|
+
|
5
|
+
import logging
|
6
|
+
from opentelemetry import trace
|
7
|
+
from opentelemetry.trace import Status, StatusCode
|
8
|
+
|
9
|
+
from openlit.__helpers import handle_exception
|
10
|
+
from openlit.instrumentation.langchain_community.utils import process_general_response
|
11
|
+
|
12
|
+
# Module-level logger for async LangChain Community instrumentation events
logger = logging.getLogger(__name__)
|
14
|
+
|
15
|
+
def async_general_wrap(gen_ai_endpoint, version, environment, application_name, tracer, pricing_info,
    capture_message_content, metrics, disable_metrics):
    """
    Generates a telemetry wrapper for async GenAI operations.

    Args:
        gen_ai_endpoint: Endpoint label recorded on the span
            (e.g. "langchain_community.retrieve.load").
        version: Instrumented package version.
        environment: Deployment environment name.
        application_name: Application name for telemetry attribution.
        tracer: OpenTelemetry tracer used to open spans.
        pricing_info: Pricing data (unused for community operations).
        capture_message_content: Content-capture flag (unused here).
        metrics: Metrics dictionary (unused here).
        disable_metrics: Metrics toggle (unused here).

    Returns:
        An async wrapt-compatible wrapper coroutine function.
    """

    async def wrapper(wrapped, instance, args, kwargs):
        """
        Wraps the GenAI operation call.
        """

        # Prepare server address and port
        server_address = "127.0.0.1"
        server_port = "80"

        # Get the parent span from the tracer
        with tracer.start_as_current_span(gen_ai_endpoint, kind=trace.SpanKind.CLIENT) as span:
            try:
                # Call the original async function
                response = await wrapped(*args, **kwargs)

                # Process the response using the utility function
                response = process_general_response(
                    response, gen_ai_endpoint, server_port, server_address,
                    environment, application_name, span, version
                )

                span.set_status(Status(StatusCode.OK))

            except Exception as e:
                handle_exception(span, e)
                # BUGFIX: re-raise so the caller sees the original failure.
                # Previously execution fell through to `return response`,
                # raising UnboundLocalError and masking the real exception.
                raise

            return response

    return wrapper
|
@@ -0,0 +1,49 @@
|
|
1
|
+
"""
|
2
|
+
Module for monitoring LangChain Community operations.
|
3
|
+
"""
|
4
|
+
|
5
|
+
import logging
|
6
|
+
from opentelemetry import trace
|
7
|
+
from opentelemetry.trace import Status, StatusCode
|
8
|
+
|
9
|
+
from openlit.__helpers import handle_exception
|
10
|
+
from openlit.instrumentation.langchain_community.utils import process_general_response
|
11
|
+
|
12
|
+
# Module-level logger for LangChain Community instrumentation events
logger = logging.getLogger(__name__)
|
14
|
+
|
15
|
+
def general_wrap(gen_ai_endpoint, version, environment, application_name, tracer, pricing_info,
    capture_message_content, metrics, disable_metrics):
    """
    Generates a telemetry wrapper for GenAI operations.

    Args:
        gen_ai_endpoint: Endpoint label recorded on the span
            (e.g. "langchain_community.retrieve.load").
        version: Instrumented package version.
        environment: Deployment environment name.
        application_name: Application name for telemetry attribution.
        tracer: OpenTelemetry tracer used to open spans.
        pricing_info: Pricing data (unused for community operations).
        capture_message_content: Content-capture flag (unused here).
        metrics: Metrics dictionary (unused here).
        disable_metrics: Metrics toggle (unused here).

    Returns:
        A wrapt-compatible wrapper function.
    """

    def wrapper(wrapped, instance, args, kwargs):
        """
        Wraps the GenAI operation call.
        """

        # Prepare server address and port
        server_address = "127.0.0.1"
        server_port = "80"

        # Get the parent span from the tracer
        with tracer.start_as_current_span(gen_ai_endpoint, kind=trace.SpanKind.CLIENT) as span:
            try:
                # Call the original function
                response = wrapped(*args, **kwargs)

                # Process the response using the utility function
                response = process_general_response(
                    response, gen_ai_endpoint, server_port, server_address,
                    environment, application_name, span, version
                )

                span.set_status(Status(StatusCode.OK))

            except Exception as e:
                handle_exception(span, e)
                # BUGFIX: re-raise so the caller sees the original failure.
                # Previously execution fell through to `return response`,
                # raising UnboundLocalError and masking the real exception.
                raise

            return response

    return wrapper
|
@@ -0,0 +1,69 @@
|
|
1
|
+
"""
|
2
|
+
Utility functions for LangChain Community instrumentation.
|
3
|
+
"""
|
4
|
+
|
5
|
+
from opentelemetry.trace import Status, StatusCode
|
6
|
+
from openlit.semcov import SemanticConvention
|
7
|
+
|
8
|
+
def process_general_response(response, gen_ai_endpoint, server_port, server_address,
    environment, application_name, span, version="1.0.0"):
    """
    Process general LangChain Community operations (document loading,
    text splitting) and generate telemetry.

    Args:
        response: The response object from the LangChain Community operation.
        gen_ai_endpoint: The endpoint identifier for the operation.
        server_port: Server port reported by the caller (accepted for
            interface symmetry; not recorded on the span here).
        server_address: Server address reported by the caller (also unused).
        environment: Environment name.
        application_name: Application name.
        span: OpenTelemetry span to annotate.
        version: Version string.

    Returns:
        The original response object, unchanged.
    """

    # Framework-level attributes stamped on every community operation span
    span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, SemanticConvention.GEN_AI_SYSTEM_LANGCHAIN)
    span.set_attribute(SemanticConvention.GEN_AI_ENDPOINT, gen_ai_endpoint)
    span.set_attribute(SemanticConvention.GEN_AI_OPERATION, SemanticConvention.GEN_AI_OPERATION_TYPE_FRAMEWORK)
    span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
    span.set_attribute(SemanticConvention.GEN_AI_ENVIRONMENT, environment)
    span.set_attribute(SemanticConvention.GEN_AI_APPLICATION_NAME, application_name)

    # Try to extract source information for document loading operations
    if gen_ai_endpoint and "retrieve.load" in gen_ai_endpoint:
        try:
            # NOTE(review): len() on a lazy iterator raises TypeError, which
            # the handler below swallows — assumes loaders return sequences.
            if hasattr(response, "__iter__") and len(response) > 0:
                # For document loaders, try to get source from first document
                first_doc = response[0]
                if hasattr(first_doc, "metadata") and isinstance(first_doc.metadata, dict):
                    source = first_doc.metadata.get("source", "unknown")
                    span.set_attribute(SemanticConvention.GEN_AI_RETRIEVAL_SOURCE, source)

                # Count number of documents loaded
                span.set_attribute("gen_ai.retrieval.documents.count", len(response))
        except (AttributeError, KeyError, IndexError, TypeError):
            # If we can't extract metadata, just continue without it
            pass

    # For text splitting operations
    elif gen_ai_endpoint and ("split_documents" in gen_ai_endpoint or "create_documents" in gen_ai_endpoint):
        try:
            if hasattr(response, "__iter__") and len(response) > 0:
                # Count number of text chunks created
                span.set_attribute("gen_ai.text_splitter.chunks.count", len(response))

                # Try to get average chunk size (integer division, chars/chunk)
                total_chars = sum(len(doc.page_content) for doc in response if hasattr(doc, "page_content"))
                if total_chars > 0:
                    avg_chunk_size = total_chars // len(response)
                    span.set_attribute("gen_ai.text_splitter.avg_chunk_size", avg_chunk_size)
        except (AttributeError, TypeError):
            # If we can't extract chunk information, just continue without it
            pass

    span.set_status(Status(StatusCode.OK))

    return response
|
@@ -1,147 +1,142 @@
|
|
1
|
-
# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
|
2
1
|
"""Initializer of Auto Instrumentation of OpenAI Functions"""
|
3
2
|
from typing import Collection
|
4
3
|
import importlib.metadata
|
5
4
|
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
6
5
|
from wrapt import wrap_function_wrapper
|
7
6
|
|
8
|
-
from openlit.instrumentation.openai.openai import
|
9
|
-
|
10
|
-
|
11
|
-
|
12
|
-
from openlit.instrumentation.openai.async_openai import
|
13
|
-
|
7
|
+
from openlit.instrumentation.openai.openai import (
|
8
|
+
chat_completions, embedding, responses, chat_completions_parse,
|
9
|
+
image_generate, image_variatons, audio_create
|
10
|
+
)
|
11
|
+
from openlit.instrumentation.openai.async_openai import (
|
12
|
+
async_chat_completions, async_embedding, async_chat_completions_parse,
|
13
|
+
async_image_generate, async_image_variations, async_audio_create, async_responses
|
14
|
+
)
|
14
15
|
|
15
16
|
# Minimum OpenAI SDK version this instrumentation supports
_instruments = ("openai >= 1.92.0",)
|
16
17
|
|
17
18
|
class OpenAIInstrumentor(BaseInstrumentor):
    """
    An instrumentor for OpenAI client library.

    Patches the sync and async OpenAI client entry points (chat
    completions, structured-output parse, responses, embeddings, image
    generation/variation, and speech synthesis) with telemetry wrappers.
    """

    def instrumentation_dependencies(self) -> Collection[str]:
        """Return the package specifiers required for activation."""
        return _instruments

    def _instrument(self, **kwargs):
        """Install telemetry wrappers on all supported OpenAI methods."""
        version = importlib.metadata.version("openai")
        environment = kwargs.get("environment", "default")
        application_name = kwargs.get("application_name", "default")
        tracer = kwargs.get("tracer")
        pricing_info = kwargs.get("pricing_info", {})
        capture_message_content = kwargs.get("capture_message_content", False)
        metrics = kwargs.get("metrics_dict")
        disable_metrics = kwargs.get("disable_metrics")

        def _wrap(package, method, factory):
            # Every wrapper factory takes the identical configuration tail;
            # centralizing the call keeps the registrations below flat and
            # removes 16 copies of the same argument list.
            wrap_function_wrapper(
                package,
                method,
                factory(version, environment, application_name,
                    tracer, pricing_info, capture_message_content, metrics, disable_metrics),
            )

        # chat completions
        _wrap("openai.resources.chat.completions", "Completions.create", chat_completions)
        _wrap("openai.resources.chat.completions", "AsyncCompletions.create", async_chat_completions)

        # chat completions parse
        _wrap("openai.resources.chat.completions", "Completions.parse", chat_completions_parse)
        _wrap("openai.resources.chat.completions", "AsyncCompletions.parse", async_chat_completions_parse)

        # responses
        _wrap("openai.resources.responses.responses", "Responses.create", responses)
        _wrap("openai.resources.responses.responses", "AsyncResponses.create", async_responses)

        # embeddings
        _wrap("openai.resources.embeddings", "Embeddings.create", embedding)
        _wrap("openai.resources.embeddings", "AsyncEmbeddings.create", async_embedding)

        # image generation
        _wrap("openai.resources.images", "Images.generate", image_generate)
        _wrap("openai.resources.images", "AsyncImages.generate", async_image_generate)

        # image variations (the `image_variatons` spelling matches the
        # factory name exported by openlit.instrumentation.openai.openai)
        _wrap("openai.resources.images", "Images.create_variation", image_variatons)
        _wrap("openai.resources.images", "AsyncImages.create_variation", async_image_variations)

        # audio generation
        _wrap("openai.resources.audio.speech", "Speech.create", audio_create)
        _wrap("openai.resources.audio.speech", "AsyncSpeech.create", async_audio_create)

    def _uninstrument(self, **kwargs):
        # Unwrapping is intentionally a no-op.
        pass
|