openlit 1.34.22__py3-none-any.whl → 1.34.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/__helpers.py +48 -3
- openlit/__init__.py +3 -0
- openlit/instrumentation/ag2/__init__.py +22 -18
- openlit/instrumentation/ag2/ag2.py +75 -124
- openlit/instrumentation/ag2/async_ag2.py +114 -0
- openlit/instrumentation/ag2/utils.py +175 -0
- openlit/instrumentation/langchain/__init__.py +11 -35
- openlit/instrumentation/langchain/async_langchain.py +51 -337
- openlit/instrumentation/langchain/langchain.py +50 -310
- openlit/instrumentation/langchain/utils.py +252 -0
- openlit/instrumentation/langchain_community/__init__.py +74 -0
- openlit/instrumentation/langchain_community/async_langchain_community.py +49 -0
- openlit/instrumentation/langchain_community/langchain_community.py +49 -0
- openlit/instrumentation/langchain_community/utils.py +69 -0
- openlit/instrumentation/pinecone/__init__.py +128 -20
- openlit/instrumentation/pinecone/async_pinecone.py +59 -0
- openlit/instrumentation/pinecone/pinecone.py +36 -150
- openlit/instrumentation/pinecone/utils.py +182 -0
- openlit/semcov/__init__.py +13 -1
- {openlit-1.34.22.dist-info → openlit-1.34.24.dist-info}/METADATA +1 -1
- {openlit-1.34.22.dist-info → openlit-1.34.24.dist-info}/RECORD +23 -14
- {openlit-1.34.22.dist-info → openlit-1.34.24.dist-info}/LICENSE +0 -0
- {openlit-1.34.22.dist-info → openlit-1.34.24.dist-info}/WHEEL +0 -0
openlit/instrumentation/langchain_community/__init__.py (new file)

```diff
@@ -0,0 +1,74 @@
+"""Initializer of Auto Instrumentation of LangChain Community Functions"""
+from typing import Collection
+import importlib.metadata
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from wrapt import wrap_function_wrapper
+
+from openlit.instrumentation.langchain_community.langchain_community import (
+    general_wrap,
+)
+from openlit.instrumentation.langchain_community.async_langchain_community import (
+    async_general_wrap,
+)
+
+_instruments = ("langchain-community >= 0.2.0",)
+
+WRAPPED_METHODS = [
+    {
+        "package": "langchain_community.document_loaders.base",
+        "object": "BaseLoader.load",
+        "endpoint": "langchain_community.retrieve.load",
+        "wrapper": general_wrap,
+    },
+    {
+        "package": "langchain_community.document_loaders.base",
+        "object": "BaseLoader.aload",
+        "endpoint": "langchain_community.retrieve.load",
+        "wrapper": async_general_wrap,
+    },
+    {
+        "package": "langchain_text_splitters.base",
+        "object": "TextSplitter.split_documents",
+        "endpoint": "langchain_community.retrieve.split_documents",
+        "wrapper": general_wrap,
+    },
+    {
+        "package": "langchain_text_splitters.base",
+        "object": "TextSplitter.create_documents",
+        "endpoint": "langchain_community.retrieve.create_documents",
+        "wrapper": general_wrap,
+    },
+]
+
+class LangChainCommunityInstrumentor(BaseInstrumentor):
+    """
+    An instrumentor for LangChain Community client library.
+    """
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        version = importlib.metadata.version("langchain-community")
+        environment = kwargs.get("environment", "default")
+        application_name = kwargs.get("application_name", "default")
+        tracer = kwargs.get("tracer")
+        pricing_info = kwargs.get("pricing_info", {})
+        capture_message_content = kwargs.get("capture_message_content", False)
+        metrics = kwargs.get("metrics_dict")
+        disable_metrics = kwargs.get("disable_metrics")
+
+        for wrapped_method in WRAPPED_METHODS:
+            wrap_package = wrapped_method.get("package")
+            wrap_object = wrapped_method.get("object")
+            gen_ai_endpoint = wrapped_method.get("endpoint")
+            wrapper = wrapped_method.get("wrapper")
+            wrap_function_wrapper(
+                wrap_package,
+                wrap_object,
+                wrapper(gen_ai_endpoint, version, environment, application_name,
+                        tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            )
+
+    def _uninstrument(self, **kwargs):
+        pass
```
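The instrumentor above follows the standard OpenTelemetry `BaseInstrumentor` contract, so it can be driven directly once a tracer exists. The sketch below is illustrative only and assumes openlit 1.34.24, opentelemetry-sdk, and langchain-community are installed; in normal use `openlit.init()` performs this wiring and supplies the same kwargs that `_instrument` reads.

```python
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
from openlit.instrumentation.langchain_community import LangChainCommunityInstrumentor

# Minimal tracer setup that prints finished spans to stdout
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

# These kwargs map onto the kwargs.get(...) calls in _instrument above
LangChainCommunityInstrumentor().instrument(
    tracer=trace.get_tracer(__name__),
    environment="dev",              # falls back to "default" if omitted
    application_name="demo-app",
    capture_message_content=False,
)

# From here on, BaseLoader.load/aload and TextSplitter.split_documents/
# create_documents calls emit spans named after the "endpoint" strings above.
```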
openlit/instrumentation/langchain_community/async_langchain_community.py (new file)

```diff
@@ -0,0 +1,49 @@
+"""
+Module for monitoring async LangChain Community operations.
+"""
+
+import logging
+from opentelemetry import trace
+from opentelemetry.trace import Status, StatusCode
+
+from openlit.__helpers import handle_exception
+from openlit.instrumentation.langchain_community.utils import process_general_response
+
+# Initialize logger for LangChain Community instrumentation
+logger = logging.getLogger(__name__)
+
+def async_general_wrap(gen_ai_endpoint, version, environment, application_name, tracer, pricing_info,
+                       capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for GenAI operations.
+    """
+
+    async def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the GenAI operation call.
+        """
+
+        # Prepare server address and port
+        server_address = "127.0.0.1"
+        server_port = "80"
+
+        # Get the parent span from the tracer
+        with tracer.start_as_current_span(gen_ai_endpoint, kind=trace.SpanKind.CLIENT) as span:
+            try:
+                # Call the original async function
+                response = await wrapped(*args, **kwargs)
+
+                # Process the response using the utility function
+                response = process_general_response(
+                    response, gen_ai_endpoint, server_port, server_address,
+                    environment, application_name, span, version
+                )
+
+                span.set_status(Status(StatusCode.OK))
+
+            except Exception as e:
+                handle_exception(span, e)
+
+            return response
+
+    return wrapper
```
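The reason a separate `async_general_wrap` exists: `BaseLoader.aload` is a coroutine function, so the replacement that wrapt installs must itself be `async def` and await the original. A self-contained toy illustrating just that mechanic (no openlit or LangChain involved; `fetch_docs` and `tracing_wrapper` are made-up names):

```python
import asyncio
from wrapt import wrap_function_wrapper

async def fetch_docs():                        # stand-in for BaseLoader.aload
    await asyncio.sleep(0)
    return ["doc-1", "doc-2"]

async def tracing_wrapper(wrapped, instance, args, kwargs):
    # wrapt always calls wrappers with (wrapped, instance, args, kwargs)
    print("span start")
    try:
        return await wrapped(*args, **kwargs)  # await the original coroutine
    finally:
        print("span end")

wrap_function_wrapper("__main__", "fetch_docs", tracing_wrapper)
print(asyncio.run(fetch_docs()))               # span start / span end / ['doc-1', 'doc-2']
```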
openlit/instrumentation/langchain_community/langchain_community.py (new file)

```diff
@@ -0,0 +1,49 @@
+"""
+Module for monitoring LangChain Community operations.
+"""
+
+import logging
+from opentelemetry import trace
+from opentelemetry.trace import Status, StatusCode
+
+from openlit.__helpers import handle_exception
+from openlit.instrumentation.langchain_community.utils import process_general_response
+
+# Initialize logger for LangChain Community instrumentation
+logger = logging.getLogger(__name__)
+
+def general_wrap(gen_ai_endpoint, version, environment, application_name, tracer, pricing_info,
+                 capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for GenAI operations.
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the GenAI operation call.
+        """
+
+        # Prepare server address and port
+        server_address = "127.0.0.1"
+        server_port = "80"
+
+        # Get the parent span from the tracer
+        with tracer.start_as_current_span(gen_ai_endpoint, kind=trace.SpanKind.CLIENT) as span:
+            try:
+                # Call the original function
+                response = wrapped(*args, **kwargs)
+
+                # Process the response using the utility function
+                response = process_general_response(
+                    response, gen_ai_endpoint, server_port, server_address,
+                    environment, application_name, span, version
+                )
+
+                span.set_status(Status(StatusCode.OK))
+
+            except Exception as e:
+                handle_exception(span, e)
+
+            return response
+
+    return wrapper
```
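Since `general_wrap` is an ordinary closure factory, it can be exercised in isolation with an in-memory exporter to see exactly what span it produces. A test-style sketch, assuming openlit 1.34.24 and opentelemetry-sdk are installed; the lambda below is a stand-in for `BaseLoader.load`:

```python
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
from openlit.instrumentation.langchain_community.langchain_community import general_wrap

exporter = InMemorySpanExporter()
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(exporter))
tracer = provider.get_tracer("test")

# Same positional arguments that the instrumentor passes in __init__.py
wrapper = general_wrap("langchain_community.retrieve.load", "0.2.0", "dev", "demo-app",
                       tracer, {}, False, None, True)

# wrapt-style invocation: (wrapped, instance, args, kwargs)
result = wrapper(lambda: [], None, (), {})   # empty document list comes back unchanged
span = exporter.get_finished_spans()[0]
print(span.name)                             # langchain_community.retrieve.load
print(span.kind)                             # SpanKind.CLIENT
```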
openlit/instrumentation/langchain_community/utils.py (new file)

```diff
@@ -0,0 +1,69 @@
+"""
+Utility functions for LangChain Community instrumentation.
+"""
+
+from opentelemetry.trace import Status, StatusCode
+from openlit.semcov import SemanticConvention
+
+def process_general_response(response, gen_ai_endpoint, server_port, server_address,
+                             environment, application_name, span, version="1.0.0"):
+    """
+    Process general LangChain Community operations (document loading, text splitting) and generate telemetry.
+
+    Args:
+        response: The response object from the LangChain Community operation
+        gen_ai_endpoint: The endpoint identifier for the operation
+        server_port: Server port (empty for community operations)
+        server_address: Server address (empty for community operations)
+        environment: Environment name
+        application_name: Application name
+        span: OpenTelemetry span
+        version: Version string
+
+    Returns:
+        The original response object
+    """
+
+    # Set span attributes for general operations
+    span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, SemanticConvention.GEN_AI_SYSTEM_LANGCHAIN)
+    span.set_attribute(SemanticConvention.GEN_AI_ENDPOINT, gen_ai_endpoint)
+    span.set_attribute(SemanticConvention.GEN_AI_OPERATION, SemanticConvention.GEN_AI_OPERATION_TYPE_FRAMEWORK)
+    span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
+    span.set_attribute(SemanticConvention.GEN_AI_ENVIRONMENT, environment)
+    span.set_attribute(SemanticConvention.GEN_AI_APPLICATION_NAME, application_name)
+
+    # Try to extract source information for document loading operations
+    if gen_ai_endpoint and "retrieve.load" in gen_ai_endpoint:
+        try:
+            if hasattr(response, "__iter__") and len(response) > 0:
+                # For document loaders, try to get source from first document
+                first_doc = response[0]
+                if hasattr(first_doc, "metadata") and isinstance(first_doc.metadata, dict):
+                    source = first_doc.metadata.get("source", "unknown")
+                    span.set_attribute(SemanticConvention.GEN_AI_RETRIEVAL_SOURCE, source)
+
+                # Count number of documents loaded
+                span.set_attribute("gen_ai.retrieval.documents.count", len(response))
+        except (AttributeError, KeyError, IndexError, TypeError):
+            # If we cant extract metadata, just continue without it
+            pass
+
+    # For text splitting operations
+    elif gen_ai_endpoint and ("split_documents" in gen_ai_endpoint or "create_documents" in gen_ai_endpoint):
+        try:
+            if hasattr(response, "__iter__") and len(response) > 0:
+                # Count number of text chunks created
+                span.set_attribute("gen_ai.text_splitter.chunks.count", len(response))
+
+                # Try to get average chunk size
+                total_chars = sum(len(doc.page_content) for doc in response if hasattr(doc, "page_content"))
+                if total_chars > 0:
+                    avg_chunk_size = total_chars // len(response)
+                    span.set_attribute("gen_ai.text_splitter.avg_chunk_size", avg_chunk_size)
+        except (AttributeError, TypeError):
+            # If we cant extract chunk information, just continue without it
+            pass
+
+    span.set_status(Status(StatusCode.OK))
+
+    return response
```
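The text-splitter branch above only does simple counting arithmetic; a worked example with stand-in documents (the `Doc` class below is a toy, not LangChain's `Document`): three chunks of 120, 95 and 145 characters yield `gen_ai.text_splitter.chunks.count = 3` and `gen_ai.text_splitter.avg_chunk_size = (120 + 95 + 145) // 3 = 120`.

```python
class Doc:                                   # toy stand-in for langchain's Document
    def __init__(self, page_content, metadata=None):
        self.page_content = page_content
        self.metadata = metadata or {}

chunks = [Doc("a" * 120), Doc("b" * 95), Doc("c" * 145)]

# Mirrors the attribute computation in process_general_response above
chunk_count = len(chunks)
total_chars = sum(len(d.page_content) for d in chunks if hasattr(d, "page_content"))
avg_chunk_size = total_chars // chunk_count
print(chunk_count, avg_chunk_size)           # 3 120
```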
openlit/instrumentation/pinecone/__init__.py

```diff
@@ -1,66 +1,174 @@
-# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
 """Initializer of Auto Instrumentation of Pinecone Functions"""
+
 from typing import Collection
 import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper
 
 from openlit.instrumentation.pinecone.pinecone import general_wrap
+from openlit.instrumentation.pinecone.async_pinecone import async_general_wrap
 
-_instruments = ("pinecone
+_instruments = ("pinecone >= 7.3.0",)
 
 class PineconeInstrumentor(BaseInstrumentor):
-    """
+    """
+    An instrumentor for Pinecone's client library.
+    """
 
     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments
 
     def _instrument(self, **kwargs):
-
-        environment = kwargs.get("environment")
+        version = importlib.metadata.version("pinecone")
+        environment = kwargs.get("environment", "default")
+        application_name = kwargs.get("application_name", "default")
         tracer = kwargs.get("tracer")
+        pricing_info = kwargs.get("pricing_info", {})
+        capture_message_content = kwargs.get("capture_message_content", False)
         metrics = kwargs.get("metrics_dict")
-        pricing_info = kwargs.get("pricing_info")
-        capture_message_content = kwargs.get("capture_message_content")
         disable_metrics = kwargs.get("disable_metrics")
-        version = importlib.metadata.version("pinecone-client")
 
+        # Sync operations
         wrap_function_wrapper(
-            "pinecone.
+            "pinecone.pinecone",
             "Pinecone.create_index",
-            general_wrap("pinecone.
-
+            general_wrap("pinecone.create_collection", version, environment, application_name,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "pinecone.pinecone",
+            "Pinecone.create_index_for_model",
+            general_wrap("pinecone.create_collection", version, environment, application_name,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         wrap_function_wrapper(
-            "pinecone.
+            "pinecone.db_data.index",
             "Index.upsert",
             general_wrap("pinecone.upsert", version, environment, application_name,
-
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "pinecone.db_data.index",
+            "Index.upsert_records",
+            general_wrap("pinecone.upsert_records", version, environment, application_name,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         wrap_function_wrapper(
-            "pinecone.
+            "pinecone.db_data.index",
             "Index.query",
             general_wrap("pinecone.query", version, environment, application_name,
-
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "pinecone.db_data.index",
+            "Index.search",
+            general_wrap("pinecone.search", version, environment, application_name,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         wrap_function_wrapper(
-            "pinecone.
+            "pinecone.db_data.index",
+            "Index.fetch",
+            general_wrap("pinecone.fetch", version, environment, application_name,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "pinecone.db_data.index",
+            "Index.search_records",
+            general_wrap("pinecone.search_records", version, environment, application_name,
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "pinecone.db_data.index",
             "Index.update",
             general_wrap("pinecone.update", version, environment, application_name,
-
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         wrap_function_wrapper(
-            "pinecone.
+            "pinecone.db_data.index",
             "Index.delete",
             general_wrap("pinecone.delete", version, environment, application_name,
-
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
+        # Async operations
+        wrap_function_wrapper(
+            "pinecone.pinecone_asyncio",
+            "PineconeAsyncio.create_index",
+            async_general_wrap("pinecone.create_index", version, environment, application_name,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "pinecone.pinecone_asyncio",
+            "PineconeAsyncio.create_index_for_model",
+            async_general_wrap("pinecone.create_index", version, environment, application_name,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "pinecone.db_data.index_asyncio",
+            "_IndexAsyncio.upsert",
+            async_general_wrap("pinecone.upsert", version, environment, application_name,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "pinecone.db_data.index_asyncio",
+            "_IndexAsyncio.upsert_records",
+            async_general_wrap("pinecone.upsert_records", version, environment, application_name,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "pinecone.db_data.index_asyncio",
+            "_IndexAsyncio.query",
+            async_general_wrap("pinecone.query", version, environment, application_name,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "pinecone.db_data.index_asyncio",
+            "_IndexAsyncio.search",
+            async_general_wrap("pinecone.search", version, environment, application_name,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "pinecone.db_data.index_asyncio",
+            "_IndexAsyncio.fetch",
+            async_general_wrap("pinecone.fetch", version, environment, application_name,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "pinecone.db_data.index_asyncio",
+            "_IndexAsyncio.search_records",
+            async_general_wrap("pinecone.search_records", version, environment, application_name,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "pinecone.db_data.index_asyncio",
+            "_IndexAsyncio.update",
+            async_general_wrap("pinecone.update", version, environment, application_name,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        wrap_function_wrapper(
+            "pinecone.db_data.index_asyncio",
+            "_IndexAsyncio.delete",
+            async_general_wrap("pinecone.delete", version, environment, application_name,
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
 
-    @staticmethod
     def _uninstrument(self, **kwargs):
         pass
```

(Several removed lines above, e.g. `_instruments = ("pinecone` and `"pinecone.`, appear truncated in the published diff; they are left as shown.)
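All of the targets above use wrapt's `(module, "Class.method")` addressing, which is why the move to `pinecone >= 7.3.0` shows up purely as new dotted paths (`pinecone.db_data.index`, `pinecone.db_data.index_asyncio`, `_IndexAsyncio.*`). A stdlib-only toy showing how that addressing resolves (the json/JSONEncoder target is just a stand-in and has nothing to do with openlit or Pinecone):

```python
import json
from wrapt import wrap_function_wrapper

def logging_wrapper(wrapped, instance, args, kwargs):
    # Fires for every JSONEncoder.encode call after patching
    print(f"calling {type(instance).__name__}.{wrapped.__name__}")
    return wrapped(*args, **kwargs)

# Same shape as ("pinecone.db_data.index", "Index.upsert") above:
# a dotted module path plus a "Class.method" attribute path.
wrap_function_wrapper("json.encoder", "JSONEncoder.encode", logging_wrapper)

print(json.dumps({"hello": "world"}))   # prints "calling JSONEncoder.encode" first
```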
openlit/instrumentation/pinecone/async_pinecone.py (new file)

```diff
@@ -0,0 +1,59 @@
+"""
+Module for monitoring Pinecone async API calls.
+"""
+
+import time
+from opentelemetry.trace import SpanKind
+from opentelemetry import context as context_api
+from openlit.__helpers import (
+    handle_exception,
+    set_server_address_and_port,
+)
+from openlit.instrumentation.pinecone.utils import (
+    process_vectordb_response,
+    DB_OPERATION_MAP,
+)
+
+def async_general_wrap(gen_ai_endpoint, version, environment, application_name,
+                       tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for Pinecone async function calls.
+    """
+
+    async def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the Pinecone async function call.
+        """
+
+        if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY):
+            return await wrapped(*args, **kwargs)
+
+        # Get server address and port using the standard helper
+        server_address, server_port = set_server_address_and_port(instance, "pinecone.io", 443)
+
+        db_operation = DB_OPERATION_MAP.get(gen_ai_endpoint, "unknown")
+        if db_operation == "create_collection":
+            namespace = kwargs.get("name") or (args[0] if args else "unknown")
+        else:
+            namespace = kwargs.get("namespace") or (args[0] if args else "unknown")
+        span_name = f"{db_operation} {namespace}"
+
+        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+            try:
+                start_time = time.time()
+                response = await wrapped(*args, **kwargs)
+
+                # Process response and generate telemetry
+                response = process_vectordb_response(
+                    response, db_operation, server_address, server_port,
+                    environment, application_name, metrics, start_time, span,
+                    capture_message_content, disable_metrics, version, instance, args, **kwargs
+                )
+
+                return response
+
+            except Exception as e:
+                handle_exception(span, e)
+                raise
+
+    return wrapper
```
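The span name in the async wrapper is derived purely from `DB_OPERATION_MAP` plus the `namespace`/`name` argument. The real map lives in `openlit/instrumentation/pinecone/utils.py` (listed above but not shown in this section), so the two entries below are illustrative guesses used only to demonstrate the naming rule:

```python
# Hypothetical subset of DB_OPERATION_MAP; the real values live in pinecone/utils.py
DB_OPERATION_MAP = {
    "pinecone.query": "query",
    "pinecone.create_collection": "create_collection",
}

def span_name_for(gen_ai_endpoint, args, kwargs):
    # Same derivation as the async wrapper above
    db_operation = DB_OPERATION_MAP.get(gen_ai_endpoint, "unknown")
    if db_operation == "create_collection":
        namespace = kwargs.get("name") or (args[0] if args else "unknown")
    else:
        namespace = kwargs.get("namespace") or (args[0] if args else "unknown")
    return f"{db_operation} {namespace}"

print(span_name_for("pinecone.query", (), {"namespace": "products"}))         # query products
print(span_name_for("pinecone.create_collection", (), {"name": "articles"}))  # create_collection articles
```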