langtrace-python-sdk 1.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- instrumentation/__init__.py +0 -0
- instrumentation/chroma/__init__.py +0 -0
- instrumentation/chroma/instrumentation.py +33 -0
- instrumentation/chroma/lib/__init__.py +0 -0
- instrumentation/chroma/lib/apis.py +40 -0
- instrumentation/chroma/patch.py +46 -0
- instrumentation/constants.py +18 -0
- instrumentation/langchain/__init__.py +0 -0
- instrumentation/langchain/instrumentation.py +74 -0
- instrumentation/langchain/patch.py +84 -0
- instrumentation/langchain_community/__init__.py +0 -0
- instrumentation/langchain_community/instrumentation.py +99 -0
- instrumentation/langchain_community/patch.py +78 -0
- instrumentation/langchain_core/__init__.py +0 -0
- instrumentation/langchain_core/instrumentation.py +101 -0
- instrumentation/langchain_core/patch.py +168 -0
- instrumentation/llamaindex/__init__.py +0 -0
- instrumentation/llamaindex/instrumentation.py +73 -0
- instrumentation/llamaindex/patch.py +40 -0
- instrumentation/openai/__init__.py +0 -0
- instrumentation/openai/instrumentation.py +41 -0
- instrumentation/openai/lib/__init__.py +0 -0
- instrumentation/openai/lib/apis.py +16 -0
- instrumentation/openai/lib/constants.py +30 -0
- instrumentation/openai/patch.py +209 -0
- instrumentation/pinecone/__init__.py +0 -0
- instrumentation/pinecone/instrumentation.py +43 -0
- instrumentation/pinecone/lib/__init__.py +0 -0
- instrumentation/pinecone/lib/apis.py +19 -0
- instrumentation/pinecone/patch.py +45 -0
- instrumentation/setup.py +50 -0
- instrumentation/utils.py +27 -0
- instrumentation/with_root_span.py +28 -0
- langtrace_python_sdk-1.0.9.dist-info/LICENSE +674 -0
- langtrace_python_sdk-1.0.9.dist-info/METADATA +169 -0
- langtrace_python_sdk-1.0.9.dist-info/RECORD +38 -0
- langtrace_python_sdk-1.0.9.dist-info/WHEEL +5 -0
- langtrace_python_sdk-1.0.9.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
"""
|
|
2
|
+
This module contains the patching functions for the langchain_core package.
|
|
3
|
+
"""
|
|
4
|
+
import json
|
|
5
|
+
|
|
6
|
+
from langtrace.trace_attributes import FrameworkSpanAttributes
|
|
7
|
+
from opentelemetry.trace import SpanKind, StatusCode
|
|
8
|
+
from opentelemetry.trace.status import Status
|
|
9
|
+
|
|
10
|
+
from instrumentation.constants import SERVICE_PROVIDERS
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def generic_patch(method_name, task, tracer, version, trace_output=True, trace_input=True):
    """Build a wrapt-style wrapper that traces a generic langchain_core method.

    method_name: name used for the created span.
    task: value recorded under the 'langchain.task.name' span attribute.
    tracer: OpenTelemetry tracer used to open the span.
    version: instrumented package version, recorded on the span.
    trace_output: when True, record the wrapped call's result on the span.
    trace_input: when True, record the positional arguments on the span.
    """

    def instrumented(wrapped, instance, args, kwargs):
        provider = SERVICE_PROVIDERS['LANGCHAIN']
        base_attributes = {
            'langtrace.service.name': provider,
            'langtrace.service.type': 'framework',
            'langtrace.service.version': version,
            'langtrace.version': '1.0.0',
            'langchain.task.name': task,
        }
        if trace_input and len(args) > 0:
            base_attributes['langchain.inputs'] = to_json_string(args)

        attributes = FrameworkSpanAttributes(**base_attributes)

        with tracer.start_as_current_span(method_name, kind=SpanKind.CLIENT) as span:
            # Copy every populated attribute onto the span.
            for key, value in attributes.model_dump(by_alias=True).items():
                if value is not None:
                    span.set_attribute(key, value)
            try:
                result = wrapped(*args, **kwargs)
                if trace_output:
                    span.set_attribute('langchain.outputs', to_json_string(result))
                span.set_status(StatusCode.OK)
                return result
            except Exception as err:
                # Keep the span honest about the failure, then propagate it.
                span.record_exception(err)
                span.set_status(Status(StatusCode.ERROR, str(err)))
                raise

    return instrumented
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def runnable_patch(method_name, task, tracer, version, trace_output=True, trace_input=True):
    """
    Wrapper function to trace a runnable
    method_name: The name of the method to trace.
    task: The name used to identify the type of task in `generic_patch`.
    tracer: The tracer object used in `generic_patch`.
    version: The version parameter used in `generic_patch`.
    trace_output: Whether to trace the output of the patched methods.
    trace_input: Whether to trace the input of the patched methods.
    """
    def traced_method(wrapped, instance, args, kwargs):
        service_provider = SERVICE_PROVIDERS['LANGCHAIN']
        span_attributes = {
            'langtrace.service.name': service_provider,
            'langtrace.service.type': 'framework',
            'langtrace.service.version': version,
            'langtrace.version': '1.0.0',
            'langchain.task.name': task,
        }

        if trace_input:
            inputs = {}
            args_list = []
            if len(args) > 0:
                # Only string positional arguments are captured verbatim.
                for value in args:
                    if isinstance(value, str):
                        args_list.append(value)
                inputs['args'] = args_list

            # Record each runnable step's class name keyed by step name;
            # the conditional iterable yields nothing when `steps` is
            # absent or not a dict.
            for field, value in instance.steps.items() if hasattr(instance, "steps") and \
                    isinstance(instance.steps, dict) else {}:
                inputs[field] = value.__class__.__name__

            span_attributes['langchain.inputs'] = to_json_string(inputs)

        attributes = FrameworkSpanAttributes(**span_attributes)

        with tracer.start_as_current_span(method_name, kind=SpanKind.CLIENT) as span:
            # Copy every populated attribute onto the span.
            for field, value in attributes.model_dump(by_alias=True).items():
                if value is not None:
                    span.set_attribute(field, value)
            try:
                # Attempt to call the original method
                result = wrapped(*args, **kwargs)
                if trace_output:
                    outputs = {}
                    if isinstance(result, dict):
                        for field, value in result.items() if hasattr(result, "items") else {}:
                            if isinstance(value, list):
                                # NOTE(review): for list values only the last
                                # item's class name survives per field —
                                # confirm this summarization is intended.
                                for item in value:
                                    if item.__class__.__name__ == "Document":
                                        outputs[field] = "Document"
                                    else:
                                        outputs[field] = item.__class__.__name__
                            if isinstance(value, str):
                                outputs[field] = value
                        span.set_attribute(
                            'langchain.outputs', to_json_string(outputs))
                    # Plain string results are recorded verbatim.
                    if isinstance(result, str):
                        span.set_attribute(
                            'langchain.outputs', result)

                span.set_status(StatusCode.OK)
                return result
            except Exception as e:
                # Record the exception in the span
                span.record_exception(e)

                # Set the span status to indicate an error
                span.set_status(Status(StatusCode.ERROR, str(e)))

                # Reraise the exception to ensure it's not swallowed
                raise

    return traced_method
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
def clean_empty(d):
    """Recursively remove empty lists, empty dicts, and None elements.

    d: any value; non-container values are returned unchanged.
    Returns a new list/dict with None, [] and {} entries pruned at every
    nesting level; falsy scalars (0, "", False) are preserved.
    """
    if not isinstance(d, (dict, list)):
        return d

    def _keep(v):
        # BUGFIX: previously the list branch only dropped [] / None and the
        # dict branch only dropped {} / None, so empty dicts survived inside
        # lists and empty lists survived inside dicts, contradicting the
        # documented contract. Filter all three "empty" values uniformly.
        return v is not None and v != [] and v != {}

    if isinstance(d, list):
        return [v for v in (clean_empty(v) for v in d) if _keep(v)]
    return {
        k: v
        for k, v in ((k, clean_empty(v)) for k, v in d.items())
        if _keep(v)
    }
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def custom_serializer(obj):
    """json.dumps fallback: represent objects JSON can't serialize natively.

    Objects exposing __dict__ are reduced to their cleaned attribute dict;
    anything else is rendered via str().
    """
    attrs = getattr(obj, "__dict__", None)
    if attrs is None:
        # No attribute dict available — fall back to the string form.
        return str(obj)
    return clean_empty(attrs)
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
def to_json_string(any_object):
    """Serialize *any_object* to pretty-printed JSON.

    Empty/None values are pruned first via clean_empty; values json cannot
    serialize natively are handled by custom_serializer.
    """
    return json.dumps(clean_empty(any_object), default=custom_serializer, indent=2)
|
|
File without changes
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
import importlib.metadata
|
|
2
|
+
from typing import Collection
|
|
3
|
+
|
|
4
|
+
from langtrace.trace_attributes import LlamaIndexMethods
|
|
5
|
+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
|
6
|
+
from opentelemetry.trace import get_tracer
|
|
7
|
+
from wrapt import wrap_function_wrapper
|
|
8
|
+
|
|
9
|
+
from instrumentation.llamaindex.patch import generic_patch
|
|
10
|
+
|
|
11
|
+
# Module paths of interest for this instrumentor. NOTE(review): not
# referenced by the visible wrap calls in _instrument — confirm whether this
# list is still needed.
MODULES = [
    "llama_index.core.query_pipeline.query",
]
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class LlamaindexInstrumentation(BaseInstrumentor):
    """Instrumentor wiring tracing wrappers onto core llama-index methods."""

    def instrumentation_dependencies(self) -> Collection[str]:
        # Minimum llama-index version this instrumentation supports.
        return ["llama-index >= 0.10.0"]

    def _instrument(self, **kwargs):
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, "", tracer_provider)
        version = importlib.metadata.version('llama-index')

        # (module path, dotted attribute, span name, task label) — one entry
        # per llama-index method that receives a tracing wrapper.
        targets = [
            ('llama_index.core.base.base_query_engine', 'BaseQueryEngine.query',
             LlamaIndexMethods.QUERYENGINE_QUERY.value, 'query'),
            ('llama_index.core.base.base_retriever', 'BaseRetriever.retrieve',
             LlamaIndexMethods.RETRIEVER_RETRIEVE.value, 'retrieve'),
            ('llama_index.core.extractors.interface', 'BaseExtractor.extract',
             LlamaIndexMethods.BASEEXTRACTOR_EXTRACT.value, 'extract'),
            ('llama_index.core.extractors.interface', 'BaseExtractor.aextract',
             LlamaIndexMethods.BASEEXTRACTOR_AEXTRACT.value, 'extract'),
            ('llama_index.core.readers.file.base', 'SimpleDirectoryReader.load_data',
             LlamaIndexMethods.BASEREADER_LOADDATA.value, 'loaddata'),
            ('llama_index.core.chat_engine.types', 'BaseChatEngine.chat',
             LlamaIndexMethods.CHATENGINE_CHAT.value, 'chat'),
            ('llama_index.core.chat_engine.types', 'BaseChatEngine.achat',
             LlamaIndexMethods.CHATENGINE_ACHAT.value, 'chat'),
        ]
        for module_path, attribute, span_name, task in targets:
            wrap_function_wrapper(
                module_path,
                attribute,
                generic_patch(span_name, task, tracer, version),
            )

    def _instrument_module(self, module_name):
        print(module_name)

    def _uninstrument(self, **kwargs):
        print(kwargs)
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
from langtrace.trace_attributes import FrameworkSpanAttributes
|
|
2
|
+
from opentelemetry.trace import SpanKind, StatusCode
|
|
3
|
+
from opentelemetry.trace.status import Status, StatusCode
|
|
4
|
+
|
|
5
|
+
from instrumentation.constants import SERVICE_PROVIDERS
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def generic_patch(method, task, tracer, version):
    """Build a wrapt-style wrapper that traces a llama-index method.

    method: name used for the created span.
    task: value recorded under the 'llamaindex.task.name' span attribute.
    tracer: OpenTelemetry tracer used to open the span.
    version: llama-index version, recorded on the span.
    """

    def traced_method(wrapped, instance, args, kwargs):
        provider = SERVICE_PROVIDERS['LLAMAINDEX']
        attributes = FrameworkSpanAttributes(**{
            'langtrace.service.name': provider,
            'langtrace.service.type': 'framework',
            'langtrace.service.version': version,
            'langtrace.version': '1.0.0',
            'llamaindex.task.name': task,
        })

        with tracer.start_as_current_span(method, kind=SpanKind.CLIENT) as span:
            # Copy every populated attribute onto the span.
            for key, value in attributes.model_dump(by_alias=True).items():
                if value is not None:
                    span.set_attribute(key, value)
            try:
                result = wrapped(*args, **kwargs)
                span.set_status(StatusCode.OK)
                return result
            except Exception as err:
                # Mark the span failed and propagate the original error.
                span.record_exception(err)
                span.set_status(Status(StatusCode.ERROR, str(err)))
                raise

    return traced_method
|
|
File without changes
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
import importlib.metadata
|
|
2
|
+
from typing import Collection
|
|
3
|
+
|
|
4
|
+
import openai
|
|
5
|
+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
|
6
|
+
from opentelemetry.trace import get_tracer
|
|
7
|
+
from wrapt import wrap_function_wrapper
|
|
8
|
+
|
|
9
|
+
from instrumentation.openai.patch import (chat_completions_create,
|
|
10
|
+
embeddings_create, images_generate)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class OpenAIInstrumentation(BaseInstrumentor):
    """Instrumentor that patches the OpenAI client's chat, image and
    embedding entry points with tracing wrappers."""

    def instrumentation_dependencies(self) -> Collection[str]:
        # Minimum openai version this instrumentation supports.
        return ["openai >= 0.27.0"]

    def _instrument(self, **kwargs):
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, "", tracer_provider)
        # Installed openai client version, recorded on every span.
        version = importlib.metadata.version('openai')
        # NOTE(review): each patch factory captures the module-level client
        # method (e.g. openai.chat.completions.create) here, and the wrappers
        # in instrumentation/openai/patch.py invoke that captured method
        # instead of the `wrapped` callable supplied by wrapt — confirm this
        # is intended, as it bypasses the instance the caller invoked.
        wrap_function_wrapper(
            'openai.resources.chat.completions',
            'Completions.create',
            chat_completions_create(
                openai.chat.completions.create, version, tracer)
        )
        wrap_function_wrapper(
            'openai.resources.images',
            'Images.generate',
            images_generate(openai.images.generate, version, tracer)
        )
        wrap_function_wrapper(
            'openai.resources.embeddings',
            'Embeddings.create',
            embeddings_create(openai.embeddings.create,
                              version, tracer)
        )

    def _uninstrument(self, **kwargs):
        pass
|
|
File without changes
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
from langtrace.trace_attributes import OpenAIMethods
|
|
2
|
+
|
|
3
|
+
# Supported OpenAI operations: the span method name used when tracing
# ("METHOD") and the REST endpoint path recorded on spans ("ENDPOINT").
APIS = {
    "CHAT_COMPLETION": {
        "METHOD": OpenAIMethods.CHAT_COMPLETION.value,
        "ENDPOINT": "/chat/completions",
    },
    "IMAGES_GENERATION": {
        "METHOD": OpenAIMethods.IMAGES_GENERATION.value,
        "ENDPOINT": "/images/generations",
    },
    "EMBEDDINGS_CREATE": {
        "METHOD": OpenAIMethods.EMBEDDINGS_CREATE.value,
        "ENDPOINT": "/embeddings",
    },
}
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
# Cost lookup keyed by OpenAI model name, with separate "input" and "output"
# prices. NOTE(review): units are not stated here — presumably USD per 1K
# tokens; confirm against OpenAI's published pricing before relying on it.
OPENAI_COST_TABLE = {
    "gpt-4-0125-preview": {
        "input": 0.01,
        "output": 0.03,
    },
    "gpt-4-1106-preview": {
        "input": 0.01,
        "output": 0.03,
    },
    "gpt-4-1106-vision-preview": {
        "input": 0.01,
        "output": 0.03,
    },
    "gpt-4": {
        "input": 0.03,
        "output": 0.06,
    },
    "gpt-4-32k": {
        "input": 0.06,
        "output": 0.12,
    },
    "gpt-3.5-turbo-0125": {
        "input": 0.0005,
        "output": 0.0015,
    },
    "gpt-3.5-turbo-instruct": {
        "input": 0.0015,
        "output": 0.002,
    },
}
|
|
@@ -0,0 +1,209 @@
|
|
|
1
|
+
import json
|
|
2
|
+
|
|
3
|
+
from langtrace.trace_attributes import Event, LLMSpanAttributes
|
|
4
|
+
from opentelemetry.trace import SpanKind, StatusCode
|
|
5
|
+
from opentelemetry.trace.status import Status, StatusCode
|
|
6
|
+
|
|
7
|
+
from instrumentation.constants import SERVICE_PROVIDERS
|
|
8
|
+
from instrumentation.openai.lib.apis import APIS
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def images_generate(original_method, version, tracer):
    """Return a wrapt wrapper that traces OpenAI image-generation calls.

    original_method: the client method the wrapper actually invokes.
    version: openai package version, recorded on the span.
    tracer: OpenTelemetry tracer used to open the span.
    """

    def traced_method(wrapped, instance, args, kwargs):
        # Best-effort recovery of the client's base URL for the span.
        client = getattr(instance, '_client', None)
        base_url = str(client._base_url) if client is not None and hasattr(
            client, '_base_url') else ""
        attributes = LLMSpanAttributes(**{
            "langtrace.service.name": SERVICE_PROVIDERS['OPENAI'],
            "langtrace.service.type": "llm",
            "langtrace.service.version": version,
            "langtrace.version": "1.0.0",
            "url.full": base_url,
            "llm.api": APIS["IMAGES_GENERATION"]["ENDPOINT"],
            "llm.model": kwargs.get('model'),
            "llm.stream": kwargs.get('stream'),
            "llm.prompts": json.dumps([kwargs.get('prompt', [])])
        })

        with tracer.start_as_current_span(APIS["IMAGES_GENERATION"]["METHOD"], kind=SpanKind.CLIENT) as span:
            # Copy every populated attribute onto the span.
            for key, value in attributes.model_dump(by_alias=True).items():
                if value is not None:
                    span.set_attribute(key, value)
            try:
                result = original_method(*args, **kwargs)
                if kwargs.get('stream') is False or kwargs.get('stream') is None:
                    # Summarize the first generated image, if any.
                    payload = result.data[0] if hasattr(
                        result, 'data') and len(result.data) > 0 else {}
                    summary = [{
                        "url": getattr(payload, 'url', ""),
                        "revised_prompt": getattr(payload, 'revised_prompt', ""),
                    }]
                    span.set_attribute("llm.responses", json.dumps(summary))

                span.set_status(StatusCode.OK)
                return result
            except Exception as err:
                # Mark the span failed and propagate the original error.
                span.record_exception(err)
                span.set_status(Status(StatusCode.ERROR, str(err)))
                raise

    return traced_method
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def chat_completions_create(original_method, version, tracer):
    """Return a wrapt wrapper that traces OpenAI chat-completion calls.

    original_method: the client method the wrapper actually invokes.
    version: openai package version, recorded on the span.
    tracer: OpenTelemetry tracer used to open the span.
    """
    def traced_method(wrapped, instance, args, kwargs):
        # Best-effort recovery of the client's base URL for the span.
        base_url = str(instance._client._base_url) if hasattr(
            instance, '_client') and hasattr(instance._client, '_base_url') else ""
        service_provider = SERVICE_PROVIDERS['OPENAI']
        span_attributes = {
            "langtrace.service.name": service_provider,
            "langtrace.service.type": "llm",
            "langtrace.service.version": version,
            "langtrace.version": "1.0.0",
            "url.full": base_url,
            "llm.api": APIS["CHAT_COMPLETION"]["ENDPOINT"],
            "llm.model": kwargs.get('model'),
            "llm.prompts": json.dumps(kwargs.get('messages', [])),
            "llm.stream": kwargs.get('stream'),
        }

        attributes = LLMSpanAttributes(**span_attributes)

        # Optional sampling/user attributes, recorded only when supplied.
        if kwargs.get('temperature') is not None:
            attributes.llm_temperature = kwargs.get('temperature')
        if kwargs.get('top_p') is not None:
            attributes.llm_top_p = kwargs.get('top_p')
        if kwargs.get('user') is not None:
            attributes.llm_user = kwargs.get('user')

        with tracer.start_as_current_span(APIS["CHAT_COMPLETION"]["METHOD"], kind=SpanKind.CLIENT) as span:
            # Copy every populated attribute onto the span.
            for field, value in attributes.model_dump(by_alias=True).items():
                if value is not None:
                    span.set_attribute(field, value)
            try:
                result = original_method(*args, **kwargs)
                # BUGFIX: treat an omitted `stream` kwarg (None) as a
                # non-streaming call, matching images_generate. Previously
                # only `stream is False` took this branch, so default calls
                # fell into the streaming loop and tried to iterate a
                # non-iterable ChatCompletion object.
                if kwargs.get('stream') is False or kwargs.get('stream') is None:
                    if hasattr(result, 'choices') and result.choices is not None:
                        responses = [
                            {
                                "message": choice.message.content if choice.message and choice.message.content else "",
                                **({"content_filter_results": choice["content_filter_results"]} if "content_filter_results" in choice else {})
                            }
                            for choice in result.choices
                        ]
                    else:
                        responses = []
                    span.set_attribute("llm.responses", json.dumps(responses))

                    if hasattr(result, 'system_fingerprint') and result.system_fingerprint is not None:
                        span.set_attribute(
                            "llm.system.fingerprint", result.system_fingerprint)

                    # Record token usage when the API returned it.
                    if hasattr(result, 'usage') and result.usage is not None:
                        usage = result.usage
                        usage_dict = {
                            "prompt_tokens": usage.prompt_tokens,
                            "completion_tokens": usage.completion_tokens,
                            "total_tokens": usage.total_tokens
                        }
                        span.set_attribute(
                            "llm.token.counts", json.dumps(usage_dict))

                    span.set_status(StatusCode.OK)
                    return result
                else:
                    # Streaming: iterate the stream, emitting one event per
                    # chunk and accumulating the assistant's full message.
                    # NOTE(review): this consumes the stream inside the
                    # wrapper and implicitly returns None, so the caller
                    # never sees the chunks — TODO: re-yield chunks through
                    # a generator instead of exhausting them here.
                    result_content = []
                    span.add_event(Event.STREAM_START.value)

                    for chunk in result:
                        if hasattr(chunk, 'choices') and chunk.choices is not None:
                            content = [
                                choice.delta.content if choice.delta and choice.delta.content else ""
                                for choice in chunk.choices
                            ]
                        else:
                            content = []
                        span.add_event(Event.STREAM_OUTPUT.value, {
                            "response": "".join(content)
                        })
                        result_content.append(
                            content[0] if len(content) > 0 else "")
                    span.add_event(Event.STREAM_END.value)
                    span.set_attribute("llm.responses", json.dumps(
                        {"message": {"role": "assistant", "content": "".join(result_content)}}))

            except Exception as e:
                # Record the exception in the span
                span.record_exception(e)

                # Set the span status to indicate an error
                span.set_status(Status(StatusCode.ERROR, str(e)))

                # Reraise the exception to ensure it's not swallowed
                raise

    return traced_method
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
def embeddings_create(original_method, version, tracer):
    """Return a wrapt wrapper that traces OpenAI embedding-creation calls.

    original_method: the client method the wrapper actually invokes.
    version: openai package version, recorded on the span.
    tracer: OpenTelemetry tracer used to open the span.
    """
    def traced_method(wrapped, instance, args, kwargs):
        # Best-effort recovery of the client's base URL for the span.
        base_url = str(instance._client._base_url) if hasattr(
            instance, '_client') and hasattr(instance._client, '_base_url') else ""

        service_provider = SERVICE_PROVIDERS['OPENAI']
        span_attributes = {
            "langtrace.service.name": service_provider,
            "langtrace.service.type": "llm",
            "langtrace.service.version": version,
            "langtrace.version": "1.0.0",
            "url.full": base_url,
            "llm.api": APIS["EMBEDDINGS_CREATE"]["ENDPOINT"],
            "llm.model": kwargs.get('model'),
            "llm.prompts": "",
        }

        attributes = LLMSpanAttributes(**span_attributes)

        # Optional request attributes, recorded only when supplied.
        # BUGFIX: use attribute assignment as chat_completions_create does;
        # LLMSpanAttributes is a pydantic model (see model_dump below) and
        # does not support item assignment, so the previous
        # attributes["llm.dimensions"] / attributes["llm.user"] raised
        # TypeError whenever those kwargs were passed. Field names follow
        # the llm_* pattern used elsewhere in this module. Also dropped a
        # stray no-op `kwargs.get('encoding_format')` expression statement.
        if kwargs.get('encoding_format') is not None:
            attributes.llm_encoding_format = kwargs.get('encoding_format')
        if kwargs.get('dimensions') is not None:
            attributes.llm_dimensions = kwargs.get('dimensions')
        if kwargs.get('user') is not None:
            attributes.llm_user = kwargs.get('user')

        with tracer.start_as_current_span(APIS["EMBEDDINGS_CREATE"]["METHOD"], kind=SpanKind.CLIENT) as span:
            # Copy every populated attribute onto the span.
            for field, value in attributes.model_dump(by_alias=True).items():
                if value is not None:
                    span.set_attribute(field, value)
            try:
                result = original_method(*args, **kwargs)
                span.set_status(StatusCode.OK)
                return result
            except Exception as e:
                # Record the exception in the span
                span.record_exception(e)

                # Set the span status to indicate an error
                span.set_status(Status(StatusCode.ERROR, str(e)))

                # Reraise the exception to ensure it's not swallowed
                raise

    return traced_method
|
|
File without changes
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
import importlib.metadata
|
|
2
|
+
from typing import Collection
|
|
3
|
+
|
|
4
|
+
import pinecone
|
|
5
|
+
from langtrace.trace_attributes import PineconeMethods
|
|
6
|
+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
|
7
|
+
from opentelemetry.trace import get_tracer
|
|
8
|
+
from wrapt import wrap_function_wrapper
|
|
9
|
+
|
|
10
|
+
from instrumentation.pinecone.lib.apis import APIS
|
|
11
|
+
from instrumentation.pinecone.patch import generic_patch
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class PineconeInstrumentation(BaseInstrumentor):
    """Instrumentor that wraps pinecone Index data operations with tracing."""

    def instrumentation_dependencies(self) -> Collection[str]:
        # Minimum pinecone client version this instrumentation supports.
        return ["pinecone-client >= 3.1.0"]

    def _instrument(self, **kwargs):
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, "", tracer_provider)
        version = importlib.metadata.version('pinecone-client')
        for operation_name, details in APIS.items():
            method_ref = details["METHOD"]
            method = None
            # BUGFIX: compare enum values with == rather than `is`; the
            # values are plain objects, so identity comparison only worked
            # by accident of interning and could silently leave `method`
            # as None.
            if method_ref == PineconeMethods.UPSERT.value:
                method = pinecone.Index.upsert
            elif method_ref == PineconeMethods.QUERY.value:
                method = pinecone.Index.query
            elif method_ref == PineconeMethods.DELETE.value:
                method = pinecone.Index.delete
            operation = details["OPERATION"]

            # Dynamically creating the patching call
            wrap_function_wrapper(
                'pinecone.data.index',
                f'Index.{operation}',
                generic_patch(method, operation_name,
                              version, tracer)
            )

    def _uninstrument(self, **kwargs):
        print(kwargs)
|
|
File without changes
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
from langtrace.trace_attributes import PineconeMethods
|
|
2
|
+
|
|
3
|
+
# Supported Pinecone Index operations: the span method name used when tracing
# ("METHOD"), the REST endpoint path recorded on spans ("ENDPOINT"), and the
# Index attribute that gets patched ("OPERATION").
APIS = {
    "UPSERT": {
        "METHOD": PineconeMethods.UPSERT.value,
        "ENDPOINT": "/vectors/upsert",
        "OPERATION": "upsert",
    },
    "QUERY": {
        "METHOD": PineconeMethods.QUERY.value,
        "ENDPOINT": "/query",
        "OPERATION": "query",
    },
    "DELETE": {
        "METHOD": PineconeMethods.DELETE.value,
        "ENDPOINT": "/vectors/delete",
        "OPERATION": "delete",
    }
}
|