langtrace-python-sdk 2.0.3__py3-none-any.whl → 2.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/anthropic_example/completion.py +1 -1
- examples/chroma_example/basic.py +1 -1
- examples/cohere_example/chat.py +7 -3
- examples/cohere_example/chat_stream.py +7 -2
- examples/cohere_example/embed.py +2 -1
- examples/cohere_example/rerank.py +2 -1
- examples/cohere_example/tools.py +21 -5
- examples/fastapi_example/basic_route.py +1 -1
- examples/hiveagent_example/basic.py +1 -1
- examples/langchain_example/groq_example.py +3 -1
- examples/langchain_example/langgraph_example.py +11 -12
- examples/llamaindex_example/agent.py +1 -1
- examples/llamaindex_example/basic.py +1 -1
- examples/openai_example/async_tool_calling_nonstreaming.py +11 -4
- examples/openai_example/async_tool_calling_streaming.py +41 -29
- examples/openai_example/chat_completion.py +12 -8
- examples/openai_example/embeddings_create.py +2 -1
- examples/openai_example/function_calling.py +11 -6
- examples/openai_example/images_generate.py +2 -1
- examples/openai_example/tool_calling.py +1 -1
- examples/openai_example/tool_calling_nonstreaming.py +11 -3
- examples/openai_example/tool_calling_streaming.py +42 -29
- examples/perplexity_example/basic.py +1 -1
- examples/pinecone_example/basic.py +4 -1
- examples/qdrant_example/basic.py +8 -6
- langtrace_python_sdk/constants/instrumentation/groq.py +0 -2
- langtrace_python_sdk/extensions/langtrace_exporter.py +4 -12
- langtrace_python_sdk/instrumentation/anthropic/instrumentation.py +1 -2
- langtrace_python_sdk/instrumentation/anthropic/patch.py +14 -4
- langtrace_python_sdk/instrumentation/chroma/patch.py +4 -2
- langtrace_python_sdk/instrumentation/cohere/instrumentation.py +6 -3
- langtrace_python_sdk/instrumentation/groq/instrumentation.py +3 -1
- langtrace_python_sdk/instrumentation/groq/patch.py +26 -11
- langtrace_python_sdk/instrumentation/langchain/patch.py +4 -2
- langtrace_python_sdk/instrumentation/langchain_community/instrumentation.py +1 -2
- langtrace_python_sdk/instrumentation/langchain_community/patch.py +4 -3
- langtrace_python_sdk/instrumentation/langchain_core/instrumentation.py +3 -1
- langtrace_python_sdk/instrumentation/langchain_core/patch.py +4 -2
- langtrace_python_sdk/instrumentation/langgraph/instrumentation.py +17 -8
- langtrace_python_sdk/instrumentation/langgraph/patch.py +47 -26
- langtrace_python_sdk/instrumentation/llamaindex/patch.py +3 -1
- langtrace_python_sdk/instrumentation/openai/instrumentation.py +7 -3
- langtrace_python_sdk/instrumentation/openai/patch.py +40 -19
- langtrace_python_sdk/instrumentation/pinecone/patch.py +4 -2
- langtrace_python_sdk/instrumentation/qdrant/patch.py +4 -2
- langtrace_python_sdk/langtrace.py +128 -64
- langtrace_python_sdk/types/__init__.py +29 -0
- langtrace_python_sdk/utils/llm.py +2 -4
- langtrace_python_sdk/utils/with_root_span.py +3 -3
- langtrace_python_sdk/version.py +1 -1
- {langtrace_python_sdk-2.0.3.dist-info → langtrace_python_sdk-2.0.5.dist-info}/METADATA +2 -2
- {langtrace_python_sdk-2.0.3.dist-info → langtrace_python_sdk-2.0.5.dist-info}/RECORD +59 -58
- tests/chroma/test_chroma.py +26 -20
- tests/langchain/test_langchain.py +29 -16
- tests/langchain/test_langchain_community.py +28 -15
- tests/langchain/test_langchain_core.py +52 -26
- tests/pinecone/test_pinecone.py +27 -18
- {langtrace_python_sdk-2.0.3.dist-info → langtrace_python_sdk-2.0.5.dist-info}/WHEEL +0 -0
- {langtrace_python_sdk-2.0.3.dist-info → langtrace_python_sdk-2.0.5.dist-info}/licenses/LICENSE +0 -0
@@ -9,7 +9,8 @@ from langtrace_python_sdk import langtrace
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(
+langtrace.init(write_spans_to_console=True)
+
 
 client = OpenAI()
 
@@ -21,7 +22,9 @@ def get_current_weather(location, unit="fahrenheit"):
     if "tokyo" in location.lower():
         return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
     elif "san francisco" in location.lower():
-        return json.dumps(
+        return json.dumps(
+            {"location": "San Francisco", "temperature": "72", "unit": unit}
+        )
     elif "paris" in location.lower():
         return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
     else:
@@ -42,7 +45,12 @@ def get_current_time(location):
 
 def run_conversation():
     # Step 1: send the conversation and available functions to the model
-    messages = [
+    messages = [
+        {
+            "role": "user",
+            "content": "What's the weather like in San Francisco, Tokyo, and Paris?",
+        }
+    ]
     tools = [
         {
             "type": "function",
@@ -78,7 +86,7 @@ def run_conversation():
                     "required": ["location"],
                 },
             },
-        }
+        },
     ]
     response = client.chat.completions.create(
         model="gpt-4",
@@ -95,29 +103,34 @@ def run_conversation():
     name = ""
     arguments = ""
     for chunk in response:
-        if
+        if (
+            chunk.choices[0].delta is not None
+            and chunk.choices[0].delta.tool_calls is not None
+        ):
             for choice in chunk.choices:
                 for tool_call in choice.delta.tool_calls:
                     if tool_call.id and id != tool_call.id:
                         id = tool_call.id if tool_call.id else ""
-                        name =
+                        name = (
+                            tool_call.function.name
+                            if tool_call.function and tool_call.function.name
+                            else ""
+                        )
                         tool_call_dict[name] = {
                             "id": id,
-                            "function": {
-
-                                "arguments": arguments
-                            },
-                            "type": "function"
+                            "function": {"name": name, "arguments": arguments},
+                            "type": "function",
                         }
-                    arguments +=
+                    arguments += (
+                        tool_call.function.arguments
+                        if tool_call.function and tool_call.function.arguments
+                        else ""
+                    )
         if name != "":
             tool_call_dict[name] = {
                 "id": id,
-                "function": {
-
-                    "arguments": arguments
-                },
-                "type": "function"
+                "function": {"name": name, "arguments": arguments},
+                "type": "function",
             }
     for key, value in tool_call_dict.items():
         tool_calls.append(value)
@@ -133,9 +146,9 @@ def run_conversation():
     # messages.append(response_message) # extend conversation with assistant's reply
     # Step 4: send the info for each function call and function response to the model
     for tool_call in tool_calls:
-        function_name = tool_call[
+        function_name = tool_call["function"]["name"]
         function_to_call = available_functions[function_name]
-        function_args = json.loads(tool_call[
+        function_args = json.loads(tool_call["function"]["arguments"])
        function_response = function_to_call(
            location=function_args.get("location"),
            unit=function_args.get("unit"),
@@ -143,10 +156,7 @@ def run_conversation():
         func_res = json.loads(function_response)
         content = f"Use the below information to answer the user's question: The current weather in {func_res['location']} is {func_res['temperature']} degrees {func_res['unit']}"
         messages.append(
-            {
-                "role": "system",
-                "content": content
-            }
+            {"role": "system", "content": content}
         )  # extend conversation with function response
     print(messages)
     second_response = client.chat.completions.create(
@@ -158,10 +168,13 @@ def run_conversation():
     for chunk in second_response:
         if chunk.choices[0].delta.content is not None:
             content = [
-
-
-
-
-
+                (
+                    choice.delta.content
+                    if choice.delta and choice.delta.content
+                    else ""
+                )
+                for choice in chunk.choices
+            ]
+            result.append(content[0] if len(content) > 0 else "")
     print("".join(result))
-    # return second_response
+    # return second_response
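The streaming changes above all follow one pattern: guard every optional field on the chunk delta before reading it, and accumulate `function.arguments` across chunks before parsing. A minimal sketch of that pattern, assuming the OpenAI v1 Python client's streamed chunk shape; the helper name and the accumulate-by-index strategy are illustrative, not part of the example:

from openai import OpenAI


def collect_tool_calls(stream):
    """Merge per-chunk tool-call deltas into {index: {"name", "arguments"}}."""
    calls = {}
    for chunk in stream:
        for choice in chunk.choices:
            # Guard each optional field before dereferencing it.
            if choice.delta is None or choice.delta.tool_calls is None:
                continue
            for tc in choice.delta.tool_calls:
                entry = calls.setdefault(tc.index, {"name": "", "arguments": ""})
                if tc.function and tc.function.name:
                    entry["name"] = tc.function.name
                if tc.function and tc.function.arguments:
                    entry["arguments"] += tc.function.arguments
    return calls


# Usage (assumes OPENAI_API_KEY is set and a messages/tools pair like the example's):
# stream = OpenAI().chat.completions.create(
#     model="gpt-4", messages=messages, tools=tools, stream=True
# )
# print(collect_tool_calls(stream))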
@@ -8,7 +8,7 @@ from langtrace_python_sdk.utils.with_root_span import (
 
 # _ = load_dotenv(find_dotenv())
 
-langtrace.init(
+langtrace.init(write_spans_to_console=True)
 client = OpenAI(base_url="https://api.perplexity.ai", api_key="PPLX_API_KEY")
 
 
@@ -11,7 +11,10 @@ from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(
+langtrace.init(
+    write_spans_to_console=True,
+    disable_instrumentations={"all_except": ["pinecone", "openai"]},
+)
 
 client = OpenAI()
 pinecone = Pinecone()
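The example updates above exercise the initialization surface added in 2.0.5: console span export and selective instrumentation. A condensed sketch combining the two calls, with argument values copied from the examples; whether keys other than "all_except" are accepted is not shown in this diff:

from langtrace_python_sdk import langtrace

# Echo spans to stdout and instrument only the Pinecone and OpenAI clients;
# every other vendor integration is skipped.
langtrace.init(
    write_spans_to_console=True,
    disable_instrumentations={"all_except": ["pinecone", "openai"]},
)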
examples/qdrant_example/basic.py CHANGED

@@ -8,7 +8,7 @@ from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(
+langtrace.init()
 
 
 @with_langtrace_root_span()
@@ -16,10 +16,13 @@ def basic():
     client = QdrantClient(":memory:")
     cohere_client = cohere.Client()
 
-    client.create_collection(
-
-
-
+    client.create_collection(
+        collection_name="MyCollection4",
+        vectors_config=VectorParams(
+            size=1024,
+            distance=Distance.COSINE,
+        ),
+    )
 
     client.upsert(
         collection_name="MyCollection4",
@@ -40,7 +43,6 @@ def basic():
             input_type="search_query", # Input type for search queries
             texts=["Which database is written in Rust?"],
         ).embeddings[0],
-
     )
     print(answer[0])
 
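Pulled out of the diff, the collection setup reads as a short standalone snippet. A sketch assuming qdrant-client's `VectorParams`/`Distance` models; size=1024 matches the Cohere embeddings the example upserts:

from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams

# In-memory Qdrant instance, as in the example; use QdrantClient(url=...) for a real server.
client = QdrantClient(":memory:")
client.create_collection(
    collection_name="MyCollection4",
    vectors_config=VectorParams(size=1024, distance=Distance.COSINE),
)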
@@ -25,18 +25,16 @@ class LangTraceExporter(SpanExporter):
     **Attributes:**
 
     * `api_key` (str): An API key to authenticate with the LangTrace collector (required).
-    * `write_to_remote_url` (bool): A flag indicating whether to send spans to the remote URL (defaults to False).
 
     **Methods:**
 
-    * `__init__(api_key: str = None, url: str = None
+    * `__init__(api_key: str = None, url: str = None) -> None`:
        - Initializes a `LangTraceExporter` instance.
        - Retrieves the API key and URL from environment variables if not provided explicitly.
-        - Raises a `ValueError` if the API key is missing
+        - Raises a `ValueError` if the API key is missing.
     * `export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult`:
        - Exports a batch of `opentelemetry.trace.Span` objects to LangTrace.
        - Converts each span into a dictionary representation including trace ID, instrumentation library, events, dropped data counts, duration, and other span attributes.
-        - If `write_to_remote_url` is False, returns `SpanExportResult.SUCCESS` without sending data.
        - Otherwise, sends the data to the configured URL using a POST request with JSON data and the API key in the header.
        - Returns `SpanExportResult.SUCCESS` on successful export or `SpanExportResult.FAILURE` on errors.
     * `shutdown(self) -> None`:
@@ -44,23 +42,20 @@ class LangTraceExporter(SpanExporter):
 
     **Raises:**
 
-    * `ValueError`: If the API key is not provided
+    * `ValueError`: If the API key is not provided.
     """
 
     api_key: str
-    write_to_remote_url: bool
 
     def __init__(
         self,
         api_key: str = None,
-        write_to_remote_url: bool = False,
         api_host: typing.Optional[str] = None,
     ) -> None:
         self.api_key = api_key or os.environ.get("LANGTRACE_API_KEY")
-        self.write_to_remote_url = write_to_remote_url
         self.api_host: str = api_host or LANGTRACE_REMOTE_URL
 
-        if
+        if not self.api_key:
            raise ValueError("No API key provided")
 
    def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:
@@ -86,9 +81,6 @@ class LangTraceExporter(SpanExporter):
            for span in spans
        ]
 
-        if not self.write_to_remote_url:
-            return
-
        # Send data to remote URL
        try:
            requests.post(
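The exporter changes drop the `write_to_remote_url` escape hatch entirely: spans are always posted to `api_host`. For readers unfamiliar with the interface being implemented, here is a minimal sketch of an exporter with the same shape, using only the standard OpenTelemetry SDK classes; the payload serialization and the `x-api-key` header name are assumptions, not LangTrace's exact wire format:

import json
import os
import typing

import requests
from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult


class JsonPostExporter(SpanExporter):
    def __init__(self, api_key: str = None, api_host: str = None) -> None:
        self.api_key = api_key or os.environ.get("LANGTRACE_API_KEY")
        self.api_host = api_host or "https://example.invalid/api/trace"  # placeholder URL
        if not self.api_key:
            raise ValueError("No API key provided")

    def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:
        # Serialize each finished span and POST the batch as JSON.
        payload = [json.loads(span.to_json()) for span in spans]
        try:
            requests.post(
                self.api_host,
                json=payload,
                headers={"x-api-key": self.api_key},  # header name is an assumption
                timeout=10,
            )
            return SpanExportResult.SUCCESS
        except requests.RequestException:
            return SpanExportResult.FAILURE

    def shutdown(self) -> None:
        pass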
@@ -22,8 +22,7 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from opentelemetry.trace import get_tracer
 from wrapt import wrap_function_wrapper
 
-from langtrace_python_sdk.instrumentation.anthropic.patch import
-    messages_create
+from langtrace_python_sdk.instrumentation.anthropic.patch import messages_create
 
 logging.basicConfig(level=logging.FATAL)
 
@@ -23,7 +23,9 @@ from opentelemetry.trace.status import Status, StatusCode
 
 from langtrace_python_sdk.constants.instrumentation.anthropic import APIS
 from langtrace_python_sdk.constants.instrumentation.common import (
-    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    SERVICE_PROVIDERS,
+)
 
 
 def messages_create(original_method, version, tracer):
@@ -58,7 +60,7 @@ def messages_create(original_method, version, tracer):
         "llm.model": kwargs.get("model"),
         "llm.prompts": prompts,
         "llm.stream": kwargs.get("stream"),
-        **(extra_attributes if extra_attributes is not None else {})
+        **(extra_attributes if extra_attributes is not None else {}),
     }
 
     attributes = LLMSpanAttributes(**span_attributes)
@@ -85,7 +87,10 @@ def messages_create(original_method, version, tracer):
         result = wrapped(*args, **kwargs)
         if kwargs.get("stream") is False:
             if hasattr(result, "content") and result.content is not None:
-                span.set_attribute(
+                span.set_attribute(
+                    "llm.model",
+                    result.model if result.model else kwargs.get("model"),
+                )
                 span.set_attribute(
                     "llm.responses",
                     json.dumps(
@@ -140,7 +145,12 @@ def messages_create(original_method, version, tracer):
         output_tokens = 0
         try:
             for chunk in result:
-                if
+                if (
+                    hasattr(chunk, "message")
+                    and chunk.message is not None
+                    and hasattr(chunk.message, "model")
+                    and chunk.message.model is not None
+                ):
                     span.set_attribute("llm.model", chunk.message.model)
                 content = ""
                 if hasattr(chunk, "delta") and chunk.delta is not None:
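Across these hunks the patch keeps converging on the same defensive move: prefer the value reported by the response, fall back to the request kwargs, and only then stamp the span. A small sketch of that idiom with the OpenTelemetry span API; the helper name is illustrative, not part of the SDK:

from opentelemetry import trace


def record_model(span: trace.Span, result, kwargs: dict) -> None:
    # Prefer the model echoed back by the API; fall back to the requested one.
    model = getattr(result, "model", None) or kwargs.get("model")
    if model is not None:
        span.set_attribute("llm.model", model)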
@@ -21,7 +21,9 @@ from opentelemetry.trace.status import Status, StatusCode
 
 from langtrace_python_sdk.constants.instrumentation.chroma import APIS
 from langtrace_python_sdk.constants.instrumentation.common import (
-    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    SERVICE_PROVIDERS,
+)
 
 
 def collection_patch(method, version, tracer):
@@ -42,7 +44,7 @@ def collection_patch(method, version, tracer):
         "langtrace.version": "1.0.0",
         "db.system": "chromadb",
         "db.operation": api["OPERATION"],
-        **(extra_attributes if extra_attributes is not None else {})
+        **(extra_attributes if extra_attributes is not None else {}),
     }
 
     if hasattr(instance, "name") and instance.name is not None:
@@ -21,9 +21,12 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from opentelemetry.trace import get_tracer
 from wrapt import wrap_function_wrapper
 
-from langtrace_python_sdk.instrumentation.cohere.patch import (
-
-
+from langtrace_python_sdk.instrumentation.cohere.patch import (
+    chat_create,
+    chat_stream,
+    embed,
+    rerank,
+)
 
 
 class CohereInstrumentation(BaseInstrumentor):
@@ -23,7 +23,9 @@ from opentelemetry.trace import get_tracer
 from wrapt import wrap_function_wrapper
 
 from langtrace_python_sdk.instrumentation.groq.patch import (
-    async_chat_completions_create,
+    async_chat_completions_create,
+    chat_completions_create,
+)
 
 logging.basicConfig(level=logging.FATAL)
 
@@ -22,10 +22,11 @@ from opentelemetry.trace import SpanKind
 from opentelemetry.trace.status import Status, StatusCode
 
 from langtrace_python_sdk.constants.instrumentation.common import (
-    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    SERVICE_PROVIDERS,
+)
 from langtrace_python_sdk.constants.instrumentation.groq import APIS
-from langtrace_python_sdk.utils.llm import
-    estimate_tokens)
+from langtrace_python_sdk.utils.llm import calculate_prompt_tokens, estimate_tokens
 
 
 def chat_completions_create(original_method, version, tracer):
@@ -136,7 +137,8 @@ def chat_completions_create(original_method, version, tracer):
                 if "content_filter_results" in choice
                 else {}
             ),
-        }
+        }
+        for choice in result.choices
     ]
     span.set_attribute("llm.responses", json.dumps(responses))
 else:
@@ -224,16 +226,22 @@ def chat_completions_create(original_method, version, tracer):
         elif tool_calls:
             for choice in chunk.choices:
                 tool_call = ""
-                if
+                if choice.delta and choice.delta.tool_calls is not None:
                     toolcalls = choice.delta.tool_calls
                     content = []
                     for tool_call in toolcalls:
-                        if
+                        if (
+                            tool_call
+                            and tool_call.function is not None
+                            and tool_call.function.arguments is not None
+                        ):
                             token_counts = estimate_tokens(
                                 tool_call.function.arguments
                             )
                             completion_tokens += token_counts
-                            content = content + [
+                            content = content + [
+                                tool_call.function.arguments
+                            ]
                         else:
                             content = content + []
                 else:
@@ -389,7 +397,8 @@ def async_chat_completions_create(original_method, version, tracer):
                 if "content_filter_results" in choice
                 else {}
             ),
-        }
+        }
+        for choice in result.choices
     ]
     span.set_attribute("llm.responses", json.dumps(responses))
 else:
@@ -477,16 +486,22 @@ def async_chat_completions_create(original_method, version, tracer):
         elif tool_calls:
             for choice in chunk.choices:
                 tool_call = ""
-                if
+                if choice.delta and choice.delta.tool_calls is not None:
                     toolcalls = choice.delta.tool_calls
                     content = []
                     for tool_call in toolcalls:
-                        if
+                        if (
+                            tool_call
+                            and tool_call.function is not None
+                            and tool_call.function.arguments is not None
+                        ):
                             token_counts = estimate_tokens(
                                 tool_call.function.arguments
                             )
                             completion_tokens += token_counts
-                            content = content + [
+                            content = content + [
+                                tool_call.function.arguments
+                            ]
                         else:
                             content = content + []
                 else:
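The streaming branches above estimate completion tokens from whatever tool-call argument fragments each chunk carries. A compact sketch of that accounting; `estimate_tokens` here is a naive stand-in (whitespace split), not the SDK's `utils.llm` implementation:

def estimate_tokens(text: str) -> int:
    # Crude stand-in for a real tokenizer-based estimate.
    return len(text.split())


def count_streamed_tool_call_tokens(chunks) -> int:
    completion_tokens = 0
    for chunk in chunks:
        for choice in chunk.choices:
            if not (choice.delta and choice.delta.tool_calls is not None):
                continue
            for tool_call in choice.delta.tool_calls:
                if tool_call and tool_call.function and tool_call.function.arguments:
                    completion_tokens += estimate_tokens(tool_call.function.arguments)
    return completion_tokens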
@@ -22,7 +22,9 @@ from opentelemetry.trace import SpanKind, StatusCode
 from opentelemetry.trace.status import Status
 
 from langtrace_python_sdk.constants.instrumentation.common import (
-    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    SERVICE_PROVIDERS,
+)
 
 
 def generic_patch(
@@ -43,7 +45,7 @@ def generic_patch(
         "langtrace.service.version": version,
         "langtrace.version": "1.0.0",
         "langchain.task.name": task,
-        **(extra_attributes if extra_attributes is not None else {})
+        **(extra_attributes if extra_attributes is not None else {}),
     }
 
     if len(args) > 0 and trace_input:
@@ -22,8 +22,7 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from opentelemetry.trace import get_tracer
 from wrapt import wrap_function_wrapper
 
-from langtrace_python_sdk.instrumentation.langchain_community.patch import
-    generic_patch
+from langtrace_python_sdk.instrumentation.langchain_community.patch import generic_patch
 
 
 def patch_module_classes(
@@ -14,7 +14,6 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """
 
-
 import json
 
 from langtrace.trace_attributes import FrameworkSpanAttributes
@@ -23,7 +22,9 @@ from opentelemetry.trace import SpanKind
 from opentelemetry.trace.status import Status, StatusCode
 
 from langtrace_python_sdk.constants.instrumentation.common import (
-    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    SERVICE_PROVIDERS,
+)
 
 
 def generic_patch(
@@ -40,7 +41,7 @@ def generic_patch(
         "langtrace.service.version": version,
         "langtrace.version": "1.0.0",
         "langchain.task.name": task,
-        **(extra_attributes if extra_attributes is not None else {})
+        **(extra_attributes if extra_attributes is not None else {}),
     }
 
     if trace_input and len(args) > 0:
@@ -23,7 +23,9 @@ from opentelemetry.trace import get_tracer
 from wrapt import wrap_function_wrapper
 
 from langtrace_python_sdk.instrumentation.langchain_core.patch import (
-    generic_patch,
+    generic_patch,
+    runnable_patch,
+)
 
 
 # pylint: disable=dangerous-default-value
@@ -22,7 +22,9 @@ from opentelemetry.trace import SpanKind, StatusCode
 from opentelemetry.trace.status import Status
 
 from langtrace_python_sdk.constants.instrumentation.common import (
-    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    SERVICE_PROVIDERS,
+)
 
 
 def generic_patch(
@@ -49,7 +51,7 @@ def generic_patch(
         "langtrace.service.version": version,
         "langtrace.version": "1.0.0",
         "langchain.task.name": task,
-        **(extra_attributes if extra_attributes is not None else {})
+        **(extra_attributes if extra_attributes is not None else {}),
     }
 
     if len(args) > 0 and trace_input:
@@ -22,8 +22,7 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from opentelemetry.trace import get_tracer
 from wrapt import wrap_function_wrapper
 
-from langtrace_python_sdk.instrumentation.langgraph.patch import
-    patch_graph_methods
+from langtrace_python_sdk.instrumentation.langgraph.patch import patch_graph_methods
 
 
 class LanggraphInstrumentation(BaseInstrumentor):
@@ -41,24 +40,34 @@ class LanggraphInstrumentation(BaseInstrumentor):
 
         # List of modules to patch, with their corresponding patch names
         modules_to_patch = [
-            (
+            (
+                "langgraph.graph.graph",
+                [
+                    "add_node",
+                    "add_edge",
+                    "set_entry_point",
+                    "set_finish_point",
+                    "add_conditional_edges",
+                ],
+            ),
         ]
 
         for module_name, methods in modules_to_patch:
             module = importlib.import_module(module_name)
             for name, obj in inspect.getmembers(
                 module,
-                lambda member: inspect.isclass(member)
+                lambda member: inspect.isclass(member)
+                and member.__module__ == module.__name__,
             ):
-                for method_name, _ in inspect.getmembers(
+                for method_name, _ in inspect.getmembers(
+                    obj, predicate=inspect.isfunction
+                ):
                     if method_name in methods:
                         module = f"{name}.{method_name}"
                         wrap_function_wrapper(
                             module_name,
                             module,
-                            patch_graph_methods(
-                                module, tracer, version
-                            ),
+                            patch_graph_methods(module, tracer, version),
                         )
 
     def _uninstrument(self, **kwargs):
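The instrumentation above pairs `inspect.getmembers` with `wrapt.wrap_function_wrapper` to patch a fixed list of methods on every class a module defines. A self-contained sketch of that mechanism against an arbitrary module object; the wrapper here only logs, whereas the real code installs `patch_graph_methods`, which opens spans:

import inspect

from wrapt import wrap_function_wrapper


def make_tracing_wrapper(qualified_name: str):
    def wrapper(wrapped, instance, args, kwargs):
        # A real patch would start a span named after qualified_name here.
        print(f"calling {qualified_name}")
        return wrapped(*args, **kwargs)

    return wrapper


def patch_methods(module, method_names):
    """Wrap the named methods on every class defined directly in `module`."""
    for cls_name, obj in inspect.getmembers(module, inspect.isclass):
        if obj.__module__ != module.__name__:
            continue  # skip classes re-exported from elsewhere
        for method_name, _ in inspect.getmembers(obj, predicate=inspect.isfunction):
            if method_name in method_names:
                wrap_function_wrapper(
                    module,
                    f"{cls_name}.{method_name}",
                    make_tracing_wrapper(f"{cls_name}.{method_name}"),
                )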