langtrace-python-sdk 1.2.25__py3-none-any.whl → 1.3.2__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- examples/anthropic_example/completion.py +1 -1
- examples/chroma_example/basic.py +1 -1
- examples/cohere_example/__init__.py +0 -0
- examples/cohere_example/chat.py +26 -0
- examples/cohere_example/chat_stream.py +24 -0
- examples/cohere_example/embed_create.py +22 -0
- examples/fastapi_example/basic_route.py +40 -0
- examples/hiveagent_example/basic.py +23 -0
- examples/llamaindex_example/agent.py +86 -0
- examples/llamaindex_example/basic.py +1 -1
- examples/openai/chat_completion.py +1 -1
- examples/openai/function_calling.py +1 -1
- examples/perplexity_example/basic.py +5 -3
- examples/pinecone_example/basic.py +1 -1
- langtrace_python_sdk/constants/instrumentation/cohere.py +17 -0
- langtrace_python_sdk/constants/instrumentation/common.py +1 -0
- langtrace_python_sdk/extensions/langtrace_exporter.py +10 -2
- langtrace_python_sdk/instrumentation/cohere/__init__.py +0 -0
- langtrace_python_sdk/instrumentation/cohere/instrumentation.py +53 -0
- langtrace_python_sdk/instrumentation/cohere/patch.py +397 -0
- langtrace_python_sdk/instrumentation/llamaindex/instrumentation.py +19 -1
- langtrace_python_sdk/instrumentation/llamaindex/patch.py +46 -2
- langtrace_python_sdk/instrumentation/openai/instrumentation.py +25 -0
- langtrace_python_sdk/instrumentation/openai/patch.py +391 -12
- langtrace_python_sdk/langtrace.py +15 -7
- langtrace_python_sdk/version.py +1 -1
- {langtrace_python_sdk-1.2.25.dist-info → langtrace_python_sdk-1.3.2.dist-info}/METADATA +4 -2
- {langtrace_python_sdk-1.2.25.dist-info → langtrace_python_sdk-1.3.2.dist-info}/RECORD +30 -19
- {langtrace_python_sdk-1.2.25.dist-info → langtrace_python_sdk-1.3.2.dist-info}/WHEEL +1 -1
- {langtrace_python_sdk-1.2.25.dist-info → langtrace_python_sdk-1.3.2.dist-info}/licenses/LICENSE +0 -0
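
The headline changes are first-class Cohere instrumentation (new constants, instrumentation, and patch modules, plus runnable examples) and async-client coverage for the OpenAI wrappers. As orientation before the hunks, here is a hedged sketch of how the new Cohere tracing would be exercised end to end; `langtrace.init()` follows the SDK's documented setup, while the environment-variable names and model choice are illustrative assumptions (the shipped `examples/cohere_example/chat.py` is the authoritative version).

```python
# Hedged usage sketch, not the shipped example. Assumes langtrace.init()
# as documented for this SDK and the Cohere client's Client.chat API;
# env var names and the model are illustrative.
import os

import cohere
from langtrace_python_sdk import langtrace

langtrace.init(api_key=os.environ.get("LANGTRACE_API_KEY"))

co = cohere.Client(api_key=os.environ.get("COHERE_API_KEY"))

# Once instrumented, this call is wrapped by chat_create() from the new
# instrumentation/cohere/patch.py shown below.
response = co.chat(message="Tell me a joke", model="command-r")
print(response.text)
```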
langtrace_python_sdk/instrumentation/cohere/patch.py (new file):

```diff
@@ -0,0 +1,397 @@
+"""
+This module contains the patching logic for the Cohere library."""
+
+import json
+
+from langtrace.trace_attributes import Event, LLMSpanAttributes
+from opentelemetry import baggage
+from opentelemetry.trace import SpanKind
+from opentelemetry.trace.status import Status, StatusCode
+
+from langtrace_python_sdk.constants.instrumentation.cohere import APIS
+from langtrace_python_sdk.constants.instrumentation.common import (
+    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    SERVICE_PROVIDERS,
+)
+from langtrace_python_sdk.utils.llm import estimate_tokens
+
+
+def embed_create(original_method, version, tracer):
+    """Wrap the `embed_create` method."""
+
+    def traced_method(wrapped, instance, args, kwargs):
+        service_provider = SERVICE_PROVIDERS["COHERE"]
+        extra_attributes = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)
+
+        span_attributes = {
+            "langtrace.sdk.name": "langtrace-python-sdk",
+            "langtrace.service.name": service_provider,
+            "langtrace.service.type": "llm",
+            "langtrace.service.version": version,
+            "langtrace.version": "1.0.0",
+            "url.full": APIS["EMBED_CREATE"]["URL"],
+            "llm.api": APIS["EMBED_CREATE"]["ENDPOINT"],
+            "llm.model": kwargs.get("model"),
+            "llm.prompts": "",
+            "llm.embedding_dataset_id": kwargs.get("dataset_id"),
+            "llm.embedding_input_type": kwargs.get("input_type"),
+            "llm.embedding_job_name": kwargs.get("name"),
+            **(extra_attributes if extra_attributes is not None else {}),
+        }
+
+        attributes = LLMSpanAttributes(**span_attributes)
+
+        if kwargs.get("user") is not None:
+            attributes.llm_user = kwargs.get("user")
+
+        span = tracer.start_span(APIS["EMBED_CREATE"]["METHOD"], kind=SpanKind.CLIENT)
+        for field, value in attributes.model_dump(by_alias=True).items():
+            if value is not None:
+                span.set_attribute(field, value)
+        try:
+            # Attempt to call the original method
+            result = wrapped(*args, **kwargs)
+            span.set_status(StatusCode.OK)
+            span.end()
+            return result
+
+        except Exception as error:
+            span.record_exception(error)
+            span.set_status(Status(StatusCode.ERROR, str(error)))
+            span.end()
+            raise
+
+    return traced_method
+
+
+def chat_create(original_method, version, tracer):
+    """Wrap the `chat_create` method."""
+
+    def traced_method(wrapped, instance, args, kwargs):
+        service_provider = SERVICE_PROVIDERS["COHERE"]
+
+        message = kwargs.get("message", "")
+        prompts = json.dumps([{"role": "USER", "content": message}])
+        preamble = kwargs.get("preamble")
+        if preamble:
+            prompts = json.dumps(
+                [{"role": "system", "content": preamble}]
+                + [{"role": "USER", "content": message}]
+            )
+
+        chat_history = kwargs.get("chat_history")
+        if chat_history:
+            history = [
+                {
+                    "message": {
+                        "role": (
+                            item.get("role") if item.get("role") is not None else "USER"
+                        ),
+                        "content": (
+                            item.get("message")
+                            if item.get("message") is not None
+                            else ""
+                        ),
+                    }
+                }
+                for item in chat_history
+            ]
+            prompts = prompts + json.dumps(history)
+
+        extra_attributes = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)
+
+        span_attributes = {
+            "langtrace.sdk.name": "langtrace-python-sdk",
+            "langtrace.service.name": service_provider,
+            "langtrace.service.type": "llm",
+            "langtrace.service.version": version,
+            "langtrace.version": "1.0.0",
+            "url.full": APIS["CHAT_CREATE"]["URL"],
+            "llm.api": APIS["CHAT_CREATE"]["ENDPOINT"],
+            "llm.model": (
+                kwargs.get("model") if kwargs.get("model") is not None else "command-r"
+            ),
+            "llm.stream": False,
+            "llm.prompts": prompts,
+            **(extra_attributes if extra_attributes is not None else {}),
+        }
+
+        attributes = LLMSpanAttributes(**span_attributes)
+
+        if kwargs.get("temperature") is not None:
+            attributes.llm_temperature = kwargs.get("temperature")
+        if kwargs.get("max_tokens") is not None:
+            attributes.max_tokens = kwargs.get("max_tokens")
+        if kwargs.get("max_input_tokens") is not None:
+            attributes.max_input_tokens = kwargs.get("max_input_tokens")
+        if kwargs.get("p") is not None:
+            attributes.llm_top_p = kwargs.get("p")
+        if kwargs.get("k") is not None:
+            attributes.llm_top_k = kwargs.get("k")
+        if kwargs.get("user") is not None:
+            attributes.llm_user = kwargs.get("user")
+        if kwargs.get("conversation_id") is not None:
+            attributes.conversation_id = kwargs.get("conversation_id")
+        if kwargs.get("seed") is not None:
+            attributes.seed = kwargs.get("seed")
+        if kwargs.get("frequency_penalty") is not None:
+            attributes.frequency_penalty = kwargs.get("frequency_penalty")
+        if kwargs.get("presence_penalty") is not None:
+            attributes.presence_penalty = kwargs.get("presence_penalty")
+        if kwargs.get("connectors") is not None:
+            # stringify the list of objects
+            attributes.llm_connectors = json.dumps(kwargs.get("connectors"))
+        if kwargs.get("tools") is not None:
+            # stringify the list of objects
+            attributes.llm_tools = json.dumps(kwargs.get("tools"))
+        if kwargs.get("tool_results") is not None:
+            # stringify the list of objects
+            attributes.llm_tool_results = json.dumps(kwargs.get("tool_results"))
+
+        span = tracer.start_span(APIS["CHAT_CREATE"]["METHOD"], kind=SpanKind.CLIENT)
+
+        # Set the attributes on the span
+        for field, value in attributes.model_dump(by_alias=True).items():
+            if value is not None:
+                span.set_attribute(field, value)
+        try:
+            # Attempt to call the original method
+            result = wrapped(*args, **kwargs)
+
+            # Set the response attributes
+            if (hasattr(result, "generation_id")) and (
+                result.generation_id is not None
+            ):
+                span.set_attribute("llm.generation_id", result.generation_id)
+            if (hasattr(result, "response_id")) and (result.response_id is not None):
+                span.set_attribute("llm.response_id", result.response_id)
+            if (hasattr(result, "is_search_required")) and (
+                result.is_search_required is not None
+            ):
+                span.set_attribute("llm.is_search_required", result.is_search_required)
+
+            if kwargs.get("stream") is False or kwargs.get("stream") is None:
+                if hasattr(result, "text") and result.text is not None:
+                    if (
+                        hasattr(result, "chat_history")
+                        and result.chat_history is not None
+                    ):
+                        responses = [
+                            {
+                                "message": {
+                                    "role": (
+                                        item.role
+                                        if hasattr(item, "role")
+                                        and item.role is not None
+                                        else "USER"
+                                    ),
+                                    "content": (
+                                        item.message
+                                        if hasattr(item, "message")
+                                        and item.message is not None
+                                        else ""
+                                    ),
+                                }
+                            }
+                            for item in result.chat_history
+                        ]
+                        span.set_attribute("llm.responses", json.dumps(responses))
+                    else:
+                        responses = [
+                            {"message": {"role": "CHATBOT", "content": result.text}}
+                        ]
+                        span.set_attribute("llm.responses", json.dumps(responses))
+                else:
+                    responses = []
+                    span.set_attribute("llm.responses", json.dumps(responses))
+
+                # Get the usage
+                if hasattr(result, "meta") and result.meta is not None:
+                    if (
+                        hasattr(result.meta, "billed_units")
+                        and result.meta.billed_units is not None
+                    ):
+                        usage = result.meta.billed_units
+                        if usage is not None:
+                            usage_dict = {
+                                "input_tokens": (
+                                    usage.input_tokens
+                                    if usage.input_tokens is not None
+                                    else 0
+                                ),
+                                "output_tokens": (
+                                    usage.output_tokens
+                                    if usage.output_tokens is not None
+                                    else 0
+                                ),
+                                "total_tokens": (
+                                    usage.input_tokens + usage.output_tokens
+                                    if usage.input_tokens is not None
+                                    and usage.output_tokens is not None
+                                    else 0
+                                ),
+                            }
+                            span.set_attribute(
+                                "llm.token.counts", json.dumps(usage_dict)
+                            )
+                span.set_status(StatusCode.OK)
+                span.end()
+                return result
+            else:
+                # For older versions, stream was passed as a parameter
+                return result
+
+        except Exception as error:
+            span.record_exception(error)
+            span.set_status(Status(StatusCode.ERROR, str(error)))
+            span.end()
+            raise
+
+    return traced_method
+
+
+def chat_stream(original_method, version, tracer):
+    """Wrap the `chat_stream` method."""
+
+    def traced_method(wrapped, instance, args, kwargs):
+        service_provider = SERVICE_PROVIDERS["COHERE"]
+
+        message = kwargs.get("message", "")
+        prompt_tokens = estimate_tokens(message)
+        prompts = json.dumps([{"role": "USER", "content": message}])
+        preamble = kwargs.get("preamble")
+        if preamble:
+            prompts = json.dumps(
+                [{"role": "system", "content": preamble}]
+                + [{"role": "USER", "content": message}]
+            )
+
+        chat_history = kwargs.get("chat_history")
+        if chat_history:
+            history = [
+                {
+                    "message": {
+                        "role": (
+                            item.get("role") if item.get("role") is not None else "USER"
+                        ),
+                        "content": (
+                            item.get("message")
+                            if item.get("message") is not None
+                            else ""
+                        ),
+                    }
+                }
+                for item in chat_history
+            ]
+            prompts = prompts + json.dumps(history)
+
+        extra_attributes = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)
+
+        span_attributes = {
+            "langtrace.sdk.name": "langtrace-python-sdk",
+            "langtrace.service.name": service_provider,
+            "langtrace.service.type": "llm",
+            "langtrace.service.version": version,
+            "langtrace.version": "1.0.0",
+            "url.full": APIS["CHAT_STREAM"]["URL"],
+            "llm.api": APIS["CHAT_STREAM"]["ENDPOINT"],
+            "llm.model": (
+                kwargs.get("model") if kwargs.get("model") is not None else "command-r"
+            ),
+            "llm.stream": True,
+            "llm.prompts": prompts,
+            **(extra_attributes if extra_attributes is not None else {}),
+        }
+
+        attributes = LLMSpanAttributes(**span_attributes)
+
+        if kwargs.get("temperature") is not None:
+            attributes.llm_temperature = kwargs.get("temperature")
+        if kwargs.get("max_tokens") is not None:
+            attributes.max_tokens = kwargs.get("max_tokens")
+        if kwargs.get("max_input_tokens") is not None:
+            attributes.max_input_tokens = kwargs.get("max_input_tokens")
+        if kwargs.get("p") is not None:
+            attributes.llm_top_p = kwargs.get("p")
+        if kwargs.get("k") is not None:
+            attributes.llm_top_k = kwargs.get("k")
+        if kwargs.get("user") is not None:
+            attributes.llm_user = kwargs.get("user")
+        if kwargs.get("conversation_id") is not None:
+            attributes.conversation_id = kwargs.get("conversation_id")
+        if kwargs.get("seed") is not None:
+            attributes.seed = kwargs.get("seed")
+        if kwargs.get("frequency_penalty") is not None:
+            attributes.frequency_penalty = kwargs.get("frequency_penalty")
+        if kwargs.get("presence_penalty") is not None:
+            attributes.presence_penalty = kwargs.get("presence_penalty")
+        if kwargs.get("connectors") is not None:
+            # stringify the list of objects
+            attributes.llm_connectors = json.dumps(kwargs.get("connectors"))
+        if kwargs.get("tools") is not None:
+            # stringify the list of objects
+            attributes.llm_tools = json.dumps(kwargs.get("tools"))
+        if kwargs.get("tool_results") is not None:
+            # stringify the list of objects
+            attributes.llm_tool_results = json.dumps(kwargs.get("tool_results"))
+
+        span = tracer.start_span(APIS["CHAT_CREATE"]["METHOD"], kind=SpanKind.CLIENT)
+        for field, value in attributes.model_dump(by_alias=True).items():
+            if value is not None:
+                span.set_attribute(field, value)
+        try:
+            # Attempt to call the original method
+            result = wrapped(*args, **kwargs)
+
+            result_content = []
+            span.add_event(Event.STREAM_START.value)
+            completion_tokens = 0
+            try:
+                for event in result:
+                    if hasattr(event, "text") and event.text is not None:
+                        completion_tokens += estimate_tokens(event.text)
+                        content = event.text
+                    else:
+                        content = ""
+                    span.add_event(
+                        Event.STREAM_OUTPUT.value, {"response": "".join(content)}
+                    )
+                    result_content.append(content)
+                    yield event
+            finally:
+
+                # Finalize span after processing all chunks
+                span.add_event(Event.STREAM_END.value)
+                span.set_attribute(
+                    "llm.token.counts",
+                    json.dumps(
+                        {
+                            "input_tokens": prompt_tokens,
+                            "output_tokens": completion_tokens,
+                            "total_tokens": prompt_tokens + completion_tokens,
+                        }
+                    ),
+                )
+                span.set_attribute(
+                    "llm.responses",
+                    json.dumps(
+                        [
+                            {
+                                "message": {
+                                    "role": "CHATBOT",
+                                    "content": "".join(result_content),
+                                }
+                            }
+                        ]
+                    ),
+                )
+                span.set_status(StatusCode.OK)
+                span.end()
+
+        except Exception as error:
+            span.record_exception(error)
+            span.set_status(Status(StatusCode.ERROR, str(error)))
+            span.end()
+            raise
+
+    return traced_method
```
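
All three factories above (`embed_create`, `chat_create`, `chat_stream`) return a `traced_method` closure in the `(wrapped, instance, args, kwargs)` shape that wrapt expects. The accompanying `instrumentation/cohere/instrumentation.py` (+53 lines, not expanded in this diff view) presumably registers them the same way the llamaindex and openai instrumentations below do; in this sketch the `"cohere.client"` / `"Client.*"` target strings are assumptions about the Cohere client layout, not taken from the diff.

```python
# Hedged sketch of the registration side; mirrors the wrap_function_wrapper
# pattern visible in the llamaindex/openai hunks later in this diff.
import importlib.metadata

from opentelemetry.trace import get_tracer
from wrapt import wrap_function_wrapper

from langtrace_python_sdk.instrumentation.cohere.patch import (
    chat_create,
    chat_stream,
    embed_create,
)


def instrument_cohere(tracer_provider=None):
    tracer = get_tracer(__name__, "", tracer_provider)
    version = importlib.metadata.version("cohere")

    wrap_function_wrapper(
        "cohere.client",  # assumed module path
        "Client.chat",  # assumed attribute
        chat_create("cohere.client.chat", version, tracer),
    )
    wrap_function_wrapper(
        "cohere.client",
        "Client.chat_stream",
        chat_stream("cohere.client.chat_stream", version, tracer),
    )
    wrap_function_wrapper(
        "cohere.client",
        "Client.embed",
        embed_create("cohere.client.embed", version, tracer),
    )
```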
langtrace_python_sdk/instrumentation/llamaindex/instrumentation.py:

```diff
@@ -10,7 +10,14 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from opentelemetry.trace import get_tracer
 from wrapt import wrap_function_wrapper

-from langtrace_python_sdk.instrumentation.llamaindex.patch import generic_patch
+from langtrace_python_sdk.instrumentation.llamaindex.patch import (
+    generic_patch,
+    async_generic_patch,
+)
+
+import logging
+
+logging.basicConfig(level=logging.FATAL)

 import logging

@@ -62,6 +69,17 @@ class LlamaindexInstrumentation(BaseInstrumentor):
                 ),
             )

+            wrap_function_wrapper(
+                module_name,
+                ".".join([name, method_name]),
+                async_generic_patch(
+                    f"llamaindex.{name}.{method_name}",
+                    task,
+                    tracer,
+                    version,
+                ),
+            )
+
     def _instrument_module(self, module_name):
         pass

```
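
Both hunks rely on wrapt's wrapper contract: `wrap_function_wrapper(module, name, wrapper)` replaces `module.name` with a proxy that calls `wrapper(wrapped, instance, args, kwargs)`, which is exactly the signature of every `traced_method` in this release. A minimal, self-contained illustration with toy names (only wrapt's documented API is relied on):

```python
# Toy wrapt demo: the same mechanism the instrumentations use to wrap
# e.g. "openai.resources.chat.completions" / "Completions.create".
from wrapt import wrap_function_wrapper


def greet(name):
    return f"hello {name}"


def wrapper(wrapped, instance, args, kwargs):
    # instance is None for plain module-level functions
    print("before call")
    result = wrapped(*args, **kwargs)
    print("after call")
    return result


wrap_function_wrapper(__name__, "greet", wrapper)

print(greet("world"))  # the wrapper now runs around the original call
```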
langtrace_python_sdk/instrumentation/llamaindex/patch.py:

```diff
@@ -8,7 +8,9 @@ from opentelemetry.trace import SpanKind
 from opentelemetry.trace.status import Status, StatusCode

 from langtrace_python_sdk.constants.instrumentation.common import (
-    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    SERVICE_PROVIDERS,
+)


 def generic_patch(method, task, tracer, version):
@@ -26,7 +28,7 @@ def generic_patch(method, task, tracer, version):
             "langtrace.service.version": version,
             "langtrace.version": "1.0.0",
             "llamaindex.task.name": task,
-            **(extra_attributes if extra_attributes is not None else {})
+            **(extra_attributes if extra_attributes is not None else {}),
         }

         attributes = FrameworkSpanAttributes(**span_attributes)
@@ -51,3 +53,45 @@ def generic_patch(method, task, tracer, version):
                 raise

     return traced_method
+
+
+def async_generic_patch(method, task, tracer, version):
+    """
+    A generic patch method that wraps a function with a span"""
+
+    async def traced_method(wrapped, instance, args, kwargs):
+        service_provider = SERVICE_PROVIDERS["LLAMAINDEX"]
+        extra_attributes = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)
+
+        span_attributes = {
+            "langtrace.sdk.name": "langtrace-python-sdk",
+            "langtrace.service.name": service_provider,
+            "langtrace.service.type": "framework",
+            "langtrace.service.version": version,
+            "langtrace.version": "1.0.0",
+            "llamaindex.task.name": task,
+            **(extra_attributes if extra_attributes is not None else {}),
+        }
+
+        attributes = FrameworkSpanAttributes(**span_attributes)
+
+        with tracer.start_as_current_span(method, kind=SpanKind.CLIENT) as span:
+            for field, value in attributes.model_dump(by_alias=True).items():
+                if value is not None:
+                    span.set_attribute(field, value)
+            try:
+                # Attempt to call the original method
+                result = await wrapped(*args, **kwargs)
+                span.set_status(StatusCode.OK)
+                return result
+            except Exception as e:
+                # Record the exception in the span
+                span.record_exception(e)
+
+                # Set the span status to indicate an error
+                span.set_status(Status(StatusCode.ERROR, str(e)))
+
+                # Reraise the exception to ensure it's not swallowed
+                raise
+
+    return traced_method
```
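
`async_generic_patch` keeps the span bookkeeping of `generic_patch` but awaits the wrapped coroutine inside the span, so async latency is captured end to end. A self-contained sketch of that core move, using a dummy coroutine and only the standard OpenTelemetry SDK (all names here are illustrative):

```python
import asyncio

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)
tracer = trace.get_tracer(__name__)


async def query_index(question):  # stand-in for an async llamaindex method
    await asyncio.sleep(0.01)
    return f"answer to {question!r}"


async def traced_query(question):
    # The span wraps the await, mirroring `result = await wrapped(...)` above.
    with tracer.start_as_current_span("llamaindex.query") as span:
        span.set_attribute("llamaindex.task.name", "query")
        return await query_index(question)


print(asyncio.run(traced_query("what changed in 1.3.2?")))
```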
langtrace_python_sdk/instrumentation/openai/instrumentation.py:

```diff
@@ -6,9 +6,12 @@ from opentelemetry.trace import get_tracer
 from wrapt import wrap_function_wrapper

 from langtrace_python_sdk.instrumentation.openai.patch import (
+    async_embeddings_create,
+    async_images_generate,
     chat_completions_create,
     embeddings_create,
     images_generate,
+    async_chat_completions_create,
 )

 import logging
@@ -25,21 +28,43 @@ class OpenAIInstrumentation(BaseInstrumentor):
         tracer_provider = kwargs.get("tracer_provider")
         tracer = get_tracer(__name__, "", tracer_provider)
         version = importlib.metadata.version("openai")
+
         wrap_function_wrapper(
             "openai.resources.chat.completions",
             "Completions.create",
             chat_completions_create("openai.chat.completions.create", version, tracer),
         )
+
+        wrap_function_wrapper(
+            "openai.resources.chat.completions",
+            "AsyncCompletions.create",
+            async_chat_completions_create(
+                "openai.chat.completions.create_stream", version, tracer
+            ),
+        )
+
         wrap_function_wrapper(
             "openai.resources.images",
             "Images.generate",
             images_generate("openai.images.generate", version, tracer),
         )
+
+        wrap_function_wrapper(
+            "openai.resources.images",
+            "AsyncImages.generate",
+            async_images_generate("openai.images.generate", version, tracer),
+        )
         wrap_function_wrapper(
             "openai.resources.embeddings",
             "Embeddings.create",
             embeddings_create("openai.embeddings.create", version, tracer),
         )

+        wrap_function_wrapper(
+            "openai.resources.embeddings",
+            "AsyncEmbeddings.create",
+            async_embeddings_create("openai.embeddings.create", version, tracer),
+        )
+
     def _uninstrument(self, **kwargs):
         pass
```
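
With `AsyncCompletions.create`, `AsyncImages.generate`, and `AsyncEmbeddings.create` now wrapped, async OpenAI clients should emit the same spans as their sync counterparts. A hedged end-to-end sketch (assumes `langtrace.init()` per the SDK docs and the `openai>=1.x` async client; the model name is illustrative):

```python
import asyncio

from langtrace_python_sdk import langtrace
from openai import AsyncOpenAI

langtrace.init()  # installs the AsyncCompletions.create wrapper shown above


async def main():
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    resp = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hi"}],
    )
    print(resp.choices[0].message.content)


asyncio.run(main())
```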