langtrace-python-sdk 1.3.4__py3-none-any.whl → 1.3.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/openai/async_tool_calling_nonstreaming.py +93 -0
- examples/openai/async_tool_calling_streaming.py +167 -0
- examples/openai/chat_completion.py +15 -16
- examples/openai/function_calling.py +14 -14
- examples/openai/tool_calling_nonstreaming.py +92 -0
- examples/openai/tool_calling_streaming.py +167 -0
- langtrace_python_sdk/instrumentation/openai/patch.py +175 -99
- langtrace_python_sdk/version.py +1 -1
- {langtrace_python_sdk-1.3.4.dist-info → langtrace_python_sdk-1.3.6.dist-info}/METADATA +5 -1
- {langtrace_python_sdk-1.3.4.dist-info → langtrace_python_sdk-1.3.6.dist-info}/RECORD +29 -8
- {langtrace_python_sdk-1.3.4.dist-info → langtrace_python_sdk-1.3.6.dist-info}/WHEEL +1 -1
- tests/__init__.py +0 -0
- tests/anthropic/test_anthropic.py +73 -0
- tests/chroma/test_chroma.py +64 -0
- tests/langchain/test_langchain.py +69 -0
- tests/langchain/test_langchain_community.py +69 -0
- tests/langchain/test_langchain_core.py +115 -0
- tests/openai/cassettes/test_async_chat_completion_streaming.yaml +158 -0
- tests/openai/cassettes/test_async_image_generation.yaml +97 -0
- tests/openai/cassettes/test_chat_completion.yaml +101 -0
- tests/openai/cassettes/test_chat_completion_streaming.yaml +200860 -0
- tests/openai/cassettes/test_image_generation.yaml +97 -0
- tests/openai/conftest.py +45 -0
- tests/openai/test_chat_completion.py +142 -0
- tests/openai/test_embeddings.py +0 -0
- tests/openai/test_image_generation.py +77 -0
- tests/pinecone/test_pinecone.py +72 -0
- tests/utils.py +21 -0
- {langtrace_python_sdk-1.3.4.dist-info → langtrace_python_sdk-1.3.6.dist-info}/licenses/LICENSE +0 -0
langtrace_python_sdk/instrumentation/openai/patch.py
CHANGED

@@ -75,12 +75,12 @@ def images_generate(original_method, version, tracer):
 
             span.set_status(StatusCode.OK)
             return result
-        except Exception as
+        except Exception as err:
             # Record the exception in the span
-            span.record_exception(
+            span.record_exception(err)
 
             # Set the span status to indicate an error
-            span.set_status(Status(StatusCode.ERROR, str(
+            span.set_status(Status(StatusCode.ERROR, str(err)))
 
             # Reraise the exception to ensure it's not swallowed
             raise
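The 1.3.4 side of this hunk is cut short by the diff viewer; the only substantive change is that 1.3.6 names the exception variable `err`. For readers unfamiliar with the idiom, a minimal sketch of the same OpenTelemetry error-handling pattern, assuming only `opentelemetry-api` (the tracer name and span name are illustrative, not the SDK's):

    from opentelemetry import trace
    from opentelemetry.trace import SpanKind
    from opentelemetry.trace.status import Status, StatusCode

    tracer = trace.get_tracer("demo-instrumentation")

    def traced_call(fn, *args, **kwargs):
        # Same shape as the patched OpenAI methods: success -> OK, failure -> record + reraise.
        with tracer.start_as_current_span("openai.images.generate", kind=SpanKind.CLIENT) as span:
            try:
                result = fn(*args, **kwargs)
                span.set_status(StatusCode.OK)
                return result
            except Exception as err:
                span.record_exception(err)  # attaches the traceback as a span event
                span.set_status(Status(StatusCode.ERROR, str(err)))
                raise  # reraise so the caller still sees the failure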
@@ -121,7 +121,8 @@ def async_images_generate(original_method, version, tracer):
         with tracer.start_as_current_span(
             APIS["IMAGES_GENERATION"]["METHOD"], kind=SpanKind.CLIENT
         ) as span:
-
+            items = attributes.model_dump(by_alias=True).items()
+            for field, value in items:
                 if value is not None:
                     span.set_attribute(field, value)
             try:
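The removed line here is blanked by the viewer; 1.3.6 simply binds the pydantic dump to `items` before iterating. A small illustration of why `model_dump(by_alias=True)` is used for span keys, with a stand-in model (the real one is the SDK's `LLMSpanAttributes`, which is not shown in this hunk):

    from typing import Optional
    from pydantic import BaseModel, Field

    # Stand-in for LLMSpanAttributes: snake_case fields aliased to dotted span keys.
    class SpanAttrs(BaseModel):
        llm_api: str = Field(alias="llm.api")
        llm_model: Optional[str] = Field(default=None, alias="llm.model")
        model_config = {"populate_by_name": True}

    attrs = SpanAttrs(llm_api="/v1/images/generations")
    items = attrs.model_dump(by_alias=True).items()
    for field, value in items:
        if value is not None:
            print(field, "->", value)  # prints: llm.api -> /v1/images/generations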
@@ -147,12 +148,12 @@ def async_images_generate(original_method, version, tracer):
 
             span.set_status(StatusCode.OK)
             return result
-        except Exception as
+        except Exception as err:
             # Record the exception in the span
-            span.record_exception(
+            span.record_exception(err)
 
             # Set the span status to indicate an error
-            span.set_status(Status(StatusCode.ERROR, str(
+            span.set_status(Status(StatusCode.ERROR, str(err)))
 
             # Reraise the exception to ensure it's not swallowed
             raise
@@ -181,9 +182,9 @@ def chat_completions_create(original_method, version, tracer):
         # handle tool calls in the kwargs
         llm_prompts = []
         for item in kwargs.get("messages", []):
-            if "tool_calls"
+            if hasattr(item, "tool_calls") and item.tool_calls is not None:
                 tool_calls = []
-                for tool_call in item
+                for tool_call in item.tool_calls:
                     tool_call_dict = {
                         "id": tool_call.id if hasattr(tool_call, "id") else "",
                         "type": tool_call.type if hasattr(tool_call, "type") else "",
@@ -202,8 +203,9 @@ def chat_completions_create(original_method, version, tracer):
                             ),
                         }
                     tool_calls.append(tool_call_dict)
-
-
+                llm_prompts.append(tool_calls)
+            else:
+                llm_prompts.append(item)
 
         span_attributes = {
             "langtrace.sdk.name": "langtrace-python-sdk",
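The net effect of the two hunks above: a message that carries a `tool_calls` attribute is serialized into plain dicts before landing in `llm.prompts`, while ordinary dict messages pass through unchanged (a plain dict has no `tool_calls` attribute, so the `hasattr` check is False). A sketch of that behavior, with `SimpleNamespace` standing in for the OpenAI SDK's typed message and tool-call objects:

    from types import SimpleNamespace

    tool_call = SimpleNamespace(
        id="call_abc123",
        type="function",
        function=SimpleNamespace(name="get_weather", arguments='{"city": "SF"}'),
    )
    messages = [
        {"role": "user", "content": "What is the weather in SF?"},  # dict: appended as-is
        SimpleNamespace(tool_calls=[tool_call]),  # typed message: serialized below
    ]

    llm_prompts = []
    for item in messages:
        if hasattr(item, "tool_calls") and item.tool_calls is not None:
            llm_prompts.append([
                {
                    "id": tc.id,
                    "type": tc.type,
                    "function": {"name": tc.function.name, "arguments": tc.function.arguments},
                }
                for tc in item.tool_calls
            ])
        else:
            llm_prompts.append(item)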
@@ -213,13 +215,14 @@ def chat_completions_create(original_method, version, tracer):
             "langtrace.version": "1.0.0",
             "url.full": base_url,
             "llm.api": APIS["CHAT_COMPLETION"]["ENDPOINT"],
-            "llm.prompts": json.dumps(
+            "llm.prompts": json.dumps(llm_prompts),
             "llm.stream": kwargs.get("stream"),
             **(extra_attributes if extra_attributes is not None else {}),
         }
 
         attributes = LLMSpanAttributes(**span_attributes)
 
+        tools = []
         if kwargs.get("temperature") is not None:
             attributes.llm_temperature = kwargs.get("temperature")
         if kwargs.get("top_p") is not None:
@@ -227,7 +230,11 @@ def chat_completions_create(original_method, version, tracer):
         if kwargs.get("user") is not None:
             attributes.llm_user = kwargs.get("user")
         if kwargs.get("functions") is not None:
-
+            tools.append(json.dumps(kwargs.get("functions")))
+        if kwargs.get("tools") is not None:
+            tools.append(json.dumps(kwargs.get("tools")))
+        if len(tools) > 0:
+            attributes.llm_tools = json.dumps(tools)
 
         # TODO(Karthik): Gotta figure out how to handle streaming with context
         # with tracer.start_as_current_span(APIS["CHAT_COMPLETION"]["METHOD"],
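With this hunk, legacy `functions` definitions and current `tools` definitions both end up JSON-encoded in a single `llm.tools` span attribute. A hedged sketch of a request that would exercise the new branch once the SDK is initialized; the model name and tool schema are examples, not part of the SDK (the shipped versions live in the new examples/openai/tool_calling_*.py files), and `langtrace.init()` arguments are omitted here:

    from langtrace_python_sdk import langtrace
    from openai import OpenAI

    langtrace.init()  # init arguments omitted; see the project README
    client = OpenAI()

    tools = [{
        "type": "function",
        "function": {
            "name": "get_current_weather",  # example schema only
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {"location": {"type": "string"}},
                "required": ["location"],
            },
        },
    }]

    # The patched create() now records the serialized tool definitions in the
    # llm.tools span attribute, alongside llm.prompts.
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "What's the weather like in Boston?"}],
        tools=tools,
    )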
@@ -252,16 +259,7 @@ def chat_completions_create(original_method, version, tracer):
                                 if choice.message and choice.message.role
                                 else "assistant"
                             ),
-                            "content": (
-                                choice.message.content
-                                if choice.message and choice.message.content
-                                else (
-                                    choice.message.function_call.arguments
-                                    if choice.message
-                                    and choice.message.function_call.arguments
-                                    else ""
-                                )
-                            ),
+                            "content": extract_content(choice),
                             **(
                                 {
                                     "content_filter_results": choice[
@@ -319,6 +317,7 @@ def chat_completions_create(original_method, version, tracer):
                     span,
                     prompt_tokens,
                     function_call=kwargs.get("functions") is not None,
+                    tool_calls=kwargs.get("tools") is not None,
                 )
 
         except Exception as error:
@@ -327,7 +326,9 @@ def chat_completions_create(original_method, version, tracer):
             span.end()
             raise
 
-    def handle_streaming_response(
+    def handle_streaming_response(
+        result, span, prompt_tokens, function_call=False, tool_calls=False
+    ):
         """Process and yield streaming response chunks."""
         result_content = []
         span.add_event(Event.STREAM_START.value)
@@ -337,37 +338,40 @@ def chat_completions_create(original_method, version, tracer):
             if hasattr(chunk, "model") and chunk.model is not None:
                 span.set_attribute("llm.model", chunk.model)
             if hasattr(chunk, "choices") and chunk.choices is not None:
-                    )
-                )
-                for choice in chunk.choices
-            ]
+                if not function_call and not tool_calls:
+                    for choice in chunk.choices:
+                        if choice.delta and choice.delta.content is not None:
+                            token_counts = estimate_tokens(choice.delta.content)
+                            completion_tokens += token_counts
+                            content = [choice.delta.content]
+                elif function_call:
+                    for choice in chunk.choices:
+                        if (
+                            choice.delta
+                            and choice.delta.function_call
+                            and choice.delta.function_call.arguments is not None
+                        ):
+                            token_counts = estimate_tokens(
+                                choice.delta.function_call.arguments
+                            )
+                            completion_tokens += token_counts
+                            content = [choice.delta.function_call.arguments]
+                elif tool_calls:
+                    # TODO(Karthik): Tool calls streaming is tricky. The chunks after the
+                    # first one are missing the function name and id though the arguments
+                    # are spread across the chunks.
+                    content = []
             else:
                 content = []
             span.add_event(
-                Event.STREAM_OUTPUT.value,
+                Event.STREAM_OUTPUT.value,
+                {
+                    "response": (
+                        "".join(content)
+                        if len(content) > 0 and content[0] is not None
+                        else ""
+                    )
+                },
             )
             result_content.append(content[0] if len(content) > 0 else "")
             yield chunk
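The TODO in the new `tool_calls` branch is worth unpacking: in OpenAI's v1 streaming format, only the first chunk of each tool call carries the function name and id, and the argument string arrives spread across later chunks, so this release records empty stream events for tool-call streams rather than guessing. A hedged sketch of how a client could reassemble the calls, assuming the v1 chunk shape (`choices[].delta.tool_calls[]` with `.index`, `.id`, and `.function`):

    def collect_tool_calls(stream):
        """Merge streamed tool-call fragments by index (client-side sketch)."""
        calls = {}  # index -> {"id", "name", "arguments"}
        for chunk in stream:
            for choice in chunk.choices:
                for tc in choice.delta.tool_calls or []:
                    slot = calls.setdefault(tc.index, {"id": "", "name": "", "arguments": ""})
                    if tc.id:  # present only on the first chunk of each call
                        slot["id"] = tc.id
                    if tc.function and tc.function.name:
                        slot["name"] = tc.function.name
                    if tc.function and tc.function.arguments:
                        slot["arguments"] += tc.function.arguments  # args span many chunks
        return calls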
@@ -422,6 +426,34 @@ def async_chat_completions_create(original_method, version, tracer):
 
     extra_attributes = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)
 
+        # handle tool calls in the kwargs
+        llm_prompts = []
+        for item in kwargs.get("messages", []):
+            if hasattr(item, "tool_calls") and item.tool_calls is not None:
+                tool_calls = []
+                for tool_call in item.tool_calls:
+                    tool_call_dict = {
+                        "id": tool_call.id if hasattr(tool_call, "id") else "",
+                        "type": tool_call.type if hasattr(tool_call, "type") else "",
+                    }
+                    if hasattr(tool_call, "function"):
+                        tool_call_dict["function"] = {
+                            "name": (
+                                tool_call.function.name
+                                if hasattr(tool_call.function, "name")
+                                else ""
+                            ),
+                            "arguments": (
+                                tool_call.function.arguments
+                                if hasattr(tool_call.function, "arguments")
+                                else ""
+                            ),
+                        }
+                    tool_calls.append(tool_call_dict)
+                llm_prompts.append(tool_calls)
+            else:
+                llm_prompts.append(item)
+
 
         span_attributes = {
             "langtrace.sdk.name": "langtrace-python-sdk",
             "langtrace.service.name": service_provider,
@@ -430,13 +462,14 @@ def async_chat_completions_create(original_method, version, tracer):
             "langtrace.version": "1.0.0",
             "url.full": base_url,
             "llm.api": APIS["CHAT_COMPLETION"]["ENDPOINT"],
-            "llm.prompts": json.dumps(
+            "llm.prompts": json.dumps(llm_prompts),
             "llm.stream": kwargs.get("stream"),
             **(extra_attributes if extra_attributes is not None else {}),
         }
 
         attributes = LLMSpanAttributes(**span_attributes)
 
+        tools = []
         if kwargs.get("temperature") is not None:
             attributes.llm_temperature = kwargs.get("temperature")
         if kwargs.get("top_p") is not None:
@@ -444,7 +477,11 @@ def async_chat_completions_create(original_method, version, tracer):
         if kwargs.get("user") is not None:
             attributes.llm_user = kwargs.get("user")
         if kwargs.get("functions") is not None:
-
+            tools.append(json.dumps(kwargs.get("functions")))
+        if kwargs.get("tools") is not None:
+            tools.append(json.dumps(kwargs.get("tools")))
+        if len(tools) > 0:
+            attributes.llm_tools = json.dumps(tools)
 
         # TODO(Karthik): Gotta figure out how to handle streaming with context
         # with tracer.start_as_current_span(APIS["CHAT_COMPLETION"]["METHOD"],
@@ -469,16 +506,7 @@ def async_chat_completions_create(original_method, version, tracer):
                                 if choice.message and choice.message.role
                                 else "assistant"
                             ),
-                            "content": (
-                                choice.message.content
-                                if choice.message and choice.message.content
-                                else (
-                                    choice.message.function_call.arguments
-                                    if choice.message
-                                    and choice.message.function_call.arguments
-                                    else ""
-                                )
-                            ),
+                            "content": extract_content(choice),
                             **(
                                 {
                                     "content_filter_results": choice[
@@ -536,6 +564,7 @@ def async_chat_completions_create(original_method, version, tracer):
                     span,
                     prompt_tokens,
                     function_call=kwargs.get("functions") is not None,
+                    tool_calls=kwargs.get("tools") is not None,
                 )
 
         except Exception as error:
@@ -545,7 +574,7 @@ def async_chat_completions_create(original_method, version, tracer):
             raise
 
     async def ahandle_streaming_response(
-        result, span, prompt_tokens, function_call=False
+        result, span, prompt_tokens, function_call=False, tool_calls=False
    ):
         """Process and yield streaming response chunks."""
         result_content = []
@@ -556,37 +585,40 @@ def async_chat_completions_create(original_method, version, tracer):
             if hasattr(chunk, "model") and chunk.model is not None:
                 span.set_attribute("llm.model", chunk.model)
             if hasattr(chunk, "choices") and chunk.choices is not None:
-                    )
-                )
-                for choice in chunk.choices
-            ]
+                if not function_call and not tool_calls:
+                    for choice in chunk.choices:
+                        if choice.delta and choice.delta.content is not None:
+                            token_counts = estimate_tokens(choice.delta.content)
+                            completion_tokens += token_counts
+                            content = [choice.delta.content]
+                elif function_call:
+                    for choice in chunk.choices:
+                        if (
+                            choice.delta
+                            and choice.delta.function_call
+                            and choice.delta.function_call.arguments is not None
+                        ):
+                            token_counts = estimate_tokens(
+                                choice.delta.function_call.arguments
+                            )
+                            completion_tokens += token_counts
+                            content = [choice.delta.function_call.arguments]
+                elif tool_calls:
+                    # TODO(Karthik): Tool calls streaming is tricky. The chunks after the
+                    # first one are missing the function name and id though the arguments
+                    # are spread across the chunks.
+                    content = []
             else:
                 content = []
             span.add_event(
-                Event.STREAM_OUTPUT.value,
+                Event.STREAM_OUTPUT.value,
+                {
+                    "response": (
+                        "".join(content)
+                        if len(content) > 0 and content[0] is not None
+                        else ""
+                    )
+                },
             )
             result_content.append(content[0] if len(content) > 0 else "")
             yield chunk
@@ -673,12 +705,12 @@ def embeddings_create(original_method, version, tracer):
             result = wrapped(*args, **kwargs)
             span.set_status(StatusCode.OK)
             return result
-        except Exception as
+        except Exception as err:
             # Record the exception in the span
-            span.record_exception(
+            span.record_exception(err)
 
             # Set the span status to indicate an error
-            span.set_status(Status(StatusCode.ERROR, str(
+            span.set_status(Status(StatusCode.ERROR, str(err)))
 
             # Reraise the exception to ensure it's not swallowed
             raise
@@ -736,14 +768,58 @@ def async_embeddings_create(original_method, version, tracer):
             result = await wrapped(*args, **kwargs)
             span.set_status(StatusCode.OK)
             return result
-        except Exception as
+        except Exception as err:
             # Record the exception in the span
-            span.record_exception(
+            span.record_exception(err)
 
             # Set the span status to indicate an error
-            span.set_status(Status(StatusCode.ERROR, str(
+            span.set_status(Status(StatusCode.ERROR, str(err)))
 
             # Reraise the exception to ensure it's not swallowed
             raise
 
     return traced_method
+
+
+def extract_content(choice):
+    # Check if choice.message exists and has a content attribute
+    if (
+        hasattr(choice, "message")
+        and hasattr(choice.message, "content")
+        and choice.message.content is not None
+    ):
+        return choice.message.content
+
+    # Check if choice.message has tool_calls and extract information accordingly
+    elif (
+        hasattr(choice, "message")
+        and hasattr(choice.message, "tool_calls")
+        and choice.message.tool_calls is not None
+    ):
+        result = [
+            {
+                "id": tool_call.id,
+                "type": tool_call.type,
+                "function": {
+                    "name": tool_call.function.name,
+                    "arguments": tool_call.function.arguments,
+                },
+            }
+            for tool_call in choice.message.tool_calls
+        ]
+        return result
+
+    # Check if choice.message has a function_call and extract information accordingly
+    elif (
+        hasattr(choice, "message")
+        and hasattr(choice.message, "function_call")
+        and choice.message.function_call is not None
+    ):
+        return {
+            "name": choice.message.function_call.name,
+            "arguments": choice.message.function_call.arguments,
+        }
+
+    # Return an empty string if none of the above conditions are met
+    else:
+        return ""
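The new module-level `extract_content` centralizes the content/tool_calls/function_call fallback that both create wrappers previously inlined, and adds a tool_calls branch the old code lacked. Its behavior on mocked choices, with `SimpleNamespace` standing in for the OpenAI response types:

    from types import SimpleNamespace as NS
    from langtrace_python_sdk.instrumentation.openai.patch import extract_content

    plain = NS(message=NS(content="hi", tool_calls=None, function_call=None))
    fn_call = NS(message=NS(content=None, tool_calls=None,
                            function_call=NS(name="f", arguments="{}")))
    empty = NS(message=NS(content=None, tool_calls=None, function_call=None))

    assert extract_content(plain) == "hi"
    assert extract_content(fn_call) == {"name": "f", "arguments": "{}"}
    assert extract_content(empty) == ""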
langtrace_python_sdk/version.py
CHANGED

@@ -1 +1 @@
-__version__ = "1.3.4"
+__version__ = "1.3.6"
{langtrace_python_sdk-1.3.4.dist-info → langtrace_python_sdk-1.3.6.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: langtrace-python-sdk
-Version: 1.3.4
+Version: 1.3.6
 Summary: Python SDK for LangTrace
 Project-URL: Homepage, https://github.com/Scale3-Labs/langtrace-python-sdk
 Author-email: Scale3 Labs <engineering@scale3labs.com>
@@ -25,6 +25,10 @@ Requires-Dist: langchain-openai; extra == 'dev'
 Requires-Dist: llama-index; extra == 'dev'
 Requires-Dist: openai; extra == 'dev'
 Requires-Dist: python-dotenv; extra == 'dev'
+Provides-Extra: test
+Requires-Dist: pytest; extra == 'test'
+Requires-Dist: pytest-asyncio; extra == 'test'
+Requires-Dist: pytest-vcr; extra == 'test'
 Description-Content-Type: text/markdown
 
 # [Langtrace](https://www.langtrace.ai)
{langtrace_python_sdk-1.3.4.dist-info → langtrace_python_sdk-1.3.6.dist-info}/RECORD
CHANGED

@@ -17,16 +17,20 @@ examples/llamaindex_example/agent.py,sha256=_iIXy9lfDz6ySf6aTeeRqejlfGnXZ7msxLBj
 examples/llamaindex_example/basic.py,sha256=gvns3oDUy0c4I5ewnj9-B36_1La8y6qD3VQaq6v3syM,654
 examples/llamaindex_example/data/abramov.txt,sha256=Ou-GyWZm5AjHLgxviBoRE9ikNv5MScsF0cd--0vVVhI,32667
 examples/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-examples/openai/
+examples/openai/async_tool_calling_nonstreaming.py,sha256=foomPKwpju0PMazdE3xNrdZWguUYMnNXibqq01-uBvc,3806
+examples/openai/async_tool_calling_streaming.py,sha256=TayOsb0jcYYlFpnWWUYxwCVKuJhTfGfE8LkjYJNGSz4,6900
+examples/openai/chat_completion.py,sha256=B7djvaprhEhegKqQxAh95yhALIYVtanWid_w75uredg,1754
 examples/openai/embeddings_create.py,sha256=AhDNAqg-WzRYLJAE_b2RKGjuVCh4aZSU7MxcZv2kCHQ,518
-examples/openai/function_calling.py,sha256=
+examples/openai/function_calling.py,sha256=6Nm1ZjP4iKx1Za7ch3zIciQ5zcXWBb2-mpYfIhPs8oo,2320
 examples/openai/images_generate.py,sha256=ZioxTuHKE_yYlhpESqXKVzdkiwdegkmLVB7N8T2LU00,506
+examples/openai/tool_calling_nonstreaming.py,sha256=MxjUGD6Q2zg522E6kymGvXOikoL3qMoZf6pLQgws8zw,3776
+examples/openai/tool_calling_streaming.py,sha256=WnWWlgDqKuqN2DtWbpJs_JvmmQehBZp0Ke3ZXvCJdQw,6860
 examples/perplexity_example/basic.py,sha256=oTLwEYlvpD4wEnqEUrUSlQ0SeQ0u50Jeab4ggkikQg0,671
 examples/pinecone_example/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 examples/pinecone_example/basic.py,sha256=OkYjN3J5kxw-kloOV3Q-iyI6opkbarWsMom-_AMP2ZA,893
 langtrace_python_sdk/__init__.py,sha256=SlHg447-nQBbw8exRNJP_OyHUZ39Sldb7aaQ35hIRm8,262
 langtrace_python_sdk/langtrace.py,sha256=83-AkdASO7UF9FHR9BDZUSeYv9GFZkJJQD2YLKbqzo8,3562
-langtrace_python_sdk/version.py,sha256=
+langtrace_python_sdk/version.py,sha256=5ZbAQtod5QalTI1C2N07edlxplzG_Q2XvGOSyOok4uA,22
 langtrace_python_sdk/constants/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langtrace_python_sdk/constants/exporter/langtrace_exporter.py,sha256=5MNjnAOg-4am78J3gVMH6FSwq5N8TOj72ugkhsw4vi0,46
 langtrace_python_sdk/constants/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -62,14 +66,31 @@ langtrace_python_sdk/instrumentation/llamaindex/instrumentation.py,sha256=D7_HPv
 langtrace_python_sdk/instrumentation/llamaindex/patch.py,sha256=8IM2dedF81w8_vVyA56JptyvlQl_bQO4UcB56sptuGs,3700
 langtrace_python_sdk/instrumentation/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langtrace_python_sdk/instrumentation/openai/instrumentation.py,sha256=Pv4n4z_kSxvZGVxrj3AopBoWQSxIOtMKolkxHrchRdM,2162
-langtrace_python_sdk/instrumentation/openai/patch.py,sha256=
+langtrace_python_sdk/instrumentation/openai/patch.py,sha256=ou8_48lmHmtI9UTU--QVl7KaAIeBnNtqOuHtnUQqjpU,34590
 langtrace_python_sdk/instrumentation/pinecone/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langtrace_python_sdk/instrumentation/pinecone/instrumentation.py,sha256=o0EUd5jvHaDKOUTj4NjnL5UfDHDHxyXkWGlTW4oeRDk,1784
 langtrace_python_sdk/instrumentation/pinecone/patch.py,sha256=5lF7hQmg2-U2EWtOC0w8_peRaNMysBomb0fjiNoS6eQ,2200
 langtrace_python_sdk/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langtrace_python_sdk/utils/llm.py,sha256=4z2e-md_ELXCEuOIRVWracR6qH2pmsOxCqpkuF9_3Nw,1589
 langtrace_python_sdk/utils/with_root_span.py,sha256=N7ONrcF0myZbHBy5gpQffDbX-Kf63Crsz9szG0i3m08,1889
-
-
-
-
+tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tests/utils.py,sha256=wdbR00LjYfDXzTBFvkksQYEz1hZjGTNKEiw5KPE_bqI,625
+tests/anthropic/test_anthropic.py,sha256=vvrDJCg9KCws72NEuvPklq8RqQBgGwmV-fSkxGSvUFw,2632
+tests/chroma/test_chroma.py,sha256=5zeInDcP5VplrM9ex2iuVKcpVKMDfEf_ZDK9D6Tc700,2392
+tests/langchain/test_langchain.py,sha256=GGGRcxz0isNmegeum37XFrlJqI6jB6_iUvv8AJ5iG24,2481
+tests/langchain/test_langchain_community.py,sha256=m9lBmMZIeUouKSq1JfdBupV0-0ef39GD6BKsA0Cf_08,2515
+tests/langchain/test_langchain_core.py,sha256=hCuKkIUvDQOUBM2oEgMG3Iq_KNTwC2sH7_Y_IR5FIno,4238
+tests/openai/conftest.py,sha256=r-Scvq1pP62gkvI4CC13nR19twlRQFUx8WuMe9qcesM,1138
+tests/openai/test_chat_completion.py,sha256=iXz8RTU5oCfP2CWOhKJXlWwK-IfLubI8SW396sPxnV0,5310
+tests/openai/test_embeddings.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tests/openai/test_image_generation.py,sha256=tUyLTELi-nBOlp6yZ0hyPbLp04S_-qo-EtqAIJZeXuQ,3889
+tests/openai/cassettes/test_async_chat_completion_streaming.yaml,sha256=0aZHFy9NvXegEDjGWyoG-_ItLr7JYAKbaBLIPSl-pfM,6844
+tests/openai/cassettes/test_async_image_generation.yaml,sha256=_LYZcrqxrnSqcWVQn2Y0XMVGxF-wBrSAd-v3LTAIAeo,3597
+tests/openai/cassettes/test_chat_completion.yaml,sha256=YkNFgK9VHAzNqGWuxFcTiE194GdEie8eDf1FSsffjd8,2944
+tests/openai/cassettes/test_chat_completion_streaming.yaml,sha256=nkx_TemQMYSZxUF_b-LCEFwCRDm0AkQHLf4sdJVuZBw,2592394
+tests/openai/cassettes/test_image_generation.yaml,sha256=gn5aSVp6V6_hb_rt2NnkAWd_idzDxo-7VzhZII0Wslw,3562
+tests/pinecone/test_pinecone.py,sha256=_wlJbSKnY7gyzVcwxIWKft1P_t8dWwcIKNfGCrRLiHs,2633
+langtrace_python_sdk-1.3.6.dist-info/METADATA,sha256=j3UsVVaot2VIVi5KP9cRUmUriQUP4WjuY1yFKEmJwtU,9244
+langtrace_python_sdk-1.3.6.dist-info/WHEEL,sha256=zEMcRr9Kr03x1ozGwg5v9NQBKn3kndp6LSoSlVg-jhU,87
+langtrace_python_sdk-1.3.6.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+langtrace_python_sdk-1.3.6.dist-info/RECORD,,
tests/__init__.py
ADDED
File without changes
tests/anthropic/test_anthropic.py
ADDED

@@ -0,0 +1,73 @@
+import unittest
+from unittest.mock import MagicMock, call
+from langtrace_python_sdk.instrumentation.anthropic.patch import messages_create
+from opentelemetry.trace import SpanKind
+import importlib.metadata
+from langtrace_python_sdk.constants.instrumentation.anthropic import APIS
+from opentelemetry.trace.status import Status, StatusCode
+import json
+from langtrace.trace_attributes import Event, LLMSpanAttributes
+
+from tests.utils import common_setup
+
+class TestAnthropic(unittest.TestCase):
+
+    data = {
+        "content" : [MagicMock(text="Some text", type="text")],
+        "system_fingerprint" : "None",
+        "usage" : MagicMock(input_tokens=23, output_tokens=44),
+        "chunks" : [MagicMock(delta="Some text", message="text")]}
+
+
+    def setUp(self):
+
+        # Mock the original method
+        self.anthropic_mock, self.tracer, self.span = common_setup(self.data, None)
+
+    def tearDown(self):
+        pass
+
+    def test_anthropic(self):
+        # Arrange
+        version = importlib.metadata.version('anthropic')
+        kwargs = {
+            "model": "claude-3-opus-20240229",
+            "messages" : [{"role": "user", "content": "How are you today?"}],
+            "stream": False
+        }
+
+        # Act
+        wrapped_function = messages_create("anthropic.messages.create", version, self.tracer)
+        result = wrapped_function(self.anthropic_mock, MagicMock(), (), kwargs)
+
+
+        # Assert
+        self.assertTrue(self.tracer.start_as_current_span.called_once_with("anthropic.messages.create", kind=SpanKind.CLIENT))
+        self.assertTrue(self.span.set_status.has_calls([call(Status(StatusCode.OK))]))
+
+        expected_attributes = {
+            "langtrace.sdk.name": "langtrace-python-sdk",
+            "langtrace.service.name": "Anthropic",
+            "langtrace.service.type": "llm",
+            "langtrace.service.version": version,
+            "langtrace.version": "1.0.0",
+            "url.full": "/v1/messages",
+            "llm.api": APIS["MESSAGES_CREATE"]["ENDPOINT"],
+            "llm.model": kwargs.get("model"),
+            "llm.prompts": json.dumps(kwargs.get("messages", [])),
+            "llm.stream": kwargs.get("stream"),
+        }
+
+        self.assertTrue(
+            self.span.set_attribute.has_calls(
+                [call(key, value) for key, value in expected_attributes.items()], any_order=True
+            )
+        )
+
+        expected_result_data = {"system_fingerprint": "None" }
+
+        self.assertEqual(result.system_fingerprint, expected_result_data["system_fingerprint"])
+
+
+if __name__ == '__main__':
+    unittest.main()
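This test leans on `tests/utils.common_setup`, which the release adds as a 21-line helper but whose body this diff view does not display. A plausible reconstruction, inferred only from how the tests call it; every detail below is an assumption, not the shipped code:

    from unittest.mock import MagicMock

    def common_setup(data, method_to_mock=None):
        # Hypothetical: a callable standing in for the wrapped client method,
        # returning an object whose attributes come from the test's `data` dict
        # (e.g. result.system_fingerprint).
        service_mock = MagicMock()
        service_mock.return_value = MagicMock(**data)

        # Hypothetical: a mock tracer whose context manager yields a mock span,
        # so tests can assert on start_as_current_span / set_attribute calls.
        tracer = MagicMock()
        span = MagicMock()
        tracer.start_as_current_span.return_value.__enter__.return_value = span

        return service_mock, tracer, span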