promptlayer 1.0.53__tar.gz → 1.0.55__tar.gz
Potentially problematic release: this version of promptlayer has been flagged as possibly problematic.
- {promptlayer-1.0.53 → promptlayer-1.0.55}/PKG-INFO +1 -1
- {promptlayer-1.0.53 → promptlayer-1.0.55}/promptlayer/__init__.py +1 -1
- {promptlayer-1.0.53 → promptlayer-1.0.55}/promptlayer/types/prompt_template.py +1 -0
- {promptlayer-1.0.53 → promptlayer-1.0.55}/promptlayer/utils.py +82 -83
- {promptlayer-1.0.53 → promptlayer-1.0.55}/pyproject.toml +1 -1
- {promptlayer-1.0.53 → promptlayer-1.0.55}/LICENSE +0 -0
- {promptlayer-1.0.53 → promptlayer-1.0.55}/README.md +0 -0
- {promptlayer-1.0.53 → promptlayer-1.0.55}/promptlayer/groups/__init__.py +0 -0
- {promptlayer-1.0.53 → promptlayer-1.0.55}/promptlayer/groups/groups.py +0 -0
- {promptlayer-1.0.53 → promptlayer-1.0.55}/promptlayer/promptlayer.py +0 -0
- {promptlayer-1.0.53 → promptlayer-1.0.55}/promptlayer/promptlayer_base.py +0 -0
- {promptlayer-1.0.53 → promptlayer-1.0.55}/promptlayer/promptlayer_mixins.py +0 -0
- {promptlayer-1.0.53 → promptlayer-1.0.55}/promptlayer/span_exporter.py +0 -0
- {promptlayer-1.0.53 → promptlayer-1.0.55}/promptlayer/templates.py +0 -0
- {promptlayer-1.0.53 → promptlayer-1.0.55}/promptlayer/track/__init__.py +0 -0
- {promptlayer-1.0.53 → promptlayer-1.0.55}/promptlayer/track/track.py +0 -0
- {promptlayer-1.0.53 → promptlayer-1.0.55}/promptlayer/types/__init__.py +0 -0
- {promptlayer-1.0.53 → promptlayer-1.0.55}/promptlayer/types/request_log.py +0 -0
promptlayer/utils.py

@@ -691,6 +691,52 @@ async def apromptlayer_track_score(
     return True
 
 
+def build_anthropic_content_blocks(events):
+    content_blocks = []
+    current_block = None
+    current_signature = ""
+    current_thinking = ""
+    current_text = ""
+    usage = None
+    stop_reason = None
+
+    for event in events:
+        if event.type == "content_block_start":
+            current_block = deepcopy(event.content_block)
+            if current_block.type == "thinking":
+                current_signature = ""
+                current_thinking = ""
+            elif current_block.type == "text":
+                current_text = ""
+        elif event.type == "content_block_delta" and current_block is not None:
+            if current_block.type == "thinking":
+                if hasattr(event.delta, "signature"):
+                    current_signature = event.delta.signature
+                if hasattr(event.delta, "thinking"):
+                    current_thinking += event.delta.thinking
+            elif current_block.type == "text":
+                if hasattr(event.delta, "text"):
+                    current_text += event.delta.text
+        elif event.type == "content_block_stop" and current_block is not None:
+            if current_block.type == "thinking":
+                current_block.signature = current_signature
+                current_block.thinking = current_thinking
+            elif current_block.type == "text":
+                current_block.text = current_text
+
+            content_blocks.append(current_block)
+            current_block = None
+            current_signature = ""
+            current_thinking = ""
+            current_text = ""
+        elif event.type == "message_delta":
+            if hasattr(event, "usage"):
+                usage = event.usage
+            if hasattr(event.delta, "stop_reason"):
+                stop_reason = event.delta.stop_reason
+    return content_blocks, usage, stop_reason
+
+
 class GeneratorProxy:
     def __init__(self, generator, api_request_arguments, api_key):
         self.generator = generator
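The new build_anthropic_content_blocks helper folds Anthropic's streamed events (content_block_start, content_block_delta, content_block_stop, message_delta) back into whole content blocks, including the thinking and signature fields of extended-thinking blocks. A minimal sketch of its behavior, not part of the diff: it assumes promptlayer >= 1.0.55 is installed and uses SimpleNamespace stand-ins for the SDK's typed stream events.

# Sketch only: SimpleNamespace objects stand in for anthropic's typed events.
from types import SimpleNamespace as NS

from promptlayer.utils import build_anthropic_content_blocks

events = [
    NS(type="content_block_start", content_block=NS(type="thinking")),
    NS(type="content_block_delta", delta=NS(thinking="Let me work this out.")),
    NS(type="content_block_delta", delta=NS(signature="sig-abc")),
    NS(type="content_block_stop"),
    NS(type="content_block_start", content_block=NS(type="text")),
    NS(type="content_block_delta", delta=NS(text="Hello, ")),
    NS(type="content_block_delta", delta=NS(text="world.")),
    NS(type="content_block_stop"),
    NS(type="message_delta", usage=NS(output_tokens=12), delta=NS(stop_reason="end_turn")),
]

blocks, usage, stop_reason = build_anthropic_content_blocks(events)
# blocks[0] is the thinking block: thinking="Let me work this out.", signature="sig-abc"
# blocks[1] is the text block: text="Hello, world."
# usage.output_tokens == 12, stop_reason == "end_turn"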
@@ -808,59 +854,15 @@ class GeneratorProxy:
             if getattr(last_event, "type", None) == "message_stop":
                 final_result = deepcopy(self.results[0].message)
 
-                content_blocks = []
-
-                current_block = None
-                current_signature = ""
-                current_thinking = ""
-                current_text = ""
-
-                for event in self.results:
-                    if getattr(event, "type", None) == "content_block_start":
-                        current_block = deepcopy(event.content_block)
-
-                        if getattr(event.content_block, "type", None) == "thinking":
-                            current_signature = ""
-                            current_thinking = ""
-                        elif getattr(event.content_block, "type", None) == "text":
-                            current_text = ""
-
-                    elif getattr(event, "type", None) == "content_block_delta" and current_block is not None:
-                        if getattr(current_block, "type", None) == "thinking":
-                            if hasattr(event.delta, "signature"):
-                                current_signature = event.delta.signature
-                            if hasattr(event.delta, "thinking"):
-                                current_thinking += event.delta.thinking
-
-                        elif getattr(current_block, "type", None) == "text":
-                            if hasattr(event.delta, "text"):
-                                current_text += event.delta.text
-
-                    elif getattr(event, "type", None) == "content_block_stop" and current_block is not None:
-                        if getattr(current_block, "type", None) == "thinking":
-                            current_block.signature = current_signature
-                            current_block.thinking = current_thinking
-                        elif getattr(current_block, "type", None) == "text":
-                            current_block.text = current_text
-
-                        content_blocks.append(current_block)
-
-                        current_block = None
-                        current_signature = ""
-                        current_thinking = ""
-                        current_text = ""
-
-                final_result.content = content_blocks
-                for event in reversed(self.results):
-                    if hasattr(event, "usage") and hasattr(event.usage, "output_tokens"):
-                        final_result.usage.output_tokens = event.usage.output_tokens
-                        break
-
-                return final_result
-
-            # 3) Otherwise (not a “stream”), fall back to returning the last raw message
-            else:
-                return deepcopy(self.results[-1])
+                content_blocks, usage, stop_reason = build_anthropic_content_blocks(self.results)
+                final_result.content = content_blocks
+                if usage:
+                    final_result.usage.output_tokens = usage.output_tokens
+                if stop_reason:
+                    final_result.stop_reason = stop_reason
+                return final_result
+            else:
+                return deepcopy(self.results[-1])
 
         if hasattr(self.results[0].choices[0], "text"): # this is regular completion
             response = ""
             for result in self.results:
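In GeneratorProxy, the inline reassembly loop is replaced by a call to the shared helper; output_tokens now comes from the message_delta usage instead of a reverse scan of self.results, and stop_reason is carried over as well. A hedged sketch of the call path this touches, following the client pattern from the promptlayer README (model name and key are placeholders):

from promptlayer import PromptLayer

promptlayer_client = PromptLayer(api_key="pl_...")  # placeholder key
anthropic_client = promptlayer_client.anthropic.Anthropic()

stream = anthropic_client.messages.create(
    model="claude-3-7-sonnet-latest",  # any streaming-capable Claude model
    max_tokens=256,
    messages=[{"role": "user", "content": "Say hello."}],
    stream=True,
)
# GeneratorProxy yields each raw event unchanged; after message_stop it
# rebuilds the final Message via build_anthropic_content_blocks and logs it.
for event in stream:
    pass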
@@ -1425,7 +1427,7 @@ async def aopenai_stream_completion(generator: AsyncIterable[Any]) -> Any:
 
 
 def anthropic_stream_message(results: list):
-    from anthropic.types import Message, MessageStreamEvent, TextBlock, Usage
+    from anthropic.types import Message, MessageStreamEvent, Usage
 
     message_stream_events: List[MessageStreamEvent] = results
     response: Message = Message(
@@ -1438,24 +1440,24 @@ def anthropic_stream_message(results: list):
         stop_sequence=None,
         usage=Usage(input_tokens=0, output_tokens=0),
     )
-    content = ""
-    for event in message_stream_events:
-        if event.type == "message_start":
-            response = event.message
-        elif event.type == "content_block_delta":
-            content += event.delta.text
-        elif event.type == "message_delta":
-            if hasattr(event, "usage"):
-                response.usage.output_tokens = event.usage.output_tokens
-            if hasattr(event.delta, "stop_reason"):
-                response.stop_reason = event.delta.stop_reason
-
-    response.content.append(TextBlock(type="text", text=content))
+
+    for event in message_stream_events:
+        if event.type == "message_start":
+            response = event.message
+            break
+
+    content_blocks, usage, stop_reason = build_anthropic_content_blocks(message_stream_events)
+    response.content = content_blocks
+    if usage:
+        response.usage.output_tokens = usage.output_tokens
+    if stop_reason:
+        response.stop_reason = stop_reason
+
     return response
 
 
 async def aanthropic_stream_message(generator: AsyncIterable[Any]) -> Any:
-    from anthropic.types import Message, MessageStreamEvent, TextBlock, Usage
+    from anthropic.types import Message, MessageStreamEvent, Usage
 
     message_stream_events: List[MessageStreamEvent] = []
     response: Message = Message(
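anthropic_stream_message now seeds the response from the message_start event and delegates block reassembly to the helper, which is what lets thinking blocks survive in logged streams. A sketch of feeding it a captured event list, assuming an anthropic client and Anthropic's documented extended-thinking parameters:

import anthropic

from promptlayer.utils import anthropic_stream_message

client = anthropic.Anthropic()
events = list(
    client.messages.create(
        model="claude-3-7-sonnet-latest",  # placeholder model alias
        max_tokens=2048,
        thinking={"type": "enabled", "budget_tokens": 1024},
        messages=[{"role": "user", "content": "What is 27 * 453?"}],
        stream=True,
    )
)

message = anthropic_stream_message(events)
# message.content holds the rebuilt thinking + text blocks;
# usage.output_tokens and stop_reason come from the message_delta event.
print(message.stop_reason, message.usage.output_tokens)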
@@ -1468,22 +1470,19 @@ async def aanthropic_stream_message(generator: AsyncIterable[Any]) -> Any:
         stop_sequence=None,
         usage=Usage(input_tokens=0, output_tokens=0),
     )
-    content = ""
 
-    async for event in generator:
-        message_stream_events.append(event)
-
-        if event.type == "message_start":
-            response = event.message
-        elif event.type == "content_block_delta":
-            content += event.delta.text
-        elif event.type == "message_delta":
-            if hasattr(event, "usage"):
-                response.usage.output_tokens = event.usage.output_tokens
-            if hasattr(event.delta, "stop_reason"):
-                response.stop_reason = event.delta.stop_reason
-
-    response.content.append(TextBlock(type="text", text=content))
+    async for event in generator:
+        if event.type == "message_start":
+            response = event.message
+        message_stream_events.append(event)
+
+    content_blocks, usage, stop_reason = build_anthropic_content_blocks(message_stream_events)
+    response.content = content_blocks
+    if usage:
+        response.usage.output_tokens = usage.output_tokens
+    if stop_reason:
+        response.stop_reason = stop_reason
+
     return response
 
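The async variant consumes the generator itself, so the stream is passed directly rather than as a pre-collected list. A matching sketch with anthropic's AsyncAnthropic client, using the same placeholder parameters as above:

import asyncio

import anthropic

from promptlayer.utils import aanthropic_stream_message

async def main():
    client = anthropic.AsyncAnthropic()
    stream = await client.messages.create(
        model="claude-3-7-sonnet-latest",  # placeholder model alias
        max_tokens=2048,
        thinking={"type": "enabled", "budget_tokens": 1024},
        messages=[{"role": "user", "content": "Summarize this diff."}],
        stream=True,
    )
    message = await aanthropic_stream_message(stream)
    print(message.stop_reason, message.usage.output_tokens)

asyncio.run(main())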