promptlayer 1.0.52__py3-none-any.whl → 1.0.54__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

promptlayer/__init__.py CHANGED
@@ -1,4 +1,4 @@
  from .promptlayer import AsyncPromptLayer, PromptLayer
 
- __version__ = "1.0.52"
+ __version__ = "1.0.54"
  __all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]
promptlayer/types/prompt_template.py CHANGED
@@ -23,6 +23,12 @@ class TextContent(TypedDict, total=False):
      text: str
 
 
+ class ThinkingContent(TypedDict, total=False):
+     signature: str | None = None
+     type: Literal["thinking"]
+     thinking: str
+
+
  class ImageContent(TypedDict, total=False):
      type: Literal["image_url"]
      image_url: ImageUrl
@@ -44,7 +50,7 @@ class MediaVariable(TypedDict, total=False):
      name: str
 
 
- Content = Union[TextContent, ImageContent, MediaContnt, MediaVariable]
+ Content = Union[TextContent, ThinkingContent, ImageContent, MediaContnt, MediaVariable]
 
 
  class Function(TypedDict, total=False):
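
For orientation: ThinkingContent mirrors the thinking blocks Anthropic emits when extended thinking is enabled, and the Content union now accepts it wherever template content is typed. A minimal usage sketch, assuming promptlayer >= 1.0.54 is installed (values are illustrative; total=False makes every key optional):

    from promptlayer.types.prompt_template import Content, TextContent, ThinkingContent

    # A content list pairing a model's reconstructed reasoning with its visible answer.
    blocks: list[Content] = [
        ThinkingContent(type="thinking", thinking="Compare both options first...", signature="sig-abc"),
        TextContent(type="text", text="Option B is the better choice."),
    ]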
promptlayer/utils.py CHANGED
@@ -691,6 +691,52 @@ async def apromptlayer_track_score(
      return True
 
 
+ def build_anthropic_content_blocks(events):
+     content_blocks = []
+     current_block = None
+     current_signature = ""
+     current_thinking = ""
+     current_text = ""
+     usage = None
+     stop_reason = None
+
+     for event in events:
+         if event.type == "content_block_start":
+             current_block = deepcopy(event.content_block)
+             if current_block.type == "thinking":
+                 current_signature = ""
+                 current_thinking = ""
+             elif current_block.type == "text":
+                 current_text = ""
+         elif event.type == "content_block_delta" and current_block is not None:
+             if current_block.type == "thinking":
+                 if hasattr(event.delta, "signature"):
+                     current_signature = event.delta.signature
+                 if hasattr(event.delta, "thinking"):
+                     current_thinking += event.delta.thinking
+             elif current_block.type == "text":
+                 if hasattr(event.delta, "text"):
+                     current_text += event.delta.text
+         elif event.type == "content_block_stop" and current_block is not None:
+             if current_block.type == "thinking":
+                 current_block.signature = current_signature
+                 current_block.thinking = current_thinking
+             elif current_block.type == "text":
+                 current_block.text = current_text
+
+             content_blocks.append(current_block)
+             current_block = None
+             current_signature = ""
+             current_thinking = ""
+             current_text = ""
+         elif event.type == "message_delta":
+             if hasattr(event, "usage"):
+                 usage = event.usage
+             if hasattr(event.delta, "stop_reason"):
+                 stop_reason = event.delta.stop_reason
+     return content_blocks, usage, stop_reason
+
+
  class GeneratorProxy:
      def __init__(self, generator, api_request_arguments, api_key):
          self.generator = generator
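
The new build_anthropic_content_blocks helper folds a flat list of Anthropic raw stream events back into ordered content blocks plus the final usage and stop_reason. A sketch of the event flow it expects, using SimpleNamespace stand-ins with illustrative values (the real inputs are the SDK's MessageStreamEvent objects, which the helper reads only via attribute access):

    from types import SimpleNamespace as Event

    from promptlayer.utils import build_anthropic_content_blocks

    # Stand-ins shaped like Anthropic raw stream events.
    events = [
        Event(type="content_block_start", content_block=Event(type="thinking", thinking="", signature="")),
        Event(type="content_block_delta", delta=Event(thinking="Compare both options first...")),
        Event(type="content_block_delta", delta=Event(signature="sig-abc")),
        Event(type="content_block_stop"),
        Event(type="content_block_start", content_block=Event(type="text", text="")),
        Event(type="content_block_delta", delta=Event(text="Option B is the better choice.")),
        Event(type="content_block_stop"),
        Event(type="message_delta", delta=Event(stop_reason="end_turn"), usage=Event(output_tokens=17)),
    ]

    blocks, usage, stop_reason = build_anthropic_content_blocks(events)
    assert blocks[0].thinking == "Compare both options first..."
    assert blocks[1].text == "Option B is the better choice."
    assert (usage.output_tokens, stop_reason) == (17, "end_turn")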
@@ -788,30 +834,35 @@ class GeneratorProxy:
          response = ""
          for result in self.results:
              if hasattr(result, "completion"):
-                 response = f"{response}{result.completion}"
+                 response += result.completion
              elif hasattr(result, "message") and isinstance(result.message, str):
-                 response = f"{response}{result.message}"
+                 response += result.message
              elif (
                  hasattr(result, "content_block")
                  and hasattr(result.content_block, "text")
-                 and "type" in result
-                 and result.type != "message_stop"
+                 and getattr(result, "type", None) != "message_stop"
              ):
-                 response = f"{response}{result.content_block.text}"
-             elif hasattr(result, "delta") and hasattr(result.delta, "text"):
-                 response = f"{response}{result.delta.text}"
-         if (
-             hasattr(self.results[-1], "type") and self.results[-1].type == "message_stop"
-         ): # this is a message stream and not the correct event
+                 response += result.content_block.text
+             elif hasattr(result, "delta"):
+                 if hasattr(result.delta, "thinking"):
+                     response += result.delta.thinking
+                 elif hasattr(result.delta, "text"):
+                     response += result.delta.text
+
+         # 2) If this is a “stream” (ended by message_stop), reconstruct both ThinkingBlock & TextBlock
+         last_event = self.results[-1]
+         if getattr(last_event, "type", None) == "message_stop":
              final_result = deepcopy(self.results[0].message)
-             final_result.usage = None
-             content_block = deepcopy(self.results[1].content_block)
-             content_block.text = response
-             final_result.content = [content_block]
-         else:
-             final_result = deepcopy(self.results[-1])
-             final_result.completion = response
+
+             content_blocks, usage, stop_reason = build_anthropic_content_blocks(self.results)
+             final_result.content = content_blocks
+             if usage:
+                 final_result.usage.output_tokens = usage.output_tokens
+             if stop_reason:
+                 final_result.stop_reason = stop_reason
              return final_result
+         else:
+             return deepcopy(self.results[-1])
          if hasattr(self.results[0].choices[0], "text"): # this is regular completion
              response = ""
              for result in self.results:
@@ -1376,7 +1427,7 @@ async def aopenai_stream_completion(generator: AsyncIterable[Any]) -> Any:
 
 
  def anthropic_stream_message(results: list):
-     from anthropic.types import Message, MessageStreamEvent, TextBlock, Usage
+     from anthropic.types import Message, MessageStreamEvent, Usage
 
      message_stream_events: List[MessageStreamEvent] = results
      response: Message = Message(
@@ -1389,24 +1440,24 @@ def anthropic_stream_message(results: list):
          stop_sequence=None,
          usage=Usage(input_tokens=0, output_tokens=0),
      )
-     content = ""
-     for result in message_stream_events:
-         if result.type == "message_start":
-             response = result.message
-         elif result.type == "content_block_delta":
-             if result.delta.type == "text_delta":
-                 content = f"{content}{result.delta.text}"
-         elif result.type == "message_delta":
-             if hasattr(result, "usage"):
-                 response.usage.output_tokens = result.usage.output_tokens
-             if hasattr(result.delta, "stop_reason"):
-                 response.stop_reason = result.delta.stop_reason
-     response.content.append(TextBlock(type="text", text=content))
+
+     for event in message_stream_events:
+         if event.type == "message_start":
+             response = event.message
+             break
+
+     content_blocks, usage, stop_reason = build_anthropic_content_blocks(message_stream_events)
+     response.content = content_blocks
+     if usage:
+         response.usage.output_tokens = usage.output_tokens
+     if stop_reason:
+         response.stop_reason = stop_reason
+
      return response
 
 
  async def aanthropic_stream_message(generator: AsyncIterable[Any]) -> Any:
-     from anthropic.types import Message, MessageStreamEvent, TextBlock, Usage
+     from anthropic.types import Message, MessageStreamEvent, Usage
 
      message_stream_events: List[MessageStreamEvent] = []
      response: Message = Message(
@@ -1419,22 +1470,19 @@ async def aanthropic_stream_message(generator: AsyncIterable[Any]) -> Any:
          stop_sequence=None,
          usage=Usage(input_tokens=0, output_tokens=0),
      )
-     content = ""
 
-     async for result in generator:
-         message_stream_events.append(result)
-         if result.type == "message_start":
-             response = result.message
-         elif result.type == "content_block_delta":
-             if result.delta.type == "text_delta":
-                 content = f"{content}{result.delta.text}"
-         elif result.type == "message_delta":
-             if hasattr(result, "usage"):
-                 response.usage.output_tokens = result.usage.output_tokens
-             if hasattr(result.delta, "stop_reason"):
-                 response.stop_reason = result.delta.stop_reason
-
-     response.content.append(TextBlock(type="text", text=content))
+     async for event in generator:
+         if event.type == "message_start":
+             response = event.message
+         message_stream_events.append(event)
+
+     content_blocks, usage, stop_reason = build_anthropic_content_blocks(message_stream_events)
+     response.content = content_blocks
+     if usage:
+         response.usage.output_tokens = usage.output_tokens
+     if stop_reason:
+         response.stop_reason = stop_reason
+
      return response
 
 
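With both stream reassemblers now delegating to build_anthropic_content_blocks, thinking output survives streaming instead of being dropped in favor of a single reconstructed TextBlock. An end-to-end sketch against the Anthropic SDK (model name and token budgets are illustrative, and ANTHROPIC_API_KEY is assumed to be set):

    import anthropic

    from promptlayer.utils import anthropic_stream_message

    client = anthropic.Anthropic()
    events = list(
        client.messages.create(
            model="claude-3-7-sonnet-latest",
            max_tokens=2048,
            thinking={"type": "enabled", "budget_tokens": 1024},
            messages=[{"role": "user", "content": "Why is the sky blue?"}],
            stream=True,
        )
    )

    message = anthropic_stream_message(events)
    # Expect a thinking block followed by a text block, with usage and
    # stop_reason taken from the final message_delta event.
    print([block.type for block in message.content])
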
@@ -1898,11 +1946,11 @@ MAP_TYPE_TO_GOOGLE_FUNCTION = {
  }
 
 
- def google_request(request: GetPromptTemplateResponse, _: dict, function_kwargs: dict):
+ def google_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
      from google import genai
 
      client = genai.Client()
-     request_to_make = MAP_TYPE_TO_GOOGLE_FUNCTION[request["prompt_template"]["type"]]
+     request_to_make = MAP_TYPE_TO_GOOGLE_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
      return request_to_make(client, **function_kwargs)
 
 
@@ -1936,11 +1984,11 @@ AMAP_TYPE_TO_GOOGLE_FUNCTION = {
  }
 
 
- async def agoogle_request(request: GetPromptTemplateResponse, _: dict, function_kwargs: dict):
+ async def agoogle_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
      from google import genai
 
      client = genai.Client()
-     request_to_make = AMAP_TYPE_TO_GOOGLE_FUNCTION[request["prompt_template"]["type"]]
+     request_to_make = AMAP_TYPE_TO_GOOGLE_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
      return await request_to_make(client, **function_kwargs)
 
 
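The renames here (request to prompt_blueprint, _ to client_kwargs) make the dispatch explicit: the prompt template's type field selects which google-genai call to issue. A minimal sketch of that table-dispatch pattern (the handler bodies and the "chat"/"completion" keys are assumptions for illustration, not taken from this diff):

    from typing import Any, Callable, Dict

    # Hypothetical stand-in for MAP_TYPE_TO_GOOGLE_FUNCTION.
    HANDLERS: Dict[str, Callable[..., Any]] = {
        "chat": lambda client, **kw: client.chats.create(**kw),
        "completion": lambda client, **kw: client.models.generate_content(**kw),
    }

    def dispatch(prompt_blueprint: dict, client: Any, **function_kwargs: Any) -> Any:
        # Keyed on prompt_blueprint["prompt_template"]["type"], as in google_request.
        handler = HANDLERS[prompt_blueprint["prompt_template"]["type"]]
        return handler(client, **function_kwargs)
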
promptlayer-1.0.52.dist-info/METADATA → promptlayer-1.0.54.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: promptlayer
- Version: 1.0.52
+ Version: 1.0.54
  Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
  License: Apache-2.0
  Author: Magniv
promptlayer-1.0.52.dist-info/RECORD → promptlayer-1.0.54.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
- promptlayer/__init__.py,sha256=iPgrSckrRsUyjKcOVRNOVz5tvY4EBFx7f6BpoAOY2Yw,140
+ promptlayer/__init__.py,sha256=2e8KBwUPRkROzOicakNfmG1sPrkjOo1nyBvGQnqxcD4,140
  promptlayer/groups/__init__.py,sha256=xhOAolLUBkr76ZHvJr29OwjCIk1V9qKQXjZCuyTJUIY,429
  promptlayer/groups/groups.py,sha256=YPROicy-TzpkrpA8vOpZS2lwvJ6VRtlbQ1S2oT1N0vM,338
  promptlayer/promptlayer.py,sha256=K4KRW9eB1FF_Cdllu_Z-fpof058P45WhITnap29vlOk,21644
@@ -9,10 +9,10 @@ promptlayer/templates.py,sha256=7ObDPMzHXjttDdJdCXA_pDL9XAnmcujIWucmgZJcOC8,1179
  promptlayer/track/__init__.py,sha256=tyweLTAY7UpYpBHWwY-T3pOPDIlGjcgccYXqU_r0694,1710
  promptlayer/track/track.py,sha256=A-awcYwsSwxktrlCMchy8NITIquwxU1UXbgLZMwqrA0,3164
  promptlayer/types/__init__.py,sha256=xJcvQuOk91ZBBePb40-1FDNDKYrZoH5lPE2q6_UhprM,111
- promptlayer/types/prompt_template.py,sha256=GoYSorgBmUgvtyXaGAOv0KgVC61Llzn8bND6PF1fW50,4929
+ promptlayer/types/prompt_template.py,sha256=meL3KMeFnEZIIf16Wk0MX3f6Tw9qkQU6PgaRGoZ-2YA,5076
  promptlayer/types/request_log.py,sha256=xU6bcxQar6GaBOJlgZTavXUV3FjE8sF_nSjPu4Ya_00,174
- promptlayer/utils.py,sha256=iKA7kIBdOzYIORHdkS556Wf9MRCnD7ctEo4obI0etL8,69712
- promptlayer-1.0.52.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- promptlayer-1.0.52.dist-info/METADATA,sha256=CKfXX6iAXGCTna9bETZhCupDGsQ8jbUL4Qq3gd7yTHc,4819
- promptlayer-1.0.52.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- promptlayer-1.0.52.dist-info/RECORD,,
+ promptlayer/utils.py,sha256=H0rZBGC21RurBx50S1d-sIkGK6LXGScMlMqnNelL9rs,71267
+ promptlayer-1.0.54.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ promptlayer-1.0.54.dist-info/METADATA,sha256=MIW_OFvwN7okuw7MhO94YZPBXJdRhDC0HxYFjI_e8t8,4819
+ promptlayer-1.0.54.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ promptlayer-1.0.54.dist-info/RECORD,,
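
For anyone auditing these entries: each RECORD row is path,sha256=<urlsafe base64, unpadded>,size, so an extracted wheel can be checked against the listing. A quick sketch (the path is illustrative):

    import base64
    import hashlib
    import pathlib

    # Recompute one RECORD row for a file extracted from the wheel.
    path = pathlib.Path("promptlayer/__init__.py")
    digest = hashlib.sha256(path.read_bytes()).digest()
    b64 = base64.urlsafe_b64encode(digest).rstrip(b"=").decode()
    print(f"{path},sha256={b64},{path.stat().st_size}")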