promptlayer 1.0.56__tar.gz → 1.0.58__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of promptlayer might be problematic.
- {promptlayer-1.0.56 → promptlayer-1.0.58}/PKG-INFO +1 -1
- {promptlayer-1.0.56 → promptlayer-1.0.58}/promptlayer/__init__.py +1 -1
- {promptlayer-1.0.56 → promptlayer-1.0.58}/promptlayer/utils.py +77 -16
- {promptlayer-1.0.56 → promptlayer-1.0.58}/pyproject.toml +1 -1
- {promptlayer-1.0.56 → promptlayer-1.0.58}/LICENSE +0 -0
- {promptlayer-1.0.56 → promptlayer-1.0.58}/README.md +0 -0
- {promptlayer-1.0.56 → promptlayer-1.0.58}/promptlayer/groups/__init__.py +0 -0
- {promptlayer-1.0.56 → promptlayer-1.0.58}/promptlayer/groups/groups.py +0 -0
- {promptlayer-1.0.56 → promptlayer-1.0.58}/promptlayer/promptlayer.py +0 -0
- {promptlayer-1.0.56 → promptlayer-1.0.58}/promptlayer/promptlayer_base.py +0 -0
- {promptlayer-1.0.56 → promptlayer-1.0.58}/promptlayer/promptlayer_mixins.py +0 -0
- {promptlayer-1.0.56 → promptlayer-1.0.58}/promptlayer/span_exporter.py +0 -0
- {promptlayer-1.0.56 → promptlayer-1.0.58}/promptlayer/templates.py +0 -0
- {promptlayer-1.0.56 → promptlayer-1.0.58}/promptlayer/track/__init__.py +0 -0
- {promptlayer-1.0.56 → promptlayer-1.0.58}/promptlayer/track/track.py +0 -0
- {promptlayer-1.0.56 → promptlayer-1.0.58}/promptlayer/types/__init__.py +0 -0
- {promptlayer-1.0.56 → promptlayer-1.0.58}/promptlayer/types/prompt_template.py +0 -0
- {promptlayer-1.0.56 → promptlayer-1.0.58}/promptlayer/types/request_log.py +0 -0
@@ -697,6 +697,7 @@ def build_anthropic_content_blocks(events):
     current_signature = ""
     current_thinking = ""
     current_text = ""
+    current_tool_input_json = ""
     usage = None
     stop_reason = None

@@ -708,6 +709,8 @@ def build_anthropic_content_blocks(events):
             current_thinking = ""
         elif current_block.type == "text":
             current_text = ""
+        elif current_block.type == "tool_use":
+            current_tool_input_json = ""
     elif event.type == "content_block_delta" and current_block is not None:
         if current_block.type == "thinking":
             if hasattr(event.delta, "signature"):
@@ -717,18 +720,26 @@ def build_anthropic_content_blocks(events):
         elif current_block.type == "text":
             if hasattr(event.delta, "text"):
                 current_text += event.delta.text
+        elif current_block.type == "tool_use":
+            if hasattr(event.delta, "partial_json"):
+                current_tool_input_json += event.delta.partial_json
     elif event.type == "content_block_stop" and current_block is not None:
         if current_block.type == "thinking":
             current_block.signature = current_signature
             current_block.thinking = current_thinking
         elif current_block.type == "text":
             current_block.text = current_text
-
+        elif current_block.type == "tool_use":
+            try:
+                current_block.input = json.loads(current_tool_input_json)
+            except json.JSONDecodeError:
+                current_block.input = {}
         content_blocks.append(current_block)
         current_block = None
         current_signature = ""
         current_thinking = ""
         current_text = ""
+        current_tool_input_json = ""
     elif event.type == "message_delta":
         if hasattr(event, "usage"):
             usage = event.usage
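These three hunks teach build_anthropic_content_blocks about streamed tool calls: tool_use blocks get their own accumulator, partial_json deltas are concatenated as they arrive, and the buffer is parsed once the block stops, falling back to an empty dict on malformed JSON. A minimal, dependency-free sketch of that flow; the SimpleNamespace events below are hypothetical stand-ins for Anthropic's real stream event classes:

import json
from types import SimpleNamespace as NS

# Hypothetical stand-ins for an Anthropic tool_use stream.
events = [
    NS(type="content_block_start",
       content_block=NS(type="tool_use", name="get_weather", input=None)),
    NS(type="content_block_delta", delta=NS(partial_json='{"city": "Par')),
    NS(type="content_block_delta", delta=NS(partial_json='is"}')),
    NS(type="content_block_stop"),
]

current_block = None
current_tool_input_json = ""
for event in events:
    if event.type == "content_block_start":
        current_block = event.content_block
        if current_block.type == "tool_use":
            current_tool_input_json = ""  # reset the accumulator per block
    elif event.type == "content_block_delta" and current_block is not None:
        if current_block.type == "tool_use" and hasattr(event.delta, "partial_json"):
            current_tool_input_json += event.delta.partial_json
    elif event.type == "content_block_stop" and current_block is not None:
        if current_block.type == "tool_use":
            try:
                current_block.input = json.loads(current_tool_input_json)
            except json.JSONDecodeError:
                current_block.input = {}  # malformed JSON degrades to {}

print(current_block.input)  # {'city': 'Paris'}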
@@ -1900,7 +1911,7 @@ def google_chat_request(client, **kwargs):
     history = [Content(**item) for item in kwargs.get("history", [])]
     generation_config = kwargs.get("generation_config", {})
     chat = client.chats.create(model=model, history=history, config=generation_config)
-    last_message = history[-1].parts[0] if history else
+    last_message = history[-1].parts[0] if history else ""
     if stream:
         return chat.send_message_stream(message=last_message)
     return chat.send_message(message=last_message)
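This hunk, and its async counterpart in agoogle_chat_request further down, makes the empty-history fallback for last_message an explicit empty string (the removed line appears truncated after else in this rendering). A two-line illustration of the fallback, using a hypothetical empty history list:

# With no history, the conditional expression short-circuits to "" instead of
# evaluating history[-1].parts[0], which would raise IndexError.
history = []
last_message = history[-1].parts[0] if history else ""
assert last_message == ""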
@@ -1916,6 +1927,31 @@ def google_completions_request(client, **kwargs):
     return client.models.generate_content(model=model, contents=contents, config=config)


+def _build_google_response_from_parts(thought_content: str, regular_content: str, function_calls: list, last_result):
+    """Helper function to build Google response with thought, regular, and function call parts."""
+    from google.genai.chats import Part
+
+    response = last_result.model_copy()
+    final_parts = []
+
+    if thought_content:
+        thought_part = Part(text=thought_content, thought=True)
+        final_parts.append(thought_part)
+
+    if regular_content:
+        text_part = Part(text=regular_content, thought=None)
+        final_parts.append(text_part)
+
+    for function_call in function_calls:
+        function_part = Part(function_call=function_call, thought=None)
+        final_parts.append(function_part)
+
+    if final_parts:
+        response.candidates[0].content.parts = final_parts
+
+    return response
+
+
 def map_google_stream_response(results: list):
     from google.genai.chats import GenerateContentResponse

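The new _build_google_response_from_parts helper copies the last streamed chunk via model_copy and rewrites candidates[0].content.parts so that thought text, regular text, and function calls each land in their own Part, in that order; if nothing was collected, the copied parts are left untouched. A self-contained mirror of that ordering logic; the dataclasses are hypothetical stand-ins for google-genai's types, not the real API:

import copy
from dataclasses import dataclass, field
from typing import Any, Optional

@dataclass
class Part:
    text: Optional[str] = None
    thought: Optional[bool] = None
    function_call: Any = None

@dataclass
class Content:
    parts: list = field(default_factory=list)

@dataclass
class Candidate:
    content: Content = field(default_factory=Content)

@dataclass
class Response:
    candidates: list = field(default_factory=lambda: [Candidate()])

    def model_copy(self):
        # pydantic-style copy, as the real helper calls on last_result
        return copy.deepcopy(self)

def build_response_from_parts(thought, text, function_calls, last_result):
    response = last_result.model_copy()
    final_parts = []
    if thought:
        final_parts.append(Part(text=thought, thought=True))
    if text:
        final_parts.append(Part(text=text))
    for fc in function_calls:
        final_parts.append(Part(function_call=fc))
    if final_parts:  # leave the copied parts alone when nothing was collected
        response.candidates[0].content.parts = final_parts
    return response

resp = build_response_from_parts("planning...", "Hello", [{"name": "lookup"}], Response())
print([(p.thought, p.text, p.function_call) for p in resp.candidates[0].content.parts])
# [(True, 'planning...', None), (None, 'Hello', None), (None, None, {'name': 'lookup'})]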
@@ -1923,13 +1959,23 @@ def map_google_stream_response(results: list):
     if not results:
         return response
     results: List[GenerateContentResponse] = results
-
+
+    thought_content = ""
+    regular_content = ""
+    function_calls = []
+
     for result in results:
-
-
-
-
-
+        if result.candidates and result.candidates[0].content.parts:
+            for part in result.candidates[0].content.parts:
+                if hasattr(part, "text") and part.text:
+                    if hasattr(part, "thought") and part.thought:
+                        thought_content = f"{thought_content}{part.text}"
+                    else:
+                        regular_content = f"{regular_content}{part.text}"
+                elif hasattr(part, "function_call") and part.function_call:
+                    function_calls.append(part.function_call)
+
+    return _build_google_response_from_parts(thought_content, regular_content, function_calls, results[-1])


 def google_stream_chat(results: list):
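map_google_stream_response now walks every streamed chunk and sorts its parts into three buckets (thought text, regular text, function calls) before handing them to the helper above. A sketch of that aggregation pass, with SimpleNamespace chunks standing in for GenerateContentResponse objects; getattr with a default mirrors the hasattr-plus-truthiness checks in the real code:

from types import SimpleNamespace as NS

def chunk(*parts):
    # Stand-in for one streamed GenerateContentResponse.
    return NS(candidates=[NS(content=NS(parts=list(parts)))])

results = [
    chunk(NS(text="Weighing options... ", thought=True, function_call=None)),
    chunk(NS(text="The answer ", thought=None, function_call=None)),
    chunk(NS(text="is 42.", thought=None, function_call=None),
          NS(text=None, thought=None, function_call={"name": "save_answer"})),
]

thought_content, regular_content, function_calls = "", "", []
for result in results:
    if result.candidates and result.candidates[0].content.parts:
        for part in result.candidates[0].content.parts:
            if getattr(part, "text", None):
                if getattr(part, "thought", None):
                    thought_content += part.text   # thought-flagged text
                else:
                    regular_content += part.text   # ordinary completion text
            elif getattr(part, "function_call", None):
                function_calls.append(part.function_call)

print(thought_content)   # Weighing options...
print(regular_content)   # The answer is 42.
print(function_calls)    # [{'name': 'save_answer'}]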
@@ -1962,7 +2008,7 @@ async def agoogle_chat_request(client, **kwargs):
     history = [Content(**item) for item in kwargs.get("history", [])]
     generation_config = kwargs.get("generation_config", {})
     chat = client.aio.chats.create(model=model, history=history, config=generation_config)
-    last_message = history[-1].parts[0] if history else
+    last_message = history[-1].parts[0] if history else ""
     if stream:
         return await chat.send_message_stream(message=last_message)
     return await chat.send_message(message=last_message)
@@ -1995,14 +2041,29 @@ async def agoogle_request(prompt_blueprint: GetPromptTemplateResponse, client_kw
 async def amap_google_stream_response(generator: AsyncIterable[Any]):
     from google.genai.chats import GenerateContentResponse

-    GenerateContentResponse()
-
+    response = GenerateContentResponse()
+
+    thought_content = ""
+    regular_content = ""
+    function_calls = []
+    last_result = None
+
     async for result in generator:
-
-
-
-
-
+        last_result = result
+        if result.candidates and result.candidates[0].content.parts:
+            for part in result.candidates[0].content.parts:
+                if hasattr(part, "text") and part.text:
+                    if hasattr(part, "thought") and part.thought:
+                        thought_content = f"{thought_content}{part.text}"
+                    else:
+                        regular_content = f"{regular_content}{part.text}"
+                elif hasattr(part, "function_call") and part.function_call:
+                    function_calls.append(part.function_call)
+
+    if not last_result:
+        return response
+
+    return _build_google_response_from_parts(thought_content, regular_content, function_calls, last_result)


 async def agoogle_stream_chat(generator: AsyncIterable[Any]):
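The async variant mirrors the same aggregation but adds an empty-stream guard: last_result stays None if the generator never yields, and a bare response is returned instead of dereferencing a missing final chunk. A compact sketch of just that guard, with stand-in objects rather than google-genai's real types:

import asyncio
from types import SimpleNamespace as NS

async def empty_stream():
    if False:
        yield NS()  # never reached; the yield makes this an async generator

async def amap(generator):
    response = NS(candidates=[])  # stand-in for a fresh GenerateContentResponse()
    last_result = None
    async for result in generator:
        last_result = result      # part bucketing elided; see the sync sketch above
    if not last_result:
        return response           # nothing streamed: return the empty response
    return last_result

print(asyncio.run(amap(empty_stream())).candidates)  # [] -- the guard fired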