promptlayer 1.0.55__py3-none-any.whl → 1.0.57__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of promptlayer may be problematic; see the release's details page for more information.
- promptlayer/__init__.py +1 -1
- promptlayer/promptlayer.py +16 -0
- promptlayer/promptlayer_mixins.py +6 -0
- promptlayer/utils.py +75 -14
- {promptlayer-1.0.55.dist-info → promptlayer-1.0.57.dist-info}/METADATA +1 -1
- {promptlayer-1.0.55.dist-info → promptlayer-1.0.57.dist-info}/RECORD +8 -8
- {promptlayer-1.0.55.dist-info → promptlayer-1.0.57.dist-info}/LICENSE +0 -0
- {promptlayer-1.0.55.dist-info → promptlayer-1.0.57.dist-info}/WHEEL +0 -0
promptlayer/__init__.py
CHANGED
promptlayer/promptlayer.py
CHANGED
@@ -126,12 +126,16 @@ class PromptLayer(PromptLayerMixin):
         group_id: Union[int, None] = None,
         stream: bool = False,
         pl_run_span_id: Union[str, None] = None,
+        provider: Union[str, None] = None,
+        model: Union[str, None] = None,
     ) -> Dict[str, Any]:
         get_prompt_template_params = self._prepare_get_prompt_template_params(
             prompt_version=prompt_version,
             prompt_release_label=prompt_release_label,
             input_variables=input_variables,
             metadata=metadata,
+            provider=provider,
+            model=model,
         )
         prompt_blueprint = self.templates.get(prompt_name, get_prompt_template_params)
         prompt_blueprint_model = self._validate_and_extract_model_from_prompt_blueprint(
@@ -216,6 +220,8 @@ class PromptLayer(PromptLayerMixin):
         metadata: Union[Dict[str, str], None] = None,
         group_id: Union[int, None] = None,
         stream: bool = False,
+        provider: Union[str, None] = None,
+        model: Union[str, None] = None,
     ) -> Dict[str, Any]:
         _run_internal_kwargs = {
             "prompt_name": prompt_name,
@@ -227,6 +233,8 @@ class PromptLayer(PromptLayerMixin):
             "metadata": metadata,
             "group_id": group_id,
             "stream": stream,
+            "provider": provider,
+            "model": model,
         }
 
         if self.tracer:
@@ -422,6 +430,8 @@ class AsyncPromptLayer(PromptLayerMixin):
         metadata: Union[Dict[str, str], None] = None,
         group_id: Union[int, None] = None,
         stream: bool = False,
+        provider: Union[str, None] = None,
+        model: Union[str, None] = None,
     ) -> Dict[str, Any]:
         _run_internal_kwargs = {
             "prompt_name": prompt_name,
@@ -433,6 +443,8 @@ class AsyncPromptLayer(PromptLayerMixin):
             "metadata": metadata,
             "group_id": group_id,
             "stream": stream,
+            "provider": provider,
+            "model": model,
         }
 
         if self.tracer:
@@ -546,12 +558,16 @@ class AsyncPromptLayer(PromptLayerMixin):
         group_id: Union[int, None] = None,
         stream: bool = False,
         pl_run_span_id: Union[str, None] = None,
+        provider: Union[str, None] = None,
+        model: Union[str, None] = None,
     ) -> Dict[str, Any]:
         get_prompt_template_params = self._prepare_get_prompt_template_params(
             prompt_version=prompt_version,
             prompt_release_label=prompt_release_label,
             input_variables=input_variables,
             metadata=metadata,
+            provider=provider,
+            model=model,
         )
         prompt_blueprint = await self.templates.get(prompt_name, get_prompt_template_params)
         prompt_blueprint_model = self._validate_and_extract_model_from_prompt_blueprint(
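Net effect of the promptlayer.py hunks: run() and its async counterpart accept two new optional arguments, provider and model, which are threaded down to the template fetch. A minimal usage sketch follows; the API key and template name are placeholders, and the provider/model values are illustrative, not the only supported ones:

from promptlayer import PromptLayer

pl = PromptLayer(api_key="pl_...")  # placeholder key

# provider/model are the new optional overrides added in this diff;
# omitting them preserves the 1.0.55 behavior of using whatever the
# prompt template itself specifies.
response = pl.run(
    prompt_name="my-template",           # placeholder template name
    input_variables={"topic": "diffs"},
    provider="openai",                   # new optional override
    model="gpt-4o-mini",                 # new optional override
)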
promptlayer/promptlayer_mixins.py
CHANGED
@@ -183,6 +183,8 @@ class PromptLayerMixin:
         prompt_release_label: Union[str, None],
         input_variables: Union[Dict[str, Any], None],
         metadata: Union[Dict[str, str], None],
+        provider: Union[str, None] = None,
+        model: Union[str, None] = None,
     ) -> Dict[str, Any]:
         params = {}
 
@@ -194,6 +196,10 @@ class PromptLayerMixin:
             params["input_variables"] = input_variables
         if metadata:
             params["metadata_filters"] = metadata
+        if provider:
+            params["provider"] = provider
+        if model:
+            params["model"] = model
 
         return params
 
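The mixin hunk shows where those overrides end up: _prepare_get_prompt_template_params only includes truthy values in the query parameters sent with the template request. A standalone sketch of that filtering, runnable without the SDK (the function name here is hypothetical):

from typing import Any, Dict, Union

def prepare_params_sketch(
    provider: Union[str, None] = None,
    model: Union[str, None] = None,
) -> Dict[str, Any]:
    """Isolated sketch of the filtering added to
    _prepare_get_prompt_template_params: falsy values are dropped."""
    params: Dict[str, Any] = {}
    if provider:
        params["provider"] = provider
    if model:
        params["model"] = model
    return params

assert prepare_params_sketch() == {}
assert prepare_params_sketch(provider="anthropic") == {"provider": "anthropic"}
assert prepare_params_sketch(provider="openai", model="gpt-4o-mini") == {
    "provider": "openai",
    "model": "gpt-4o-mini",
}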
promptlayer/utils.py
CHANGED
@@ -697,6 +697,7 @@ def build_anthropic_content_blocks(events):
     current_signature = ""
     current_thinking = ""
     current_text = ""
+    current_tool_input_json = ""
     usage = None
     stop_reason = None
 
@@ -708,6 +709,8 @@ def build_anthropic_content_blocks(events):
                 current_thinking = ""
             elif current_block.type == "text":
                 current_text = ""
+            elif current_block.type == "tool_use":
+                current_tool_input_json = ""
         elif event.type == "content_block_delta" and current_block is not None:
             if current_block.type == "thinking":
                 if hasattr(event.delta, "signature"):
@@ -717,18 +720,26 @@ def build_anthropic_content_blocks(events):
             elif current_block.type == "text":
                 if hasattr(event.delta, "text"):
                     current_text += event.delta.text
+            elif current_block.type == "tool_use":
+                if hasattr(event.delta, "partial_json"):
+                    current_tool_input_json += event.delta.partial_json
         elif event.type == "content_block_stop" and current_block is not None:
             if current_block.type == "thinking":
                 current_block.signature = current_signature
                 current_block.thinking = current_thinking
             elif current_block.type == "text":
                 current_block.text = current_text
-
+            elif current_block.type == "tool_use":
+                try:
+                    current_block.input = json.loads(current_tool_input_json)
+                except json.JSONDecodeError:
+                    current_block.input = {}
             content_blocks.append(current_block)
             current_block = None
             current_signature = ""
             current_thinking = ""
             current_text = ""
+            current_tool_input_json = ""
         elif event.type == "message_delta":
             if hasattr(event, "usage"):
                 usage = event.usage
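This hunk mirrors how Anthropic's Messages API streams tool calls: each delta for a tool_use block carries a partial_json fragment of the tool arguments, and the fragments only form valid JSON once concatenated at content_block_stop. A self-contained sketch of the accumulate-then-parse step, using SimpleNamespace stand-ins instead of the real anthropic event types:

import json
from types import SimpleNamespace

# Stand-ins for streamed input_json_delta events: each fragment is
# not valid JSON on its own.
deltas = [
    SimpleNamespace(partial_json='{"city": "Par'),
    SimpleNamespace(partial_json='is", "units": "metric"}'),
]

current_tool_input_json = ""
for delta in deltas:
    if hasattr(delta, "partial_json"):
        current_tool_input_json += delta.partial_json

# Parse only once the block stops; fall back to {} on malformed input,
# matching the json.JSONDecodeError branch added in the diff.
try:
    tool_input = json.loads(current_tool_input_json)
except json.JSONDecodeError:
    tool_input = {}

print(tool_input)  # {'city': 'Paris', 'units': 'metric'}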
@@ -1916,6 +1927,31 @@ def google_completions_request(client, **kwargs):
     return client.models.generate_content(model=model, contents=contents, config=config)
 
 
+def _build_google_response_from_parts(thought_content: str, regular_content: str, function_calls: list, last_result):
+    """Helper function to build Google response with thought, regular, and function call parts."""
+    from google.genai.chats import Part
+
+    response = last_result.model_copy()
+    final_parts = []
+
+    if thought_content:
+        thought_part = Part(text=thought_content, thought=True)
+        final_parts.append(thought_part)
+
+    if regular_content:
+        text_part = Part(text=regular_content, thought=None)
+        final_parts.append(text_part)
+
+    for function_call in function_calls:
+        function_part = Part(function_call=function_call, thought=None)
+        final_parts.append(function_part)
+
+    if final_parts:
+        response.candidates[0].content.parts = final_parts
+
+    return response
+
+
 def map_google_stream_response(results: list):
     from google.genai.chats import GenerateContentResponse
 
@@ -1923,13 +1959,23 @@ def map_google_stream_response(results: list):
     if not results:
         return response
     results: List[GenerateContentResponse] = results
-
+
+    thought_content = ""
+    regular_content = ""
+    function_calls = []
+
     for result in results:
-
-
-
-
-
+        if result.candidates and result.candidates[0].content.parts:
+            for part in result.candidates[0].content.parts:
+                if hasattr(part, "text") and part.text:
+                    if hasattr(part, "thought") and part.thought:
+                        thought_content = f"{thought_content}{part.text}"
+                    else:
+                        regular_content = f"{regular_content}{part.text}"
+                elif hasattr(part, "function_call") and part.function_call:
+                    function_calls.append(part.function_call)
+
+    return _build_google_response_from_parts(thought_content, regular_content, function_calls, results[-1])
 
 
 def google_stream_chat(results: list):
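map_google_stream_response now folds every streamed chunk into three buckets (thought text, regular text, function calls) before rebuilding a single response from the last chunk. The bucketing can be exercised in isolation; the sketch below swaps google.genai's Part objects for SimpleNamespace stand-ins and uses getattr where the diff uses hasattr plus a truthiness check:

from types import SimpleNamespace

def bucket_parts(parts):
    """Sketch of the per-part routing used by map_google_stream_response."""
    thought_content, regular_content, function_calls = "", "", []
    for part in parts:
        if getattr(part, "text", None):
            if getattr(part, "thought", None):
                thought_content += part.text
            else:
                regular_content += part.text
        elif getattr(part, "function_call", None):
            function_calls.append(part.function_call)
    return thought_content, regular_content, function_calls

parts = [
    SimpleNamespace(text="Let me check...", thought=True, function_call=None),
    SimpleNamespace(text="Paris is ", thought=None, function_call=None),
    SimpleNamespace(text="the capital.", thought=None, function_call=None),
    SimpleNamespace(text=None, thought=None, function_call={"name": "get_weather"}),
]
print(bucket_parts(parts))
# ('Let me check...', 'Paris is the capital.', [{'name': 'get_weather'}])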
@@ -1995,14 +2041,29 @@ async def amap_google_stream_response(generator: AsyncIterable[Any]):
 async def amap_google_stream_response(generator: AsyncIterable[Any]):
     from google.genai.chats import GenerateContentResponse
 
-    GenerateContentResponse()
-
+    response = GenerateContentResponse()
+
+    thought_content = ""
+    regular_content = ""
+    function_calls = []
+    last_result = None
+
     async for result in generator:
-
-
-
-
-
+        last_result = result
+        if result.candidates and result.candidates[0].content.parts:
+            for part in result.candidates[0].content.parts:
+                if hasattr(part, "text") and part.text:
+                    if hasattr(part, "thought") and part.thought:
+                        thought_content = f"{thought_content}{part.text}"
+                    else:
+                        regular_content = f"{regular_content}{part.text}"
+                elif hasattr(part, "function_call") and part.function_call:
+                    function_calls.append(part.function_call)
+
+    if not last_result:
+        return response
+
+    return _build_google_response_from_parts(thought_content, regular_content, function_calls, last_result)
 
 
 async def agoogle_stream_chat(generator: AsyncIterable[Any]):
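The async variant applies the same bucketing while tracking last_result so the merged response can inherit its metadata from the final chunk. A sketch of driving such a loop with a fake async generator; the generator and chunk shapes below are stand-ins, not the real google.genai stream:

import asyncio
from types import SimpleNamespace

async def fake_stream():
    # Stand-in for the google.genai async streaming generator.
    for text in ["Hello, ", "world"]:
        content = SimpleNamespace(parts=[SimpleNamespace(text=text, thought=None)])
        yield SimpleNamespace(candidates=[SimpleNamespace(content=content)])

async def collect(generator):
    """Mirrors the shape of amap_google_stream_response's loop:
    remember the last chunk and accumulate regular text along the way."""
    regular_content, last_result = "", None
    async for result in generator:
        last_result = result
        for part in result.candidates[0].content.parts:
            if getattr(part, "text", None) and not getattr(part, "thought", None):
                regular_content += part.text
    return regular_content, last_result

text, last = asyncio.run(collect(fake_stream()))
print(text)  # Hello, world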
{promptlayer-1.0.55.dist-info → promptlayer-1.0.57.dist-info}/RECORD
CHANGED
@@ -1,9 +1,9 @@
-promptlayer/__init__.py,sha256=
+promptlayer/__init__.py,sha256=uvVwr1nN_g3tfP5Ka_jwOhCRuY1vQIweZrXfYK801Ak,140
 promptlayer/groups/__init__.py,sha256=xhOAolLUBkr76ZHvJr29OwjCIk1V9qKQXjZCuyTJUIY,429
 promptlayer/groups/groups.py,sha256=YPROicy-TzpkrpA8vOpZS2lwvJ6VRtlbQ1S2oT1N0vM,338
-promptlayer/promptlayer.py,sha256=
+promptlayer/promptlayer.py,sha256=4MtP_byvYdVi4yZp_VCyVpPGx3iAaijwDZHPoYDfqZc,22212
 promptlayer/promptlayer_base.py,sha256=jOgXzNZlV1LKOOsXSSAOgn8o4hXn_EV0oY9Nf3Bsu_s,6872
-promptlayer/promptlayer_mixins.py,sha256=
+promptlayer/promptlayer_mixins.py,sha256=MTOIx2jyebph4pu8fZA7QQPm22W_leX_CUmCiNt1K8U,11677
 promptlayer/span_exporter.py,sha256=Pc1-zWAcjVCSykh-4rYPqiEZvzkG9xaYLVoHFY_TWaQ,2410
 promptlayer/templates.py,sha256=7ObDPMzHXjttDdJdCXA_pDL9XAnmcujIWucmgZJcOC8,1179
 promptlayer/track/__init__.py,sha256=tyweLTAY7UpYpBHWwY-T3pOPDIlGjcgccYXqU_r0694,1710
@@ -11,8 +11,8 @@ promptlayer/track/track.py,sha256=A-awcYwsSwxktrlCMchy8NITIquwxU1UXbgLZMwqrA0,31
 promptlayer/types/__init__.py,sha256=xJcvQuOk91ZBBePb40-1FDNDKYrZoH5lPE2q6_UhprM,111
 promptlayer/types/prompt_template.py,sha256=blkVBhh4u5pMhgX_Dsn78sN7Rv2Vy_zhd1-NERLXTpM,5075
 promptlayer/types/request_log.py,sha256=xU6bcxQar6GaBOJlgZTavXUV3FjE8sF_nSjPu4Ya_00,174
-promptlayer/utils.py,sha256=
-promptlayer-1.0.
-promptlayer-1.0.
-promptlayer-1.0.
-promptlayer-1.0.
+promptlayer/utils.py,sha256=bUrDMZtG4jSfp7nHc6A5ce3n7-5K1LCAlzEOr3y2ACw,73819
+promptlayer-1.0.57.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+promptlayer-1.0.57.dist-info/METADATA,sha256=lh7g5d0UA4rZXzJTxueyn8oobJqosEB9EbM3ytnISF4,4819
+promptlayer-1.0.57.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+promptlayer-1.0.57.dist-info/RECORD,,
{promptlayer-1.0.55.dist-info → promptlayer-1.0.57.dist-info}/LICENSE
File without changes
{promptlayer-1.0.55.dist-info → promptlayer-1.0.57.dist-info}/WHEEL
File without changes