promptlayer 1.0.51__tar.gz → 1.0.53__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of promptlayer might be problematic.

@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: promptlayer
- Version: 1.0.51
+ Version: 1.0.53
  Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
  License: Apache-2.0
  Author: Magniv
@@ -1,4 +1,4 @@
  from .promptlayer import AsyncPromptLayer, PromptLayer

- __version__ = "1.0.51"
+ __version__ = "1.0.53"
  __all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]
@@ -137,7 +137,7 @@ class PromptLayer(PromptLayerMixin):
  prompt_blueprint_model = self._validate_and_extract_model_from_prompt_blueprint(
  prompt_blueprint=prompt_blueprint, prompt_name=prompt_name
  )
- llm_request_params = self._prepare_llm_request_params(
+ llm_data = self._prepare_llm_data(
  prompt_blueprint=prompt_blueprint,
  prompt_template=prompt_blueprint["prompt_template"],
  prompt_blueprint_model=prompt_blueprint_model,
@@ -145,24 +145,30 @@ class PromptLayer(PromptLayerMixin):
  stream=stream,
  )

- response = llm_request_params["request_function"](
- llm_request_params["prompt_blueprint"], **llm_request_params["kwargs"]
+ # response is just whatever the LLM call returns
+ # streaming=False > Pydantic model instance
+ # streaming=True > generator that yields ChatCompletionChunk pieces as they arrive
+ response = llm_data["request_function"](
+ prompt_blueprint=llm_data["prompt_blueprint"],
+ client_kwargs=llm_data["client_kwargs"],
+ function_kwargs=llm_data["function_kwargs"],
  )

  if stream:
  return stream_response(
- response,
- self._create_track_request_callable(
- request_params=llm_request_params,
+ generator=response,
+ after_stream=self._create_track_request_callable(
+ request_params=llm_data,
  tags=tags,
  input_variables=input_variables,
  group_id=group_id,
  pl_run_span_id=pl_run_span_id,
  ),
- llm_request_params["stream_function"],
+ map_results=llm_data["stream_function"],
  )
+
  request_log = self._track_request_log(
- llm_request_params,
+ llm_data,
  tags,
  input_variables,
  group_id,
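For orientation, here is a minimal, self-contained sketch of the dict shape that `_prepare_llm_data` returns and that the new call site above consumes. The keys match this diff; the values and the stand-in request function are invented for illustration and are not package code:

    # Sketch only: stand-in for a provider helper such as openai_request.
    def fake_request_function(*, prompt_blueprint, client_kwargs, function_kwargs):
        return {"blueprint": prompt_blueprint, "client": client_kwargs, "call": function_kwargs}

    llm_data = {
        "provider": "openai",                       # illustrative
        "function_name": "chat",                    # illustrative
        "stream_function": None,                    # illustrative
        "request_function": fake_request_function,
        "client_kwargs": {},                        # goes to the provider client constructor
        "function_kwargs": {"model": "gpt-4o", "messages": [], "stream": False},
        "prompt_blueprint": {"prompt_template": {"type": "chat"}},
    }

    response = llm_data["request_function"](
        prompt_blueprint=llm_data["prompt_blueprint"],
        client_kwargs=llm_data["client_kwargs"],
        function_kwargs=llm_data["function_kwargs"],
    )
    print(response["call"]["model"])  # -> gpt-4o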
@@ -551,7 +557,7 @@ class AsyncPromptLayer(PromptLayerMixin):
  prompt_blueprint_model = self._validate_and_extract_model_from_prompt_blueprint(
  prompt_blueprint=prompt_blueprint, prompt_name=prompt_name
  )
- llm_request_params = self._prepare_llm_request_params(
+ llm_data = self._prepare_llm_data(
  prompt_blueprint=prompt_blueprint,
  prompt_template=prompt_blueprint["prompt_template"],
  prompt_blueprint_model=prompt_blueprint_model,
@@ -560,13 +566,15 @@ class AsyncPromptLayer(PromptLayerMixin):
  is_async=True,
  )

- response = await llm_request_params["request_function"](
- llm_request_params["prompt_blueprint"], **llm_request_params["kwargs"]
+ response = await llm_data["request_function"](
+ prompt_blueprint=llm_data["prompt_blueprint"],
+ client_kwargs=llm_data["client_kwargs"],
+ function_kwargs=llm_data["function_kwargs"],
  )

  if stream:
  track_request_callable = await self._create_track_request_callable(
- request_params=llm_request_params,
+ request_params=llm_data,
  tags=tags,
  input_variables=input_variables,
  group_id=group_id,
@@ -575,11 +583,11 @@ class AsyncPromptLayer(PromptLayerMixin):
  return astream_response(
  response,
  track_request_callable,
- llm_request_params["stream_function"],
+ llm_data["stream_function"],
  )

  request_log = await self._track_request_log(
- llm_request_params,
+ llm_data,
  tags,
  input_variables,
  group_id,
@@ -2,7 +2,7 @@ import asyncio
  import datetime
  from copy import deepcopy
  from functools import wraps
- from typing import Dict, Union
+ from typing import Any, Dict, Union

  from opentelemetry.sdk.resources import Resource
  from opentelemetry.sdk.trace import TracerProvider
@@ -92,11 +92,11 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {


  MAP_PROVIDER_TO_FUNCTION = {
- "openai": openai_request,
  "anthropic": anthropic_request,
- "openai.azure": azure_openai_request,
- "mistral": mistral_request,
  "google": google_request,
+ "mistral": mistral_request,
+ "openai": openai_request,
+ "openai.azure": azure_openai_request,
  }

  AMAP_PROVIDER_TO_FUNCTION_NAME = {
@@ -154,11 +154,11 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {


  AMAP_PROVIDER_TO_FUNCTION = {
- "openai": aopenai_request,
  "anthropic": aanthropic_request,
- "openai.azure": aazure_openai_request,
- "mistral": amistral_request,
  "google": agoogle_request,
+ "mistral": amistral_request,
+ "openai": aopenai_request,
+ "openai.azure": aazure_openai_request,
  }

@@ -177,7 +177,13 @@ class PromptLayerMixin:
  return None, None

  @staticmethod
- def _prepare_get_prompt_template_params(*, prompt_version, prompt_release_label, input_variables, metadata):
+ def _prepare_get_prompt_template_params(
+ *,
+ prompt_version: Union[int, None],
+ prompt_release_label: Union[str, None],
+ input_variables: Union[Dict[str, Any], None],
+ metadata: Union[Dict[str, str], None],
+ ) -> Dict[str, Any]:
  params = {}

  if prompt_version:
@@ -192,7 +198,7 @@ class PromptLayerMixin:
  return params

  @staticmethod
- def _prepare_llm_request_params(
+ def _prepare_llm_data(
  *,
  prompt_blueprint,
  prompt_template,
@@ -201,8 +207,26 @@ class PromptLayerMixin:
  stream,
  is_async=False,
  ):
+ client_kwargs = {}
+ function_kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
+ function_kwargs["stream"] = stream
  provider = prompt_blueprint_model["provider"]
- kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
+
+ if custom_provider := prompt_blueprint.get("custom_provider"):
+ provider = custom_provider["client"]
+ client_kwargs = {
+ "api_key": custom_provider["api_key"],
+ "base_url": custom_provider["base_url"],
+ }
+ elif provider_base_url := prompt_blueprint.get("provider_base_url"):
+ client_kwargs["base_url"] = provider_base_url["url"]
+
+ if model_parameter_overrides:
+ function_kwargs.update(model_parameter_overrides)
+
+ if stream and provider in ["openai", "openai.azure"]:
+ function_kwargs["stream_options"] = {"include_usage": True}
+
  if is_async:
  config = AMAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
  request_function = AMAP_PROVIDER_TO_FUNCTION[provider]
@@ -210,22 +234,13 @@ class PromptLayerMixin:
  config = MAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
  request_function = MAP_PROVIDER_TO_FUNCTION[provider]

- if provider_base_url := prompt_blueprint.get("provider_base_url"):
- kwargs["base_url"] = provider_base_url["url"]
-
- if model_parameter_overrides:
- kwargs.update(model_parameter_overrides)
-
- kwargs["stream"] = stream
- if stream and provider in ["openai", "openai.azure"]:
- kwargs["stream_options"] = {"include_usage": True}
-
  return {
  "provider": provider,
  "function_name": config["function_name"],
  "stream_function": config["stream_function"],
  "request_function": request_function,
- "kwargs": kwargs,
+ "client_kwargs": client_kwargs,
+ "function_kwargs": function_kwargs,
  "prompt_blueprint": prompt_blueprint,
  }

@@ -263,7 +278,7 @@ class PromptLayerMixin:
  "function_name": request_params["function_name"],
  "provider_type": request_params["provider"],
  "args": [],
- "kwargs": request_params["kwargs"],
+ "kwargs": request_params["function_kwargs"],
  "tags": tags,
  "request_start_time": datetime.datetime.now(datetime.timezone.utc).timestamp(),
  "request_end_time": datetime.datetime.now(datetime.timezone.utc).timestamp(),
@@ -23,6 +23,11 @@ class TextContent(TypedDict, total=False):
  text: str


+ class ThinkingContent(TypedDict, total=False):
+ type: Literal["thinking"]
+ thinking: str
+
+
  class ImageContent(TypedDict, total=False):
  type: Literal["image_url"]
  image_url: ImageUrl
@@ -44,7 +49,7 @@ class MediaVariable(TypedDict, total=False):
  name: str


- Content = Union[TextContent, ImageContent, MediaContnt, MediaVariable]
+ Content = Union[TextContent, ThinkingContent, ImageContent, MediaContnt, MediaVariable]


  class Function(TypedDict, total=False):
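Since ThinkingContent is a TypedDict, a thinking block is just a plain dict at runtime. A tiny sketch (the class is re-declared locally so the snippet runs on its own; the value is invented):

    from typing import Literal, TypedDict

    # Sketch only: local re-declaration of the new content type for illustration.
    class ThinkingContent(TypedDict, total=False):
        type: Literal["thinking"]
        thinking: str

    block: ThinkingContent = {"type": "thinking", "thinking": "Compare the two options first..."}
    print(block["thinking"])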
@@ -788,30 +788,79 @@ class GeneratorProxy:
  response = ""
  for result in self.results:
  if hasattr(result, "completion"):
- response = f"{response}{result.completion}"
+ response += result.completion
  elif hasattr(result, "message") and isinstance(result.message, str):
- response = f"{response}{result.message}"
+ response += result.message
  elif (
  hasattr(result, "content_block")
  and hasattr(result.content_block, "text")
- and "type" in result
- and result.type != "message_stop"
+ and getattr(result, "type", None) != "message_stop"
  ):
- response = f"{response}{result.content_block.text}"
- elif hasattr(result, "delta") and hasattr(result.delta, "text"):
- response = f"{response}{result.delta.text}"
- if (
- hasattr(self.results[-1], "type") and self.results[-1].type == "message_stop"
- ): # this is a message stream and not the correct event
+ response += result.content_block.text
+ elif hasattr(result, "delta"):
+ if hasattr(result.delta, "thinking"):
+ response += result.delta.thinking
+ elif hasattr(result.delta, "text"):
+ response += result.delta.text
+
+ # 2) If this is a “stream” (ended by message_stop), reconstruct both ThinkingBlock & TextBlock
+ last_event = self.results[-1]
+ if getattr(last_event, "type", None) == "message_stop":
  final_result = deepcopy(self.results[0].message)
- final_result.usage = None
- content_block = deepcopy(self.results[1].content_block)
- content_block.text = response
- final_result.content = [content_block]
+
+ content_blocks = []
+ current_block = None
+ current_signature = ""
+ current_thinking = ""
+ current_text = ""
+
+ for event in self.results:
+ # On a new content block starting:
+ if getattr(event, "type", None) == "content_block_start":
+ current_block = deepcopy(event.content_block)
+
+ if getattr(event.content_block, "type", None) == "thinking":
+ current_signature = ""
+ current_thinking = ""
+ elif getattr(event.content_block, "type", None) == "text":
+ current_text = ""
+
+ elif getattr(event, "type", None) == "content_block_delta" and current_block is not None:
+ if getattr(current_block, "type", None) == "thinking":
+ if hasattr(event.delta, "signature"):
+ current_signature = event.delta.signature
+ if hasattr(event.delta, "thinking"):
+ current_thinking += event.delta.thinking
+
+ elif getattr(current_block, "type", None) == "text":
+ if hasattr(event.delta, "text"):
+ current_text += event.delta.text
+
+ elif getattr(event, "type", None) == "content_block_stop" and current_block is not None:
+ if getattr(current_block, "type", None) == "thinking":
+ current_block.signature = current_signature
+ current_block.thinking = current_thinking
+ elif getattr(current_block, "type", None) == "text":
+ current_block.text = current_text
+
+ content_blocks.append(current_block)
+
+ current_block = None
+ current_signature = ""
+ current_thinking = ""
+ current_text = ""
+
+ final_result.content = content_blocks
+ for event in reversed(self.results):
+ if hasattr(event, "usage") and hasattr(event.usage, "output_tokens"):
+ final_result.usage.output_tokens = event.usage.output_tokens
+ break
+
+ return final_result
+
+ # 3) Otherwise (not a “stream”), fall back to returning the last raw message
  else:
- final_result = deepcopy(self.results[-1])
- final_result.completion = response
- return final_result
+ return deepcopy(self.results[-1])
  if hasattr(self.results[0].choices[0], "text"): # this is regular completion
  response = ""
  for result in self.results:
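To make the reconstruction logic above concrete, here is a toy, self-contained walk-through of the same event handling with simplified Anthropic-style stream events. The event objects are hand-built stand-ins, not real SDK types, and the accumulation is deliberately flattened compared to the package code:

    from types import SimpleNamespace as NS

    # Sketch only: a fake stream of content_block_start / _delta / _stop events ending in message_stop.
    events = [
        NS(type="content_block_start", content_block=NS(type="thinking", thinking="", signature="")),
        NS(type="content_block_delta", delta=NS(thinking="Let me think. ")),
        NS(type="content_block_delta", delta=NS(signature="sig-abc")),
        NS(type="content_block_stop"),
        NS(type="content_block_start", content_block=NS(type="text", text="")),
        NS(type="content_block_delta", delta=NS(text="Hello!")),
        NS(type="content_block_stop"),
        NS(type="message_stop"),
    ]

    blocks, current = [], None
    thinking = signature = text = ""
    for e in events:
        if getattr(e, "type", None) == "content_block_start":
            current = e.content_block          # start a fresh block
            thinking = signature = text = ""
        elif getattr(e, "type", None) == "content_block_delta" and current is not None:
            thinking += getattr(e.delta, "thinking", "")
            signature = getattr(e.delta, "signature", signature)
            text += getattr(e.delta, "text", "")
        elif getattr(e, "type", None) == "content_block_stop" and current is not None:
            if current.type == "thinking":
                current.thinking, current.signature = thinking, signature
            else:
                current.text = text
            blocks.append(current)             # finished block joins the reconstructed content list
            current = None

    print([(b.type, getattr(b, "thinking", "") or getattr(b, "text", "")) for b in blocks])
    # [('thinking', 'Let me think. '), ('text', 'Hello!')]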
@@ -1485,7 +1534,7 @@ async def aanthropic_stream_completion(generator: AsyncIterable[Any]) -> Any:
  return response


- def stream_response(generator: Generator, after_stream: Callable, map_results: Callable):
+ def stream_response(*, generator: Generator, after_stream: Callable, map_results: Callable):
  data = {
  "request_id": None,
  "raw_response": None,
@@ -1544,12 +1593,12 @@ MAP_TYPE_TO_OPENAI_FUNCTION = {
  }


- def openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+ def openai_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
  from openai import OpenAI

- client = OpenAI(base_url=kwargs.pop("base_url", None))
+ client = OpenAI(**client_kwargs)
  request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
- return request_to_make(client, **kwargs)
+ return request_to_make(client, **function_kwargs)


  async def aopenai_chat_request(client, **kwargs):
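All provider request helpers now share this two-dict signature: client_kwargs builds the SDK client, function_kwargs goes to the completion call. A sketch of the pattern with a stand-in client so it runs without any provider SDK installed; the names here are invented:

    # Sketch only, not package code.
    class FakeClient:
        """Stand-in for OpenAI / Anthropic / etc."""
        def __init__(self, **client_kwargs):
            self.client_kwargs = client_kwargs

        def create(self, **function_kwargs):
            return {"client": self.client_kwargs, "call": function_kwargs}

    def example_request(prompt_blueprint: dict, client_kwargs: dict, function_kwargs: dict):
        client = FakeClient(**client_kwargs)      # real helpers do e.g. OpenAI(**client_kwargs)
        return client.create(**function_kwargs)   # real helpers dispatch on prompt_template["type"]

    out = example_request(
        prompt_blueprint={"prompt_template": {"type": "chat"}},
        client_kwargs={"base_url": "https://llm.example.com/v1"},
        function_kwargs={"model": "example-model", "messages": [], "stream": False},
    )
    print(out["client"]["base_url"])  # -> https://llm.example.com/v1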
@@ -1566,28 +1615,30 @@ AMAP_TYPE_TO_OPENAI_FUNCTION = {
  }


- async def aopenai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+ async def aopenai_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
  from openai import AsyncOpenAI

- client = AsyncOpenAI(base_url=kwargs.pop("base_url", None))
+ client = AsyncOpenAI(**client_kwargs)
  request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
- return await request_to_make(client, **kwargs)
+ return await request_to_make(client, **function_kwargs)


- def azure_openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+ def azure_openai_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
  from openai import AzureOpenAI

- client = AzureOpenAI(azure_endpoint=kwargs.pop("base_url", None))
+ client = AzureOpenAI(azure_endpoint=client_kwargs.pop("base_url", None))
  request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
- return request_to_make(client, **kwargs)
+ return request_to_make(client, **function_kwargs)


- async def aazure_openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+ async def aazure_openai_request(
+ prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict
+ ):
  from openai import AsyncAzureOpenAI

- client = AsyncAzureOpenAI(azure_endpoint=kwargs.pop("base_url", None))
+ client = AsyncAzureOpenAI(azure_endpoint=client_kwargs.pop("base_url", None))
  request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
- return await request_to_make(client, **kwargs)
+ return await request_to_make(client, **function_kwargs)


  def anthropic_chat_request(client, **kwargs):
@@ -1604,12 +1655,12 @@ MAP_TYPE_TO_ANTHROPIC_FUNCTION = {
  }


- def anthropic_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+ def anthropic_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
  from anthropic import Anthropic

- client = Anthropic(base_url=kwargs.pop("base_url", None))
+ client = Anthropic(**client_kwargs)
  request_to_make = MAP_TYPE_TO_ANTHROPIC_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
- return request_to_make(client, **kwargs)
+ return request_to_make(client, **function_kwargs)


  async def aanthropic_chat_request(client, **kwargs):
@@ -1626,12 +1677,12 @@ AMAP_TYPE_TO_ANTHROPIC_FUNCTION = {
  }


- async def aanthropic_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+ async def aanthropic_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
  from anthropic import AsyncAnthropic

- client = AsyncAnthropic(base_url=kwargs.pop("base_url", None))
+ client = AsyncAnthropic(**client_kwargs)
  request_to_make = AMAP_TYPE_TO_ANTHROPIC_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
- return await request_to_make(client, **kwargs)
+ return await request_to_make(client, **function_kwargs)


  # do not remove! This is used in the langchain integration.
@@ -1690,31 +1741,29 @@ async def autil_log_request(api_key: str, **kwargs) -> Union[RequestLog, None]:
  return None


- def mistral_request(
- prompt_blueprint: GetPromptTemplateResponse,
- **kwargs,
- ):
+ def mistral_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
  from mistralai import Mistral

  client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY"))
- if "stream" in kwargs and kwargs["stream"]:
- kwargs.pop("stream")
- return client.chat.stream(**kwargs)
- if "stream" in kwargs:
- kwargs.pop("stream")
- return client.chat.complete(**kwargs)
+ if "stream" in function_kwargs and function_kwargs["stream"]:
+ function_kwargs.pop("stream")
+ return client.chat.stream(**function_kwargs)
+ if "stream" in function_kwargs:
+ function_kwargs.pop("stream")
+ return client.chat.complete(**function_kwargs)


  async def amistral_request(
  prompt_blueprint: GetPromptTemplateResponse,
- **kwargs,
+ _: dict,
+ function_kwargs: dict,
  ):
  from mistralai import Mistral

  client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY"))
- if "stream" in kwargs and kwargs["stream"]:
- return await client.chat.stream_async(**kwargs)
- return await client.chat.complete_async(**kwargs)
+ if "stream" in function_kwargs and function_kwargs["stream"]:
+ return await client.chat.stream_async(**function_kwargs)
+ return await client.chat.complete_async(**function_kwargs)


  def mistral_stream_chat(results: list):
@@ -1898,12 +1947,12 @@ MAP_TYPE_TO_GOOGLE_FUNCTION = {
  }


- def google_request(request: GetPromptTemplateResponse, **kwargs):
+ def google_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
  from google import genai

  client = genai.Client()
- request_to_make = MAP_TYPE_TO_GOOGLE_FUNCTION[request["prompt_template"]["type"]]
- return request_to_make(client, **kwargs)
+ request_to_make = MAP_TYPE_TO_GOOGLE_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
+ return request_to_make(client, **function_kwargs)


  async def agoogle_chat_request(client, **kwargs):
@@ -1936,12 +1985,12 @@ AMAP_TYPE_TO_GOOGLE_FUNCTION = {
  }


- async def agoogle_request(request: GetPromptTemplateResponse, **kwargs):
+ async def agoogle_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
  from google import genai

  client = genai.Client()
- request_to_make = AMAP_TYPE_TO_GOOGLE_FUNCTION[request["prompt_template"]["type"]]
- return await request_to_make(client, **kwargs)
+ request_to_make = AMAP_TYPE_TO_GOOGLE_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
+ return await request_to_make(client, **function_kwargs)


  async def amap_google_stream_response(generator: AsyncIterable[Any]):
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "promptlayer"
- version = "1.0.51"
+ version = "1.0.53"
  description = "PromptLayer is a platform for prompt engineering and tracks your LLM requests."
  authors = ["Magniv <hello@magniv.io>"]
  license = "Apache-2.0"