promptlayer 1.0.50__tar.gz → 1.0.52__tar.gz

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

This version of promptlayer has been flagged as a potentially problematic release.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: promptlayer
-Version: 1.0.50
+Version: 1.0.52
 Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
 License: Apache-2.0
 Author: Magniv
@@ -1,4 +1,4 @@
 from .promptlayer import AsyncPromptLayer, PromptLayer
 
-__version__ = "1.0.50"
+__version__ = "1.0.52"
 __all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]
@@ -137,7 +137,7 @@ class PromptLayer(PromptLayerMixin):
         prompt_blueprint_model = self._validate_and_extract_model_from_prompt_blueprint(
             prompt_blueprint=prompt_blueprint, prompt_name=prompt_name
         )
-        llm_request_params = self._prepare_llm_request_params(
+        llm_data = self._prepare_llm_data(
             prompt_blueprint=prompt_blueprint,
             prompt_template=prompt_blueprint["prompt_template"],
             prompt_blueprint_model=prompt_blueprint_model,
@@ -145,24 +145,30 @@ class PromptLayer(PromptLayerMixin):
             stream=stream,
         )
 
-        response = llm_request_params["request_function"](
-            llm_request_params["prompt_blueprint"], **llm_request_params["kwargs"]
+        # response is just whatever the LLM call returns
+        # streaming=False > Pydantic model instance
+        # streaming=True > generator that yields ChatCompletionChunk pieces as they arrive
+        response = llm_data["request_function"](
+            prompt_blueprint=llm_data["prompt_blueprint"],
+            client_kwargs=llm_data["client_kwargs"],
+            function_kwargs=llm_data["function_kwargs"],
         )
 
         if stream:
             return stream_response(
-                response,
-                self._create_track_request_callable(
-                    request_params=llm_request_params,
+                generator=response,
+                after_stream=self._create_track_request_callable(
+                    request_params=llm_data,
                     tags=tags,
                     input_variables=input_variables,
                     group_id=group_id,
                     pl_run_span_id=pl_run_span_id,
                 ),
-                llm_request_params["stream_function"],
+                map_results=llm_data["stream_function"],
             )
+
         request_log = self._track_request_log(
-            llm_request_params,
+            llm_data,
             tags,
             input_variables,
             group_id,
@@ -387,7 +393,7 @@ class AsyncPromptLayer(PromptLayerMixin):
         # Allows `workflow_name` to be passed both as keyword and positional argument
         # (virtually identical to `workflow_id_or_name`)
         workflow_name: Optional[str] = None,
-    ) -> Dict[str, Any]:
+    ) -> Union[Dict[str, Any], Any]:
         try:
             return await arun_workflow_request(
                 workflow_id_or_name=_get_workflow_workflow_id_or_name(workflow_id_or_name, workflow_name),
@@ -551,7 +557,7 @@ class AsyncPromptLayer(PromptLayerMixin):
         prompt_blueprint_model = self._validate_and_extract_model_from_prompt_blueprint(
             prompt_blueprint=prompt_blueprint, prompt_name=prompt_name
         )
-        llm_request_params = self._prepare_llm_request_params(
+        llm_data = self._prepare_llm_data(
            prompt_blueprint=prompt_blueprint,
            prompt_template=prompt_blueprint["prompt_template"],
            prompt_blueprint_model=prompt_blueprint_model,
@@ -560,13 +566,15 @@ class AsyncPromptLayer(PromptLayerMixin):
             is_async=True,
         )
 
-        response = await llm_request_params["request_function"](
-            llm_request_params["prompt_blueprint"], **llm_request_params["kwargs"]
+        response = await llm_data["request_function"](
+            prompt_blueprint=llm_data["prompt_blueprint"],
+            client_kwargs=llm_data["client_kwargs"],
+            function_kwargs=llm_data["function_kwargs"],
         )
 
         if stream:
             track_request_callable = await self._create_track_request_callable(
-                request_params=llm_request_params,
+                request_params=llm_data,
                 tags=tags,
                 input_variables=input_variables,
                 group_id=group_id,
@@ -575,11 +583,11 @@
             return astream_response(
                 response,
                 track_request_callable,
-                llm_request_params["stream_function"],
+                llm_data["stream_function"],
             )
 
         request_log = await self._track_request_log(
-            llm_request_params,
+            llm_data,
             tags,
             input_variables,
             group_id,
@@ -2,7 +2,7 @@ import asyncio
 import datetime
 from copy import deepcopy
 from functools import wraps
-from typing import Dict, Union
+from typing import Any, Dict, Union
 
 from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.trace import TracerProvider
@@ -92,11 +92,11 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
 
 
 MAP_PROVIDER_TO_FUNCTION = {
-    "openai": openai_request,
     "anthropic": anthropic_request,
-    "openai.azure": azure_openai_request,
-    "mistral": mistral_request,
     "google": google_request,
+    "mistral": mistral_request,
+    "openai": openai_request,
+    "openai.azure": azure_openai_request,
 }
 
 
 AMAP_PROVIDER_TO_FUNCTION_NAME = {
@@ -154,11 +154,11 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
 
 
 AMAP_PROVIDER_TO_FUNCTION = {
-    "openai": aopenai_request,
     "anthropic": aanthropic_request,
-    "openai.azure": aazure_openai_request,
-    "mistral": amistral_request,
     "google": agoogle_request,
+    "mistral": amistral_request,
+    "openai": aopenai_request,
+    "openai.azure": aazure_openai_request,
 }
 
 
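Both dictionaries are consulted the same way further down in _prepare_llm_data; a sketch of the lookup pattern, assuming the module-level maps above are in scope (the provider string and template type are example values, and only the key names function_name / stream_function come from this diff):

    provider = "mistral"                                   # example value
    template_type = "chat"                                 # example; normally prompt_template["type"]
    request_function = MAP_PROVIDER_TO_FUNCTION[provider]  # sync entry point, e.g. mistral_request
    arequest_function = AMAP_PROVIDER_TO_FUNCTION[provider]  # async entry point, e.g. amistral_request
    config = MAP_PROVIDER_TO_FUNCTION_NAME[provider][template_type]
    # config carries {"function_name": ..., "stream_function": ...} per the return dict below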
@@ -177,7 +177,13 @@ class PromptLayerMixin:
         return None, None
 
     @staticmethod
-    def _prepare_get_prompt_template_params(*, prompt_version, prompt_release_label, input_variables, metadata):
+    def _prepare_get_prompt_template_params(
+        *,
+        prompt_version: Union[int, None],
+        prompt_release_label: Union[str, None],
+        input_variables: Union[Dict[str, Any], None],
+        metadata: Union[Dict[str, str], None],
+    ) -> Dict[str, Any]:
         params = {}
 
         if prompt_version:
@@ -192,7 +198,7 @@ class PromptLayerMixin:
         return params
 
     @staticmethod
-    def _prepare_llm_request_params(
+    def _prepare_llm_data(
         *,
         prompt_blueprint,
         prompt_template,
@@ -201,8 +207,26 @@
         stream,
         is_async=False,
     ):
+        client_kwargs = {}
+        function_kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
+        function_kwargs["stream"] = stream
         provider = prompt_blueprint_model["provider"]
-        kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
+
+        if custom_provider := prompt_blueprint.get("custom_provider"):
+            provider = custom_provider["client"]
+            client_kwargs = {
+                "api_key": custom_provider["api_key"],
+                "base_url": custom_provider["base_url"],
+            }
+        elif provider_base_url := prompt_blueprint.get("provider_base_url"):
+            client_kwargs["base_url"] = provider_base_url["url"]
+
+        if model_parameter_overrides:
+            function_kwargs.update(model_parameter_overrides)
+
+        if stream and provider in ["openai", "openai.azure"]:
+            function_kwargs["stream_options"] = {"include_usage": True}
+
         if is_async:
             config = AMAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
             request_function = AMAP_PROVIDER_TO_FUNCTION[provider]
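To make the new branching concrete, a sketch of how a blueprint carrying a custom provider would be routed; every field value below is illustrative, only the key names (custom_provider, client, api_key, base_url, llm_kwargs) come from the diff:

    prompt_blueprint = {
        "llm_kwargs": {"model": "my-model", "temperature": 0.7},  # illustrative
        "custom_provider": {
            "client": "openai",                       # selects which client implementation to use
            "api_key": "sk-...",                       # forwarded to the client constructor
            "base_url": "https://my-gateway.example/v1",
        },
    }
    # After the branch above:
    #   provider      -> "openai"
    #   client_kwargs -> {"api_key": "sk-...", "base_url": "https://my-gateway.example/v1"}
    #   function_kwargs stays llm_kwargs plus the "stream" flag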
@@ -210,22 +234,13 @@
             config = MAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_template["type"]]
             request_function = MAP_PROVIDER_TO_FUNCTION[provider]
 
-        if provider_base_url := prompt_blueprint.get("provider_base_url"):
-            kwargs["base_url"] = provider_base_url["url"]
-
-        if model_parameter_overrides:
-            kwargs.update(model_parameter_overrides)
-
-        kwargs["stream"] = stream
-        if stream and provider in ["openai", "openai.azure"]:
-            kwargs["stream_options"] = {"include_usage": True}
-
         return {
             "provider": provider,
             "function_name": config["function_name"],
             "stream_function": config["stream_function"],
             "request_function": request_function,
-            "kwargs": kwargs,
+            "client_kwargs": client_kwargs,
+            "function_kwargs": function_kwargs,
             "prompt_blueprint": prompt_blueprint,
         }
 
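Putting the pieces together, a sketch of what _prepare_llm_data might return for a plain OpenAI chat blueprint with streaming enabled; the key names match the return statement above, while the concrete values and helper names are assumptions for illustration:

    llm_data = {
        "provider": "openai",
        "function_name": "openai.chat.completions.create",  # illustrative value from the provider config
        "stream_function": stream_openai_chat,              # illustrative name
        "request_function": openai_request,
        "client_kwargs": {},                                 # empty unless a custom provider or base URL is set
        "function_kwargs": {
            "model": "gpt-4o",                               # example model
            "messages": [{"role": "user", "content": "Hi"}],
            "stream": True,
            "stream_options": {"include_usage": True},       # injected for openai / openai.azure when streaming
        },
        "prompt_blueprint": prompt_blueprint,
    }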
@@ -263,7 +278,7 @@ class PromptLayerMixin:
            "function_name": request_params["function_name"],
            "provider_type": request_params["provider"],
            "args": [],
-           "kwargs": request_params["kwargs"],
+           "kwargs": request_params["function_kwargs"],
            "tags": tags,
            "request_start_time": datetime.datetime.now(datetime.timezone.utc).timestamp(),
            "request_end_time": datetime.datetime.now(datetime.timezone.utc).timestamp(),
@@ -1485,7 +1485,7 @@ async def aanthropic_stream_completion(generator: AsyncIterable[Any]) -> Any:
     return response
 
 
-def stream_response(generator: Generator, after_stream: Callable, map_results: Callable):
+def stream_response(*, generator: Generator, after_stream: Callable, map_results: Callable):
     data = {
         "request_id": None,
         "raw_response": None,
@@ -1544,12 +1544,12 @@ MAP_TYPE_TO_OPENAI_FUNCTION = {
 }
 
 
-def openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+def openai_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
     from openai import OpenAI
 
-    client = OpenAI(base_url=kwargs.pop("base_url", None))
+    client = OpenAI(**client_kwargs)
     request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
-    return request_to_make(client, **kwargs)
+    return request_to_make(client, **function_kwargs)
 
 
 async def aopenai_chat_request(client, **kwargs):
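A sketch of the new calling convention for these request helpers: client_kwargs feeds the OpenAI(...) constructor and function_kwargs feeds the mapped completion call. The argument values here are examples, not taken from the diff:

    openai_request(
        prompt_blueprint=prompt_blueprint,  # a GetPromptTemplateResponse dict (placeholder)
        client_kwargs={"api_key": "sk-...", "base_url": "https://api.openai.com/v1"},  # example
        function_kwargs={"model": "gpt-4o", "messages": [{"role": "user", "content": "Hi"}], "stream": False},
    )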
@@ -1566,28 +1566,30 @@ AMAP_TYPE_TO_OPENAI_FUNCTION = {
 }
 
 
-async def aopenai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+async def aopenai_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
     from openai import AsyncOpenAI
 
-    client = AsyncOpenAI(base_url=kwargs.pop("base_url", None))
+    client = AsyncOpenAI(**client_kwargs)
     request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
-    return await request_to_make(client, **kwargs)
+    return await request_to_make(client, **function_kwargs)
 
 
-def azure_openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+def azure_openai_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
     from openai import AzureOpenAI
 
-    client = AzureOpenAI(azure_endpoint=kwargs.pop("base_url", None))
+    client = AzureOpenAI(azure_endpoint=client_kwargs.pop("base_url", None))
     request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
-    return request_to_make(client, **kwargs)
+    return request_to_make(client, **function_kwargs)
 
 
-async def aazure_openai_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+async def aazure_openai_request(
+    prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict
+):
     from openai import AsyncAzureOpenAI
 
-    client = AsyncAzureOpenAI(azure_endpoint=kwargs.pop("base_url", None))
+    client = AsyncAzureOpenAI(azure_endpoint=client_kwargs.pop("base_url", None))
     request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
-    return await request_to_make(client, **kwargs)
+    return await request_to_make(client, **function_kwargs)
 
 
 def anthropic_chat_request(client, **kwargs):
@@ -1604,12 +1606,12 @@ MAP_TYPE_TO_ANTHROPIC_FUNCTION = {
 }
 
 
-def anthropic_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+def anthropic_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
     from anthropic import Anthropic
 
-    client = Anthropic(base_url=kwargs.pop("base_url", None))
+    client = Anthropic(**client_kwargs)
     request_to_make = MAP_TYPE_TO_ANTHROPIC_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
-    return request_to_make(client, **kwargs)
+    return request_to_make(client, **function_kwargs)
 
 
 async def aanthropic_chat_request(client, **kwargs):
@@ -1626,12 +1628,12 @@ AMAP_TYPE_TO_ANTHROPIC_FUNCTION = {
 }
 
 
-async def aanthropic_request(prompt_blueprint: GetPromptTemplateResponse, **kwargs):
+async def aanthropic_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
     from anthropic import AsyncAnthropic
 
-    client = AsyncAnthropic(base_url=kwargs.pop("base_url", None))
+    client = AsyncAnthropic(**client_kwargs)
     request_to_make = AMAP_TYPE_TO_ANTHROPIC_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
-    return await request_to_make(client, **kwargs)
+    return await request_to_make(client, **function_kwargs)
 
 
 # do not remove! This is used in the langchain integration.
@@ -1690,31 +1692,29 @@ async def autil_log_request(api_key: str, **kwargs) -> Union[RequestLog, None]:
     return None
 
 
-def mistral_request(
-    prompt_blueprint: GetPromptTemplateResponse,
-    **kwargs,
-):
+def mistral_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
     from mistralai import Mistral
 
     client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY"))
-    if "stream" in kwargs and kwargs["stream"]:
-        kwargs.pop("stream")
-        return client.chat.stream(**kwargs)
-    if "stream" in kwargs:
-        kwargs.pop("stream")
-        return client.chat.complete(**kwargs)
+    if "stream" in function_kwargs and function_kwargs["stream"]:
+        function_kwargs.pop("stream")
+        return client.chat.stream(**function_kwargs)
+    if "stream" in function_kwargs:
+        function_kwargs.pop("stream")
+        return client.chat.complete(**function_kwargs)
 
 
 async def amistral_request(
     prompt_blueprint: GetPromptTemplateResponse,
-    **kwargs,
+    _: dict,
+    function_kwargs: dict,
 ):
     from mistralai import Mistral
 
     client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY"))
-    if "stream" in kwargs and kwargs["stream"]:
-        return await client.chat.stream_async(**kwargs)
-    return await client.chat.complete_async(**kwargs)
+    if "stream" in function_kwargs and function_kwargs["stream"]:
+        return await client.chat.stream_async(**function_kwargs)
+    return await client.chat.complete_async(**function_kwargs)
 
 
 def mistral_stream_chat(results: list):
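A sketch of how the popped stream flag routes Mistral calls under the new three-argument signature; the model and message values are examples, prompt_blueprint is a placeholder, and MISTRAL_API_KEY is read from the environment exactly as in the code above:

    kwargs = {"model": "mistral-large-latest", "messages": [{"role": "user", "content": "Hi"}]}  # example
    mistral_request(prompt_blueprint, {}, {**kwargs, "stream": True})   # -> client.chat.stream(...), "stream" popped first
    mistral_request(prompt_blueprint, {}, {**kwargs, "stream": False})  # -> client.chat.complete(...)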
@@ -1898,12 +1898,12 @@ MAP_TYPE_TO_GOOGLE_FUNCTION = {
 }
 
 
-def google_request(request: GetPromptTemplateResponse, **kwargs):
+def google_request(request: GetPromptTemplateResponse, _: dict, function_kwargs: dict):
     from google import genai
 
     client = genai.Client()
     request_to_make = MAP_TYPE_TO_GOOGLE_FUNCTION[request["prompt_template"]["type"]]
-    return request_to_make(client, **kwargs)
+    return request_to_make(client, **function_kwargs)
 
 
 async def agoogle_chat_request(client, **kwargs):
@@ -1936,12 +1936,12 @@ AMAP_TYPE_TO_GOOGLE_FUNCTION = {
 }
 
 
-async def agoogle_request(request: GetPromptTemplateResponse, **kwargs):
+async def agoogle_request(request: GetPromptTemplateResponse, _: dict, function_kwargs: dict):
     from google import genai
 
     client = genai.Client()
     request_to_make = AMAP_TYPE_TO_GOOGLE_FUNCTION[request["prompt_template"]["type"]]
-    return await request_to_make(client, **function_kwargs)
+    return await request_to_make(client, **function_kwargs)
 
 
 async def amap_google_stream_response(generator: AsyncIterable[Any]):
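Both Google helpers keep a throwaway _ parameter so they match the new three-argument convention while ignoring client kwargs; genai.Client() is constructed without arguments and picks up credentials from the environment. A one-line sketch with example kwargs (the keyword names are illustrative, not taken from this diff):

    google_request(prompt_blueprint, {}, {"model": "gemini-2.0-flash", "contents": "Hi"})  # client kwargs are ignored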
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "promptlayer"
-version = "1.0.50"
+version = "1.0.52"
 description = "PromptLayer is a platform for prompt engineering and tracks your LLM requests."
 authors = ["Magniv <hello@magniv.io>"]
 license = "Apache-2.0"