promptlayer 1.0.31__tar.gz → 1.0.33__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of promptlayer might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: promptlayer
-Version: 1.0.31
+Version: 1.0.33
 Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
 License: Apache-2.0
 Author: Magniv
@@ -1,4 +1,4 @@
 from .promptlayer import AsyncPromptLayer, PromptLayer
 
-__version__ = "1.0.31"
+__version__ = "1.0.33"
 __all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]
@@ -483,7 +483,7 @@ class AsyncPromptLayer(PromptLayerMixin):
             )
             return await atrack_request(**track_request_kwargs)
 
-        return await _track_request
+        return _track_request
 
     async def _track_request_log(
         self,
@@ -547,15 +547,16 @@ class AsyncPromptLayer(PromptLayerMixin):
         )
 
         if stream:
+            track_request_callable = await self._create_track_request_callable(
+                request_params=llm_request_params,
+                tags=tags,
+                input_variables=input_variables,
+                group_id=group_id,
+                pl_run_span_id=pl_run_span_id,
+            )
             return astream_response(
                 response,
-                self._create_track_request_callable(
-                    request_params=llm_request_params,
-                    tags=tags,
-                    input_variables=input_variables,
-                    group_id=group_id,
-                    pl_run_span_id=pl_run_span_id,
-                ),
+                track_request_callable,
                 llm_request_params["stream_function"],
             )
 
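Note: the two hunks above fix the async streaming path. `_create_track_request_callable` is an async factory that returns a plain callable, so 1.0.31 both awaited the inner function object and passed the un-awaited coroutine into `astream_response`; 1.0.33 awaits the factory once and hands the resulting callable to the stream wrapper. A minimal, hypothetical sketch of the corrected pattern (names simplified, not the library's actual signatures):

import asyncio

async def create_tracker():
    # Async factory: builds and returns a coroutine *function*, not a result.
    async def track(**body):
        return {"request_id": 123, **body}
    return track  # return the callable itself, do not await it

async def consume(stream, tracker):
    # The stream wrapper receives the callable and awaits it once the stream ends.
    chunks = [chunk async for chunk in stream]
    return await tracker(raw_response=chunks)

async def main():
    tracker = await create_tracker()  # await the factory once...
    async def fake_stream():
        for chunk in ("a", "b"):
            yield chunk
    print(await consume(fake_stream(), tracker))  # ...then hand over the callable

asyncio.run(main())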
@@ -15,6 +15,8 @@ from promptlayer.utils import (
     aanthropic_stream_completion,
     aanthropic_stream_message,
     aazure_openai_request,
+    amistral_request,
+    amistral_stream_chat,
     anthropic_request,
     anthropic_stream_completion,
     anthropic_stream_message,
@@ -22,6 +24,8 @@ from promptlayer.utils import (
     aopenai_stream_chat,
     aopenai_stream_completion,
     azure_openai_request,
+    mistral_request,
+    mistral_stream_chat,
     openai_request,
     openai_stream_chat,
     openai_stream_completion,
@@ -58,6 +62,16 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": openai_stream_completion,
         },
     },
+    "mistral": {
+        "chat": {
+            "function_name": "mistral.client.chat",
+            "stream_function": mistral_stream_chat,
+        },
+        "completion": {
+            "function_name": None,
+            "stream_function": None,
+        },
+    },
 }
 
 
@@ -65,6 +79,7 @@ MAP_PROVIDER_TO_FUNCTION = {
     "openai": openai_request,
     "anthropic": anthropic_request,
     "openai.azure": azure_openai_request,
+    "mistral": mistral_request,
 }
 
 AMAP_PROVIDER_TO_FUNCTION_NAME = {
@@ -98,6 +113,16 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": aopenai_stream_completion,
         },
     },
+    "mistral": {
+        "chat": {
+            "function_name": "mistral.client.chat",
+            "stream_function": amistral_stream_chat,
+        },
+        "completion": {
+            "function_name": None,
+            "stream_function": None,
+        },
+    },
 }
 
 
@@ -105,6 +130,7 @@ AMAP_PROVIDER_TO_FUNCTION = {
     "openai": aopenai_request,
     "anthropic": aanthropic_request,
    "openai.azure": aazure_openai_request,
+    "mistral": amistral_request,
 }
 
 
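Note: the four hunks above register Mistral in both the sync and async provider maps, so a prompt template whose provider is "mistral" can be run like any other. A hedged usage sketch, assuming the mistralai package is installed, MISTRAL_API_KEY is set, and a template named "my-mistral-prompt" (hypothetical) is configured for a Mistral chat model:

import os
from promptlayer import PromptLayer

promptlayer_client = PromptLayer(api_key=os.environ["PROMPTLAYER_API_KEY"])

# run() resolves the template's provider ("mistral") through the maps above
# and dispatches to mistral_request / amistral_request accordingly.
response = promptlayer_client.run(
    prompt_name="my-mistral-prompt",  # hypothetical template name
    input_variables={"topic": "prompt engineering"},
)
print(response["raw_response"])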
@@ -28,7 +28,23 @@ class ImageContent(TypedDict, total=False):
     image_url: ImageUrl
 
 
-Content = Union[TextContent, ImageContent]
+class Media(TypedDict, total=False):
+    title: str
+    type: str
+    url: str
+
+
+class MediaContnt(TypedDict, total=False):
+    type: Literal["media"]
+    media: Media
+
+
+class MediaVariable(TypedDict, total=False):
+    type: Literal["media_variable"]
+    name: str
+
+
+Content = Union[TextContent, ImageContent, MediaContnt, MediaVariable]
 
 
 class Function(TypedDict, total=False):
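Note: the Content union now accepts media parts alongside text and images (the `MediaContnt` spelling is how the class ships). A small sketch of a message content list using the new shapes; all field values are illustrative only:

# Illustrative content parts matching the TypedDict shapes added above.
content = [
    {"type": "text", "text": "Summarize the attached report."},
    {
        "type": "media",
        "media": {
            "title": "q3-report.pdf",  # example values, not required ones
            "type": "application/pdf",
            "url": "https://example.com/q3-report.pdf",
        },
    },
    {"type": "media_variable", "name": "attachment"},  # resolved at run time
]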
@@ -2,7 +2,6 @@ import asyncio
 import contextvars
 import datetime
 import functools
-import inspect
 import json
 import os
 import sys
@@ -1449,15 +1448,17 @@ async def astream_response(
         results.append(result)
         data["raw_response"] = result
         yield data
-    request_response = await map_results(results)
-    if inspect.iscoroutinefunction(after_stream):
-        # after_stream is an async function
-        response = await after_stream(request_response=request_response.model_dump())
-    else:
-        # after_stream is synchronous
-        response = after_stream(request_response=request_response.model_dump())
-    data["request_id"] = response.get("request_id")
-    data["prompt_blueprint"] = response.get("prompt_blueprint")
+
+    async def async_generator_from_list(lst):
+        for item in lst:
+            yield item
+
+    request_response = await map_results(async_generator_from_list(results))
+    after_stream_response = await after_stream(
+        request_response=request_response.model_dump()
+    )
+    data["request_id"] = after_stream_response.get("request_id")
+    data["prompt_blueprint"] = after_stream_response.get("prompt_blueprint")
     yield data
 
 
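Note: in the hunk above, `map_results` in the async path now expects an async iterable rather than a list, and `after_stream` is always awaited, so the `inspect.iscoroutinefunction` branch (and the `inspect` import removed earlier) is no longer needed. The local `async_generator_from_list` helper is the standard trick for re-exposing an already-collected list of chunks as an async iterator, roughly:

import asyncio

async def async_generator_from_list(lst):
    # Yield already-collected items through an async generator interface.
    for item in lst:
        yield item

async def main():
    chunks = ["Hello", ", ", "world"]
    async for piece in async_generator_from_list(chunks):
        print(piece, end="")
    print()

asyncio.run(main())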
@@ -1631,3 +1632,164 @@ async def autil_log_request(api_key: str, **kwargs) -> Union[RequestLog, None]:
             file=sys.stderr,
         )
         return None
+
+
+def mistral_request(
+    prompt_blueprint: GetPromptTemplateResponse,
+    **kwargs,
+):
+    from mistralai import Mistral
+
+    client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY"))
+    if "stream" in kwargs and kwargs["stream"]:
+        cleaned_kwargs = kwargs.copy()
+        cleaned_kwargs.pop("stream")
+        return client.chat.stream(**cleaned_kwargs)
+    return client.chat.complete(**kwargs)
+
+
+async def amistral_request(
+    prompt_blueprint: GetPromptTemplateResponse,
+    **kwargs,
+):
+    from mistralai import Mistral
+
+    client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY"))
+    if "stream" in kwargs and kwargs["stream"]:
+        return await client.chat.stream_async(**kwargs)
+    return await client.chat.complete_async(**kwargs)
+
+
+def mistral_stream_chat(results: list):
+    from openai.types.chat import (
+        ChatCompletion,
+        ChatCompletionMessage,
+        ChatCompletionMessageToolCall,
+    )
+    from openai.types.chat.chat_completion import Choice
+    from openai.types.chat.chat_completion_message_tool_call import Function
+
+    last_result = results[-1]
+    response = ChatCompletion(
+        id=last_result.data.id,
+        object="chat.completion",
+        choices=[
+            Choice(
+                finish_reason=last_result.data.choices[0].finish_reason or "stop",
+                index=0,
+                message=ChatCompletionMessage(role="assistant"),
+            )
+        ],
+        created=last_result.data.created,
+        model=last_result.data.model,
+    )
+
+    content = ""
+    tool_calls = None
+
+    for result in results:
+        choices = result.data.choices
+        if len(choices) == 0:
+            continue
+
+        delta = choices[0].delta
+        if delta.content is not None:
+            content = f"{content}{delta.content}"
+
+        if delta.tool_calls:
+            tool_calls = tool_calls or []
+            for tool_call in delta.tool_calls:
+                if len(tool_calls) == 0 or tool_call.id:
+                    tool_calls.append(
+                        ChatCompletionMessageToolCall(
+                            id=tool_call.id or "",
+                            function=Function(
+                                name=tool_call.function.name,
+                                arguments=tool_call.function.arguments,
+                            ),
+                            type="function",
+                        )
+                    )
+                else:
+                    last_tool_call = tool_calls[-1]
+                    if tool_call.function.name:
+                        last_tool_call.function.name = (
+                            f"{last_tool_call.function.name}{tool_call.function.name}"
+                        )
+                    if tool_call.function.arguments:
+                        last_tool_call.function.arguments = f"{last_tool_call.function.arguments}{tool_call.function.arguments}"
+
+    response.choices[0].message.content = content
+    response.choices[0].message.tool_calls = tool_calls
+    response.usage = last_result.data.usage
+    return response
+
+
+async def amistral_stream_chat(generator: AsyncIterable[Any]) -> Any:
+    from openai.types.chat import (
+        ChatCompletion,
+        ChatCompletionMessage,
+        ChatCompletionMessageToolCall,
+    )
+    from openai.types.chat.chat_completion import Choice
+    from openai.types.chat.chat_completion_message_tool_call import Function
+
+    completion_chunks = []
+    response = ChatCompletion(
+        id="",
+        object="chat.completion",
+        choices=[
+            Choice(
+                finish_reason="stop",
+                index=0,
+                message=ChatCompletionMessage(role="assistant"),
+            )
+        ],
+        created=0,
+        model="",
+    )
+    content = ""
+    tool_calls = None
+
+    async for result in generator:
+        completion_chunks.append(result)
+        choices = result.data.choices
+        if len(choices) == 0:
+            continue
+        delta = choices[0].delta
+        if delta.content is not None:
+            content = f"{content}{delta.content}"
+
+        if delta.tool_calls:
+            tool_calls = tool_calls or []
+            for tool_call in delta.tool_calls:
+                if len(tool_calls) == 0 or tool_call.id:
+                    tool_calls.append(
+                        ChatCompletionMessageToolCall(
+                            id=tool_call.id or "",
+                            function=Function(
+                                name=tool_call.function.name,
+                                arguments=tool_call.function.arguments,
+                            ),
+                            type="function",
+                        )
+                    )
+                else:
+                    last_tool_call = tool_calls[-1]
+                    if tool_call.function.name:
+                        last_tool_call.function.name = (
+                            f"{last_tool_call.function.name}{tool_call.function.name}"
+                        )
+                    if tool_call.function.arguments:
+                        last_tool_call.function.arguments = f"{last_tool_call.function.arguments}{tool_call.function.arguments}"
+
+    if completion_chunks:
+        last_result = completion_chunks[-1]
+        response.id = last_result.data.id
+        response.created = last_result.data.created
+        response.model = last_result.data.model
+        response.usage = last_result.data.usage
+
+    response.choices[0].message.content = content
+    response.choices[0].message.tool_calls = tool_calls
+    return response
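Note: the utilities added above call the mistralai SDK directly and fold the streamed chunks into an OpenAI-style ChatCompletion so the rest of PromptLayer can treat every provider uniformly. A rough standalone sketch of that flow, assuming mistralai and openai are installed and MISTRAL_API_KEY is set (each stream event exposes the chunk under `.data`, as the code above relies on):

import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Stream a chat completion and collect the raw events, mirroring mistral_request(stream=True).
events = list(
    client.chat.stream(
        model="mistral-small-latest",
        messages=[{"role": "user", "content": "Say hello in one word."}],
    )
)

# Concatenate the streamed deltas the same way mistral_stream_chat does.
text = "".join(
    event.data.choices[0].delta.content or ""
    for event in events
    if event.data.choices
)
print(text)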
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "promptlayer"
-version = "1.0.31"
+version = "1.0.33"
 description = "PromptLayer is a platform for prompt engineering and tracks your LLM requests."
 authors = ["Magniv <hello@magniv.io>"]
 license = "Apache-2.0"
File without changes
File without changes