promptlayer 1.0.41__tar.gz → 1.0.43__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of promptlayer might be problematic. Click here for more details.

@@ -1,11 +1,11 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: promptlayer
3
- Version: 1.0.41
3
+ Version: 1.0.43
4
4
  Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
5
5
  License: Apache-2.0
6
6
  Author: Magniv
7
7
  Author-email: hello@magniv.io
8
- Requires-Python: >=3.8.1,<4.0
8
+ Requires-Python: >=3.9,<4.0
9
9
  Classifier: License :: OSI Approved :: Apache Software License
10
10
  Classifier: Programming Language :: Python :: 3
11
11
  Classifier: Programming Language :: Python :: 3.9
@@ -1,4 +1,4 @@
1
1
  from .promptlayer import AsyncPromptLayer, PromptLayer
2
2
 
3
- __version__ = "1.0.41"
3
+ __version__ = "1.0.43"
4
4
  __all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]
@@ -35,8 +35,8 @@ class PromptLayerBase(object):
35
35
  or inspect.ismethod(attr)
36
36
  or str(type(attr)) == "<class 'anthropic.resources.completions.Completions'>"
37
37
  or str(type(attr)) == "<class 'anthropic.resources.completions.AsyncCompletions'>"
38
- or str(type(attr)) == "<class 'anthropic.resources.messages.Messages'>"
39
- or str(type(attr)) == "<class 'anthropic.resources.messages.AsyncMessages'>"
38
+ or str(type(attr)) == "<class 'anthropic.resources.messages.messages.Messages'>"
39
+ or str(type(attr)) == "<class 'anthropic.resources.messages.messages.AsyncMessages'>"
40
40
  or re.match(r"<class 'openai\.resources.*'>", str(type(attr)))
41
41
  )
42
42
  ):
@@ -15,6 +15,9 @@ from promptlayer.utils import (
15
15
  aanthropic_stream_completion,
16
16
  aanthropic_stream_message,
17
17
  aazure_openai_request,
18
+ agoogle_request,
19
+ agoogle_stream_chat,
20
+ agoogle_stream_completion,
18
21
  amistral_request,
19
22
  amistral_stream_chat,
20
23
  anthropic_request,
@@ -24,6 +27,9 @@ from promptlayer.utils import (
24
27
  aopenai_stream_chat,
25
28
  aopenai_stream_completion,
26
29
  azure_openai_request,
30
+ google_request,
31
+ google_stream_chat,
32
+ google_stream_completion,
27
33
  mistral_request,
28
34
  mistral_stream_chat,
29
35
  openai_request,
@@ -72,6 +78,16 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
72
78
  "stream_function": None,
73
79
  },
74
80
  },
81
+ "google": {
82
+ "chat": {
83
+ "function_name": "google.convo.send_message",
84
+ "stream_function": google_stream_chat,
85
+ },
86
+ "completion": {
87
+ "function_name": "google.model.generate_content",
88
+ "stream_function": google_stream_completion,
89
+ },
90
+ },
75
91
  }
76
92
 
77
93
 
@@ -80,6 +96,7 @@ MAP_PROVIDER_TO_FUNCTION = {
80
96
  "anthropic": anthropic_request,
81
97
  "openai.azure": azure_openai_request,
82
98
  "mistral": mistral_request,
99
+ "google": google_request,
83
100
  }
84
101
 
85
102
  AMAP_PROVIDER_TO_FUNCTION_NAME = {
@@ -123,6 +140,16 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
123
140
  "stream_function": None,
124
141
  },
125
142
  },
143
+ "google": {
144
+ "chat": {
145
+ "function_name": "google.convo.send_message",
146
+ "stream_function": agoogle_stream_chat,
147
+ },
148
+ "completion": {
149
+ "function_name": "google.model.generate_content",
150
+ "stream_function": agoogle_stream_completion,
151
+ },
152
+ },
126
153
  }
127
154
 
128
155
 
@@ -131,6 +158,7 @@ AMAP_PROVIDER_TO_FUNCTION = {
131
158
  "anthropic": aanthropic_request,
132
159
  "openai.azure": aazure_openai_request,
133
160
  "mistral": amistral_request,
161
+ "google": agoogle_request,
134
162
  }
135
163
 
136
164
 
@@ -8,17 +8,7 @@ import sys
8
8
  import types
9
9
  from copy import deepcopy
10
10
  from enum import Enum
11
- from typing import (
12
- Any,
13
- AsyncGenerator,
14
- AsyncIterable,
15
- Callable,
16
- Dict,
17
- Generator,
18
- List,
19
- Optional,
20
- Union,
21
- )
11
+ from typing import Any, AsyncGenerator, AsyncIterable, Callable, Dict, Generator, List, Optional, Union
22
12
 
23
13
  import httpx
24
14
  import requests
@@ -1594,11 +1584,7 @@ async def amistral_request(
1594
1584
 
1595
1585
 
1596
1586
  def mistral_stream_chat(results: list):
1597
- from openai.types.chat import (
1598
- ChatCompletion,
1599
- ChatCompletionMessage,
1600
- ChatCompletionMessageToolCall,
1601
- )
1587
+ from openai.types.chat import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall
1602
1588
  from openai.types.chat.chat_completion import Choice
1603
1589
  from openai.types.chat.chat_completion_message_tool_call import Function
1604
1590
 
@@ -1659,11 +1645,7 @@ def mistral_stream_chat(results: list):
1659
1645
 
1660
1646
 
1661
1647
  async def amistral_stream_chat(generator: AsyncIterable[Any]) -> Any:
1662
- from openai.types.chat import (
1663
- ChatCompletion,
1664
- ChatCompletionMessage,
1665
- ChatCompletionMessageToolCall,
1666
- )
1648
+ from openai.types.chat import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall
1667
1649
  from openai.types.chat.chat_completion import Choice
1668
1650
  from openai.types.chat.chat_completion_message_tool_call import Function
1669
1651
 
@@ -1726,3 +1708,124 @@ async def amistral_stream_chat(generator: AsyncIterable[Any]) -> Any:
1726
1708
  response.choices[0].message.content = content
1727
1709
  response.choices[0].message.tool_calls = tool_calls
1728
1710
  return response
1711
+
1712
+
1713
def google_chat_request(client, **kwargs):
    """Run a chat-style request against the Google GenAI client.

    Expects in ``kwargs``:
        model (str): model name, default "gemini-2.0-flash".
        history (list[dict]): full conversation turns, each a ``Content`` kwargs
            dict; the LAST entry is the message to send now.
        generation_config (dict): passed to ``chats.create`` as ``config``.
        stream (bool): when True, return the streaming iterator instead of a
            final response.

    Returns the SDK's ``GenerateContentResponse`` (or its stream iterator).
    """
    from google.genai.chats import Content

    stream = kwargs.pop("stream", False)
    model = kwargs.get("model", "gemini-2.0-flash")
    history = [Content(**item) for item in kwargs.get("history", [])]
    generation_config = kwargs.get("generation_config", {})
    # The message being sent is history[-1]; seed the chat with everything
    # BEFORE it. Passing the full history and then re-sending the last entry
    # made the model see the final user message twice.
    chat = client.chats.create(model=model, history=history[:-1], config=generation_config)
    # NOTE(review): with an empty history this sends message=None, which the
    # SDK will reject — callers appear to always supply at least one turn.
    last_message = history[-1] if history else None
    if stream:
        return chat.send_message_stream(message=last_message)
    return chat.send_message(message=last_message)
1725
+
1726
+
1727
def google_completions_request(client, **kwargs):
    """Run a one-shot ``generate_content`` call on the Google GenAI client.

    Reads ``model`` (default "gemini-2.0-flash"), ``contents`` (default []),
    ``generation_config`` (default {}) and ``stream`` from ``kwargs``.
    Returns the response object, or the chunk iterator when ``stream`` is True.
    """
    generation_config = kwargs.pop("generation_config", {})
    wants_stream = kwargs.pop("stream", False)
    call_args = {
        "model": kwargs.get("model", "gemini-2.0-flash"),
        "contents": kwargs.get("contents", []),
        "config": generation_config,
    }
    if wants_stream:
        return client.models.generate_content_stream(**call_args)
    return client.models.generate_content(**call_args)
1735
+
1736
+
1737
def map_google_stream_response(results: list):
    """Collapse a list of streamed Google GenAI chunks into one response.

    Concatenates the text of ``candidates[0].content.parts[0]`` across all
    chunks and returns a copy of the LAST chunk carrying the full text, so
    finish metadata reflects the end of the stream. Returns an empty
    ``GenerateContentResponse`` when ``results`` is empty.
    """
    from google.genai.chats import GenerateContentResponse

    if not results:
        # Empty stream: nothing to merge, hand back a blank response.
        return GenerateContentResponse()
    content = ""
    for result in results:
        # f-string accumulation intentionally matches the original behavior,
        # including rendering a None part text as the string "None".
        content = f"{content}{result.candidates[0].content.parts[0].text}"
    response = results[-1].model_copy()
    response.candidates[0].content.parts[0].text = content
    return response
1751
+
1752
+
1753
def google_stream_chat(results: list):
    """Merge streamed Google chat chunks into a single response object."""
    merged = map_google_stream_response(results)
    return merged
1755
+
1756
+
1757
def google_stream_completion(results: list):
    """Merge streamed Google completion chunks into a single response object."""
    merged = map_google_stream_response(results)
    return merged
1759
+
1760
+
1761
# Maps a prompt-template type ("chat" / "completion") to the synchronous
# Google GenAI request helper used by google_request().
MAP_TYPE_TO_GOOGLE_FUNCTION = {
    "chat": google_chat_request,
    "completion": google_completions_request,
}
1765
+
1766
+
1767
def google_request(request: GetPromptTemplateResponse, **kwargs):
    """Dispatch a synchronous Google GenAI call for the given prompt template.

    The template's ``type`` ("chat" or "completion") selects the handler from
    MAP_TYPE_TO_GOOGLE_FUNCTION; a KeyError is raised for unknown types.
    """
    from google import genai

    template_type = request["prompt_template"]["type"]
    handler = MAP_TYPE_TO_GOOGLE_FUNCTION[template_type]
    return handler(genai.Client(), **kwargs)
1773
+
1774
+
1775
async def agoogle_chat_request(client, **kwargs):
    """Async variant of google_chat_request using ``client.aio``.

    Expects the same ``kwargs`` (model, history, generation_config, stream);
    the LAST ``history`` entry is the message to send now.
    """
    from google.genai.chats import Content

    stream = kwargs.pop("stream", False)
    model = kwargs.get("model", "gemini-2.0-flash")
    history = [Content(**item) for item in kwargs.get("history", [])]
    generation_config = kwargs.get("generation_config", {})
    # Seed the chat with everything BEFORE the message being sent; passing the
    # full history and then re-sending history[-1] made the model see the
    # final user message twice (same fix as the sync google_chat_request).
    chat = client.aio.chats.create(model=model, history=history[:-1], config=generation_config)
    # NOTE(review): with an empty history this sends message=None, which the
    # SDK will reject — callers appear to always supply at least one turn.
    last_message = history[-1] if history else None
    if stream:
        return await chat.send_message_stream(message=last_message)
    return await chat.send_message(message=last_message)
1787
+
1788
+
1789
async def agoogle_completions_request(client, **kwargs):
    """Async one-shot ``generate_content`` call via ``client.aio``.

    Reads ``model`` (default "gemini-2.0-flash"), ``contents`` (default []),
    ``generation_config`` (default {}) and ``stream`` from ``kwargs``.
    Returns the awaited response, or the stream iterator when ``stream`` is True.
    """
    generation_config = kwargs.pop("generation_config", {})
    wants_stream = kwargs.pop("stream", False)
    call_args = {
        "model": kwargs.get("model", "gemini-2.0-flash"),
        "contents": kwargs.get("contents", []),
        "config": generation_config,
    }
    if wants_stream:
        return await client.aio.models.generate_content_stream(**call_args)
    return await client.aio.models.generate_content(**call_args)
1797
+
1798
+
1799
# Async counterpart of MAP_TYPE_TO_GOOGLE_FUNCTION: maps a prompt-template
# type ("chat" / "completion") to the coroutine used by agoogle_request().
AMAP_TYPE_TO_GOOGLE_FUNCTION = {
    "chat": agoogle_chat_request,
    "completion": agoogle_completions_request,
}
1803
+
1804
+
1805
async def agoogle_request(request: GetPromptTemplateResponse, **kwargs):
    """Dispatch an async Google GenAI call for the given prompt template.

    The template's ``type`` ("chat" or "completion") selects the coroutine from
    AMAP_TYPE_TO_GOOGLE_FUNCTION; a KeyError is raised for unknown types.
    """
    from google import genai

    template_type = request["prompt_template"]["type"]
    handler = AMAP_TYPE_TO_GOOGLE_FUNCTION[template_type]
    return await handler(genai.Client(), **kwargs)
1811
+
1812
+
1813
async def amap_google_stream_response(generator: AsyncIterable[Any]):
    """Collapse an async stream of Google GenAI chunks into one response.

    Concatenates the text of ``candidates[0].content.parts[0]`` across all
    chunks and returns a copy of the LAST chunk carrying the full text.
    Returns an empty ``GenerateContentResponse`` for an empty stream — the
    original raised UnboundLocalError there, unlike the sync
    map_google_stream_response which guarded the empty case.
    """
    from google.genai.chats import GenerateContentResponse

    content = ""
    last_result = None
    async for result in generator:
        content = f"{content}{result.candidates[0].content.parts[0].text}"
        last_result = result
    if last_result is None:
        # Empty stream: nothing to merge, hand back a blank response.
        return GenerateContentResponse()
    response = last_result.model_copy()
    response.candidates[0].content.parts[0].text = content
    return response
1824
+
1825
+
1826
async def agoogle_stream_chat(generator: AsyncIterable[Any]):
    """Merge an async stream of Google chat chunks into a single response."""
    merged = await amap_google_stream_response(generator)
    return merged
1828
+
1829
+
1830
async def agoogle_stream_completion(generator: AsyncIterable[Any]):
    """Merge an async stream of Google completion chunks into a single response."""
    merged = await amap_google_stream_response(generator)
    return merged
@@ -1,13 +1,13 @@
1
1
  [tool.poetry]
2
2
  name = "promptlayer"
3
- version = "1.0.41"
3
+ version = "1.0.43"
4
4
  description = "PromptLayer is a platform for prompt engineering and tracks your LLM requests."
5
5
  authors = ["Magniv <hello@magniv.io>"]
6
6
  license = "Apache-2.0"
7
7
  readme = "README.md"
8
8
 
9
9
  [tool.poetry.dependencies]
10
- python = ">=3.8.1,<4.0"
10
+ python = ">=3.9,<4.0"
11
11
  requests = "^2.31.0"
12
12
  opentelemetry-api = "^1.26.0"
13
13
  opentelemetry-sdk = "^1.26.0"
@@ -20,9 +20,9 @@ nest-asyncio = "^1.6.0"
20
20
  behave = "^1.2.6"
21
21
  pytest = "^8.2.0"
22
22
  pytest-asyncio = "^0.23.6"
23
- anyio = "^4.3.0"
24
23
  anthropic = "^0.25.8"
25
24
  openai = "^1.60.1"
25
+ google-genai = "^1.5.0"
26
26
 
27
27
  [build-system]
28
28
  requires = ["poetry-core"]
File without changes
File without changes