promptlayer 1.0.67__tar.gz → 1.0.69__tar.gz

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.

Files changed (22)
  1. {promptlayer-1.0.67 → promptlayer-1.0.69}/PKG-INFO +4 -2
  2. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/__init__.py +1 -1
  3. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/promptlayer.py +3 -3
  4. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/promptlayer_mixins.py +51 -5
  5. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/streaming/__init__.py +7 -1
  6. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/streaming/blueprint_builder.py +129 -3
  7. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/streaming/response_handlers.py +256 -0
  8. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/streaming/stream_processor.py +6 -1
  9. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/templates.py +4 -4
  10. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/types/prompt_template.py +3 -0
  11. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/utils.py +38 -12
  12. {promptlayer-1.0.67 → promptlayer-1.0.69}/pyproject.toml +1 -1
  13. {promptlayer-1.0.67 → promptlayer-1.0.69}/LICENSE +0 -0
  14. {promptlayer-1.0.67 → promptlayer-1.0.69}/README.md +0 -0
  15. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/groups/__init__.py +0 -0
  16. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/groups/groups.py +0 -0
  17. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/promptlayer_base.py +0 -0
  18. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/span_exporter.py +0 -0
  19. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/track/__init__.py +0 -0
  20. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/track/track.py +0 -0
  21. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/types/__init__.py +0 -0
  22. {promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/types/request_log.py +0 -0

{promptlayer-1.0.67 → promptlayer-1.0.69}/PKG-INFO

@@ -1,8 +1,9 @@
- Metadata-Version: 2.3
+ Metadata-Version: 2.4
  Name: promptlayer
- Version: 1.0.67
+ Version: 1.0.69
  Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
  License: Apache-2.0
+ License-File: LICENSE
  Author: Magniv
  Author-email: hello@magniv.io
  Requires-Python: >=3.9,<4.0
@@ -13,6 +14,7 @@ Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: 3.13
+ Classifier: Programming Language :: Python :: 3.14
  Requires-Dist: ably (>=2.0.11,<3.0.0)
  Requires-Dist: aiohttp (>=3.10.10,<4.0.0)
  Requires-Dist: httpx (>=0.28.1,<0.29.0)

{promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/__init__.py

@@ -1,4 +1,4 @@
  from .promptlayer import AsyncPromptLayer, PromptLayer

- __version__ = "1.0.67"
+ __version__ = "1.0.69"
  __all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]

{promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/promptlayer.py

@@ -599,10 +599,10 @@ class AsyncPromptLayer(PromptLayerMixin):
      function_kwargs=llm_data["function_kwargs"],
  )

- if isinstance(response, dict):
-     request_response = response
- else:
+ if hasattr(response, "model_dump"):
      request_response = response.model_dump(mode="json")
+ else:
+     request_response = response

  if stream:
      track_request_callable = await self._create_track_request_callable(
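
Note on this change: the serialization guard is inverted so that any response object exposing model_dump() (the Pydantic-style objects returned by the OpenAI and Anthropic SDKs, including Responses API objects) is serialized to JSON, while plain dicts fall through unchanged. A minimal sketch of the same duck-typing check, with an illustrative helper name not taken from the package:

    from typing import Any, Dict

    def to_request_response(response: Any) -> Dict[str, Any]:
        # Pydantic v2 models expose model_dump(); serialize those to JSON-safe dicts.
        if hasattr(response, "model_dump"):
            return response.model_dump(mode="json")
        # Anything else (e.g. an already-plain dict) is passed through unchanged.
        return response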

{promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/promptlayer_mixins.py

@@ -19,12 +19,14 @@ from promptlayer.streaming import (
      amistral_stream_chat,
      anthropic_stream_completion,
      anthropic_stream_message,
+     aopenai_responses_stream_chat,
      aopenai_stream_chat,
      aopenai_stream_completion,
      bedrock_stream_message,
      google_stream_chat,
      google_stream_completion,
      mistral_stream_chat,
+     openai_responses_stream_chat,
      openai_stream_chat,
      openai_stream_completion,
  )
@@ -48,7 +50,7 @@ from promptlayer.utils import (
  )

  MAP_PROVIDER_TO_FUNCTION_NAME = {
-     "openai": {
+     "openai:chat-completions": {
          "chat": {
              "function_name": "openai.chat.completions.create",
              "stream_function": openai_stream_chat,
@@ -58,6 +60,16 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
              "stream_function": openai_stream_completion,
          },
      },
+     "openai:responses": {
+         "chat": {
+             "function_name": "openai.responses.create",
+             "stream_function": openai_responses_stream_chat,
+         },
+         "completion": {
+             "function_name": "openai.responses.create",
+             "stream_function": openai_responses_stream_chat,
+         },
+     },
      "anthropic": {
          "chat": {
              "function_name": "anthropic.messages.create",
@@ -68,7 +80,7 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
              "stream_function": anthropic_stream_completion,
          },
      },
-     "openai.azure": {
+     "openai.azure:chat-completions": {
          "chat": {
              "function_name": "openai.AzureOpenAI.chat.completions.create",
              "stream_function": openai_stream_chat,
@@ -78,6 +90,16 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
              "stream_function": openai_stream_completion,
          },
      },
+     "openai.azure:responses": {
+         "chat": {
+             "function_name": "openai.AzureOpenAI.responses.create",
+             "stream_function": openai_responses_stream_chat,
+         },
+         "completion": {
+             "function_name": "openai.AzureOpenAI.responses.create",
+             "stream_function": openai_responses_stream_chat,
+         },
+     },
      "mistral": {
          "chat": {
              "function_name": "mistral.client.chat",
@@ -133,7 +155,7 @@ MAP_PROVIDER_TO_FUNCTION = {
  }

  AMAP_PROVIDER_TO_FUNCTION_NAME = {
-     "openai": {
+     "openai:chat-completions": {
          "chat": {
              "function_name": "openai.chat.completions.create",
              "stream_function": aopenai_stream_chat,
@@ -143,6 +165,16 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
              "stream_function": aopenai_stream_completion,
          },
      },
+     "openai:responses": {
+         "chat": {
+             "function_name": "openai.responses.create",
+             "stream_function": aopenai_responses_stream_chat,
+         },
+         "completion": {
+             "function_name": "openai.responses.create",
+             "stream_function": aopenai_responses_stream_chat,
+         },
+     },
      "anthropic": {
          "chat": {
              "function_name": "anthropic.messages.create",
@@ -153,7 +185,7 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
              "stream_function": aanthropic_stream_completion,
          },
      },
-     "openai.azure": {
+     "openai.azure:chat-completions": {
          "chat": {
              "function_name": "openai.AzureOpenAI.chat.completions.create",
              "stream_function": aopenai_stream_chat,
@@ -163,6 +195,16 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
              "stream_function": aopenai_stream_completion,
          },
      },
+     "openai.azure:responses": {
+         "chat": {
+             "function_name": "openai.AzureOpenAI.responses.create",
+             "stream_function": aopenai_responses_stream_chat,
+         },
+         "completion": {
+             "function_name": "openai.AzureOpenAI.responses.create",
+             "stream_function": aopenai_responses_stream_chat,
+         },
+     },
      "mistral": {
          "chat": {
              "function_name": "mistral.client.chat",
@@ -275,6 +317,7 @@ class PromptLayerMixin:
  function_kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
  function_kwargs["stream"] = stream
  provider = prompt_blueprint_model["provider"]
+ api_type = prompt_blueprint_model["api_type"]

  if custom_provider := prompt_blueprint.get("custom_provider"):
      provider = custom_provider["client"]
@@ -285,7 +328,7 @@
  elif provider_base_url := prompt_blueprint.get("provider_base_url"):
      client_kwargs["base_url"] = provider_base_url["url"]

- if stream and provider in ["openai", "openai.azure"]:
+ if stream and provider in ["openai", "openai.azure"] and api_type == "chat-completions":
      function_kwargs["stream_options"] = {"include_usage": True}

  provider_function_name = provider
@@ -295,6 +338,9 @@
  elif "claude" in prompt_blueprint_model["name"]:
      provider_function_name = "anthropic"

+ if provider_function_name in ("openai", "openai.azure"):
+     provider_function_name = f"{provider_function_name}:{api_type}"
+
  if is_async:
      config = AMAP_PROVIDER_TO_FUNCTION_NAME[provider_function_name][prompt_template["type"]]
      request_function = AMAP_PROVIDER_TO_FUNCTION[provider]
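
The provider maps above gain ":chat-completions" and ":responses" keyed entries, and the mixin now appends the blueprint model's api_type to the provider name before the lookup. A rough sketch of that key construction and lookup, assuming the map shape shown in this diff (the helper name is illustrative):

    def resolve_stream_config(provider: str, api_type: str, template_type: str, provider_map: dict) -> dict:
        # Only the OpenAI-compatible providers are split by API flavor;
        # "anthropic", "mistral", etc. keep their plain keys.
        key = f"{provider}:{api_type}" if provider in ("openai", "openai.azure") else provider
        return provider_map[key][template_type]

    # resolve_stream_config("openai", "responses", "chat", MAP_PROVIDER_TO_FUNCTION_NAME)
    # -> {"function_name": "openai.responses.create", "stream_function": openai_responses_stream_chat}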

{promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/streaming/__init__.py

@@ -9,6 +9,7 @@ from .blueprint_builder import (
      build_prompt_blueprint_from_anthropic_event,
      build_prompt_blueprint_from_google_event,
      build_prompt_blueprint_from_openai_chunk,
+     build_prompt_blueprint_from_openai_responses_event,
  )
  from .response_handlers import (
      aanthropic_stream_completion,
@@ -19,12 +20,14 @@ from .response_handlers import (
      amistral_stream_chat,
      anthropic_stream_completion,
      anthropic_stream_message,
+     aopenai_responses_stream_chat,
      aopenai_stream_chat,
      aopenai_stream_completion,
      bedrock_stream_message,
      google_stream_chat,
      google_stream_completion,
      mistral_stream_chat,
+     openai_responses_stream_chat,
      openai_stream_chat,
      openai_stream_completion,
  )
@@ -34,13 +37,16 @@ from .stream_processor import (
  )

  __all__ = [
+     "build_prompt_blueprint_from_anthropic_event",
      "build_prompt_blueprint_from_google_event",
      "build_prompt_blueprint_from_openai_chunk",
-     "build_prompt_blueprint_from_anthropic_event",
+     "build_prompt_blueprint_from_openai_responses_event",
      "stream_response",
      "astream_response",
      "openai_stream_chat",
      "aopenai_stream_chat",
+     "openai_responses_stream_chat",
+     "aopenai_responses_stream_chat",
      "anthropic_stream_message",
      "aanthropic_stream_message",
      "openai_stream_completion",

{promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/streaming/blueprint_builder.py

@@ -8,14 +8,19 @@ and streaming events for different providers (OpenAI, Anthropic, etc.)
  from typing import Any, Dict, List, Optional


- def _create_tool_call(tool_id: str, function_name: str, arguments: Any) -> Dict[str, Any]:
+ def _create_tool_call(call_id: str, function_name: str, arguments: Any, tool_id: str = None) -> Dict[str, Any]:
      """Create a standardized tool call structure"""
-     return {"id": tool_id, "type": "function", "function": {"name": function_name, "arguments": arguments}}
+     tool_call = {"id": call_id, "type": "function", "function": {"name": function_name, "arguments": arguments}}
+     if tool_id:
+         tool_call["tool_id"] = tool_id
+     return tool_call


- def _create_content_item(content_type: str, **kwargs) -> Dict[str, Any]:
+ def _create_content_item(content_type: str, item_id: str = None, **kwargs) -> Dict[str, Any]:
      """Create a standardized content item"""
      content_item = {"type": content_type}
+     if item_id:
+         content_item["id"] = item_id
      content_item.update(kwargs)
      return content_item

@@ -67,6 +72,127 @@ def build_prompt_blueprint_from_openai_chunk(chunk, metadata):
      return _build_prompt_blueprint(assistant_message, metadata)


+ def build_prompt_blueprint_from_openai_responses_event(event, metadata):
+     """Build a prompt blueprint from an OpenAI responses event"""
+
+     assistant_content = []
+     tool_calls = []
+
+     event_dict = event.model_dump() if hasattr(event, "model_dump") else event
+     event_type = event_dict.get("type")
+
+     if event_type == "response.reasoning_summary_text.delta":
+         delta = event_dict.get("delta", "")
+         item_id = event_dict.get("item_id")
+         if delta:
+             assistant_content.append(_create_content_item("thinking", item_id=item_id, thinking=delta, signature=None))
+
+     elif event_type == "response.reasoning_summary_text.done":
+         final_text = event_dict.get("text", "")
+         item_id = event_dict.get("item_id")
+         if final_text:
+             assistant_content.append(
+                 _create_content_item("thinking", item_id=item_id, thinking=final_text, signature=None)
+             )
+
+     elif event_type == "response.reasoning_summary_part.added":
+         part = event_dict.get("part", {})
+         item_id = event_dict.get("item_id")
+         if part.get("type") == "summary_text":
+             text = part.get("text", "")
+             assistant_content.append(_create_content_item("thinking", item_id=item_id, thinking=text, signature=None))
+
+     elif event_type == "response.reasoning_summary_part.done":
+         part = event_dict.get("part", {})
+         item_id = event_dict.get("item_id")
+         if part.get("type") == "summary_text":
+             text = part.get("text", "")
+             if text:
+                 assistant_content.append(
+                     _create_content_item("thinking", item_id=item_id, thinking=text, signature=None)
+                 )
+
+     elif event_type == "response.function_call_arguments.delta":
+         item_id = event_dict.get("item_id")
+         delta = event_dict.get("delta", "")
+         if delta:
+             tool_calls.append(_create_tool_call("", "", delta, tool_id=item_id))
+
+     elif event_type == "response.function_call_arguments.done":
+         item_id = event_dict.get("item_id")
+         final_arguments = event_dict.get("arguments", "")
+         if final_arguments:
+             tool_calls.append(_create_tool_call("", "", final_arguments, tool_id=item_id))
+
+     elif event_type == "response.output_item.added":
+         item = event_dict.get("item", {})
+         item_type = item.get("type")
+         item_id = item.get("id")
+
+         if item_type == "reasoning":
+             assistant_content.append(_create_content_item("thinking", item_id=item_id, thinking="", signature=None))
+         elif item_type == "function_call":
+             tool_calls.append(_create_tool_call(item.get("call_id", ""), item.get("name", ""), "", tool_id=item_id))
+         elif item_type == "message":
+             assistant_content.append(_create_content_item("text", item_id=item_id, text="[Message started]"))
+
+     elif event_type == "response.content_part.added":
+         item_id = event_dict.get("item_id")
+         part = event_dict.get("part", {})
+         part_type = part.get("type", "output_text")
+
+         if part_type == "output_text":
+             text = part.get("text", "")
+             assistant_content.append(
+                 _create_content_item("text", item_id=item_id, text=text if text else "[Content part added]")
+             )
+
+     elif event_type == "response.output_text.delta":
+         item_id = event_dict.get("item_id")
+         delta_text = event_dict.get("delta", "")
+         if delta_text:
+             assistant_content.append(_create_content_item("text", item_id=item_id, text=delta_text))
+
+     elif event_type == "response.output_text.done":
+         item_id = event_dict.get("item_id")
+         final_text = event_dict.get("text", "")
+         if final_text:
+             assistant_content.append(_create_content_item("text", item_id=item_id, text=final_text))
+
+     elif event_type == "response.output_item.done":
+         item = event_dict.get("item", {})
+         item_type = item.get("type")
+         item_id = item.get("id")
+
+         if item_type == "reasoning":
+             summary = item.get("summary", [])
+             for summary_part in summary:
+                 if summary_part.get("type") == "summary_text":
+                     text = summary_part.get("text", "")
+                     if text:
+                         assistant_content.append(
+                             _create_content_item("thinking", item_id=item_id, thinking=text, signature=None)
+                         )
+
+         elif item_type == "function_call":
+             tool_calls.append(
+                 _create_tool_call(
+                     item.get("call_id", ""), item.get("name", ""), item.get("arguments", ""), tool_id=item_id
+                 )
+             )
+
+         elif item_type == "message":
+             content = item.get("content", [])
+             for content_part in content:
+                 if content_part.get("type") == "output_text":
+                     text = content_part.get("text", "")
+                     if text:
+                         assistant_content.append(_create_content_item("text", item_id=item_id, text=text))
+
+     assistant_message = _build_assistant_message(assistant_content, tool_calls or None)
+     return _build_prompt_blueprint(assistant_message, metadata)
+
+
  def build_prompt_blueprint_from_anthropic_event(event, metadata):
      """Build a prompt blueprint from an Anthropic stream event"""
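
The new builder is event-type driven: each Responses streaming event contributes text or thinking content items and/or tool calls, keyed by the event's item_id. A hedged example of feeding it a single text delta event; the event and metadata dicts below are hand-written to mirror the shapes handled above, not captured from a real stream:

    event = {
        "type": "response.output_text.delta",
        "item_id": "msg_123",
        "delta": "Hello",
    }
    metadata = {"model": {"name": "gpt-4o", "provider": "openai", "api_type": "responses"}}

    # assistant_content becomes [{"type": "text", "id": "msg_123", "text": "Hello"}],
    # which is then wrapped into an assistant message and a prompt blueprint.
    blueprint = build_prompt_blueprint_from_openai_responses_event(event, metadata)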
 

{promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/streaming/response_handlers.py

@@ -155,6 +155,262 @@ async def aopenai_stream_chat(generator: AsyncIterable[Any]) -> Any:
      return response


+ def _initialize_openai_response_data():
+     """Initialize the response data structure for OpenAI responses"""
+     return {
+         "id": None,
+         "object": "response",
+         "created_at": None,
+         "status": None,
+         "error": None,
+         "incomplete_details": None,
+         "instructions": None,
+         "max_output_tokens": None,
+         "model": None,
+         "output": [],
+         "parallel_tool_calls": True,
+         "previous_response_id": None,
+         "reasoning": {"effort": None, "summary": None},
+         "store": True,
+         "temperature": 1,
+         "text": {"format": {"type": "text"}},
+         "tool_choice": "auto",
+         "tools": [],
+         "top_p": 1,
+         "truncation": "disabled",
+         "usage": None,
+         "user": None,
+         "metadata": {},
+     }
+
+
+ def _process_openai_response_event(chunk_dict, response_data, current_items):
+     """Process a single OpenAI response event and update the response data"""
+     event_type = chunk_dict.get("type")
+     has_reasoning = False
+
+     if event_type == "response.created":
+         response_info = chunk_dict.get("response", {})
+         response_data["id"] = response_info.get("id")
+         response_data["created_at"] = response_info.get("created_at")
+         response_data["model"] = response_info.get("model")
+         response_data["status"] = response_info.get("status")
+         response_data["parallel_tool_calls"] = response_info.get("parallel_tool_calls", True)
+         response_data["temperature"] = response_info.get("temperature", 1)
+         response_data["tool_choice"] = response_info.get("tool_choice", "auto")
+         response_data["tools"] = response_info.get("tools", [])
+         response_data["top_p"] = response_info.get("top_p", 1)
+         response_data["truncation"] = response_info.get("truncation", "disabled")
+         response_data["max_output_tokens"] = response_info.get("max_output_tokens")
+         response_data["previous_response_id"] = response_info.get("previous_response_id")
+         response_data["store"] = response_info.get("store", True)
+         response_data["user"] = response_info.get("user")
+         response_data["metadata"] = response_info.get("metadata", {})
+
+         text_config = response_info.get("text", {})
+         if text_config:
+             response_data["text"] = text_config
+
+         reasoning = response_info.get("reasoning", {})
+         if reasoning:
+             response_data["reasoning"] = reasoning
+             has_reasoning = True
+
+     elif event_type == "response.in_progress":
+         response_info = chunk_dict.get("response", {})
+         response_data["status"] = response_info.get("status")
+
+     elif event_type == "response.output_item.added":
+         item = chunk_dict.get("item", {})
+         item_id = item.get("id")
+         item_type = item.get("type")
+
+         if item_type == "reasoning":
+             current_items[item_id] = {
+                 "type": "reasoning",
+                 "id": item_id,
+                 "summary": [],
+                 "status": item.get("status", "in_progress"),
+             }
+             has_reasoning = True
+
+         elif item_type == "function_call":
+             current_items[item_id] = {
+                 "type": "function_call",
+                 "id": item_id,
+                 "call_id": item.get("call_id"),
+                 "name": item.get("name"),
+                 "arguments": "",
+                 "status": item.get("status", "in_progress"),
+             }
+
+         elif item_type == "message":
+             current_items[item_id] = {
+                 "type": "message",
+                 "id": item_id,
+                 "role": item.get("role", "assistant"),
+                 "content": [],
+                 "status": item.get("status", "in_progress"),
+             }
+
+     elif event_type == "response.reasoning_summary_part.added":
+         item_id = chunk_dict.get("item_id")
+         part = chunk_dict.get("part", {})
+
+         if item_id in current_items and current_items[item_id]["type"] == "reasoning":
+             summary_part = {"type": part.get("type", "summary_text"), "text": part.get("text", "")}
+             current_items[item_id]["summary"].append(summary_part)
+
+     elif event_type == "response.reasoning_summary_text.delta":
+         item_id = chunk_dict.get("item_id")
+         delta = chunk_dict.get("delta", "")
+         summary_index = chunk_dict.get("summary_index", 0)
+
+         if item_id in current_items and current_items[item_id]["type"] == "reasoning":
+             while len(current_items[item_id]["summary"]) <= summary_index:
+                 current_items[item_id]["summary"].append({"type": "summary_text", "text": ""})
+
+             current_items[item_id]["summary"][summary_index]["text"] += delta
+
+     elif event_type == "response.reasoning_summary_text.done":
+         item_id = chunk_dict.get("item_id")
+         final_text = chunk_dict.get("text", "")
+         summary_index = chunk_dict.get("summary_index", 0)
+
+         if item_id in current_items and current_items[item_id]["type"] == "reasoning":
+             while len(current_items[item_id]["summary"]) <= summary_index:
+                 current_items[item_id]["summary"].append({"type": "summary_text", "text": ""})
+
+             current_items[item_id]["summary"][summary_index]["text"] = final_text
+
+     elif event_type == "response.reasoning_summary_part.done":
+         item_id = chunk_dict.get("item_id")
+         part = chunk_dict.get("part", {})
+
+         if item_id in current_items and current_items[item_id]["type"] == "reasoning":
+             summary_index = chunk_dict.get("summary_index", 0)
+             if summary_index < len(current_items[item_id]["summary"]):
+                 current_items[item_id]["summary"][summary_index] = {
+                     "type": part.get("type", "summary_text"),
+                     "text": part.get("text", ""),
+                 }
+
+     elif event_type == "response.function_call_arguments.delta":
+         item_id = chunk_dict.get("item_id")
+         delta = chunk_dict.get("delta", "")
+
+         if item_id in current_items:
+             current_items[item_id]["arguments"] += delta
+
+     elif event_type == "response.function_call_arguments.done":
+         item_id = chunk_dict.get("item_id")
+         final_arguments = chunk_dict.get("arguments", "")
+
+         if item_id in current_items:
+             current_items[item_id]["arguments"] = final_arguments
+
+     elif event_type == "response.content_part.added":
+         part = chunk_dict.get("part", {})
+
+         message_item = None
+         for item in current_items.values():
+             if item.get("type") == "message":
+                 message_item = item
+                 break
+
+         if message_item:
+             content_part = {
+                 "type": part.get("type", "output_text"),
+                 "text": part.get("text", ""),
+                 "annotations": part.get("annotations", []),
+             }
+             message_item["content"].append(content_part)
+
+     elif event_type == "response.output_text.delta":
+         delta_text = chunk_dict.get("delta", "")
+
+         for item in current_items.values():
+             if item.get("type") == "message" and item.get("content"):
+                 if item["content"] and item["content"][-1].get("type") == "output_text":
+                     item["content"][-1]["text"] += delta_text
+                     break
+
+     elif event_type == "response.output_text.done":
+         final_text = chunk_dict.get("text", "")
+
+         for item in current_items.values():
+             if item.get("type") == "message" and item.get("content"):
+                 if item["content"] and item["content"][-1].get("type") == "output_text":
+                     item["content"][-1]["text"] = final_text
+                     break
+
+     elif event_type == "response.output_item.done":
+         item = chunk_dict.get("item", {})
+         item_id = item.get("id")
+
+         if item_id in current_items:
+             current_items[item_id]["status"] = item.get("status", "completed")
+
+             if item.get("type") == "reasoning":
+                 current_items[item_id].update({"summary": item.get("summary", current_items[item_id]["summary"])})
+             elif item.get("type") == "function_call":
+                 current_items[item_id].update(
+                     {
+                         "arguments": item.get("arguments", current_items[item_id]["arguments"]),
+                         "call_id": item.get("call_id", current_items[item_id]["call_id"]),
+                         "name": item.get("name", current_items[item_id]["name"]),
+                     }
+                 )
+             elif item.get("type") == "message":
+                 current_items[item_id].update(
+                     {
+                         "content": item.get("content", current_items[item_id]["content"]),
+                         "role": item.get("role", current_items[item_id]["role"]),
+                     }
+                 )
+
+             response_data["output"].append(current_items[item_id])
+
+     elif event_type == "response.completed":
+         response_info = chunk_dict.get("response", {})
+         response_data["status"] = response_info.get("status", "completed")
+         response_data["usage"] = response_info.get("usage")
+         response_data["output"] = response_info.get("output", response_data["output"])
+
+         if response_info.get("reasoning"):
+             response_data["reasoning"] = response_info["reasoning"]
+
+     return has_reasoning
+
+
+ def openai_responses_stream_chat(results: list):
+     """Process OpenAI Responses streaming chat results and return response"""
+     from openai.types.responses import Response
+
+     response_data = _initialize_openai_response_data()
+     current_items = {}
+
+     for chunk in results:
+         chunk_dict = chunk.model_dump()
+         _process_openai_response_event(chunk_dict, response_data, current_items)
+
+     return Response(**response_data)
+
+
+ async def aopenai_responses_stream_chat(generator: AsyncIterable[Any]) -> Any:
+     """Async version of openai_responses_stream_chat"""
+     from openai.types.responses import Response
+
+     response_data = _initialize_openai_response_data()
+     current_items = {}
+
+     async for chunk in generator:
+         chunk_dict = chunk.model_dump()
+         _process_openai_response_event(chunk_dict, response_data, current_items)
+
+     return Response(**response_data)
+
+
  def anthropic_stream_message(results: list):
      """Process Anthropic streaming message results and return response + blueprint"""
      from anthropic.types import Message, MessageStreamEvent, Usage
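
Both new handlers fold a Responses event stream into a single dict and validate it as openai.types.responses.Response: response.created seeds the top-level fields, the *.added / *.delta / *.done events accumulate items in current_items, response.output_item.done moves each finished item into output, and response.completed overwrites status, usage and output with the final values. A small usage sketch, assuming results is the list of events collected from a streaming Responses call:

    from openai import OpenAI

    client = OpenAI()
    stream = client.responses.create(model="gpt-4o", input="Say hi", stream=True)

    events = list(stream)                         # ResponseStreamEvent objects
    final = openai_responses_stream_chat(events)  # -> openai.types.responses.Response
    print(final.status, len(final.output))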
@@ -5,6 +5,7 @@ from .blueprint_builder import (
5
5
  build_prompt_blueprint_from_bedrock_event,
6
6
  build_prompt_blueprint_from_google_event,
7
7
  build_prompt_blueprint_from_openai_chunk,
8
+ build_prompt_blueprint_from_openai_responses_event,
8
9
  )
9
10
 
10
11
 
@@ -14,7 +15,11 @@ def _build_stream_blueprint(result: Any, metadata: Dict) -> Any:
14
15
  model_name = model_info.get("name", "")
15
16
 
16
17
  if provider == "openai" or provider == "openai.azure":
17
- return build_prompt_blueprint_from_openai_chunk(result, metadata)
18
+ api_type = model_info.get("api_type", "chat-completions") if metadata else "chat-completions"
19
+ if api_type == "chat-completions":
20
+ return build_prompt_blueprint_from_openai_chunk(result, metadata)
21
+ elif api_type == "responses":
22
+ return build_prompt_blueprint_from_openai_responses_event(result, metadata)
18
23
 
19
24
  elif provider == "google" or (provider == "vertexai" and model_name.startswith("gemini")):
20
25
  return build_prompt_blueprint_from_google_event(result, metadata)
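
For OpenAI providers the stream dispatcher now reads api_type out of the model metadata and defaults to "chat-completions", so existing chat-completions callers keep the old chunk-based path. Only the keys read above are assumed; the values below are illustrative:

    metadata = {
        "model": {
            "name": "gpt-4o",
            "api_type": "responses",  # when absent, falls back to "chat-completions"
        }
    }
    # _build_stream_blueprint(event, metadata) then routes to
    # build_prompt_blueprint_from_openai_responses_event(event, metadata).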

{promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/templates.py

@@ -20,8 +20,8 @@ class TemplateManager:
      def publish(self, body: PublishPromptTemplate):
          return publish_prompt_template(body, self.api_key)

-     def all(self, page: int = 1, per_page: int = 30):
-         return get_all_prompt_templates(page, per_page, self.api_key)
+     def all(self, page: int = 1, per_page: int = 30, release_label: str = None):
+         return get_all_prompt_templates(page, per_page, self.api_key, release_label)


  class AsyncTemplateManager:
@@ -31,5 +31,5 @@ class AsyncTemplateManager:
      async def get(self, prompt_name: str, params: Union[GetPromptTemplate, None] = None):
          return await aget_prompt_template(prompt_name, params, self.api_key)

-     async def all(self, page: int = 1, per_page: int = 30):
-         return await aget_all_prompt_templates(page, per_page, self.api_key)
+     async def all(self, page: int = 1, per_page: int = 30, release_label: str = None):
+         return await aget_all_prompt_templates(page, per_page, self.api_key, release_label)
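
The new release_label argument is threaded from both template managers into the list endpoint in utils.py below, so callers can restrict the listing to templates carrying a given release label. A hedged usage sketch; the API key and label are placeholders:

    from promptlayer import PromptLayer

    promptlayer_client = PromptLayer(api_key="pl_...")

    # Returns only prompt templates tagged with the "prod" release label.
    templates = promptlayer_client.templates.all(page=1, per_page=30, release_label="prod")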

{promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/types/prompt_template.py

@@ -21,12 +21,14 @@ class ImageUrl(TypedDict, total=False):
  class TextContent(TypedDict, total=False):
      type: Literal["text"]
      text: str
+     id: Union[str, None]


  class ThinkingContent(TypedDict, total=False):
      signature: Union[str, None]
      type: Literal["thinking"]
      thinking: str
+     id: Union[str, None]


  class ImageContent(TypedDict, total=False):
@@ -87,6 +89,7 @@ class UserMessage(TypedDict, total=False):

  class ToolCall(TypedDict, total=False):
      id: str
+     tool_id: Union[str, None]
      type: Literal["function"]
      function: FunctionCall
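
The optional id / tool_id fields carry the Responses API item identifiers through the blueprint types: content items keep the originating output item id, and tool calls keep it alongside the existing call id. An illustrative ToolCall value (the identifiers are made up):

    from promptlayer.types.prompt_template import ToolCall

    tool_call: ToolCall = {
        "id": "call_abc123",   # the function call's call_id
        "tool_id": "fc_987",   # the Responses output item id, newly optional
        "type": "function",
        "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
    }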
 

{promptlayer-1.0.67 → promptlayer-1.0.69}/promptlayer/utils.py

@@ -1193,13 +1193,16 @@ async def apublish_prompt_template(


  def get_all_prompt_templates(
-     page: int = 1, per_page: int = 30, api_key: str = None
+     page: int = 1, per_page: int = 30, api_key: str = None, release_label: str = None
  ) -> List[ListPromptTemplateResponse]:
      try:
+         params = {"page": page, "per_page": per_page}
+         if release_label:
+             params["release_label"] = release_label
          response = requests.get(
              f"{URL_API_PROMPTLAYER}/prompt-templates",
              headers={"X-API-KEY": api_key},
-             params={"page": page, "per_page": per_page},
+             params=params,
          )
          if response.status_code != 200:
              raise Exception(
@@ -1212,14 +1215,17 @@ def get_all_prompt_templates(


  async def aget_all_prompt_templates(
-     page: int = 1, per_page: int = 30, api_key: str = None
+     page: int = 1, per_page: int = 30, api_key: str = None, release_label: str = None
  ) -> List[ListPromptTemplateResponse]:
      try:
+         params = {"page": page, "per_page": per_page}
+         if release_label:
+             params["release_label"] = release_label
          async with _make_httpx_client() as client:
              response = await client.get(
                  f"{URL_API_PROMPTLAYER}/prompt-templates",
                  headers={"X-API-KEY": api_key},
-                 params={"page": page, "per_page": per_page},
+                 params=params,
              )

          if RAISE_FOR_STATUS:
@@ -1253,8 +1259,13 @@ def openai_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: d
      from openai import OpenAI

      client = OpenAI(**client_kwargs)
-     request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
-     return request_to_make(client, **function_kwargs)
+     api_type = prompt_blueprint["metadata"]["model"]["api_type"]
+
+     if api_type == "chat-completions":
+         request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
+         return request_to_make(client, **function_kwargs)
+     else:
+         return client.responses.create(**function_kwargs)


  async def aopenai_chat_request(client, **kwargs):
@@ -1275,16 +1286,26 @@ async def aopenai_request(prompt_blueprint: GetPromptTemplateResponse, client_kw
      from openai import AsyncOpenAI

      client = AsyncOpenAI(**client_kwargs)
-     request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
-     return await request_to_make(client, **function_kwargs)
+     api_type = prompt_blueprint["metadata"]["model"]["api_type"]
+
+     if api_type == "chat-completions":
+         request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
+         return await request_to_make(client, **function_kwargs)
+     else:
+         return await client.responses.create(**function_kwargs)


  def azure_openai_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
      from openai import AzureOpenAI

      client = AzureOpenAI(azure_endpoint=client_kwargs.pop("base_url", None))
-     request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
-     return request_to_make(client, **function_kwargs)
+     api_type = prompt_blueprint["metadata"]["model"]["api_type"]
+
+     if api_type == "chat-completions":
+         request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
+         return request_to_make(client, **function_kwargs)
+     else:
+         return client.responses.create(**function_kwargs)


  async def aazure_openai_request(
@@ -1293,8 +1314,13 @@ async def aazure_openai_request(
      from openai import AsyncAzureOpenAI

      client = AsyncAzureOpenAI(azure_endpoint=client_kwargs.pop("base_url", None))
-     request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
-     return await request_to_make(client, **function_kwargs)
+     api_type = prompt_blueprint["metadata"]["model"]["api_type"]
+
+     if api_type == "chat-completions":
+         request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
+         return await request_to_make(client, **function_kwargs)
+     else:
+         return await client.responses.create(**function_kwargs)


  def anthropic_chat_request(client, **kwargs):
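
All four OpenAI request helpers now branch on the blueprint's metadata.model.api_type: "chat-completions" keeps the existing MAP_TYPE_TO_OPENAI_FUNCTION lookup, anything else goes straight to client.responses.create. Roughly, the Responses branch amounts to the direct SDK call below; the function_kwargs contents come from the blueprint's llm_kwargs, and the values here are placeholders:

    from openai import OpenAI

    client = OpenAI()
    function_kwargs = {"model": "gpt-4o", "input": "Summarize this ticket", "stream": False}

    response = client.responses.create(**function_kwargs)
    print(response.output_text)  # convenience accessor for the concatenated output text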

{promptlayer-1.0.67 → promptlayer-1.0.69}/pyproject.toml

@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "promptlayer"
- version = "1.0.67"
+ version = "1.0.69"
  description = "PromptLayer is a platform for prompt engineering and tracks your LLM requests."
  authors = ["Magniv <hello@magniv.io>"]
  license = "Apache-2.0"