promptlayer-1.0.67-py3-none-any.whl → promptlayer-1.0.68-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of promptlayer might be problematic.

promptlayer/__init__.py CHANGED
@@ -1,4 +1,4 @@
  from .promptlayer import AsyncPromptLayer, PromptLayer

- __version__ = "1.0.67"
+ __version__ = "1.0.68"
  __all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]
promptlayer/promptlayer.py CHANGED
@@ -599,10 +599,10 @@ class AsyncPromptLayer(PromptLayerMixin):
              function_kwargs=llm_data["function_kwargs"],
          )

-         if isinstance(response, dict):
-             request_response = response
-         else:
+         if hasattr(response, "model_dump"):
              request_response = response.model_dump(mode="json")
+         else:
+             request_response = response

          if stream:
              track_request_callable = await self._create_track_request_callable(
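The reordered branch above now treats any object exposing model_dump (Pydantic-style SDK responses) as the primary case and lets plain dicts fall through unchanged. A minimal sketch of that dispatch, using a hypothetical stand-in class rather than a real SDK type:

    # Sketch: normalize a provider response to a JSON-safe dict.
    # FakeResponse is a hypothetical stand-in, not an OpenAI SDK type.
    class FakeResponse:
        def model_dump(self, mode="json"):
            return {"id": "resp_123"}

    def normalize(response):
        if hasattr(response, "model_dump"):
            return response.model_dump(mode="json")
        return response

    assert normalize(FakeResponse()) == {"id": "resp_123"}
    assert normalize({"id": "resp_123"}) == {"id": "resp_123"}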
promptlayer/promptlayer_mixins.py CHANGED
@@ -19,12 +19,14 @@ from promptlayer.streaming import (
      amistral_stream_chat,
      anthropic_stream_completion,
      anthropic_stream_message,
+     aopenai_responses_stream_chat,
      aopenai_stream_chat,
      aopenai_stream_completion,
      bedrock_stream_message,
      google_stream_chat,
      google_stream_completion,
      mistral_stream_chat,
+     openai_responses_stream_chat,
      openai_stream_chat,
      openai_stream_completion,
  )
@@ -48,7 +50,7 @@ from promptlayer.utils import (
  )

  MAP_PROVIDER_TO_FUNCTION_NAME = {
-     "openai": {
+     "openai:chat-completions": {
          "chat": {
              "function_name": "openai.chat.completions.create",
              "stream_function": openai_stream_chat,
@@ -58,6 +60,16 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
              "stream_function": openai_stream_completion,
          },
      },
+     "openai:responses": {
+         "chat": {
+             "function_name": "openai.responses.create",
+             "stream_function": openai_responses_stream_chat,
+         },
+         "completion": {
+             "function_name": "openai.responses.create",
+             "stream_function": openai_responses_stream_chat,
+         },
+     },
      "anthropic": {
          "chat": {
              "function_name": "anthropic.messages.create",
@@ -68,7 +80,7 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
              "stream_function": anthropic_stream_completion,
          },
      },
-     "openai.azure": {
+     "openai.azure:chat-completions": {
          "chat": {
              "function_name": "openai.AzureOpenAI.chat.completions.create",
              "stream_function": openai_stream_chat,
@@ -78,6 +90,16 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
              "stream_function": openai_stream_completion,
          },
      },
+     "openai.azure:responses": {
+         "chat": {
+             "function_name": "openai.AzureOpenAI.responses.create",
+             "stream_function": openai_responses_stream_chat,
+         },
+         "completion": {
+             "function_name": "openai.AzureOpenAI.responses.create",
+             "stream_function": openai_responses_stream_chat,
+         },
+     },
      "mistral": {
          "chat": {
              "function_name": "mistral.client.chat",
@@ -133,7 +155,7 @@ MAP_PROVIDER_TO_FUNCTION = {
  }

  AMAP_PROVIDER_TO_FUNCTION_NAME = {
-     "openai": {
+     "openai:chat-completions": {
          "chat": {
              "function_name": "openai.chat.completions.create",
              "stream_function": aopenai_stream_chat,
@@ -143,6 +165,16 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
              "stream_function": aopenai_stream_completion,
          },
      },
+     "openai:responses": {
+         "chat": {
+             "function_name": "openai.responses.create",
+             "stream_function": aopenai_responses_stream_chat,
+         },
+         "completion": {
+             "function_name": "openai.responses.create",
+             "stream_function": aopenai_responses_stream_chat,
+         },
+     },
      "anthropic": {
          "chat": {
              "function_name": "anthropic.messages.create",
@@ -153,7 +185,7 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
              "stream_function": aanthropic_stream_completion,
          },
      },
-     "openai.azure": {
+     "openai.azure:chat-completions": {
          "chat": {
              "function_name": "openai.AzureOpenAI.chat.completions.create",
              "stream_function": aopenai_stream_chat,
@@ -163,6 +195,16 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
              "stream_function": aopenai_stream_completion,
          },
      },
+     "openai.azure:responses": {
+         "chat": {
+             "function_name": "openai.AzureOpenAI.responses.create",
+             "stream_function": aopenai_responses_stream_chat,
+         },
+         "completion": {
+             "function_name": "openai.AzureOpenAI.responses.create",
+             "stream_function": aopenai_responses_stream_chat,
+         },
+     },
      "mistral": {
          "chat": {
              "function_name": "mistral.client.chat",
@@ -275,6 +317,7 @@ class PromptLayerMixin:
          function_kwargs = deepcopy(prompt_blueprint["llm_kwargs"])
          function_kwargs["stream"] = stream
          provider = prompt_blueprint_model["provider"]
+         api_type = prompt_blueprint_model["api_type"]

          if custom_provider := prompt_blueprint.get("custom_provider"):
              provider = custom_provider["client"]
@@ -285,7 +328,7 @@ class PromptLayerMixin:
          elif provider_base_url := prompt_blueprint.get("provider_base_url"):
              client_kwargs["base_url"] = provider_base_url["url"]

-         if stream and provider in ["openai", "openai.azure"]:
+         if stream and provider in ["openai", "openai.azure"] and api_type == "chat-completions":
              function_kwargs["stream_options"] = {"include_usage": True}

          provider_function_name = provider
@@ -295,6 +338,9 @@ class PromptLayerMixin:
          elif "claude" in prompt_blueprint_model["name"]:
              provider_function_name = "anthropic"

+         if provider_function_name in ("openai", "openai.azure"):
+             provider_function_name = f"{provider_function_name}:{api_type}"
+
          if is_async:
              config = AMAP_PROVIDER_TO_FUNCTION_NAME[provider_function_name][prompt_template["type"]]
              request_function = AMAP_PROVIDER_TO_FUNCTION[provider]
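With these changes the OpenAI-family entries in the dispatch tables are keyed by provider plus API flavor. A short sketch of the key construction the mixin now performs (values illustrative):

    # Sketch: how the dispatch key is composed for OpenAI providers.
    provider_function_name = "openai"
    api_type = "responses"  # or "chat-completions", from the prompt blueprint model
    if provider_function_name in ("openai", "openai.azure"):
        provider_function_name = f"{provider_function_name}:{api_type}"
    # "openai:responses" selects openai.responses.create and
    # openai_responses_stream_chat from MAP_PROVIDER_TO_FUNCTION_NAME.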
promptlayer/streaming/__init__.py CHANGED
@@ -9,6 +9,7 @@ from .blueprint_builder import (
      build_prompt_blueprint_from_anthropic_event,
      build_prompt_blueprint_from_google_event,
      build_prompt_blueprint_from_openai_chunk,
+     build_prompt_blueprint_from_openai_responses_event,
  )
  from .response_handlers import (
      aanthropic_stream_completion,
@@ -19,12 +20,14 @@ from .response_handlers import (
      amistral_stream_chat,
      anthropic_stream_completion,
      anthropic_stream_message,
+     aopenai_responses_stream_chat,
      aopenai_stream_chat,
      aopenai_stream_completion,
      bedrock_stream_message,
      google_stream_chat,
      google_stream_completion,
      mistral_stream_chat,
+     openai_responses_stream_chat,
      openai_stream_chat,
      openai_stream_completion,
  )
@@ -34,13 +37,16 @@ from .stream_processor import (
  )

  __all__ = [
+     "build_prompt_blueprint_from_anthropic_event",
      "build_prompt_blueprint_from_google_event",
      "build_prompt_blueprint_from_openai_chunk",
-     "build_prompt_blueprint_from_anthropic_event",
+     "build_prompt_blueprint_from_openai_responses_event",
      "stream_response",
      "astream_response",
      "openai_stream_chat",
      "aopenai_stream_chat",
+     "openai_responses_stream_chat",
+     "aopenai_responses_stream_chat",
      "anthropic_stream_message",
      "aanthropic_stream_message",
      "openai_stream_completion",
promptlayer/streaming/blueprint_builder.py CHANGED
@@ -8,14 +8,19 @@ and streaming events for different providers (OpenAI, Anthropic, etc.)
  from typing import Any, Dict, List, Optional


- def _create_tool_call(tool_id: str, function_name: str, arguments: Any) -> Dict[str, Any]:
+ def _create_tool_call(call_id: str, function_name: str, arguments: Any, tool_id: str = None) -> Dict[str, Any]:
      """Create a standardized tool call structure"""
-     return {"id": tool_id, "type": "function", "function": {"name": function_name, "arguments": arguments}}
+     tool_call = {"id": call_id, "type": "function", "function": {"name": function_name, "arguments": arguments}}
+     if tool_id:
+         tool_call["tool_id"] = tool_id
+     return tool_call


- def _create_content_item(content_type: str, **kwargs) -> Dict[str, Any]:
+ def _create_content_item(content_type: str, item_id: str = None, **kwargs) -> Dict[str, Any]:
      """Create a standardized content item"""
      content_item = {"type": content_type}
+     if item_id:
+         content_item["id"] = item_id
      content_item.update(kwargs)
      return content_item

@@ -67,6 +72,127 @@ def build_prompt_blueprint_from_openai_chunk(chunk, metadata):
      return _build_prompt_blueprint(assistant_message, metadata)


+ def build_prompt_blueprint_from_openai_responses_event(event, metadata):
+     """Build a prompt blueprint from an OpenAI responses event"""
+
+     assistant_content = []
+     tool_calls = []
+
+     event_dict = event.model_dump() if hasattr(event, "model_dump") else event
+     event_type = event_dict.get("type")
+
+     if event_type == "response.reasoning_summary_text.delta":
+         delta = event_dict.get("delta", "")
+         item_id = event_dict.get("item_id")
+         if delta:
+             assistant_content.append(_create_content_item("thinking", item_id=item_id, thinking=delta, signature=None))
+
+     elif event_type == "response.reasoning_summary_text.done":
+         final_text = event_dict.get("text", "")
+         item_id = event_dict.get("item_id")
+         if final_text:
+             assistant_content.append(
+                 _create_content_item("thinking", item_id=item_id, thinking=final_text, signature=None)
+             )
+
+     elif event_type == "response.reasoning_summary_part.added":
+         part = event_dict.get("part", {})
+         item_id = event_dict.get("item_id")
+         if part.get("type") == "summary_text":
+             text = part.get("text", "")
+             assistant_content.append(_create_content_item("thinking", item_id=item_id, thinking=text, signature=None))
+
+     elif event_type == "response.reasoning_summary_part.done":
+         part = event_dict.get("part", {})
+         item_id = event_dict.get("item_id")
+         if part.get("type") == "summary_text":
+             text = part.get("text", "")
+             if text:
+                 assistant_content.append(
+                     _create_content_item("thinking", item_id=item_id, thinking=text, signature=None)
+                 )
+
+     elif event_type == "response.function_call_arguments.delta":
+         item_id = event_dict.get("item_id")
+         delta = event_dict.get("delta", "")
+         if delta:
+             tool_calls.append(_create_tool_call("", "", delta, tool_id=item_id))
+
+     elif event_type == "response.function_call_arguments.done":
+         item_id = event_dict.get("item_id")
+         final_arguments = event_dict.get("arguments", "")
+         if final_arguments:
+             tool_calls.append(_create_tool_call("", "", final_arguments, tool_id=item_id))
+
+     elif event_type == "response.output_item.added":
+         item = event_dict.get("item", {})
+         item_type = item.get("type")
+         item_id = item.get("id")
+
+         if item_type == "reasoning":
+             assistant_content.append(_create_content_item("thinking", item_id=item_id, thinking="", signature=None))
+         elif item_type == "function_call":
+             tool_calls.append(_create_tool_call(item.get("call_id", ""), item.get("name", ""), "", tool_id=item_id))
+         elif item_type == "message":
+             assistant_content.append(_create_content_item("text", item_id=item_id, text="[Message started]"))
+
+     elif event_type == "response.content_part.added":
+         item_id = event_dict.get("item_id")
+         part = event_dict.get("part", {})
+         part_type = part.get("type", "output_text")
+
+         if part_type == "output_text":
+             text = part.get("text", "")
+             assistant_content.append(
+                 _create_content_item("text", item_id=item_id, text=text if text else "[Content part added]")
+             )
+
+     elif event_type == "response.output_text.delta":
+         item_id = event_dict.get("item_id")
+         delta_text = event_dict.get("delta", "")
+         if delta_text:
+             assistant_content.append(_create_content_item("text", item_id=item_id, text=delta_text))
+
+     elif event_type == "response.output_text.done":
+         item_id = event_dict.get("item_id")
+         final_text = event_dict.get("text", "")
+         if final_text:
+             assistant_content.append(_create_content_item("text", item_id=item_id, text=final_text))
+
+     elif event_type == "response.output_item.done":
+         item = event_dict.get("item", {})
+         item_type = item.get("type")
+         item_id = item.get("id")
+
+         if item_type == "reasoning":
+             summary = item.get("summary", [])
+             for summary_part in summary:
+                 if summary_part.get("type") == "summary_text":
+                     text = summary_part.get("text", "")
+                     if text:
+                         assistant_content.append(
+                             _create_content_item("thinking", item_id=item_id, thinking=text, signature=None)
+                         )
+
+         elif item_type == "function_call":
+             tool_calls.append(
+                 _create_tool_call(
+                     item.get("call_id", ""), item.get("name", ""), item.get("arguments", ""), tool_id=item_id
+                 )
+             )
+
+         elif item_type == "message":
+             content = item.get("content", [])
+             for content_part in content:
+                 if content_part.get("type") == "output_text":
+                     text = content_part.get("text", "")
+                     if text:
+                         assistant_content.append(_create_content_item("text", item_id=item_id, text=text))
+
+     assistant_message = _build_assistant_message(assistant_content, tool_calls or None)
+     return _build_prompt_blueprint(assistant_message, metadata)
+
+
  def build_prompt_blueprint_from_anthropic_event(event, metadata):
      """Build a prompt blueprint from an Anthropic stream event"""
promptlayer/streaming/response_handlers.py CHANGED
@@ -155,6 +155,262 @@ async def aopenai_stream_chat(generator: AsyncIterable[Any]) -> Any:
      return response


+ def _initialize_openai_response_data():
+     """Initialize the response data structure for OpenAI responses"""
+     return {
+         "id": None,
+         "object": "response",
+         "created_at": None,
+         "status": None,
+         "error": None,
+         "incomplete_details": None,
+         "instructions": None,
+         "max_output_tokens": None,
+         "model": None,
+         "output": [],
+         "parallel_tool_calls": True,
+         "previous_response_id": None,
+         "reasoning": {"effort": None, "summary": None},
+         "store": True,
+         "temperature": 1,
+         "text": {"format": {"type": "text"}},
+         "tool_choice": "auto",
+         "tools": [],
+         "top_p": 1,
+         "truncation": "disabled",
+         "usage": None,
+         "user": None,
+         "metadata": {},
+     }
+
+
+ def _process_openai_response_event(chunk_dict, response_data, current_items):
+     """Process a single OpenAI response event and update the response data"""
+     event_type = chunk_dict.get("type")
+     has_reasoning = False
+
+     if event_type == "response.created":
+         response_info = chunk_dict.get("response", {})
+         response_data["id"] = response_info.get("id")
+         response_data["created_at"] = response_info.get("created_at")
+         response_data["model"] = response_info.get("model")
+         response_data["status"] = response_info.get("status")
+         response_data["parallel_tool_calls"] = response_info.get("parallel_tool_calls", True)
+         response_data["temperature"] = response_info.get("temperature", 1)
+         response_data["tool_choice"] = response_info.get("tool_choice", "auto")
+         response_data["tools"] = response_info.get("tools", [])
+         response_data["top_p"] = response_info.get("top_p", 1)
+         response_data["truncation"] = response_info.get("truncation", "disabled")
+         response_data["max_output_tokens"] = response_info.get("max_output_tokens")
+         response_data["previous_response_id"] = response_info.get("previous_response_id")
+         response_data["store"] = response_info.get("store", True)
+         response_data["user"] = response_info.get("user")
+         response_data["metadata"] = response_info.get("metadata", {})
+
+         text_config = response_info.get("text", {})
+         if text_config:
+             response_data["text"] = text_config
+
+         reasoning = response_info.get("reasoning", {})
+         if reasoning:
+             response_data["reasoning"] = reasoning
+             has_reasoning = True
+
+     elif event_type == "response.in_progress":
+         response_info = chunk_dict.get("response", {})
+         response_data["status"] = response_info.get("status")
+
+     elif event_type == "response.output_item.added":
+         item = chunk_dict.get("item", {})
+         item_id = item.get("id")
+         item_type = item.get("type")
+
+         if item_type == "reasoning":
+             current_items[item_id] = {
+                 "type": "reasoning",
+                 "id": item_id,
+                 "summary": [],
+                 "status": item.get("status", "in_progress"),
+             }
+             has_reasoning = True
+
+         elif item_type == "function_call":
+             current_items[item_id] = {
+                 "type": "function_call",
+                 "id": item_id,
+                 "call_id": item.get("call_id"),
+                 "name": item.get("name"),
+                 "arguments": "",
+                 "status": item.get("status", "in_progress"),
+             }
+
+         elif item_type == "message":
+             current_items[item_id] = {
+                 "type": "message",
+                 "id": item_id,
+                 "role": item.get("role", "assistant"),
+                 "content": [],
+                 "status": item.get("status", "in_progress"),
+             }
+
+     elif event_type == "response.reasoning_summary_part.added":
+         item_id = chunk_dict.get("item_id")
+         part = chunk_dict.get("part", {})
+
+         if item_id in current_items and current_items[item_id]["type"] == "reasoning":
+             summary_part = {"type": part.get("type", "summary_text"), "text": part.get("text", "")}
+             current_items[item_id]["summary"].append(summary_part)
+
+     elif event_type == "response.reasoning_summary_text.delta":
+         item_id = chunk_dict.get("item_id")
+         delta = chunk_dict.get("delta", "")
+         summary_index = chunk_dict.get("summary_index", 0)
+
+         if item_id in current_items and current_items[item_id]["type"] == "reasoning":
+             while len(current_items[item_id]["summary"]) <= summary_index:
+                 current_items[item_id]["summary"].append({"type": "summary_text", "text": ""})
+
+             current_items[item_id]["summary"][summary_index]["text"] += delta
+
+     elif event_type == "response.reasoning_summary_text.done":
+         item_id = chunk_dict.get("item_id")
+         final_text = chunk_dict.get("text", "")
+         summary_index = chunk_dict.get("summary_index", 0)
+
+         if item_id in current_items and current_items[item_id]["type"] == "reasoning":
+             while len(current_items[item_id]["summary"]) <= summary_index:
+                 current_items[item_id]["summary"].append({"type": "summary_text", "text": ""})
+
+             current_items[item_id]["summary"][summary_index]["text"] = final_text
+
+     elif event_type == "response.reasoning_summary_part.done":
+         item_id = chunk_dict.get("item_id")
+         part = chunk_dict.get("part", {})
+
+         if item_id in current_items and current_items[item_id]["type"] == "reasoning":
+             summary_index = chunk_dict.get("summary_index", 0)
+             if summary_index < len(current_items[item_id]["summary"]):
+                 current_items[item_id]["summary"][summary_index] = {
+                     "type": part.get("type", "summary_text"),
+                     "text": part.get("text", ""),
+                 }
+
+     elif event_type == "response.function_call_arguments.delta":
+         item_id = chunk_dict.get("item_id")
+         delta = chunk_dict.get("delta", "")
+
+         if item_id in current_items:
+             current_items[item_id]["arguments"] += delta
+
+     elif event_type == "response.function_call_arguments.done":
+         item_id = chunk_dict.get("item_id")
+         final_arguments = chunk_dict.get("arguments", "")
+
+         if item_id in current_items:
+             current_items[item_id]["arguments"] = final_arguments
+
+     elif event_type == "response.content_part.added":
+         part = chunk_dict.get("part", {})
+
+         message_item = None
+         for item in current_items.values():
+             if item.get("type") == "message":
+                 message_item = item
+                 break
+
+         if message_item:
+             content_part = {
+                 "type": part.get("type", "output_text"),
+                 "text": part.get("text", ""),
+                 "annotations": part.get("annotations", []),
+             }
+             message_item["content"].append(content_part)
+
+     elif event_type == "response.output_text.delta":
+         delta_text = chunk_dict.get("delta", "")
+
+         for item in current_items.values():
+             if item.get("type") == "message" and item.get("content"):
+                 if item["content"] and item["content"][-1].get("type") == "output_text":
+                     item["content"][-1]["text"] += delta_text
+                     break
+
+     elif event_type == "response.output_text.done":
+         final_text = chunk_dict.get("text", "")
+
+         for item in current_items.values():
+             if item.get("type") == "message" and item.get("content"):
+                 if item["content"] and item["content"][-1].get("type") == "output_text":
+                     item["content"][-1]["text"] = final_text
+                     break
+
+     elif event_type == "response.output_item.done":
+         item = chunk_dict.get("item", {})
+         item_id = item.get("id")
+
+         if item_id in current_items:
+             current_items[item_id]["status"] = item.get("status", "completed")
+
+             if item.get("type") == "reasoning":
+                 current_items[item_id].update({"summary": item.get("summary", current_items[item_id]["summary"])})
+             elif item.get("type") == "function_call":
+                 current_items[item_id].update(
+                     {
+                         "arguments": item.get("arguments", current_items[item_id]["arguments"]),
+                         "call_id": item.get("call_id", current_items[item_id]["call_id"]),
+                         "name": item.get("name", current_items[item_id]["name"]),
+                     }
+                 )
+             elif item.get("type") == "message":
+                 current_items[item_id].update(
+                     {
+                         "content": item.get("content", current_items[item_id]["content"]),
+                         "role": item.get("role", current_items[item_id]["role"]),
+                     }
+                 )
+
+             response_data["output"].append(current_items[item_id])
+
+     elif event_type == "response.completed":
+         response_info = chunk_dict.get("response", {})
+         response_data["status"] = response_info.get("status", "completed")
+         response_data["usage"] = response_info.get("usage")
+         response_data["output"] = response_info.get("output", response_data["output"])
+
+         if response_info.get("reasoning"):
+             response_data["reasoning"] = response_info["reasoning"]
+
+     return has_reasoning
+
+
+ def openai_responses_stream_chat(results: list):
+     """Process OpenAI Responses streaming chat results and return response"""
+     from openai.types.responses import Response
+
+     response_data = _initialize_openai_response_data()
+     current_items = {}
+
+     for chunk in results:
+         chunk_dict = chunk.model_dump()
+         _process_openai_response_event(chunk_dict, response_data, current_items)
+
+     return Response(**response_data)
+
+
+ async def aopenai_responses_stream_chat(generator: AsyncIterable[Any]) -> Any:
+     """Async version of openai_responses_stream_chat"""
+     from openai.types.responses import Response
+
+     response_data = _initialize_openai_response_data()
+     current_items = {}
+
+     async for chunk in generator:
+         chunk_dict = chunk.model_dump()
+         _process_openai_response_event(chunk_dict, response_data, current_items)
+
+     return Response(**response_data)
+
+
  def anthropic_stream_message(results: list):
      """Process Anthropic streaming message results and return response + blueprint"""
      from anthropic.types import Message, MessageStreamEvent, Usage
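openai_responses_stream_chat replays the whole event stream into one Response object: response.created seeds the top-level fields, the *.added and *.delta events grow per-item state in current_items, and response.output_item.done moves each finished item into output. A minimal sketch of that accumulation driven with hand-written dict events (illustrative values; a real stream yields Pydantic event objects):

    # Sketch: drive the accumulator with plain dicts instead of SDK events.
    response_data = _initialize_openai_response_data()
    current_items = {}
    events = [
        {"type": "response.created", "response": {"id": "resp_1", "model": "gpt-4.1", "status": "in_progress"}},
        {"type": "response.output_item.added", "item": {"id": "msg_1", "type": "message", "role": "assistant"}},
        {"type": "response.content_part.added", "item_id": "msg_1", "part": {"type": "output_text", "text": ""}},
        {"type": "response.output_text.delta", "item_id": "msg_1", "delta": "Hi"},
        {"type": "response.output_item.done", "item": {"id": "msg_1", "type": "message", "status": "completed"}},
    ]
    for event in events:
        _process_openai_response_event(event, response_data, current_items)
    # response_data["output"][0]["content"][0]["text"] == "Hi"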
promptlayer/streaming/stream_processor.py CHANGED
@@ -5,6 +5,7 @@ from .blueprint_builder import (
      build_prompt_blueprint_from_bedrock_event,
      build_prompt_blueprint_from_google_event,
      build_prompt_blueprint_from_openai_chunk,
+     build_prompt_blueprint_from_openai_responses_event,
  )


@@ -14,7 +15,11 @@ def _build_stream_blueprint(result: Any, metadata: Dict) -> Any:
      model_name = model_info.get("name", "")

      if provider == "openai" or provider == "openai.azure":
-         return build_prompt_blueprint_from_openai_chunk(result, metadata)
+         api_type = model_info.get("api_type", "chat-completions") if metadata else "chat-completions"
+         if api_type == "chat-completions":
+             return build_prompt_blueprint_from_openai_chunk(result, metadata)
+         elif api_type == "responses":
+             return build_prompt_blueprint_from_openai_responses_event(result, metadata)

      elif provider == "google" or (provider == "vertexai" and model_name.startswith("gemini")):
          return build_prompt_blueprint_from_google_event(result, metadata)
promptlayer/types/prompt_template.py CHANGED
@@ -21,12 +21,14 @@ class ImageUrl(TypedDict, total=False):
  class TextContent(TypedDict, total=False):
      type: Literal["text"]
      text: str
+     id: Union[str, None]


  class ThinkingContent(TypedDict, total=False):
      signature: Union[str, None]
      type: Literal["thinking"]
      thinking: str
+     id: Union[str, None]


  class ImageContent(TypedDict, total=False):
@@ -87,6 +89,7 @@ class UserMessage(TypedDict, total=False):

  class ToolCall(TypedDict, total=False):
      id: str
+     tool_id: Union[str, None]
      type: Literal["function"]
      function: FunctionCall
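The optional id / tool_id fields let content items and tool calls carry the Responses-API item identifiers alongside the existing call id; total=False keeps them optional. A small sketch (field values illustrative):

    # Sketch: a ToolCall carrying both the call id ("id") and the
    # Responses-API item id ("tool_id"); both remain optional fields.
    tool_call: ToolCall = {
        "id": "call_abc",
        "tool_id": "fc_123",
        "type": "function",
        "function": {"name": "get_weather", "arguments": "{}"},
    }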
 
promptlayer/utils.py CHANGED
@@ -1253,8 +1253,13 @@ def openai_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: d
      from openai import OpenAI

      client = OpenAI(**client_kwargs)
-     request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
-     return request_to_make(client, **function_kwargs)
+     api_type = prompt_blueprint["metadata"]["model"]["api_type"]
+
+     if api_type == "chat-completions":
+         request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
+         return request_to_make(client, **function_kwargs)
+     else:
+         return client.responses.create(**function_kwargs)


  async def aopenai_chat_request(client, **kwargs):
@@ -1275,16 +1280,26 @@ async def aopenai_request(prompt_blueprint: GetPromptTemplateResponse, client_kw
      from openai import AsyncOpenAI

      client = AsyncOpenAI(**client_kwargs)
-     request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
-     return await request_to_make(client, **function_kwargs)
+     api_type = prompt_blueprint["metadata"]["model"]["api_type"]
+
+     if api_type == "chat-completions":
+         request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
+         return await request_to_make(client, **function_kwargs)
+     else:
+         return await client.responses.create(**function_kwargs)


  def azure_openai_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
      from openai import AzureOpenAI

      client = AzureOpenAI(azure_endpoint=client_kwargs.pop("base_url", None))
-     request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
-     return request_to_make(client, **function_kwargs)
+     api_type = prompt_blueprint["metadata"]["model"]["api_type"]
+
+     if api_type == "chat-completions":
+         request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
+         return request_to_make(client, **function_kwargs)
+     else:
+         return client.responses.create(**function_kwargs)


  async def aazure_openai_request(
@@ -1293,8 +1308,13 @@
      from openai import AsyncAzureOpenAI

      client = AsyncAzureOpenAI(azure_endpoint=client_kwargs.pop("base_url", None))
-     request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
-     return await request_to_make(client, **function_kwargs)
+     api_type = prompt_blueprint["metadata"]["model"]["api_type"]
+
+     if api_type == "chat-completions":
+         request_to_make = AMAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
+         return await request_to_make(client, **function_kwargs)
+     else:
+         return await client.responses.create(**function_kwargs)


  def anthropic_chat_request(client, **kwargs):
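All four helpers above now branch the same way: "chat-completions" keeps the existing MAP_TYPE_TO_OPENAI_FUNCTION / AMAP_TYPE_TO_OPENAI_FUNCTION dispatch, and any other api_type is sent to client.responses.create. A hedged sketch of the shared shape (route_openai_request is a hypothetical helper, not a function in the package):

    # Sketch: the common routing pattern factored out for illustration.
    def route_openai_request(client, prompt_blueprint, function_kwargs):
        api_type = prompt_blueprint["metadata"]["model"]["api_type"]
        if api_type == "chat-completions":
            request_to_make = MAP_TYPE_TO_OPENAI_FUNCTION[prompt_blueprint["prompt_template"]["type"]]
            return request_to_make(client, **function_kwargs)
        # any other api_type (e.g. "responses") targets the Responses API
        return client.responses.create(**function_kwargs)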
promptlayer-1.0.68.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: promptlayer
- Version: 1.0.67
+ Version: 1.0.68
  Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
  License: Apache-2.0
  Author: Magniv
promptlayer-1.0.68.dist-info/RECORD ADDED
@@ -0,0 +1,22 @@
+ promptlayer/__init__.py,sha256=FAC3Yo76Lk-qsHz-D9JtL15neFV2MXqgSyrxslWsuNc,140
+ promptlayer/groups/__init__.py,sha256=xhOAolLUBkr76ZHvJr29OwjCIk1V9qKQXjZCuyTJUIY,429
+ promptlayer/groups/groups.py,sha256=YPROicy-TzpkrpA8vOpZS2lwvJ6VRtlbQ1S2oT1N0vM,338
+ promptlayer/promptlayer.py,sha256=hxzzq5bXkOJXvRCbBmtDLCSb4JlozpTJ00Ofk5Y_pQg,22901
+ promptlayer/promptlayer_base.py,sha256=jOgXzNZlV1LKOOsXSSAOgn8o4hXn_EV0oY9Nf3Bsu_s,6872
+ promptlayer/promptlayer_mixins.py,sha256=fFh36b4nOjCuktuii-Doi6BEJtXQwWQaxk_KexS75kA,15686
+ promptlayer/span_exporter.py,sha256=Pc1-zWAcjVCSykh-4rYPqiEZvzkG9xaYLVoHFY_TWaQ,2410
+ promptlayer/streaming/__init__.py,sha256=nKWwUsAy4FjJLT6rxntXCyVlKOov-xT2dk7sOXrgpUs,1897
+ promptlayer/streaming/blueprint_builder.py,sha256=lz4aGo_1tytuPB7AkA3y-xyl1ZQHnHOA2fZZl5CaK2Q,12769
+ promptlayer/streaming/response_handlers.py,sha256=wtl9ubb0kEB-ZuIpl0_kOvw9wZI4XRKyKqT3-d4EtWE,35410
+ promptlayer/streaming/stream_processor.py,sha256=atnaBg31kmTPSF433DF7uV3jiLuJOnA5Vz8tVCQ5v2Y,4083
+ promptlayer/templates.py,sha256=7ObDPMzHXjttDdJdCXA_pDL9XAnmcujIWucmgZJcOC8,1179
+ promptlayer/track/__init__.py,sha256=tyweLTAY7UpYpBHWwY-T3pOPDIlGjcgccYXqU_r0694,1710
+ promptlayer/track/track.py,sha256=A-awcYwsSwxktrlCMchy8NITIquwxU1UXbgLZMwqrA0,3164
+ promptlayer/types/__init__.py,sha256=xJcvQuOk91ZBBePb40-1FDNDKYrZoH5lPE2q6_UhprM,111
+ promptlayer/types/prompt_template.py,sha256=cqxNX9QQHKh1lwpwSp-IjNI8Yw9jOVNI3frAflz23sQ,5155
+ promptlayer/types/request_log.py,sha256=xU6bcxQar6GaBOJlgZTavXUV3FjE8sF_nSjPu4Ya_00,174
+ promptlayer/utils.py,sha256=7m7ocEvz8XygivNNNuaVqih8jzLAqBPqgzqMFhtYU4A,61005
+ promptlayer-1.0.68.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ promptlayer-1.0.68.dist-info/METADATA,sha256=tw2BIJYZSXCMC6IIhpYFdxLIHRe-70b4wyWDVZoll24,4819
+ promptlayer-1.0.68.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ promptlayer-1.0.68.dist-info/RECORD,,
promptlayer-1.0.67.dist-info/RECORD REMOVED
@@ -1,22 +0,0 @@
- promptlayer/__init__.py,sha256=RzFSknxhW5-pfU0yRrfepqjQrkoYD3c45M8gIMpHxLE,140
- promptlayer/groups/__init__.py,sha256=xhOAolLUBkr76ZHvJr29OwjCIk1V9qKQXjZCuyTJUIY,429
- promptlayer/groups/groups.py,sha256=YPROicy-TzpkrpA8vOpZS2lwvJ6VRtlbQ1S2oT1N0vM,338
- promptlayer/promptlayer.py,sha256=HOb6pxWBCooXyEuaPlhCqGYjwmzxixfW4HPwr2y5Yhw,22896
- promptlayer/promptlayer_base.py,sha256=jOgXzNZlV1LKOOsXSSAOgn8o4hXn_EV0oY9Nf3Bsu_s,6872
- promptlayer/promptlayer_mixins.py,sha256=xgDwpr8D4Hl_n0OidlzeEWZtHfrhRQDIDx6a_z5Iy48,13930
- promptlayer/span_exporter.py,sha256=Pc1-zWAcjVCSykh-4rYPqiEZvzkG9xaYLVoHFY_TWaQ,2410
- promptlayer/streaming/__init__.py,sha256=s0VFWaaDrQD3oFbJLytKlmiPsDDPlgTSqNjRbFj8kBI,1641
- promptlayer/streaming/blueprint_builder.py,sha256=kYo8hby2eooCcPT2rXygu0Cj2iIp-_TqTZ1IGbF8moE,7337
- promptlayer/streaming/response_handlers.py,sha256=1LYnBOjcbw1Wgvz4s5kLOYVY2qQmDpmnAo2GK6glocE,25110
- promptlayer/streaming/stream_processor.py,sha256=AJfzINN6feuf5dhCpdKfk3MZ1n7KeXnopMZ5c97LjBg,3752
- promptlayer/templates.py,sha256=7ObDPMzHXjttDdJdCXA_pDL9XAnmcujIWucmgZJcOC8,1179
- promptlayer/track/__init__.py,sha256=tyweLTAY7UpYpBHWwY-T3pOPDIlGjcgccYXqU_r0694,1710
- promptlayer/track/track.py,sha256=A-awcYwsSwxktrlCMchy8NITIquwxU1UXbgLZMwqrA0,3164
- promptlayer/types/__init__.py,sha256=xJcvQuOk91ZBBePb40-1FDNDKYrZoH5lPE2q6_UhprM,111
- promptlayer/types/prompt_template.py,sha256=blkVBhh4u5pMhgX_Dsn78sN7Rv2Vy_zhd1-NERLXTpM,5075
- promptlayer/types/request_log.py,sha256=xU6bcxQar6GaBOJlgZTavXUV3FjE8sF_nSjPu4Ya_00,174
- promptlayer/utils.py,sha256=RAiYZPWai6BOmlXTdxF4o3m9NhfZf--bHo7_Qc9X7eo,60269
- promptlayer-1.0.67.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- promptlayer-1.0.67.dist-info/METADATA,sha256=UfliH2klLgyPbRpwHoMUgcvyR7D_Y3yqGLiLPaCb5KI,4819
- promptlayer-1.0.67.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- promptlayer-1.0.67.dist-info/RECORD,,