promptlayer 1.0.35__py3-none-any.whl → 1.0.78__py3-none-any.whl

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
@@ -4,13 +4,25 @@ import requests
 from opentelemetry.sdk.trace import ReadableSpan
 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
 
-from promptlayer.utils import URL_API_PROMPTLAYER
+from promptlayer.utils import raise_on_bad_response, retry_on_api_error
 
 
 class PromptLayerSpanExporter(SpanExporter):
-    def __init__(self, api_key: str = None):
+    def __init__(self, api_key: str, base_url: str, throw_on_error: bool):
         self.api_key = api_key
-        self.url = f"{URL_API_PROMPTLAYER}/spans-bulk"
+        self.url = f"{base_url}/spans-bulk"
+        self.throw_on_error = throw_on_error
+
+    @retry_on_api_error
+    def _post_spans(self, request_data):
+        response = requests.post(
+            self.url,
+            headers={"X-Api-Key": self.api_key, "Content-Type": "application/json"},
+            json={"spans": request_data},
+        )
+        if response.status_code not in (200, 201):
+            raise_on_bad_response(response, "PromptLayer had the following error while exporting spans")
+        return response
 
     def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
         request_data = []
@@ -19,12 +31,8 @@ class PromptLayerSpanExporter(SpanExporter):
             span_info = {
                 "name": span.name,
                 "context": {
-                    "trace_id": hex(span.context.trace_id)[2:].zfill(
-                        32
-                    ),  # Ensure 32 characters
-                    "span_id": hex(span.context.span_id)[2:].zfill(
-                        16
-                    ),  # Ensure 16 characters
+                    "trace_id": hex(span.context.trace_id)[2:].zfill(32),  # Ensure 32 characters
+                    "span_id": hex(span.context.span_id)[2:].zfill(16),  # Ensure 16 characters
                     "trace_state": str(span.context.trace_state),
                 },
                 "kind": str(span.kind),
@@ -44,10 +52,7 @@ class PromptLayerSpanExporter(SpanExporter):
                     }
                     for event in span.events
                 ],
-                "links": [
-                    {"context": link.context, "attributes": dict(link.attributes)}
-                    for link in span.links
-                ],
+                "links": [{"context": link.context, "attributes": dict(link.attributes)} for link in span.links],
                 "resource": {
                     "attributes": dict(span.resource.attributes),
                     "schema_url": span.resource.schema_url,
@@ -56,17 +61,7 @@ class PromptLayerSpanExporter(SpanExporter):
             request_data.append(span_info)
 
         try:
-            response = requests.post(
-                self.url,
-                headers={
-                    "X-Api-Key": self.api_key,
-                    "Content-Type": "application/json",
-                },
-                json={
-                    "spans": request_data,
-                },
-            )
-            response.raise_for_status()
+            self._post_spans(request_data)
             return SpanExportResult.SUCCESS
         except requests.RequestException:
             return SpanExportResult.FAILURE
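
For orientation: a minimal sketch of wiring the reworked exporter into an OpenTelemetry pipeline under its new constructor signature. The import path and base_url value are assumptions for illustration; only the three constructor arguments come from the diff above.

    # Illustrative wiring only; module path and endpoint URL are assumed.
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import BatchSpanProcessor

    from promptlayer.span_exporter import PromptLayerSpanExporter  # path assumed

    exporter = PromptLayerSpanExporter(
        api_key="pl_...",                        # PromptLayer API key
        base_url="https://api.promptlayer.com",  # assumed endpoint
        throw_on_error=False,                    # new flag stored by the exporter
    )
    provider = TracerProvider()
    provider.add_span_processor(BatchSpanProcessor(exporter))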
@@ -0,0 +1,64 @@
+"""
+Streaming prompt blueprint support for PromptLayer
+
+This module provides comprehensive streaming support for building prompt blueprints
+from various LLM providers during streaming responses.
+"""
+
+from .blueprint_builder import (
+    build_prompt_blueprint_from_anthropic_event,
+    build_prompt_blueprint_from_google_event,
+    build_prompt_blueprint_from_openai_chunk,
+    build_prompt_blueprint_from_openai_responses_event,
+)
+from .response_handlers import (
+    aanthropic_stream_completion,
+    aanthropic_stream_message,
+    abedrock_stream_message,
+    agoogle_stream_chat,
+    agoogle_stream_completion,
+    amistral_stream_chat,
+    anthropic_stream_completion,
+    anthropic_stream_message,
+    aopenai_responses_stream_chat,
+    aopenai_stream_chat,
+    aopenai_stream_completion,
+    bedrock_stream_message,
+    google_stream_chat,
+    google_stream_completion,
+    mistral_stream_chat,
+    openai_responses_stream_chat,
+    openai_stream_chat,
+    openai_stream_completion,
+)
+from .stream_processor import (
+    astream_response,
+    stream_response,
+)
+
+__all__ = [
+    "build_prompt_blueprint_from_anthropic_event",
+    "build_prompt_blueprint_from_google_event",
+    "build_prompt_blueprint_from_openai_chunk",
+    "build_prompt_blueprint_from_openai_responses_event",
+    "stream_response",
+    "astream_response",
+    "openai_stream_chat",
+    "aopenai_stream_chat",
+    "openai_responses_stream_chat",
+    "aopenai_responses_stream_chat",
+    "anthropic_stream_message",
+    "aanthropic_stream_message",
+    "openai_stream_completion",
+    "aopenai_stream_completion",
+    "anthropic_stream_completion",
+    "aanthropic_stream_completion",
+    "bedrock_stream_message",
+    "abedrock_stream_message",
+    "google_stream_chat",
+    "google_stream_completion",
+    "agoogle_stream_chat",
+    "agoogle_stream_completion",
+    "mistral_stream_chat",
+    "amistral_stream_chat",
+]
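
Judging by the relative imports, this is the package initializer for a new streaming subpackage. A minimal usage sketch follows; the promptlayer.streaming import path is an assumption, and the SimpleNamespace objects only mimic the attribute shape the chunk builder reads (see blueprint_builder below).

    # Feed a mocked OpenAI-style chunk through the exported chunk builder.
    from types import SimpleNamespace

    from promptlayer.streaming import build_prompt_blueprint_from_openai_chunk  # path assumed

    delta = SimpleNamespace(content="Hello, world", tool_calls=None)
    chunk = SimpleNamespace(choices=[SimpleNamespace(delta=delta)])

    blueprint = build_prompt_blueprint_from_openai_chunk(chunk, metadata={"model": "gpt-4o"})
    print(blueprint["prompt_template"]["messages"][0]["content"])
    # [{'type': 'text', 'text': 'Hello, world'}]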
@@ -0,0 +1,382 @@
+"""
+Blueprint builders for various LLM providers
+
+This module contains functions to build prompt blueprints from LLM responses
+and streaming events for different providers (OpenAI, Anthropic, etc.)
+"""
+
+from typing import Any, Dict, List, Optional
+
+
+def _create_tool_call(call_id: str, function_name: str, arguments: Any, tool_id: str = None) -> Dict[str, Any]:
+    """Create a standardized tool call structure"""
+    tool_call = {"id": call_id, "type": "function", "function": {"name": function_name, "arguments": arguments}}
+    if tool_id:
+        tool_call["tool_id"] = tool_id
+    return tool_call
+
+
+def _create_content_item(content_type: str, item_id: str = None, **kwargs) -> Dict[str, Any]:
+    """Create a standardized content item"""
+    content_item = {"type": content_type}
+    if item_id:
+        content_item["id"] = item_id
+    content_item.update(kwargs)
+    return content_item
+
+
+def _build_assistant_message(
+    content: List[Dict], tool_calls: Optional[List[Dict]] = None, template_format: str = "f-string"
+) -> Dict[str, Any]:
+    """Build a standardized assistant message structure"""
+    message = {"role": "assistant", "content": content, "input_variables": [], "template_format": template_format}
+
+    if tool_calls:
+        message["tool_calls"] = tool_calls
+
+    return message
+
+
+def _build_prompt_blueprint(assistant_message: Dict[str, Any], metadata: Any) -> Dict[str, Any]:
+    """Build a standardized prompt blueprint structure"""
+    prompt_template = {"type": "chat", "messages": [assistant_message], "input_variables": []}
+
+    blueprint = {"prompt_template": prompt_template, "metadata": metadata}
+
+    return blueprint
+
+
+def build_prompt_blueprint_from_openai_chunk(chunk, metadata):
+    """Build a prompt blueprint from an OpenAI chat completion chunk"""
+
+    assistant_content = []
+    tool_calls = []
+
+    if hasattr(chunk, "choices") and len(chunk.choices) > 0:
+        delta = chunk.choices[0].delta
+
+        if hasattr(delta, "content") and delta.content:
+            assistant_content.append(_create_content_item("text", text=delta.content))
+
+        if hasattr(delta, "tool_calls") and delta.tool_calls:
+            for tool_call in delta.tool_calls:
+                tool_calls.append(
+                    _create_tool_call(
+                        getattr(tool_call, "id", ""),
+                        getattr(tool_call.function, "name", "") if tool_call.function else "",
+                        getattr(tool_call.function, "arguments", "") if tool_call.function else "",
+                    )
+                )
+
+    assistant_message = _build_assistant_message(assistant_content, tool_calls or None)
+    return _build_prompt_blueprint(assistant_message, metadata)
+
+
+def build_prompt_blueprint_from_openai_responses_event(event, metadata):
+    """Build a prompt blueprint from an OpenAI responses event"""
+
+    assistant_content = []
+    tool_calls = []
+
+    event_dict = event.model_dump() if hasattr(event, "model_dump") else event
+    event_type = event_dict.get("type")
+
+    if event_type == "response.reasoning_summary_text.delta":
+        delta = event_dict.get("delta", "")
+        item_id = event_dict.get("item_id")
+        if delta:
+            assistant_content.append(_create_content_item("thinking", item_id=item_id, thinking=delta, signature=None))
+
+    elif event_type == "response.reasoning_summary_text.done":
+        final_text = event_dict.get("text", "")
+        item_id = event_dict.get("item_id")
+        if final_text:
+            assistant_content.append(
+                _create_content_item("thinking", item_id=item_id, thinking=final_text, signature=None)
+            )
+
+    elif event_type == "response.reasoning_summary_part.added":
+        part = event_dict.get("part", {})
+        item_id = event_dict.get("item_id")
+        if part.get("type") == "summary_text":
+            text = part.get("text", "")
+            assistant_content.append(_create_content_item("thinking", item_id=item_id, thinking=text, signature=None))
+
+    elif event_type == "response.reasoning_summary_part.done":
+        part = event_dict.get("part", {})
+        item_id = event_dict.get("item_id")
+        if part.get("type") == "summary_text":
+            text = part.get("text", "")
+            if text:
+                assistant_content.append(
+                    _create_content_item("thinking", item_id=item_id, thinking=text, signature=None)
+                )
+
+    elif event_type == "response.function_call_arguments.delta":
+        item_id = event_dict.get("item_id")
+        delta = event_dict.get("delta", "")
+        if delta:
+            tool_calls.append(_create_tool_call("", "", delta, tool_id=item_id))
+
+    elif event_type == "response.function_call_arguments.done":
+        item_id = event_dict.get("item_id")
+        final_arguments = event_dict.get("arguments", "")
+        if final_arguments:
+            tool_calls.append(_create_tool_call("", "", final_arguments, tool_id=item_id))
+
+    elif event_type == "response.output_item.added":
+        item = event_dict.get("item", {})
+        item_type = item.get("type")
+        item_id = item.get("id")
+
+        if item_type == "reasoning":
+            assistant_content.append(_create_content_item("thinking", item_id=item_id, thinking="", signature=None))
+        elif item_type == "function_call":
+            tool_calls.append(_create_tool_call(item.get("call_id", ""), item.get("name", ""), "", tool_id=item_id))
+        elif item_type == "message":
+            assistant_content.append(_create_content_item("text", item_id=item_id, text=""))
+        elif item_type == "code_interpreter_call":
+            assistant_content.append(
+                _create_content_item(
+                    "code", item_id=item_id, code=item.get("code", ""), container_id=item.get("container_id")
+                )
+            )
+
+    elif event_type == "response.content_part.added":
+        item_id = event_dict.get("item_id")
+        part = event_dict.get("part", {})
+        part_type = part.get("type", "output_text")
+
+        if part_type == "output_text":
+            text = part.get("text", "")
+            assistant_content.append(_create_content_item("text", item_id=item_id, text=text if text else ""))
+
+    elif event_type == "response.output_text.annotation.added":
+        annotation = event_dict.get("annotation", {}) or {}
+        atype = annotation.get("type")
+        mapped_annotation = None
+
+        if atype == "url_citation":
+            mapped_annotation = {
+                "type": "url_citation",
+                "title": annotation.get("title"),
+                "url": annotation.get("url"),
+                "start_index": annotation.get("start_index"),
+                "end_index": annotation.get("end_index"),
+            }
+        elif atype == "file_citation":
+            mapped_annotation = {
+                "type": "file_citation",
+                "index": annotation.get("index"),
+                "file_id": annotation.get("file_id"),
+                "filename": annotation.get("filename"),
+            }
+        else:
+            mapped_annotation = annotation
+
+        assistant_content.append(
+            _create_content_item("text", item_id=event_dict.get("item_id"), text="", annotation=[mapped_annotation])
+        )
+
+    elif event_type == "response.code_interpreter_call.in_progress":
+        item_id = event_dict.get("item_id")
+        assistant_content.append(
+            _create_content_item(
+                "code", item_id=item_id, code=event_dict.get("code"), container_id=event_dict.get("container_id")
+            )
+        )
+
+    elif event_type == "response.code_interpreter_call_code.delta":
+        item_id = event_dict.get("item_id")
+        delta_code = event_dict.get("delta", "")
+        if delta_code:
+            assistant_content.append(
+                _create_content_item(
+                    "code", item_id=item_id, code=delta_code, container_id=event_dict.get("container_id")
+                )
+            )
+
+    elif event_type == "response.code_interpreter_call_code.done":
+        item_id = event_dict.get("item_id")
+        final_code = event_dict.get("code", "")
+        if final_code:
+            assistant_content.append(
+                _create_content_item(
+                    "code", item_id=item_id, code=final_code, container_id=event_dict.get("container_id")
+                )
+            )
+
+    elif event_type == "response.code_interpreter_call.interpreting":
+        item_id = event_dict.get("item_id")
+        assistant_content.append(
+            _create_content_item(
+                "code", item_id=item_id, code=event_dict.get("code"), container_id=event_dict.get("container_id")
+            )
+        )
+
+    elif event_type == "response.code_interpreter_call.completed":
+        item_id = event_dict.get("item_id")
+        assistant_content.append(
+            _create_content_item(
+                "code", item_id=item_id, code=event_dict.get("code"), container_id=event_dict.get("container_id")
+            )
+        )
+
+    elif event_type == "response.output_text.delta":
+        item_id = event_dict.get("item_id")
+        delta_text = event_dict.get("delta", "")
+        if delta_text:
+            assistant_content.append(_create_content_item("text", item_id=item_id, text=delta_text))
+
+    elif event_type == "response.output_text.done":
+        item_id = event_dict.get("item_id")
+        final_text = event_dict.get("text", "")
+        if final_text:
+            assistant_content.append(_create_content_item("text", item_id=item_id, text=final_text))
+
+    elif event_type == "response.output_item.done":
+        item = event_dict.get("item", {})
+        item_type = item.get("type")
+        item_id = item.get("id")
+
+        if item_type == "reasoning":
+            summary = item.get("summary", [])
+            for summary_part in summary:
+                if summary_part.get("type") == "summary_text":
+                    text = summary_part.get("text", "")
+                    if text:
+                        assistant_content.append(
+                            _create_content_item("thinking", item_id=item_id, thinking=text, signature=None)
+                        )
+
+        elif item_type == "function_call":
+            tool_calls.append(
+                _create_tool_call(
+                    item.get("call_id", ""), item.get("name", ""), item.get("arguments", ""), tool_id=item_id
+                )
+            )
+
+        elif item_type == "message":
+            content = item.get("content", [])
+            for content_part in content:
+                if content_part.get("type") == "output_text":
+                    text = content_part.get("text", "")
+                    if text:
+                        assistant_content.append(_create_content_item("text", item_id=item_id, text=text))
+
+        elif item_type == "code_interpreter_call":
+            assistant_content.append(
+                _create_content_item(
+                    "code", item_id=item_id, code=item.get("code", ""), container_id=item.get("container_id")
+                )
+            )
+
+    assistant_message = _build_assistant_message(assistant_content, tool_calls or None)
+    return _build_prompt_blueprint(assistant_message, metadata)
+
+
+def build_prompt_blueprint_from_anthropic_event(event, metadata):
+    """Build a prompt blueprint from an Anthropic stream event"""
+
+    assistant_content = []
+    tool_calls = []
+
+    if hasattr(event, "type"):
+        if event.type == "content_block_start" and hasattr(event, "content_block"):
+            if event.content_block.type == "thinking":
+                assistant_content.append(_create_content_item("thinking", thinking="", signature=None))
+            elif event.content_block.type == "text":
+                assistant_content.append(_create_content_item("text", text=""))
+            elif event.content_block.type == "tool_use":
+                tool_calls.append(
+                    _create_tool_call(
+                        getattr(event.content_block, "id", ""),
+                        getattr(event.content_block, "name", ""),
+                        getattr(event.content_block, "input", ""),
+                    )
+                )
+        elif event.type == "content_block_delta" and hasattr(event, "delta"):
+            if hasattr(event.delta, "text"):
+                assistant_content.append(_create_content_item("text", text=event.delta.text))
+            elif hasattr(event.delta, "thinking"):
+                assistant_content.append(
+                    _create_content_item(
+                        "thinking", thinking=event.delta.thinking, signature=getattr(event.delta, "signature", None)
+                    )
+                )
+            elif hasattr(event.delta, "partial_json"):
+                tool_calls.append(
+                    _create_tool_call(
+                        getattr(event.delta, "id", ""),
+                        getattr(event.delta, "name", ""),
+                        getattr(event.delta, "input", event.delta.partial_json),
+                    )
+                )
+
+    assistant_message = _build_assistant_message(assistant_content, tool_calls or None)
+    return _build_prompt_blueprint(assistant_message, metadata)
+
+
+def build_prompt_blueprint_from_google_event(event, metadata):
+    """
+    Build a prompt blueprint from a Google (Gemini) streaming event (raw dict or GenerateContentResponse).
+    """
+    assistant_content = []
+    tool_calls = []
+    candidate = event.candidates[0]
+
+    if candidate and hasattr(candidate, "content") and candidate.content and hasattr(candidate.content, "parts"):
+        for part in candidate.content.parts:
+            # "thought" is a boolean attribute on Part for Gemini
+            if hasattr(part, "thought") and part.thought is True:
+                assistant_content.append(
+                    _create_content_item("thinking", thinking=getattr(part, "text", ""), signature=None)
+                )
+            elif hasattr(part, "text") and part.text:
+                assistant_content.append(_create_content_item("text", text=part.text))
+            elif hasattr(part, "function_call"):
+                tool_calls.append(
+                    _create_tool_call(
+                        getattr(part.function_call, "id", ""),
+                        getattr(part.function_call, "name", ""),
+                        getattr(part.function_call, "args", {}),
+                    )
+                )
+
+    assistant_message = _build_assistant_message(assistant_content, tool_calls or None, template_format="f-string")
+    return _build_prompt_blueprint(assistant_message, metadata)
+
+
+def build_prompt_blueprint_from_bedrock_event(result, metadata):
+    """
+    Build a prompt blueprint from an Amazon Bedrock streaming event.
+    """
+    assistant_content = []
+    tool_calls = []
+
+    if "contentBlockDelta" in result:
+        delta = result["contentBlockDelta"].get("delta", {})
+
+        if "reasoningContent" in delta:
+            reasoning_text = delta["reasoningContent"].get("text", "")
+            signature = delta["reasoningContent"].get("signature")
+            assistant_content.append(_create_content_item("thinking", thinking=reasoning_text, signature=signature))
+
+        elif "text" in delta:
+            assistant_content.append(_create_content_item("text", text=delta["text"]))
+
+        elif "toolUse" in delta:
+            tool_use = delta["toolUse"]
+            assistant_content.append(
+                _create_tool_call(tool_use.get("toolUseId", ""), tool_use.get("name", ""), tool_use.get("input", ""))
+            )
+
+    elif "contentBlockStart" in result:
+        start_block = result["contentBlockStart"].get("start", {})
+
+        if "toolUse" in start_block:
+            tool_use = start_block["toolUse"]
+            tool_calls.append(_create_tool_call(tool_use.get("toolUseId", ""), tool_use.get("name", ""), ""))
+
+    assistant_message = _build_assistant_message(assistant_content, tool_calls or None)
+    return _build_prompt_blueprint(assistant_message, metadata)
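
To make the blueprint shape concrete, a minimal sketch of calling the Bedrock builder with a hand-written text-delta event; the dict mirrors exactly the keys the function reads above and is not copied from Bedrock documentation.

    # A "contentBlockDelta" text event, as consumed by
    # build_prompt_blueprint_from_bedrock_event.
    event = {"contentBlockDelta": {"delta": {"text": "partial answer"}}}
    blueprint = build_prompt_blueprint_from_bedrock_event(event, metadata=None)

    assert blueprint["prompt_template"]["messages"][0] == {
        "role": "assistant",
        "content": [{"type": "text", "text": "partial answer"}],
        "input_variables": [],
        "template_format": "f-string",
    }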