promptlayer 1.0.59__py3-none-any.whl → 1.0.61__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries; it is provided for informational purposes only.

Potentially problematic release.

promptlayer/__init__.py CHANGED
@@ -1,4 +1,4 @@
 from .promptlayer import AsyncPromptLayer, PromptLayer
 
-__version__ = "1.0.59"
+__version__ = "1.0.61"
 __all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]
@@ -9,6 +9,7 @@ import nest_asyncio
 from promptlayer.groups import AsyncGroupManager, GroupManager
 from promptlayer.promptlayer_base import PromptLayerBase
 from promptlayer.promptlayer_mixins import PromptLayerMixin
+from promptlayer.streaming import astream_response, stream_response
 from promptlayer.templates import AsyncTemplateManager, TemplateManager
 from promptlayer.track import AsyncTrackManager, TrackManager
 from promptlayer.types.prompt_template import PromptTemplate
@@ -16,10 +17,8 @@ from promptlayer.utils import (
     RERAISE_ORIGINAL_EXCEPTION,
     _get_workflow_workflow_id_or_name,
     arun_workflow_request,
-    astream_response,
     atrack_request,
     autil_log_request,
-    stream_response,
     track_request,
     util_log_request,
 )
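The two hunks above show that 1.0.61 moves `stream_response` and `astream_response` out of `promptlayer.utils` and into the new `promptlayer.streaming` package. A minimal sketch of the updated import for code that pulled these helpers from the old location (assuming, as the removals above suggest, that the utils path is gone in this release):

    # 1.0.59 and earlier
    # from promptlayer.utils import astream_response, stream_response

    # 1.0.61
    from promptlayer.streaming import astream_response, stream_response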
@@ -169,6 +168,7 @@ class PromptLayer(PromptLayerMixin):
                 pl_run_span_id=pl_run_span_id,
             ),
             map_results=llm_data["stream_function"],
+            metadata=llm_data["prompt_blueprint"]["metadata"],
         )
 
         request_log = self._track_request_log(
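The added `metadata=llm_data["prompt_blueprint"]["metadata"]` argument threads the prompt template's metadata into the stream processor, which the new `promptlayer.streaming` package (added below) uses to attach a prompt blueprint to streamed output. A hedged sketch of the caller-side effect, assuming `run(..., stream=True)` yields per-chunk dicts and that the chunk key mirrors the `prompt_blueprint` name used in `llm_data` above:

    from promptlayer import PromptLayer

    pl = PromptLayer(api_key="pl_...")

    for chunk in pl.run(prompt_name="my-prompt", stream=True):
        # key name assumed from llm_data["prompt_blueprint"] above
        blueprint = chunk.get("prompt_blueprint")
        if blueprint:
            print(blueprint["metadata"])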
@@ -320,6 +320,7 @@ class PromptLayer(PromptLayerMixin):
         price: float = 0.0,
         function_name: str = "",
         score: int = 0,
+        prompt_id: Union[int, None] = None,
     ):
         return util_log_request(
             self.api_key,
@@ -340,6 +341,7 @@ class PromptLayer(PromptLayerMixin):
             price=price,
             function_name=function_name,
             score=score,
+            prompt_id=prompt_id,
         )
 
 
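The two hunks above give the synchronous client's log-request path an optional `prompt_id`, so a manually logged request can be tied to a prompt in the registry. A minimal sketch, assuming the public method is `log_request` (it forwards to `util_log_request` here) and that the other arguments keep their shape from earlier releases:

    from promptlayer import PromptLayer

    pl = PromptLayer(api_key="pl_...")

    pl.log_request(
        provider="openai",
        model="gpt-4o",
        input={"type": "chat", "messages": []},   # placeholder prompt blueprint
        output={"type": "chat", "messages": []},  # placeholder response blueprint
        prompt_id=1234,  # new in 1.0.61: associates this log with a prompt
    )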
@@ -478,6 +480,7 @@ class AsyncPromptLayer(PromptLayerMixin):
         price: float = 0.0,
         function_name: str = "",
         score: int = 0,
+        prompt_id: Union[int, None] = None,
     ):
         return await autil_log_request(
             self.api_key,
@@ -498,6 +501,7 @@ class AsyncPromptLayer(PromptLayerMixin):
             price=price,
             function_name=function_name,
             score=score,
+            prompt_id=prompt_id,
         )
 
     async def _create_track_request_callable(
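The async client receives the same parameter; the equivalent hypothetical call just awaits it:

    import asyncio

    from promptlayer import AsyncPromptLayer

    async def main():
        apl = AsyncPromptLayer(api_key="pl_...")
        await apl.log_request(
            provider="openai",
            model="gpt-4o",
            input={"type": "chat", "messages": []},
            output={"type": "chat", "messages": []},
            prompt_id=1234,
        )

    asyncio.run(main())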
@@ -600,6 +604,7 @@ class AsyncPromptLayer(PromptLayerMixin):
             response,
             track_request_callable,
             llm_data["stream_function"],
+            llm_data["prompt_blueprint"]["metadata"],
         )
 
         request_log = await self._track_request_log(
@@ -10,32 +10,34 @@ from opentelemetry.sdk.trace.export import BatchSpanProcessor
 from opentelemetry.semconv.resource import ResourceAttributes
 
 from promptlayer.span_exporter import PromptLayerSpanExporter
-from promptlayer.utils import (
-    aanthropic_request,
+from promptlayer.streaming import (
     aanthropic_stream_completion,
     aanthropic_stream_message,
-    aazure_openai_request,
-    agoogle_request,
     agoogle_stream_chat,
     agoogle_stream_completion,
-    amistral_request,
     amistral_stream_chat,
-    anthropic_request,
     anthropic_stream_completion,
     anthropic_stream_message,
-    aopenai_request,
     aopenai_stream_chat,
     aopenai_stream_completion,
-    avertexai_request,
-    azure_openai_request,
-    google_request,
     google_stream_chat,
     google_stream_completion,
-    mistral_request,
     mistral_stream_chat,
-    openai_request,
     openai_stream_chat,
     openai_stream_completion,
+)
+from promptlayer.utils import (
+    aanthropic_request,
+    aazure_openai_request,
+    agoogle_request,
+    amistral_request,
+    anthropic_request,
+    aopenai_request,
+    avertexai_request,
+    azure_openai_request,
+    google_request,
+    mistral_request,
+    openai_request,
     vertexai_request,
 )
 
promptlayer/streaming/__init__.py ADDED
@@ -0,0 +1,54 @@
+"""
+Streaming prompt blueprint support for PromptLayer
+
+This module provides comprehensive streaming support for building prompt blueprints
+from various LLM providers during streaming responses.
+"""
+
+from .blueprint_builder import (
+    build_prompt_blueprint_from_anthropic_event,
+    build_prompt_blueprint_from_google_event,
+    build_prompt_blueprint_from_openai_chunk,
+)
+from .response_handlers import (
+    aanthropic_stream_completion,
+    aanthropic_stream_message,
+    agoogle_stream_chat,
+    agoogle_stream_completion,
+    amistral_stream_chat,
+    anthropic_stream_completion,
+    anthropic_stream_message,
+    aopenai_stream_chat,
+    aopenai_stream_completion,
+    google_stream_chat,
+    google_stream_completion,
+    mistral_stream_chat,
+    openai_stream_chat,
+    openai_stream_completion,
+)
+from .stream_processor import (
+    astream_response,
+    stream_response,
+)
+
+__all__ = [
+    "build_prompt_blueprint_from_google_event",
+    "build_prompt_blueprint_from_openai_chunk",
+    "build_prompt_blueprint_from_anthropic_event",
+    "stream_response",
+    "astream_response",
+    "openai_stream_chat",
+    "aopenai_stream_chat",
+    "anthropic_stream_message",
+    "aanthropic_stream_message",
+    "openai_stream_completion",
+    "aopenai_stream_completion",
+    "anthropic_stream_completion",
+    "aanthropic_stream_completion",
+    "google_stream_chat",
+    "google_stream_completion",
+    "agoogle_stream_chat",
+    "agoogle_stream_completion",
+    "mistral_stream_chat",
+    "amistral_stream_chat",
+]
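The new package exposes one sync/async handler pair per provider plus the two stream processors, so everything streaming-related now has a single import root. For example, taking names directly from the `__all__` above:

    from promptlayer.streaming import (
        astream_response,                          # async stream processor
        stream_response,                           # sync stream processor
        openai_stream_chat,                        # one provider handler pair...
        aopenai_stream_chat,                       # ...sync and async
        build_prompt_blueprint_from_openai_chunk,  # blueprint builder
    )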
promptlayer/streaming/blueprint_builder.py ADDED
@@ -0,0 +1,139 @@
+"""
+Blueprint builders for various LLM providers
+
+This module contains functions to build prompt blueprints from LLM responses
+and streaming events for different providers (OpenAI, Anthropic, etc.)
+"""
+
+from typing import Any, Dict, List, Optional
+
+
+def _create_tool_call(tool_id: str, function_name: str, arguments: Any) -> Dict[str, Any]:
+    """Create a standardized tool call structure"""
+    return {"id": tool_id, "type": "function", "function": {"name": function_name, "arguments": arguments}}
+
+
+def _create_content_item(content_type: str, **kwargs) -> Dict[str, Any]:
+    """Create a standardized content item"""
+    content_item = {"type": content_type}
+    content_item.update(kwargs)
+    return content_item
+
+
+def _build_assistant_message(
+    content: List[Dict], tool_calls: Optional[List[Dict]] = None, template_format: str = "f-string"
+) -> Dict[str, Any]:
+    """Build a standardized assistant message structure"""
+    message = {"role": "assistant", "content": content, "input_variables": [], "template_format": template_format}
+
+    if tool_calls:
+        message["tool_calls"] = tool_calls
+
+    return message
+
+
+def _build_prompt_blueprint(assistant_message: Dict[str, Any], metadata: Any) -> Dict[str, Any]:
+    """Build a standardized prompt blueprint structure"""
+    prompt_template = {"type": "chat", "messages": [assistant_message], "input_variables": []}
+
+    blueprint = {"prompt_template": prompt_template, "metadata": metadata}
+
+    return blueprint
+
+
+def build_prompt_blueprint_from_openai_chunk(chunk, metadata):
+    """Build a prompt blueprint from an OpenAI chat completion chunk"""
+
+    assistant_content = []
+    tool_calls = []
+
+    if hasattr(chunk, "choices") and len(chunk.choices) > 0:
+        delta = chunk.choices[0].delta
+
+        if hasattr(delta, "content") and delta.content:
+            assistant_content.append(_create_content_item("text", text=delta.content))
+
+        if hasattr(delta, "tool_calls") and delta.tool_calls:
+            for tool_call in delta.tool_calls:
+                tool_calls.append(
+                    _create_tool_call(
+                        getattr(tool_call, "id", ""),
+                        getattr(tool_call.function, "name", "") if tool_call.function else "",
+                        getattr(tool_call.function, "arguments", "") if tool_call.function else "",
+                    )
+                )
+
+    assistant_message = _build_assistant_message(assistant_content, tool_calls or None)
+    return _build_prompt_blueprint(assistant_message, metadata)
+
+
+def build_prompt_blueprint_from_anthropic_event(event, metadata):
+    """Build a prompt blueprint from an Anthropic stream event"""
+
+    assistant_content = []
+    tool_calls = []
+
+    if hasattr(event, "type"):
+        if event.type == "content_block_start" and hasattr(event, "content_block"):
+            if event.content_block.type == "thinking":
+                assistant_content.append(_create_content_item("thinking", thinking="", signature=None))
+            elif event.content_block.type == "text":
+                assistant_content.append(_create_content_item("text", text=""))
+            elif event.content_block.type == "tool_use":
+                tool_calls.append(
+                    _create_tool_call(
+                        getattr(event.content_block, "id", ""),
+                        getattr(event.content_block, "name", ""),
+                        getattr(event.content_block, "input", ""),
+                    )
+                )
+        elif event.type == "content_block_delta" and hasattr(event, "delta"):
+            if hasattr(event.delta, "text"):
+                assistant_content.append(_create_content_item("text", text=event.delta.text))
+            elif hasattr(event.delta, "thinking"):
+                assistant_content.append(
+                    _create_content_item(
+                        "thinking", thinking=event.delta.thinking, signature=getattr(event.delta, "signature", None)
+                    )
+                )
+            elif hasattr(event.delta, "partial_json"):
+                tool_calls.append(
+                    _create_tool_call(
+                        getattr(event.delta, "id", ""),
+                        getattr(event.delta, "name", ""),
+                        getattr(event.delta, "input", event.delta.partial_json),
+                    )
+                )
+
+    assistant_message = _build_assistant_message(assistant_content, tool_calls or None)
+    return _build_prompt_blueprint(assistant_message, metadata)
+
+
+def build_prompt_blueprint_from_google_event(event, metadata):
+    """
+    Build a prompt blueprint from a Google (Gemini) streaming event (raw dict or GenerateContentResponse).
+    """
+    assistant_content = []
+    tool_calls = []
+    candidate = event.candidates[0]
+
+    if candidate and hasattr(candidate, "content") and candidate.content and hasattr(candidate.content, "parts"):
+        for part in candidate.content.parts:
+            # "thought" is a boolean attribute on Part for Gemini
+            if hasattr(part, "thought") and part.thought is True:
+                assistant_content.append(
+                    _create_content_item("thinking", thinking=getattr(part, "text", ""), signature=None)
+                )
+            elif hasattr(part, "text") and part.text:
+                assistant_content.append(_create_content_item("text", text=part.text))
+            elif hasattr(part, "function_call"):
+                tool_calls.append(
+                    _create_tool_call(
+                        getattr(part.function_call, "id", ""),
+                        getattr(part.function_call, "name", ""),
+                        getattr(part.function_call, "args", {}),
+                    )
+                )
+
+    assistant_message = _build_assistant_message(assistant_content, tool_calls or None, template_format="f-string")
+    return _build_prompt_blueprint(assistant_message, metadata)
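Because the builders probe events defensively with `hasattr`/`getattr`, they can be exercised without a live provider client. A self-contained demo using `types.SimpleNamespace` to fake an OpenAI chunk (the chunk shape and metadata value are fabricated for illustration):

    from types import SimpleNamespace

    from promptlayer.streaming import build_prompt_blueprint_from_openai_chunk

    chunk = SimpleNamespace(
        choices=[SimpleNamespace(delta=SimpleNamespace(content="Hello", tool_calls=None))]
    )

    blueprint = build_prompt_blueprint_from_openai_chunk(chunk, metadata={"note": "fabricated"})
    print(blueprint["prompt_template"]["messages"][0])
    # {'role': 'assistant', 'content': [{'type': 'text', 'text': 'Hello'}],
    #  'input_variables': [], 'template_format': 'f-string'}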