promptlayer 1.0.61__tar.gz → 1.0.63__tar.gz
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release.
- {promptlayer-1.0.61 → promptlayer-1.0.63}/PKG-INFO +1 -1
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/__init__.py +1 -1
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/promptlayer_mixins.py +24 -0
- promptlayer-1.0.63/promptlayer/streaming/stream_processor.py +77 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/utils.py +48 -2
- {promptlayer-1.0.61 → promptlayer-1.0.63}/pyproject.toml +1 -1
- promptlayer-1.0.61/promptlayer/streaming/stream_processor.py +0 -100
- {promptlayer-1.0.61 → promptlayer-1.0.63}/LICENSE +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/README.md +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/groups/__init__.py +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/groups/groups.py +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/promptlayer.py +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/promptlayer_base.py +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/span_exporter.py +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/streaming/__init__.py +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/streaming/blueprint_builder.py +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/streaming/response_handlers.py +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/templates.py +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/track/__init__.py +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/track/track.py +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/types/__init__.py +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/types/prompt_template.py +0 -0
- {promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/types/request_log.py +0 -0
{promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/promptlayer_mixins.py

@@ -27,10 +27,12 @@ from promptlayer.streaming import (
     openai_stream_completion,
 )
 from promptlayer.utils import (
+    aanthropic_bedrock_request,
     aanthropic_request,
     aazure_openai_request,
     agoogle_request,
     amistral_request,
+    anthropic_bedrock_request,
     anthropic_request,
     aopenai_request,
     avertexai_request,
@@ -92,6 +94,16 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": google_stream_completion,
         },
     },
+    "anthropic.bedrock": {
+        "chat": {
+            "function_name": "anthropic.messages.create",
+            "stream_function": anthropic_stream_message,
+        },
+        "completion": {
+            "function_name": "anthropic.completions.create",
+            "stream_function": anthropic_stream_completion,
+        },
+    },
 }
 
 
@@ -102,6 +114,7 @@ MAP_PROVIDER_TO_FUNCTION = {
     "openai": openai_request,
     "openai.azure": azure_openai_request,
     "vertexai": vertexai_request,
+    "anthropic.bedrock": anthropic_bedrock_request,
 }
 
 AMAP_PROVIDER_TO_FUNCTION_NAME = {
@@ -155,6 +168,16 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": agoogle_stream_completion,
         },
     },
+    "anthropic.bedrock": {
+        "chat": {
+            "function_name": "anthropic.messages.create",
+            "stream_function": aanthropic_stream_message,
+        },
+        "completion": {
+            "function_name": "anthropic.completions.create",
+            "stream_function": aanthropic_stream_completion,
+        },
+    },
 }
 
 
@@ -165,6 +188,7 @@ AMAP_PROVIDER_TO_FUNCTION = {
     "openai": aopenai_request,
     "openai.azure": aazure_openai_request,
     "vertexai": avertexai_request,
+    "anthropic.bedrock": aanthropic_bedrock_request,
 }
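Taken together, these additions register an "anthropic.bedrock" provider key in both the sync and async lookup tables, alongside the existing OpenAI, Azure OpenAI, and Vertex AI entries. The sketch below illustrates the table-driven dispatch pattern these maps enable; it is a self-contained toy, not the SDK's code — the stub request functions, the `PROVIDER_TO_FUNCTION` table, and the `run_provider_request` helper are invented for illustration, while the provider key and the `(prompt_blueprint, client_kwargs, function_kwargs)` signature are taken from the hunks in this diff.

```python
from typing import Any, Callable, Dict

# Toy stand-ins for the provider request functions; the real ones live in
# promptlayer/utils.py and call the provider SDKs.
def openai_request(prompt_blueprint: Dict, client_kwargs: Dict, function_kwargs: Dict) -> Any:
    return {"provider": "openai", "kwargs": function_kwargs}

def anthropic_bedrock_request(prompt_blueprint: Dict, client_kwargs: Dict, function_kwargs: Dict) -> Any:
    return {"provider": "anthropic.bedrock", "kwargs": function_kwargs}

# Mirrors the shape of MAP_PROVIDER_TO_FUNCTION after this change.
PROVIDER_TO_FUNCTION: Dict[str, Callable[..., Any]] = {
    "openai": openai_request,
    "anthropic.bedrock": anthropic_bedrock_request,
}

def run_provider_request(prompt_blueprint: Dict, client_kwargs: Dict, function_kwargs: Dict) -> Any:
    # The provider string selects the request function; an unknown provider
    # surfaces as a KeyError here. The metadata path is assumed from the
    # error messages elsewhere in this diff.
    provider = prompt_blueprint["metadata"]["model"]["provider"]
    return PROVIDER_TO_FUNCTION[provider](prompt_blueprint, client_kwargs, function_kwargs)

if __name__ == "__main__":
    blueprint = {"metadata": {"model": {"provider": "anthropic.bedrock", "name": "claude-3-haiku"}}}
    print(run_provider_request(blueprint, {}, {"max_tokens": 128}))
```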
promptlayer-1.0.63/promptlayer/streaming/stream_processor.py (new file)

@@ -0,0 +1,77 @@
+from typing import Any, AsyncGenerator, AsyncIterable, Callable, Dict, Generator
+
+from .blueprint_builder import (
+    build_prompt_blueprint_from_anthropic_event,
+    build_prompt_blueprint_from_google_event,
+    build_prompt_blueprint_from_openai_chunk,
+)
+
+
+def _build_stream_blueprint(result: Any, metadata: Dict) -> Any:
+    model_info = metadata.get("model", {}) if metadata else {}
+    provider = model_info.get("provider", "")
+    model_name = model_info.get("name", "")
+
+    if provider == "openai" or provider == "openai.azure":
+        return build_prompt_blueprint_from_openai_chunk(result, metadata)
+
+    elif provider == "google" or (provider == "vertexai" and model_name.startswith("gemini")):
+        return build_prompt_blueprint_from_google_event(result, metadata)
+
+    elif provider in ["anthropic", "anthropic.bedrock"] or (provider == "vertexai" and model_name.startswith("claude")):
+        return build_prompt_blueprint_from_anthropic_event(result, metadata)
+
+    elif provider == "mistral":
+        return build_prompt_blueprint_from_openai_chunk(result.data, metadata)
+
+    return None
+
+
+def _build_stream_data(result: Any, stream_blueprint: Any, request_id: Any = None) -> Dict[str, Any]:
+    return {
+        "request_id": request_id,
+        "raw_response": result,
+        "prompt_blueprint": stream_blueprint,
+    }
+
+
+def stream_response(*, generator: Generator, after_stream: Callable, map_results: Callable, metadata: Dict):
+    results = []
+    for result in generator:
+        results.append(result)
+
+        stream_blueprint = _build_stream_blueprint(result, metadata)
+        data = _build_stream_data(result, stream_blueprint)
+        yield data
+
+    request_response = map_results(results)
+    response = after_stream(request_response=request_response.model_dump(mode="json"))
+    data["request_id"] = response.get("request_id")
+    data["prompt_blueprint"] = response.get("prompt_blueprint")
+    yield data
+
+
+async def astream_response(
+    generator: AsyncIterable[Any],
+    after_stream: Callable[..., Any],
+    map_results: Callable[[Any], Any],
+    metadata: Dict[str, Any] = None,
+) -> AsyncGenerator[Dict[str, Any], None]:
+    results = []
+
+    async for result in generator:
+        results.append(result)
+
+        stream_blueprint = _build_stream_blueprint(result, metadata)
+        data = _build_stream_data(result, stream_blueprint)
+        yield data
+
+    async def async_generator_from_list(lst):
+        for item in lst:
+            yield item
+
+    request_response = await map_results(async_generator_from_list(results))
+    after_stream_response = await after_stream(request_response=request_response.model_dump(mode="json"))
+    data["request_id"] = after_stream_response.get("request_id")
+    data["prompt_blueprint"] = after_stream_response.get("prompt_blueprint")
+    yield data
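The rewritten module drops the old per-chunk duck typing (the removed 1.0.61 file appears further down) in favor of an explicit provider lookup from `metadata["model"]`, and factors the yielded payload into `_build_stream_data`. The consumer-side contract stays the same: one dict per streamed chunk, then a final dict whose `request_id` and `prompt_blueprint` come from `after_stream`. Below is a rough usage sketch, assuming promptlayer 1.0.63 is installed; the chunk list, `map_results`, and `after_stream` callables are stand-ins invented for illustration, and the unrecognized provider name keeps the per-chunk blueprint at `None`.

```python
from promptlayer.streaming.stream_processor import stream_response

class FakeRequestResponse:
    """Stand-in for the object map_results would normally build."""
    def model_dump(self, mode="json"):
        return {"text": "hello"}

def map_results(results):
    # Would normally fold all streamed chunks into one request/response payload.
    return FakeRequestResponse()

def after_stream(*, request_response):
    # Would normally log the request to PromptLayer and return its response.
    return {"request_id": 123, "prompt_blueprint": {"prompt_template": {"type": "chat"}}}

# "example" is not a mapped provider, so _build_stream_blueprint returns None
# for each chunk; only the final dict carries a blueprint.
metadata = {"model": {"provider": "example", "name": "demo-model"}}

for data in stream_response(
    generator=iter(["chunk-1", "chunk-2"]),
    after_stream=after_stream,
    map_results=map_results,
    metadata=metadata,
):
    print(data["request_id"], data["raw_response"], data["prompt_blueprint"] is not None)
```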
{promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/utils.py

@@ -59,6 +59,10 @@ def _make_httpx_client():
     return httpx.AsyncClient(timeout=_get_http_timeout())
 
 
+def _make_simple_httpx_client():
+    return httpx.Client(timeout=_get_http_timeout())
+
+
 def _get_workflow_workflow_id_or_name(workflow_id_or_name, workflow_name):
     # This is backward compatibility code
     if (workflow_id_or_name := workflow_name if workflow_id_or_name is None else workflow_id_or_name) is None:
@@ -1396,7 +1400,7 @@ async def autil_log_request(api_key: str, **kwargs) -> Union[RequestLog, None]:
 def mistral_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
     from mistralai import Mistral
 
-    client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY"))
+    client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY"), client=_make_simple_httpx_client())
     if "stream" in function_kwargs and function_kwargs["stream"]:
         function_kwargs.pop("stream")
         return client.chat.stream(**function_kwargs)
@@ -1412,7 +1416,7 @@ async def amistral_request(
 ):
     from mistralai import Mistral
 
-    client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY"))
+    client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY"), async_client=_make_httpx_client())
     if "stream" in function_kwargs and function_kwargs["stream"]:
         return await client.chat.stream_async(**function_kwargs)
     return await client.chat.complete_async(**function_kwargs)
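Both Mistral code paths now hand the SDK a pre-built httpx client, so the timeout configured via `_get_http_timeout()` applies to Mistral calls as well (synchronously through the new `_make_simple_httpx_client()`, asynchronously through the existing `_make_httpx_client()`). The standalone sketch below shows the same pattern of centralizing the timeout in one helper; the environment variable name and default are illustrative assumptions, not the SDK's actual configuration source.

```python
import os
import httpx

# Illustrative stand-in for _get_http_timeout(); the real helper may read a
# different setting (variable name and default here are assumptions).
def get_http_timeout() -> float:
    return float(os.environ.get("HTTP_TIMEOUT_SECONDS", "120"))

def make_simple_httpx_client() -> httpx.Client:
    # Sync client with the shared timeout, mirroring _make_simple_httpx_client().
    return httpx.Client(timeout=get_http_timeout())

def make_async_httpx_client() -> httpx.AsyncClient:
    # Async client with the shared timeout, mirroring _make_httpx_client().
    return httpx.AsyncClient(timeout=get_http_timeout())

# The Mistral SDK accepts these pre-built clients, which is what the change
# above relies on:
#   Mistral(api_key=..., client=make_simple_httpx_client())        # sync
#   Mistral(api_key=..., async_client=make_async_httpx_client())   # async
```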
{promptlayer-1.0.61 → promptlayer-1.0.63}/promptlayer/utils.py (continued)

@@ -1538,3 +1542,45 @@ async def avertexai_request(prompt_blueprint: GetPromptTemplateResponse, client_
     raise NotImplementedError(
         f"Vertex AI request for model {prompt_blueprint['metadata']['model']['name']} is not implemented yet."
     )
+
+
+def anthropic_bedrock_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
+    from anthropic import AnthropicBedrock
+
+    client = AnthropicBedrock(
+        aws_access_key=function_kwargs.pop("aws_access_key", None),
+        aws_secret_key=function_kwargs.pop("aws_secret_key", None),
+        aws_region=function_kwargs.pop("aws_region", None),
+        aws_session_token=function_kwargs.pop("aws_session_token", None),
+        base_url=function_kwargs.pop("base_url", None),
+        **client_kwargs,
+    )
+    if prompt_blueprint["prompt_template"]["type"] == "chat":
+        return anthropic_chat_request(client=client, **function_kwargs)
+    elif prompt_blueprint["prompt_template"]["type"] == "completion":
+        return anthropic_completions_request(client=client, **function_kwargs)
+    raise NotImplementedError(
+        f"Unsupported prompt template type '{prompt_blueprint['prompt_template']['type']}' for Anthropic Bedrock"
+    )
+
+
+async def aanthropic_bedrock_request(
+    prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict
+):
+    from anthropic import AsyncAnthropicBedrock
+
+    client = AsyncAnthropicBedrock(
+        aws_access_key=function_kwargs.pop("aws_access_key", None),
+        aws_secret_key=function_kwargs.pop("aws_secret_key", None),
+        aws_region=function_kwargs.pop("aws_region", None),
+        aws_session_token=function_kwargs.pop("aws_session_token", None),
+        base_url=function_kwargs.pop("base_url", None),
+        **client_kwargs,
+    )
+    if prompt_blueprint["prompt_template"]["type"] == "chat":
+        return await aanthropic_chat_request(client=client, **function_kwargs)
+    elif prompt_blueprint["prompt_template"]["type"] == "completion":
+        return await aanthropic_completions_request(client=client, **function_kwargs)
+    raise NotImplementedError(
+        f"Unsupported prompt template type '{prompt_blueprint['prompt_template']['type']}' for Anthropic Bedrock"
+    )
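The new helpers construct an `AnthropicBedrock` (or `AsyncAnthropicBedrock`) client from AWS credentials popped off `function_kwargs`, then defer to the existing `anthropic_chat_request` / `anthropic_completions_request` helpers based on the template type. A hedged sketch of a direct call follows; the blueprint is trimmed to the one key this function reads, the model id and credential sources are placeholders, and it assumes the remaining kwargs are ultimately forwarded to `anthropic.messages.create`, as the `function_name` mapping above suggests.

```python
import os
from promptlayer.utils import anthropic_bedrock_request

# Minimal blueprint containing only the key this helper inspects; a real
# GetPromptTemplateResponse carries much more (metadata, messages, etc.).
prompt_blueprint = {"prompt_template": {"type": "chat"}}

response = anthropic_bedrock_request(
    prompt_blueprint,
    client_kwargs={},
    function_kwargs={
        # AWS credentials are popped off before the remaining kwargs reach the
        # Anthropic client; values and model id here are placeholders.
        "aws_access_key": os.environ.get("AWS_ACCESS_KEY_ID"),
        "aws_secret_key": os.environ.get("AWS_SECRET_ACCESS_KEY"),
        "aws_region": os.environ.get("AWS_REGION", "us-east-1"),
        # Assumed to be forwarded to anthropic.messages.create:
        "model": "anthropic.claude-3-haiku-20240307-v1:0",
        "max_tokens": 256,
        "messages": [{"role": "user", "content": "Hello from Bedrock"}],
    },
)
print(response)
```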
promptlayer-1.0.61/promptlayer/streaming/stream_processor.py (removed)

@@ -1,100 +0,0 @@
-"""
-Stream processors for handling streaming responses
-
-This module contains the main streaming logic that processes streaming responses
-from various LLM providers and builds progressive prompt blueprints.
-"""
-
-from typing import Any, AsyncGenerator, AsyncIterable, Callable, Dict, Generator
-
-from .blueprint_builder import (
-    build_prompt_blueprint_from_anthropic_event,
-    build_prompt_blueprint_from_google_event,
-    build_prompt_blueprint_from_openai_chunk,
-)
-
-
-def stream_response(*, generator: Generator, after_stream: Callable, map_results: Callable, metadata: Dict):
-    """
-    Process streaming responses and build progressive prompt blueprints
-
-    Supports OpenAI, Anthropic, and Google (Gemini) streaming formats, building blueprints
-    progressively as the stream progresses.
-    """
-    results = []
-    stream_blueprint = None
-    for result in generator:
-        results.append(result)
-
-        # Handle OpenAI streaming format - process each chunk individually
-        if hasattr(result, "choices"):
-            stream_blueprint = build_prompt_blueprint_from_openai_chunk(result, metadata)
-
-        # Handle Google streaming format (Gemini) - GenerateContentResponse objects
-        elif hasattr(result, "candidates"):
-            stream_blueprint = build_prompt_blueprint_from_google_event(result, metadata)
-
-        # Handle Anthropic streaming format - process each event individually
-        elif hasattr(result, "type"):
-            stream_blueprint = build_prompt_blueprint_from_anthropic_event(result, metadata)
-
-        data = {
-            "request_id": None,
-            "raw_response": result,
-            "prompt_blueprint": stream_blueprint,
-        }
-        yield data
-
-    request_response = map_results(results)
-    response = after_stream(request_response=request_response.model_dump(mode="json"))
-    data["request_id"] = response.get("request_id")
-    data["prompt_blueprint"] = response.get("prompt_blueprint")
-    yield data
-
-
-async def astream_response(
-    generator: AsyncIterable[Any],
-    after_stream: Callable[..., Any],
-    map_results: Callable[[Any], Any],
-    metadata: Dict[str, Any] = None,
-) -> AsyncGenerator[Dict[str, Any], None]:
-    """
-    Async version of stream_response
-
-    Process streaming responses asynchronously and build progressive prompt blueprints
-    Supports OpenAI, Anthropic, and Google (Gemini) streaming formats.
-    """
-    results = []
-    stream_blueprint = None
-
-    async for result in generator:
-        results.append(result)
-
-        # Handle OpenAI streaming format - process each chunk individually
-        if hasattr(result, "choices"):
-            stream_blueprint = build_prompt_blueprint_from_openai_chunk(result, metadata)
-
-        # Handle Google streaming format (Gemini) - GenerateContentResponse objects
-        elif hasattr(result, "candidates"):
-            stream_blueprint = build_prompt_blueprint_from_google_event(result, metadata)
-
-        # Handle Anthropic streaming format - process each event individually
-        elif hasattr(result, "type"):
-            stream_blueprint = build_prompt_blueprint_from_anthropic_event(result, metadata)
-
-        data = {
-            "request_id": None,
-            "raw_response": result,
-            "prompt_blueprint": stream_blueprint,
-        }
-        yield data
-
-    async def async_generator_from_list(lst):
-        for item in lst:
-            yield item
-
-    request_response = await map_results(async_generator_from_list(results))
-    after_stream_response = await after_stream(request_response=request_response.model_dump(mode="json"))
-    data["request_id"] = after_stream_response.get("request_id")
-    data["prompt_blueprint"] = after_stream_response.get("prompt_blueprint")
-    yield data