openai-sdk-helpers 0.0.5__py3-none-any.whl → 0.0.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openai_sdk_helpers/__init__.py +62 -0
- openai_sdk_helpers/agent/__init__.py +31 -0
- openai_sdk_helpers/agent/base.py +330 -0
- openai_sdk_helpers/agent/config.py +66 -0
- openai_sdk_helpers/agent/project_manager.py +511 -0
- openai_sdk_helpers/agent/prompt_utils.py +9 -0
- openai_sdk_helpers/agent/runner.py +215 -0
- openai_sdk_helpers/agent/summarizer.py +85 -0
- openai_sdk_helpers/agent/translator.py +139 -0
- openai_sdk_helpers/agent/utils.py +47 -0
- openai_sdk_helpers/agent/validation.py +97 -0
- openai_sdk_helpers/agent/vector_search.py +462 -0
- openai_sdk_helpers/agent/web_search.py +404 -0
- openai_sdk_helpers/config.py +153 -0
- openai_sdk_helpers/enums/__init__.py +7 -0
- openai_sdk_helpers/enums/base.py +29 -0
- openai_sdk_helpers/environment.py +27 -0
- openai_sdk_helpers/prompt/__init__.py +77 -0
- openai_sdk_helpers/py.typed +0 -0
- openai_sdk_helpers/response/__init__.py +18 -0
- openai_sdk_helpers/response/base.py +501 -0
- openai_sdk_helpers/response/messages.py +211 -0
- openai_sdk_helpers/response/runner.py +104 -0
- openai_sdk_helpers/response/tool_call.py +70 -0
- openai_sdk_helpers/structure/__init__.py +43 -0
- openai_sdk_helpers/structure/agent_blueprint.py +224 -0
- openai_sdk_helpers/structure/base.py +713 -0
- openai_sdk_helpers/structure/plan/__init__.py +13 -0
- openai_sdk_helpers/structure/plan/enum.py +64 -0
- openai_sdk_helpers/structure/plan/plan.py +253 -0
- openai_sdk_helpers/structure/plan/task.py +122 -0
- openai_sdk_helpers/structure/prompt.py +24 -0
- openai_sdk_helpers/structure/responses.py +132 -0
- openai_sdk_helpers/structure/summary.py +65 -0
- openai_sdk_helpers/structure/validation.py +47 -0
- openai_sdk_helpers/structure/vector_search.py +86 -0
- openai_sdk_helpers/structure/web_search.py +46 -0
- openai_sdk_helpers/utils/__init__.py +13 -0
- openai_sdk_helpers/utils/core.py +208 -0
- openai_sdk_helpers/vector_storage/__init__.py +15 -0
- openai_sdk_helpers/vector_storage/cleanup.py +91 -0
- openai_sdk_helpers/vector_storage/storage.py +501 -0
- openai_sdk_helpers/vector_storage/types.py +58 -0
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.6.dist-info}/METADATA +1 -1
- openai_sdk_helpers-0.0.6.dist-info/RECORD +50 -0
- openai_sdk_helpers-0.0.5.dist-info/RECORD +0 -7
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.6.dist-info}/WHEEL +0 -0
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.6.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,211 @@
|
|
|
1
|
+
"""Message containers for shared OpenAI responses."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from datetime import datetime, timezone
|
|
7
|
+
from typing import Dict, List, Union, cast
|
|
8
|
+
|
|
9
|
+
from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
|
|
10
|
+
from openai.types.responses.response_function_tool_call_param import (
|
|
11
|
+
ResponseFunctionToolCallParam,
|
|
12
|
+
)
|
|
13
|
+
from openai.types.responses.response_input_message_content_list_param import (
|
|
14
|
+
ResponseInputMessageContentListParam,
|
|
15
|
+
)
|
|
16
|
+
from openai.types.responses.response_input_param import (
|
|
17
|
+
FunctionCallOutput,
|
|
18
|
+
ResponseInputItemParam,
|
|
19
|
+
)
|
|
20
|
+
from openai.types.responses.response_output_message import ResponseOutputMessage
|
|
21
|
+
|
|
22
|
+
from ..utils import JSONSerializable
|
|
23
|
+
from .tool_call import ResponseToolCall
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@dataclass
class ResponseMessage(JSONSerializable):
    """One entry in a conversation exchanged with the OpenAI client.

    Attributes
    ----------
    role : str
        Conversation role such as ``"user"``, ``"assistant"``, or ``"tool"``.
    content : ResponseInputItemParam | ResponseOutputMessage | ResponseFunctionToolCallParam | FunctionCallOutput | ResponseInputMessageContentListParam
        Payload already shaped for the OpenAI client.
    timestamp : datetime
        Timezone-aware UTC time at which the message was created.
    metadata : dict[str, str | float | bool]
        Arbitrary annotations attached to the message.

    Methods
    -------
    to_openai_format()
        Return the payload in the format expected by the OpenAI client.
    """

    # Conversation role: "user", "assistant", "tool", etc.
    role: str
    content: (
        ResponseInputItemParam
        | ResponseOutputMessage
        | ResponseFunctionToolCallParam
        | FunctionCallOutput
        | ResponseInputMessageContentListParam
    )
    # Recorded in UTC so ordering is unambiguous across timezones.
    timestamp: datetime = field(default_factory=lambda: datetime.now(tz=timezone.utc))
    metadata: Dict[str, Union[str, float, bool]] = field(default_factory=dict)

    def to_openai_format(
        self,
    ) -> (
        ResponseInputItemParam
        | ResponseOutputMessage
        | ResponseFunctionToolCallParam
        | FunctionCallOutput
        | ResponseInputMessageContentListParam
    ):
        """Return the message payload in the OpenAI client format.

        Returns
        -------
        ResponseInputItemParam | ResponseOutputMessage | ResponseFunctionToolCallParam | FunctionCallOutput | ResponseInputMessageContentListParam
            Stored message content, passed through unchanged.
        """
        return self.content
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
@dataclass
class ResponseMessages(JSONSerializable):
    """Represent a collection of messages in a response.

    This dataclass encapsulates user inputs and assistant outputs during an
    OpenAI API interaction.

    Methods
    -------
    add_system_message(content, **metadata)
        Append a system message to the conversation.
    add_user_message(input_content, **metadata)
        Append a user message to the conversation.
    add_assistant_message(content, metadata=None)
        Append an assistant message to the conversation.
    add_tool_message(content, output, **metadata)
        Record a tool call and its output.
    to_openai_payload()
        Convert stored messages to the OpenAI input payload.
    """

    messages: List[ResponseMessage] = field(default_factory=list)

    def add_system_message(
        self, content: ResponseInputMessageContentListParam, **metadata
    ) -> None:
        """Append a system message to the conversation.

        Parameters
        ----------
        content : ResponseInputMessageContentListParam
            System message content in OpenAI format.
        **metadata
            Optional metadata to store with the message.

        Returns
        -------
        None
        """
        # The role/content dict matches the ResponseInputItemParam TypedDict;
        # cast keeps the static type narrow without a runtime conversion.
        response_input = cast(
            ResponseInputItemParam, {"role": "system", "content": content}
        )
        self.messages.append(
            ResponseMessage(role="system", content=response_input, metadata=metadata)
        )

    def add_user_message(
        self, input_content: ResponseInputItemParam, **metadata
    ) -> None:
        """Append a user message to the conversation.

        Parameters
        ----------
        input_content : ResponseInputItemParam
            Message payload supplied by the user.
        **metadata
            Optional metadata to store with the message.

        Returns
        -------
        None
        """
        self.messages.append(
            ResponseMessage(role="user", content=input_content, metadata=metadata)
        )

    def add_assistant_message(
        self,
        content: ResponseOutputMessage,
        metadata: Dict[str, Union[str, float, bool]] | None = None,
    ) -> None:
        """Append an assistant message to the conversation.

        Parameters
        ----------
        content : ResponseOutputMessage
            Assistant response message.
        metadata : dict[str, str | float | bool], optional
            Optional metadata to store with the message. Default ``None``,
            which stores an empty mapping.

        Returns
        -------
        None
        """
        # Fix: metadata was a required positional argument even though the
        # docstring (and every sibling add_* method) treats it as optional.
        # Defaulting to None is backward compatible with existing callers.
        self.messages.append(
            ResponseMessage(
                role="assistant",
                content=content,
                metadata={} if metadata is None else metadata,
            )
        )

    def add_tool_message(
        self, content: ResponseFunctionToolCall, output: str, **metadata
    ) -> None:
        """Record a tool call and its output in the conversation history.

        Parameters
        ----------
        content : ResponseFunctionToolCall
            Tool call received from OpenAI.
        output : str
            JSON string returned by the executed tool.
        **metadata
            Optional metadata to store with the message.

        Returns
        -------
        None
        """
        tool_call = ResponseToolCall(
            call_id=content.call_id,
            name=content.name,
            arguments=content.arguments,
            output=output,
        )
        function_call, function_call_output = tool_call.to_response_input_item_param()
        # Two entries per tool use: the call itself, then its result.
        self.messages.append(
            ResponseMessage(role="tool", content=function_call, metadata=metadata)
        )
        self.messages.append(
            ResponseMessage(
                role="tool", content=function_call_output, metadata=metadata
            )
        )

    def to_openai_payload(
        self,
    ) -> List[
        ResponseInputItemParam
        | ResponseOutputMessage
        | ResponseFunctionToolCallParam
        | FunctionCallOutput
        | ResponseInputMessageContentListParam
    ]:
        """Convert stored messages to the input payload expected by OpenAI.

        Notes
        -----
        Assistant messages are model outputs and are not included in the
        next request's input payload.

        Returns
        -------
        list
            List of message payloads excluding assistant outputs.
        """
        return [
            msg.to_openai_format() for msg in self.messages if msg.role != "assistant"
        ]
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
"""Convenience runners for response workflows."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
|
|
7
|
+
from typing import Any, Optional, Type, TypeVar
|
|
8
|
+
|
|
9
|
+
from .base import ResponseBase
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
# Type variable constraining the runner helpers to ResponseBase subclasses.
R = TypeVar("R", bound=ResponseBase[Any])
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def run_sync(
    response_cls: Type[R],
    *,
    content: str,
    response_kwargs: Optional[dict[str, Any]] = None,
) -> Any:
    """Run a response workflow synchronously and close resources.

    Parameters
    ----------
    response_cls
        Response class to instantiate.
    content
        Prompt text to send to the OpenAI API.
    response_kwargs
        Keyword arguments forwarded to ``response_cls``. Default ``None``.

    Returns
    -------
    Any
        Parsed response from :meth:`ResponseBase.run_response`.
    """
    kwargs = {} if response_kwargs is None else response_kwargs
    instance = response_cls(**kwargs)
    try:
        return instance.run_sync(content=content)
    finally:
        # Release client resources even when the run raises.
        instance.close()
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
async def run_async(
    response_cls: Type[R],
    *,
    content: str,
    response_kwargs: Optional[dict[str, Any]] = None,
) -> Any:
    """Run a response workflow asynchronously and close resources.

    Parameters
    ----------
    response_cls
        Response class to instantiate.
    content
        Prompt text to send to the OpenAI API.
    response_kwargs
        Keyword arguments forwarded to ``response_cls``. Default ``None``.

    Returns
    -------
    Any
        Parsed response from :meth:`ResponseBase.run_response_async`.
    """
    kwargs = {} if response_kwargs is None else response_kwargs
    instance = response_cls(**kwargs)
    try:
        return await instance.run_async(content=content)
    finally:
        # Release client resources even when the awaited run raises.
        instance.close()
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def run_streamed(
    response_cls: Type[R],
    *,
    content: str,
    response_kwargs: Optional[dict[str, Any]] = None,
) -> Any:
    """Run a response workflow and return the asynchronous result.

    This mirrors the agent API for discoverability. Streaming responses are not
    currently supported by :class:`ResponseBase`, so this returns the same value
    as :func:`run_async`.

    Parameters
    ----------
    response_cls
        Response class to instantiate.
    content
        Prompt text to send to the OpenAI API.
    response_kwargs
        Keyword arguments forwarded to ``response_cls``. Default ``None``.

    Returns
    -------
    Any
        Parsed response returned from :func:`run_async`.
    """
    coroutine = run_async(
        response_cls, content=content, response_kwargs=response_kwargs
    )
    return asyncio.run(coroutine)
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
# Public entry points of the runner module.
__all__ = ["run_sync", "run_async", "run_streamed"]
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
"""Tool call representation for shared responses."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from typing import Tuple
|
|
7
|
+
|
|
8
|
+
from openai.types.responses.response_function_tool_call_param import (
|
|
9
|
+
ResponseFunctionToolCallParam,
|
|
10
|
+
)
|
|
11
|
+
from openai.types.responses.response_input_param import FunctionCallOutput
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass
class ResponseToolCall:
    """Container for tool call data used in a conversation.

    Attributes
    ----------
    call_id : str
        Identifier of the tool call.
    name : str
        Name of the tool invoked.
    arguments : str
        JSON string with the arguments passed to the tool.
    output : str
        JSON string representing the result produced by the tool.

    Methods
    -------
    to_response_input_item_param()
        Convert stored data into OpenAI tool call objects.
    """

    call_id: str
    name: str
    arguments: str
    output: str

    def to_response_input_item_param(
        self,
    ) -> Tuple[ResponseFunctionToolCallParam, FunctionCallOutput]:
        """Convert stored data into OpenAI tool call objects.

        Returns
        -------
        tuple[ResponseFunctionToolCallParam, FunctionCallOutput]
            The function call object and the corresponding output object
            suitable for inclusion in an OpenAI request.
        """
        # TypedDict literals type-check via the annotated assignments below,
        # removing the previous function-scope `from typing import cast` and
        # the runtime cast() calls it supported.
        function_call: ResponseFunctionToolCallParam = {
            "arguments": self.arguments,
            "call_id": self.call_id,
            "name": self.name,
            "type": "function_call",
        }
        function_call_output: FunctionCallOutput = {
            "call_id": self.call_id,
            "output": self.output,
            "type": "function_call_output",
        }
        return function_call, function_call_output
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
"""Shared structured output models and base helpers."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from .agent_blueprint import AgentBlueprint
|
|
6
|
+
from .plan import *
|
|
7
|
+
from .base import *
|
|
8
|
+
from .prompt import PromptStructure
|
|
9
|
+
from .responses import *
|
|
10
|
+
from .summary import *
|
|
11
|
+
from .vector_search import *
|
|
12
|
+
from .validation import ValidationResultStructure
|
|
13
|
+
from .web_search import *
|
|
14
|
+
|
|
15
|
+
# Public API of the structure package, re-exported from its submodules.
__all__ = [
    "BaseStructure",
    "SchemaOptions",
    "spec_field",
    "AgentBlueprint",
    "AgentEnum",
    "TaskStructure",
    "PlanStructure",
    "PromptStructure",
    "SummaryTopic",
    "SummaryStructure",
    "ExtendedSummaryStructure",
    "WebSearchStructure",
    "WebSearchPlanStructure",
    "WebSearchItemStructure",
    "WebSearchItemResultStructure",
    "WebSearchReportStructure",
    "VectorSearchReportStructure",
    "VectorSearchItemStructure",
    "VectorSearchItemResultStructure",
    "VectorSearchItemResultsStructure",
    "VectorSearchPlanStructure",
    "VectorSearchStructure",
    "ValidationResultStructure",
    "assistant_tool_definition",
    "assistant_format",
    "response_tool_definition",
    "response_format",
]
|
|
@@ -0,0 +1,224 @@
|
|
|
1
|
+
"""Structures for designing and planning new agents."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import List, Optional
|
|
6
|
+
|
|
7
|
+
from .plan.enum import AgentEnum
|
|
8
|
+
from .base import BaseStructure, spec_field
|
|
9
|
+
from .plan import TaskStructure, PlanStructure
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class AgentBlueprint(BaseStructure):
    """Capture the core requirements for creating a new agent.

    Methods
    -------
    summary()
        Return a human-readable overview of the blueprint.
    build_plan()
        Convert the blueprint into an ordered ``PlanStructure``.
    """

    name: str = spec_field(
        "name",
        allow_null=False,
        description="Name of the agent to build.",
        examples=["ResearchCoordinator", "EvaluationRouter"],
    )
    mission: str = spec_field(
        "mission",
        allow_null=False,
        description="Primary goal or charter for the agent.",
        examples=["Coordinate a research sprint", "Score model outputs"],
    )
    capabilities: List[str] = spec_field(
        "capabilities",
        default_factory=list,
        description="Core skills the agent must perform.",
    )
    constraints: List[str] = spec_field(
        "constraints",
        default_factory=list,
        description="Boundaries, policies, or limits the agent must honor.",
    )
    required_tools: List[str] = spec_field(
        "required_tools",
        default_factory=list,
        description="External tools the agent must integrate.",
    )
    data_sources: List[str] = spec_field(
        "data_sources",
        default_factory=list,
        description="Data inputs that inform the agent's work.",
    )
    evaluation_plan: List[str] = spec_field(
        "evaluation_plan",
        default_factory=list,
        description="Checks, tests, or metrics that validate the agent.",
    )
    rollout_plan: List[str] = spec_field(
        "rollout_plan",
        default_factory=list,
        description="Deployment or launch steps for the agent.",
    )
    guardrails: List[str] = spec_field(
        "guardrails",
        default_factory=list,
        description="Safety rules and governance requirements.",
    )
    notes: Optional[str] = spec_field(
        "notes",
        description="Additional context that informs the build.",
    )

    def summary(self) -> str:
        """Return a multi-line summary highlighting key requirements.

        Returns
        -------
        str
            Human-readable description of the blueprint fields.
        """

        def _comma_list(label: str, values: list[str]) -> str:
            # Comma-separated inline form; bullet form is _bullet_block.
            body = ", ".join(values) if values else "None"
            return f"{label}: " + body

        overview = [
            f"Agent name: {self.name}",
            f"Mission: {self.mission}",
            _comma_list("Capabilities", self.capabilities),
            _comma_list("Constraints", self.constraints),
            _comma_list("Required tools", self.required_tools),
            _comma_list("Data sources", self.data_sources),
            _comma_list("Guardrails", self.guardrails),
            _comma_list("Evaluation", self.evaluation_plan),
            _comma_list("Rollout", self.rollout_plan),
        ]
        if self.notes:
            overview.append(f"Notes: {self.notes}")
        return "\n".join(overview)

    def build_plan(self) -> PlanStructure:
        """Translate the blueprint into a structured plan of execution steps.

        Returns
        -------
        PlanStructure
            Ordered list of tasks representing the build lifecycle.
        """
        # (agent role, prompt, supporting context) for each lifecycle stage,
        # in execution order: scope -> design -> build -> validate -> evaluate
        # -> release.
        stages = [
            (AgentEnum.PLANNER, self._scope_prompt(), self.constraints),
            (
                AgentEnum.DESIGNER,
                self._design_prompt(),
                self.required_tools + self.data_sources,
            ),
            (AgentEnum.BUILDER, self._synthesis_prompt(), self.capabilities),
            (AgentEnum.VALIDATOR, self._validation_prompt(), self.guardrails),
            (AgentEnum.EVALUATOR, self._evaluation_prompt(), self.evaluation_plan),
            (
                AgentEnum.RELEASE_MANAGER,
                self._deployment_prompt(),
                self.rollout_plan,
            ),
        ]
        return PlanStructure(
            tasks=[
                TaskStructure(task_type=role, prompt=prompt, context=context)
                for role, prompt, context in stages
            ]
        )

    def _scope_prompt(self) -> str:
        """Return a scoping prompt based on mission, constraints, and guardrails."""
        sections = [
            f"Mission: {self.mission}",
            self._bullet_block("Guardrails", self.guardrails),
            self._bullet_block("Constraints", self.constraints),
        ]
        return "\n".join(sections)

    def _design_prompt(self) -> str:
        """Return a design prompt covering tools, data, and capabilities."""
        sections = [
            self._bullet_block("Capabilities", self.capabilities),
            self._bullet_block("Required tools", self.required_tools),
            self._bullet_block("Data sources", self.data_sources),
        ]
        return "\n".join(sections)

    def _synthesis_prompt(self) -> str:
        """Return a build prompt focused on interfaces and prompts."""
        # Fall back to a generic instruction when no capabilities are listed.
        capability_items = self.capabilities or ["Draft standard handlers"]
        sections = [
            "Design system and developer prompts that cover:",
            self._bullet_block("Mission", [self.mission]),
            self._bullet_block("Capabilities to implement", capability_items),
        ]
        return "\n".join(sections)

    def _validation_prompt(self) -> str:
        """Return a prompt instructing validation of guardrails and behaviors."""
        sections = [
            "Create automated validation for:",
            self._bullet_block("Guardrails", self.guardrails),
            self._bullet_block("Constraints", self.constraints),
        ]
        return "\n".join(sections)

    def _evaluation_prompt(self) -> str:
        """Return an evaluation prompt emphasizing tests and metrics."""
        sections = [
            "Run evaluation and red-team scenarios using:",
            self._bullet_block("Evaluation plan", self.evaluation_plan),
            self._bullet_block("Capabilities", self.capabilities),
        ]
        return "\n".join(sections)

    def _deployment_prompt(self) -> str:
        """Return a deployment prompt capturing rollout and monitoring."""
        checklist = [
            "Observability hooks enabled",
            "Runbook prepared",
            "Rollback and kill switches validated",
        ]
        sections = [
            self._bullet_block("Rollout steps", self.rollout_plan),
            self._bullet_block("Launch checklist", checklist),
        ]
        return "\n".join(sections)

    @staticmethod
    def _bullet_block(label: str, items: list[str]) -> str:
        """Return a labeled bullet block for use in prompts."""
        if not items:
            return f"{label}: None"
        bullet_lines = [f"- {item}" for item in items]
        return label + ":\n" + "\n".join(bullet_lines)
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
# Sole public export of this module.
__all__ = ["AgentBlueprint"]
|