quick-agent 0.1.2__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registry.
- quick_agent/agent_registry.py +5 -25
- quick_agent/llms.txt +1 -1
- quick_agent/models/loaded_agent_file.py +136 -1
- quick_agent/models/output_spec.py +1 -1
- quick_agent/prompting.py +33 -15
- quick_agent/quick_agent.py +99 -38
- {quick_agent-0.1.2.data → quick_agent-0.1.3.data}/data/quick_agent/agents/business-extract-structured.md +1 -1
- {quick_agent-0.1.2.data → quick_agent-0.1.3.data}/data/quick_agent/agents/business-extract.md +1 -1
- {quick_agent-0.1.2.data → quick_agent-0.1.3.data}/data/quick_agent/agents/function-spec-validator.md +1 -1
- {quick_agent-0.1.2.data → quick_agent-0.1.3.data}/data/quick_agent/agents/subagent-validate-eval-list.md +1 -1
- {quick_agent-0.1.2.data → quick_agent-0.1.3.data}/data/quick_agent/agents/subagent-validator-contains.md +8 -1
- {quick_agent-0.1.2.data → quick_agent-0.1.3.data}/data/quick_agent/agents/template.md +12 -1
- {quick_agent-0.1.2.dist-info → quick_agent-0.1.3.dist-info}/METADATA +5 -1
- {quick_agent-0.1.2.dist-info → quick_agent-0.1.3.dist-info}/RECORD +21 -20
- tests/test_agent.py +273 -9
- tests/test_httpx_tools.py +295 -0
- tests/test_orchestrator.py +353 -28
- {quick_agent-0.1.2.dist-info → quick_agent-0.1.3.dist-info}/WHEEL +0 -0
- {quick_agent-0.1.2.dist-info → quick_agent-0.1.3.dist-info}/entry_points.txt +0 -0
- {quick_agent-0.1.2.dist-info → quick_agent-0.1.3.dist-info}/licenses/LICENSE +0 -0
- {quick_agent-0.1.2.dist-info → quick_agent-0.1.3.dist-info}/top_level.txt +0 -0
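The bulk of the new code is tests/test_httpx_tools.py, which runs QuickAgent against an httpx.MockTransport so the tests can assert on the exact chat-completions JSON posted to the OpenAI-compatible endpoint (e.g. that "tools" is omitted when no tools are configured). A minimal sketch of that request-recording pattern, using only httpx; the RequestRecorder name and the endpoint below are illustrative and not taken from the package:

```python
import asyncio
import json

import httpx


class RequestRecorder:
    """Capture each outgoing request and return a canned JSON response."""

    def __init__(self, response_json: dict) -> None:
        self.response_json = response_json
        self.last_json: dict | None = None

    def __call__(self, request: httpx.Request) -> httpx.Response:
        # MockTransport hands us the fully serialized request instead of hitting the network.
        self.last_json = json.loads(request.content.decode("utf-8"))
        return httpx.Response(200, json=self.response_json)


async def main() -> None:
    recorder = RequestRecorder({"ok": True})
    transport = httpx.MockTransport(recorder)
    async with httpx.AsyncClient(transport=transport, base_url="https://example.test/v1") as client:
        await client.post("/chat/completions", json={"model": "gpt-5", "messages": []})
    # The caller can now assert on exactly what was sent.
    assert recorder.last_json is not None
    assert recorder.last_json.get("tools") is None


asyncio.run(main())
```

The full diff of the new test module follows.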
tests/test_httpx_tools.py
@@ -0,0 +1,295 @@
+import json
+from pathlib import Path
+from typing import Any
+
+import httpx
+import pytest
+from pydantic_ai.models.openai import OpenAIChatModel
+from pydantic_ai.providers.openai import OpenAIProvider
+from pydantic_ai.toolsets import FunctionToolset
+
+from quick_agent.agent_registry import AgentRegistry
+from quick_agent.agent_tools import AgentTools
+from quick_agent.directory_permissions import DirectoryPermissions
+from quick_agent.input_adaptors import TextInput
+from quick_agent.models import AgentSpec
+from quick_agent.models import ChainStepSpec
+from quick_agent.models import LoadedAgentFile
+from quick_agent.models import ModelSpec
+from quick_agent.models.output_spec import OutputSpec
+from quick_agent.quick_agent import QuickAgent
+from quick_agent import quick_agent as qa_module
+
+
+def dummy_tool() -> str:
+    return "ok"
+
+
+class HttpxRequestRecorder:
+    def __init__(self, response_json: dict[str, Any]) -> None:
+        self.response_json = response_json
+        self.requests: list[httpx.Request] = []
+        self.last_json: dict[str, Any] | None = None
+
+    def __call__(self, request: httpx.Request) -> httpx.Response:
+        self.requests.append(request)
+        self.last_json = json.loads(request.content.decode("utf-8"))
+        return httpx.Response(200, json=self.response_json)
+
+
+class StaticRegistry(AgentRegistry):
+    def __init__(self, loaded: LoadedAgentFile) -> None:
+        super().__init__([])
+        self._loaded = loaded
+
+    def get(self, agent_id: str) -> LoadedAgentFile:
+        return self._loaded
+
+
+class BuildModelStub:
+    def __init__(self, model: OpenAIChatModel) -> None:
+        self.model = model
+
+    def __call__(self, _: ModelSpec) -> OpenAIChatModel:
+        return self.model
+
+
+def _chat_completion_response(model_name: str) -> dict[str, Any]:
+    return {
+        "id": "chatcmpl-test",
+        "object": "chat.completion",
+        "created": 123,
+        "model": model_name,
+        "choices": [
+            {
+                "index": 0,
+                "message": {"role": "assistant", "content": "ok"},
+                "finish_reason": "stop",
+            }
+        ],
+        "usage": {"prompt_tokens": 1, "completion_tokens": 1, "total_tokens": 2},
+    }
+
+
+def _messages_by_role(messages: list[dict[str, Any]], role: str) -> list[dict[str, Any]]:
+    return [message for message in messages if message.get("role") == role]
+
+
+@pytest.mark.anyio
+async def test_single_shot_without_tools_omits_tools_in_httpx_post(
+    monkeypatch: pytest.MonkeyPatch, tmp_path: Path
+) -> None:
+    response_json = _chat_completion_response("gpt-5")
+    recorder = HttpxRequestRecorder(response_json)
+    transport = httpx.MockTransport(recorder)
+
+    async with httpx.AsyncClient(transport=transport, base_url="https://example.test/v1") as client:
+        provider = OpenAIProvider(base_url="https://example.test/v1", api_key="test", http_client=client)
+        model = OpenAIChatModel("gpt-5", provider=provider)
+        monkeypatch.setattr(qa_module, "build_model", BuildModelStub(model))
+
+        step = ChainStepSpec(id="s1", kind="text", prompt_section="step:one")
+        spec = AgentSpec(
+            name="test",
+            model=ModelSpec(base_url="https://example.test/v1", model_name="gpt-5"),
+            chain=[step],
+            tools=[],
+            output=OutputSpec(file=None),
+        )
+        loaded = LoadedAgentFile.from_parts(
+            spec=spec,
+            instructions="system",
+            system_prompt="",
+            step_prompts={"step:one": "say hi"},
+        )
+
+        registry = StaticRegistry(loaded)
+        tools = AgentTools([])
+        permissions = DirectoryPermissions(tmp_path)
+
+        agent = QuickAgent(
+            registry=registry,
+            tools=tools,
+            directory_permissions=permissions,
+            agent_id="agent-1",
+            input_data=TextInput("hello"),
+            extra_tools=None,
+            write_output=False,
+        )
+
+        result = await agent.run()
+
+    assert result == "ok"
+    assert len(recorder.requests) == 1
+    assert recorder.last_json is not None
+    assert recorder.last_json.get("tools") is None
+    assert recorder.last_json.get("tool_choice") is None
+
+
+@pytest.mark.anyio
+async def test_single_shot_with_tools_includes_tools_in_httpx_post(
+    monkeypatch: pytest.MonkeyPatch, tmp_path: Path
+) -> None:
+    response_json = _chat_completion_response("gpt-5")
+    recorder = HttpxRequestRecorder(response_json)
+    transport = httpx.MockTransport(recorder)
+
+    async with httpx.AsyncClient(transport=transport, base_url="https://example.test/v1") as client:
+        provider = OpenAIProvider(base_url="https://example.test/v1", api_key="test", http_client=client)
+        model = OpenAIChatModel("gpt-5", provider=provider)
+        monkeypatch.setattr(qa_module, "build_model", BuildModelStub(model))
+
+        step = ChainStepSpec(id="s1", kind="text", prompt_section="step:one")
+        spec = AgentSpec(
+            name="test",
+            model=ModelSpec(base_url="https://example.test/v1", model_name="gpt-5"),
+            chain=[step],
+            tools=["dummy.tool"],
+            output=OutputSpec(file=None),
+        )
+        loaded = LoadedAgentFile.from_parts(
+            spec=spec,
+            instructions="system",
+            system_prompt="",
+            step_prompts={"step:one": "say hi"},
+        )
+
+        registry = StaticRegistry(loaded)
+        tools = AgentTools([])
+        toolset = FunctionToolset[Any]()
+        toolset.add_function(func=dummy_tool, name="dummy_tool", description="dummy tool")
+        monkeypatch.setattr(tools, "build_toolset", lambda *_: toolset)
+        permissions = DirectoryPermissions(tmp_path)
+
+        agent = QuickAgent(
+            registry=registry,
+            tools=tools,
+            directory_permissions=permissions,
+            agent_id="agent-1",
+            input_data=TextInput("hello"),
+            extra_tools=None,
+            write_output=False,
+        )
+
+        result = await agent.run()
+
+    assert result == "ok"
+    assert len(recorder.requests) == 1
+    assert recorder.last_json is not None
+    tools_json = recorder.last_json.get("tools")
+    assert isinstance(tools_json, list)
+    assert tools_json
+    assert tools_json[0]["function"]["name"] == "dummy_tool"
+
+
+@pytest.mark.anyio
+async def test_single_shot_no_steps_system_prompt_only_includes_system_prompt(
+    monkeypatch: pytest.MonkeyPatch, tmp_path: Path
+) -> None:
+    response_json = _chat_completion_response("gpt-5")
+    recorder = HttpxRequestRecorder(response_json)
+    transport = httpx.MockTransport(recorder)
+
+    async with httpx.AsyncClient(transport=transport, base_url="https://example.test/v1") as client:
+        provider = OpenAIProvider(base_url="https://example.test/v1", api_key="test", http_client=client)
+        model = OpenAIChatModel("gpt-5", provider=provider)
+        monkeypatch.setattr(qa_module, "build_model", BuildModelStub(model))
+
+        spec = AgentSpec(
+            name="test",
+            model=ModelSpec(base_url="https://example.test/v1", model_name="gpt-5"),
+            chain=[],
+            tools=[],
+            output=OutputSpec(file=None),
+        )
+        loaded = LoadedAgentFile.from_parts(
+            spec=spec,
+            instructions="",
+            system_prompt="You are concise.",
+            step_prompts={},
+        )
+
+        registry = StaticRegistry(loaded)
+        tools = AgentTools([])
+        permissions = DirectoryPermissions(tmp_path)
+
+        agent = QuickAgent(
+            registry=registry,
+            tools=tools,
+            directory_permissions=permissions,
+            agent_id="agent-1",
+            input_data=TextInput("hello"),
+            extra_tools=None,
+            write_output=False,
+        )
+
+        result = await agent.run()
+
+    assert result == "ok"
+    assert recorder.last_json is not None
+    messages = recorder.last_json.get("messages")
+    assert isinstance(messages, list)
+    assert messages[0]["role"] == "system"
+    assert messages[0]["content"] == "You are concise."
+    assert messages[-1]["role"] == "user"
+    assert "# Task Input" not in messages[-1]["content"]
+    assert "## Input Content" not in messages[-1]["content"]
+    assert "## Chain State (YAML)" not in messages[-1]["content"]
+    assert "## Step Instructions" not in messages[-1]["content"]
+
+
+@pytest.mark.anyio
+async def test_single_shot_no_steps_instructions_only_includes_instructions(
+    monkeypatch: pytest.MonkeyPatch, tmp_path: Path
+) -> None:
+    response_json = _chat_completion_response("gpt-5")
+    recorder = HttpxRequestRecorder(response_json)
+    transport = httpx.MockTransport(recorder)
+
+    async with httpx.AsyncClient(transport=transport, base_url="https://example.test/v1") as client:
+        provider = OpenAIProvider(base_url="https://example.test/v1", api_key="test", http_client=client)
+        model = OpenAIChatModel("gpt-5", provider=provider)
+        monkeypatch.setattr(qa_module, "build_model", BuildModelStub(model))
+
+        spec = AgentSpec(
+            name="test",
+            model=ModelSpec(base_url="https://example.test/v1", model_name="gpt-5"),
+            chain=[],
+            tools=[],
+            output=OutputSpec(file=None),
+        )
+        loaded = LoadedAgentFile.from_parts(
+            spec=spec,
+            instructions="Use the tool.",
+            system_prompt="",
+            step_prompts={},
+        )
+
+        registry = StaticRegistry(loaded)
+        tools = AgentTools([])
+        permissions = DirectoryPermissions(tmp_path)
+
+        agent = QuickAgent(
+            registry=registry,
+            tools=tools,
+            directory_permissions=permissions,
+            agent_id="agent-1",
+            input_data=TextInput("hello"),
+            extra_tools=None,
+            write_output=False,
+        )
+
+        result = await agent.run()
+
+    assert result == "ok"
+    assert recorder.last_json is not None
+    messages = recorder.last_json.get("messages")
+    assert isinstance(messages, list)
+    system_messages = _messages_by_role(messages, "system")
+    system_contents = [message.get("content") for message in system_messages]
+    assert "Use the tool." in system_contents
+    assert messages[-1]["role"] == "user"
+    assert "# Task Input" not in messages[-1]["content"]
+    assert "## Input Content" not in messages[-1]["content"]
+    assert "## Chain State (YAML)" not in messages[-1]["content"]
+    assert "## Step Instructions" not in messages[-1]["content"]