nest-shell 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nest_shell/__init__.py +19 -0
- nest_shell/agent.py +272 -0
- nest_shell/factories.py +632 -0
- nest_shell/llm.py +203 -0
- nest_shell/py.typed +0 -0
- nest_shell/templates.py +181 -0
- nest_shell-0.1.0.dist-info/METADATA +40 -0
- nest_shell-0.1.0.dist-info/RECORD +9 -0
- nest_shell-0.1.0.dist-info/WHEEL +4 -0
nest_shell/__init__.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
"""NEST shell: Tier 2 LLM-backed reference agent."""
|
|
3
|
+
|
|
4
|
+
__version__ = "0.1.0"
|
|
5
|
+
|
|
6
|
+
from nest_shell.agent import ShellAgent as ShellAgent
|
|
7
|
+
from nest_shell.agent import shell_marketplace_factory as shell_marketplace_factory
|
|
8
|
+
from nest_shell.factories import shell_auction_factory as shell_auction_factory
|
|
9
|
+
from nest_shell.factories import shell_consensus_factory as shell_consensus_factory
|
|
10
|
+
from nest_shell.factories import shell_reputation_factory as shell_reputation_factory
|
|
11
|
+
from nest_shell.factories import shell_supply_chain_factory as shell_supply_chain_factory
|
|
12
|
+
from nest_shell.factories import shell_voting_factory as shell_voting_factory
|
|
13
|
+
from nest_shell.llm import AnthropicBackend as AnthropicBackend
|
|
14
|
+
from nest_shell.llm import LiteLLMBackend as LiteLLMBackend
|
|
15
|
+
from nest_shell.llm import LLMBackend as LLMBackend
|
|
16
|
+
from nest_shell.llm import MockLLMBackend as MockLLMBackend
|
|
17
|
+
from nest_shell.llm import OpenAIBackend as OpenAIBackend
|
|
18
|
+
from nest_shell.templates import AgentTemplate as AgentTemplate
|
|
19
|
+
from nest_shell.templates import TemplateRegistry as TemplateRegistry
|
nest_shell/agent.py
ADDED
|
@@ -0,0 +1,272 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
"""Tier 2 LLM-backed shell agent.
|
|
3
|
+
|
|
4
|
+
Replaces hardcoded state-machine logic with LLM-driven decision making.
|
|
5
|
+
The agent maintains a conversation history and asks the LLM what to do
|
|
6
|
+
on each event.
|
|
7
|
+
|
|
8
|
+
Example::
|
|
9
|
+
|
|
10
|
+
backend = MockLLMBackend()
|
|
11
|
+
agent = ShellAgent(AgentId("buyer-0"), role="buyer", backend=backend)
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
import re
|
|
17
|
+
from typing import TYPE_CHECKING, Any
|
|
18
|
+
|
|
19
|
+
from nest_core.sim.agent import AgentContext, StateMachineAgent
|
|
20
|
+
from nest_core.types import AgentId
|
|
21
|
+
|
|
22
|
+
from nest_shell.llm import LLMBackend
|
|
23
|
+
|
|
24
|
+
if TYPE_CHECKING:
|
|
25
|
+
from nest_shell.templates import AgentTemplate
|
|
26
|
+
|
|
27
|
+
_DEFAULT_SYSTEM_PROMPT = """\
|
|
28
|
+
You are an agent in a multi-agent marketplace simulation.
|
|
29
|
+
Your role is: {role}
|
|
30
|
+
|
|
31
|
+
When you receive a message, decide what action to take.
|
|
32
|
+
Respond in this exact format:
|
|
33
|
+
|
|
34
|
+
ACTION: send
|
|
35
|
+
TO: <agent-id>
|
|
36
|
+
MESSAGE: <message-content>
|
|
37
|
+
|
|
38
|
+
Or if no action is needed:
|
|
39
|
+
ACTION: none
|
|
40
|
+
|
|
41
|
+
Rules:
|
|
42
|
+
- If you are a buyer, send buy requests to sellers.
|
|
43
|
+
- If you are a seller, respond to buy requests with "sold:" or "reject:".
|
|
44
|
+
- Always include the product and price in messages.
|
|
45
|
+
- Format: buy:<product>:<price> or sold:<product>:<price> or reject:<product>:<min_price>
|
|
46
|
+
"""
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def parse_action(response: str, sender: AgentId) -> dict[str, Any] | None:
    """Parse an LLM response into an action dict.

    Example::

        action = parse_action("ACTION: send\\nTO: seller-0\\nMESSAGE: buy:p:50", sender)
    """
    text = response.strip()

    verb_match = re.search(r"ACTION:\s*(\w+)", text)
    if verb_match is None:
        return None

    verb = verb_match.group(1).lower()
    if verb != "send":
        # Covers "none" as well as any unrecognized action verb.
        return None

    msg_match = re.search(r"MESSAGE:\s*(.+)", text)
    if msg_match is None:
        # A send action without a message is malformed; treat as no-op.
        return None

    to_match = re.search(r"TO:\s*(.+)", text)
    if to_match is None:
        # Missing TO line: default to replying to the sender.
        target_str = str(sender)
    else:
        target_str = to_match.group(1).strip()
    # The prompt templates may use a literal "{sender}" placeholder.
    target_str = target_str.replace("{sender}", str(sender))

    message = msg_match.group(1).strip().replace("{sender}", str(sender))

    return {
        "action": "send",
        "to": AgentId(target_str),
        "message": message.encode("utf-8"),
    }
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
class ShellAgent(StateMachineAgent):
    """LLM-backed agent that uses a language model to decide actions.

    Example::

        agent = ShellAgent(
            agent_id=AgentId("buyer-0"),
            role="buyer",
            backend=MockLLMBackend(),
        )
    """

    def __init__(
        self,
        agent_id: AgentId,
        role: str,
        backend: LLMBackend,
        system_prompt: str | None = None,
        num_sellers: int = 10,
        rounds: int = 10,
        template: AgentTemplate | None = None,
    ) -> None:
        """Initialize the agent.

        Args:
            agent_id: Identity of this agent in the simulation.
            role: Role string (e.g. "buyer" or "seller"); substituted into
                the prompt via ``str.format``.
            backend: LLM backend used to generate responses.
            system_prompt: Optional prompt override; ``{role}`` placeholders
                are formatted in. Ignored when *template* is given.
            num_sellers: Number of seller agents; used by :meth:`on_start`
                to pick a random buy target.
            rounds: Maximum number of incoming messages this agent responds to.
            template: Optional template whose ``system_prompt`` is used
                verbatim (no ``{role}`` formatting); takes precedence over
                *system_prompt*.
        """
        self._id = agent_id
        self._role = role
        self._backend = backend
        # Template prompt is used as-is; otherwise the (default) prompt is
        # role-formatted. NOTE(review): a custom system_prompt containing
        # literal braces would raise in .format() — confirm callers avoid that.
        if template is not None:
            self._system_prompt = template.system_prompt
        else:
            self._system_prompt = (system_prompt or _DEFAULT_SYSTEM_PROMPT).format(role=role)
        # Chat-completion message history; the system prompt is the first
        # entry and is preserved by the trimming in on_message().
        self._history: list[dict[str, str]] = [
            {"role": "system", "content": self._system_prompt},
        ]
        self._num_sellers = num_sellers
        self._rounds = rounds
        self._round = 0  # messages handled so far
        self._action_count = 0  # sends actually performed

    async def on_start(self, ctx: AgentContext) -> None:
        """Kick off the conversation: buyers send an initial buy request.

        Non-buyer roles stay passive until they receive a message.
        """
        if self._role == "buyer":
            # NOTE(review): assumes num_sellers >= 1 — randint(0, -1) raises
            # ValueError when there are no sellers; confirm the factories
            # never build a buyer with num_sellers == 0.
            seller_idx = ctx.rng.randint(0, self._num_sellers - 1)
            seller = AgentId(f"seller-{seller_idx}")
            price = ctx.rng.randint(10, 100)

            self._history.append(
                {
                    "role": "user",
                    "content": f"Simulation started. You are {self._id}. "
                    f"Send a buy request to a seller. "
                    f"Suggested target: {seller}, suggested price: {price}.",
                }
            )

            response = await self._backend.complete(self._history)
            self._history.append({"role": "assistant", "content": response})

            # Fall back to a deterministic buy request when the LLM response
            # cannot be parsed, so a buyer always sends exactly one message.
            action = parse_action(response, seller)
            if action and action["action"] == "send":
                await ctx.send(action["to"], action["message"])
                self._action_count += 1
            else:
                await ctx.send(seller, f"buy:product-0:{price}".encode())
                self._action_count += 1

    async def on_message(self, ctx: AgentContext, sender: AgentId, payload: bytes) -> None:
        """Handle an incoming message by asking the LLM for the next action."""
        msg = payload.decode("utf-8", errors="replace")
        self._round += 1

        # Stop responding once the round budget is exhausted so the
        # conversation terminates.
        if self._round > self._rounds:
            return

        self._history.append(
            {
                "role": "user",
                "content": f"Message from {sender}: {msg}",
            }
        )

        # Bound prompt size: keep the system prompt plus the 18 most recent
        # messages (19 entries total) once history exceeds 20 entries.
        if len(self._history) > 20:
            self._history = [self._history[0]] + self._history[-18:]

        response = await self._backend.complete(self._history)
        self._history.append({"role": "assistant", "content": response})

        # Unparseable responses (or ACTION: none) result in no send.
        action = parse_action(response, sender)
        if action and action["action"] == "send":
            await ctx.send(action["to"], action["message"])
            self._action_count += 1

    @property
    def action_count(self) -> int:
        """Number of send actions performed so far."""
        return self._action_count

    @property
    def history_length(self) -> int:
        """Current number of entries in the conversation history."""
        return len(self._history)
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
def _resolve_template(
    config: Any,
    role: str,
    scenario: str,
) -> AgentTemplate | None:
    """Resolve a template for a given role, if configured.

    Example::

        tpl = _resolve_template(config, "buyer", "marketplace")
    """
    name: str = getattr(config.agents, "template", "")
    if not name:
        return None

    # Imported lazily so scenarios without templates never load the registry.
    from nest_shell.templates import TemplateRegistry

    # "auto" derives the template name from the scenario and role.
    lookup = f"{scenario}-{role}" if name == "auto" else name
    try:
        return TemplateRegistry().get_template(lookup)
    except KeyError:
        # Unknown template names fall back to no template at all.
        return None
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
def shell_marketplace_factory(
    config: Any,
    plugins: dict[str, Any],
    backend: LLMBackend | None = None,
) -> dict[AgentId, StateMachineAgent]:
    """Create shell agents for the marketplace scenario.

    Builds ``seller-{i}`` agents followed by ``buyer-{i}`` agents, all
    sharing one LLM backend.

    Args:
        config: Scenario configuration; reads ``agents.roles``,
            ``agents.count``, and ``task.config["rounds"]``.
        plugins: Plugin mapping (unused here; kept for factory signature
            parity).
        backend: LLM backend; defaults to :class:`MockLLMBackend`.

    Example::

        agents = shell_marketplace_factory(config, plugins, backend=MockLLMBackend())
    """
    from nest_shell.llm import MockLLMBackend

    if backend is None:
        backend = MockLLMBackend()

    task_config = config.task.config
    rounds = task_config.get("rounds", 10)

    agents: dict[AgentId, StateMachineAgent] = {}

    buyer_count = 0
    seller_count = 0
    if config.agents.roles:
        for role in config.agents.roles:
            if role.name == "buyer":
                buyer_count = role.count
            elif role.name == "seller":
                seller_count = role.count
    # Fix: a roles list naming neither "buyer" nor "seller" previously left
    # both counts at 0 and returned an empty simulation. Fall back to the
    # default split derived from agents.count, matching the zero-count
    # fallbacks in the other shell factories (auction/voting/consensus).
    if buyer_count == 0 and seller_count == 0:
        buyer_count = config.agents.count // 2
        seller_count = config.agents.count - buyer_count

    for i in range(seller_count):
        aid = AgentId(f"seller-{i}")
        agents[aid] = ShellAgent(
            agent_id=aid,
            role="seller",
            backend=backend,
            num_sellers=seller_count,
            rounds=rounds,
            template=_resolve_template(config, "seller", "marketplace"),
        )

    for i in range(buyer_count):
        aid = AgentId(f"buyer-{i}")
        agents[aid] = ShellAgent(
            agent_id=aid,
            role="buyer",
            backend=backend,
            num_sellers=seller_count,
            rounds=rounds,
            template=_resolve_template(config, "buyer", "marketplace"),
        )

    return agents
|
nest_shell/factories.py
ADDED
|
@@ -0,0 +1,632 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
"""Shell agent factories for auction, voting, consensus, supply-chain, and reputation scenarios.
|
|
3
|
+
|
|
4
|
+
These factories create LLM-backed :class:`ShellAgent` instances with
|
|
5
|
+
scenario-appropriate system prompts so users can set ``brain: llm``
|
|
6
|
+
in their YAML files.
|
|
7
|
+
|
|
8
|
+
Example::
|
|
9
|
+
|
|
10
|
+
agents = shell_auction_factory(config, plugins, backend=MockLLMBackend())
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
from typing import Any
|
|
16
|
+
|
|
17
|
+
from nest_core.scenario import ScenarioConfig
|
|
18
|
+
from nest_core.sim.agent import StateMachineAgent
|
|
19
|
+
from nest_core.types import AgentId
|
|
20
|
+
|
|
21
|
+
from nest_shell.agent import ShellAgent, _resolve_template # pyright: ignore[reportPrivateUsage]
|
|
22
|
+
from nest_shell.llm import LLMBackend
|
|
23
|
+
|
|
24
|
+
# Prompt for the single auctioneer agent: announces items, collects bids,
# and reports winners/losers using the colon-separated wire formats below.
_AUCTION_AUCTIONEER_PROMPT = """\
You are an auctioneer in a multi-agent auction simulation.
Your role is: auctioneer

When the simulation starts, announce an item for auction to all bidders.
When you receive bids, track them and pick the highest bidder.

Respond in this exact format:

ACTION: send
TO: <agent-id>
MESSAGE: <message-content>

Or if no action is needed:
ACTION: none

Rules:
- Announce items with format: auction:<item>:<base_price>
- When all bids arrive, notify the winner with: won:<item>:<price>
- Notify losers with: lost:<item>:<winning_price>
- Start new rounds after announcing results.
"""

# Prompt for bidder agents: respond to auction announcements with bids.
_AUCTION_BIDDER_PROMPT = """\
You are a bidder in a multi-agent auction simulation.
Your role is: bidder

When you receive an auction announcement, decide how much to bid.

Respond in this exact format:

ACTION: send
TO: <agent-id>
MESSAGE: <message-content>

Or if no action is needed:
ACTION: none

Rules:
- When you see auction:<item>:<base_price>, respond with bid:<item>:<your_bid>
- Your bid should be at or above the base price but within your budget.
- If you win, you receive won:<item>:<price>. If you lose, you receive lost:<item>:<price>.
"""
|
|
67
|
+
|
|
68
|
+
# Prompt for the single proposer agent: proposes topics each round.
_VOTING_PROPOSER_PROMPT = """\
You are a proposer in a multi-agent voting simulation.
Your role is: proposer

You propose topics for voters to vote on.

Respond in this exact format:

ACTION: send
TO: <agent-id>
MESSAGE: <message-content>

Or if no action is needed:
ACTION: none

Rules:
- Propose topics with format: propose:<round>:<topic>
- When you receive result:<round>:<outcome>:<tally>, start a new round.
- Topics can be: increase-budget, new-policy, elect-leader.
"""

# Prompt for voter agents: cast yes/no votes to coordinator-0.
_VOTING_VOTER_PROMPT = """\
You are a voter in a multi-agent voting simulation.
Your role is: voter

When you receive a proposal, cast your vote.

Respond in this exact format:

ACTION: send
TO: <agent-id>
MESSAGE: <message-content>

Or if no action is needed:
ACTION: none

Rules:
- When you see propose:<round>:<topic>, respond with vote:<round>:<yes_or_no>:<your_id>
- Send your vote to the coordinator (coordinator-0).
- Vote yes or no based on the topic.
"""

# Prompt for the single coordinator agent: tallies votes, announces results.
_VOTING_COORDINATOR_PROMPT = """\
You are a coordinator in a multi-agent voting simulation.
Your role is: coordinator

You tally votes and announce results.

Respond in this exact format:

ACTION: send
TO: <agent-id>
MESSAGE: <message-content>

Or if no action is needed:
ACTION: none

Rules:
- Collect vote:<round>:<yes_or_no>:<voter_id> messages from voters.
- When all votes arrive, announce result:<round>:<passed_or_rejected>:<tally> to the proposer.
"""
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def shell_auction_factory(
    config: ScenarioConfig,
    plugins: dict[str, Any],
    backend: LLMBackend | None = None,
) -> dict[AgentId, StateMachineAgent]:
    """Create shell agents for the auction scenario.

    One auctioneer (``auctioneer-0``) plus ``bidder-{i}`` agents are built,
    all sharing the same LLM backend.

    Example::

        agents = shell_auction_factory(config, plugins, backend=MockLLMBackend())
    """
    from nest_shell.llm import MockLLMBackend

    llm = MockLLMBackend() if backend is None else backend
    rounds = config.task.config.get("rounds", 5)

    # Bidder count comes from an explicit "bidder" role when present;
    # otherwise everyone but the auctioneer bids.
    bidder_count = 0
    if config.agents.roles:
        for spec in config.agents.roles:
            if spec.name == "bidder":
                bidder_count = spec.count
    if bidder_count == 0:
        bidder_count = config.agents.count - 1

    def make(agent_id: AgentId, role_name: str, prompt: str) -> ShellAgent:
        # All agents share the backend, round budget, and bidder count.
        return ShellAgent(
            agent_id=agent_id,
            role=role_name,
            backend=llm,
            system_prompt=prompt,
            num_sellers=bidder_count,
            rounds=rounds,
            template=_resolve_template(config, role_name, "auction"),
        )

    auctioneer = AgentId("auctioneer-0")
    agents: dict[AgentId, StateMachineAgent] = {
        auctioneer: make(auctioneer, "auctioneer", _AUCTION_AUCTIONEER_PROMPT),
    }
    for idx in range(bidder_count):
        bidder = AgentId(f"bidder-{idx}")
        agents[bidder] = make(bidder, "bidder", _AUCTION_BIDDER_PROMPT)

    return agents
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def shell_voting_factory(
    config: ScenarioConfig,
    plugins: dict[str, Any],
    backend: LLMBackend | None = None,
) -> dict[AgentId, StateMachineAgent]:
    """Create shell agents for the voting scenario.

    One proposer (``proposer-0``), one coordinator (``coordinator-0``), and
    ``voter-{i}`` agents are built, all sharing the same LLM backend.

    Example::

        agents = shell_voting_factory(config, plugins, backend=MockLLMBackend())
    """
    from nest_shell.llm import MockLLMBackend

    llm = MockLLMBackend() if backend is None else backend
    rounds = config.task.config.get("rounds", 3)

    # Voter count comes from an explicit "voter" role when present; otherwise
    # everyone except the proposer and coordinator votes (at least one voter).
    voter_count = 0
    if config.agents.roles:
        for spec in config.agents.roles:
            if spec.name == "voter":
                voter_count = spec.count
    if voter_count == 0:
        voter_count = max(1, config.agents.count - 2)

    def make(agent_id: AgentId, role_name: str, prompt: str) -> ShellAgent:
        # All agents share the backend, round budget, and voter count.
        return ShellAgent(
            agent_id=agent_id,
            role=role_name,
            backend=llm,
            system_prompt=prompt,
            num_sellers=voter_count,
            rounds=rounds,
            template=_resolve_template(config, role_name, "voting"),
        )

    proposer = AgentId("proposer-0")
    coordinator = AgentId("coordinator-0")
    agents: dict[AgentId, StateMachineAgent] = {
        proposer: make(proposer, "proposer", _VOTING_PROPOSER_PROMPT),
        coordinator: make(coordinator, "coordinator", _VOTING_COORDINATOR_PROMPT),
    }
    for idx in range(voter_count):
        voter = AgentId(f"voter-{idx}")
        agents[voter] = make(voter, "voter", _VOTING_VOTER_PROMPT)

    return agents
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
# ---------------------------------------------------------------------------
|
|
262
|
+
# Consensus
|
|
263
|
+
# ---------------------------------------------------------------------------
|
|
264
|
+
|
|
265
|
+
# Prompt for the single leader agent: proposes values and tallies votes.
_CONSENSUS_LEADER_PROMPT = """\
You are a leader in a quorum-based consensus simulation.
Your role is: leader

Propose values to followers and collect their votes.

Respond in this exact format:

ACTION: send
TO: <agent-id>
MESSAGE: <message-content>

Or if no action is needed:
ACTION: none

Rules:
- Propose values with format: propose:<round>:<value>
- Collect vote:<round>:<accept_or_reject> from followers.
- When all votes arrive, announce result:<round>:<committed_or_aborted>:<tally>.
"""

# Prompt for follower agents: vote on leader proposals, replying to leader-0.
_CONSENSUS_FOLLOWER_PROMPT = """\
You are a follower in a quorum-based consensus simulation.
Your role is: follower

Vote on proposals from the leader.

Respond in this exact format:

ACTION: send
TO: <agent-id>
MESSAGE: <message-content>

Or if no action is needed:
ACTION: none

Rules:
- When you see propose:<round>:<value>, respond with vote:<round>:<accept_or_reject>.
- Send your vote to the leader (leader-0).
"""
|
|
305
|
+
|
|
306
|
+
|
|
307
|
+
def shell_consensus_factory(
    config: ScenarioConfig,
    plugins: dict[str, Any],
    backend: LLMBackend | None = None,
) -> dict[AgentId, StateMachineAgent]:
    """Create shell agents for the consensus scenario.

    One leader (``leader-0``) plus ``follower-{i}`` agents are built, all
    sharing the same LLM backend.

    Example::

        agents = shell_consensus_factory(config, plugins, backend=MockLLMBackend())
    """
    from nest_shell.llm import MockLLMBackend

    llm = MockLLMBackend() if backend is None else backend
    rounds = config.task.config.get("rounds", 5)

    # Follower count comes from an explicit "follower" role when present;
    # otherwise everyone but the leader follows.
    follower_count = 0
    if config.agents.roles:
        for spec in config.agents.roles:
            if spec.name == "follower":
                follower_count = spec.count
    if follower_count == 0:
        follower_count = config.agents.count - 1

    def make(agent_id: AgentId, role_name: str, prompt: str) -> ShellAgent:
        # All agents share the backend, round budget, and follower count.
        return ShellAgent(
            agent_id=agent_id,
            role=role_name,
            backend=llm,
            system_prompt=prompt,
            num_sellers=follower_count,
            rounds=rounds,
            template=_resolve_template(config, role_name, "consensus"),
        )

    leader = AgentId("leader-0")
    agents: dict[AgentId, StateMachineAgent] = {
        leader: make(leader, "leader", _CONSENSUS_LEADER_PROMPT),
    }
    for idx in range(follower_count):
        follower = AgentId(f"follower-{idx}")
        agents[follower] = make(follower, "follower", _CONSENSUS_FOLLOWER_PROMPT)

    return agents
|
|
364
|
+
|
|
365
|
+
|
|
366
|
+
# ---------------------------------------------------------------------------
|
|
367
|
+
# Supply-chain
|
|
368
|
+
# ---------------------------------------------------------------------------
|
|
369
|
+
|
|
370
|
+
# Prompts for the four supply-chain hops. Each hop's "target" named in its
# rules is the next agent in the pipeline (supplier -> manufacturer ->
# distributor -> retailer -> back to supplier).
_SUPPLY_CHAIN_SUPPLIER_PROMPT = """\
You are a supplier in a multi-hop supply-chain simulation.
Your role is: supplier

Produce raw materials and send them to manufacturers.

Respond in this exact format:

ACTION: send
TO: <agent-id>
MESSAGE: <message-content>

Or if no action is needed:
ACTION: none

Rules:
- Send materials with format: material:<round>:<batch_id>
- Your target is manufacturer-0.
"""

_SUPPLY_CHAIN_MFG_PROMPT = """\
You are a manufacturer in a multi-hop supply-chain simulation.
Your role is: manufacturer

Receive materials, produce goods, and send to distributors.

Respond in this exact format:

ACTION: send
TO: <agent-id>
MESSAGE: <message-content>

Or if no action is needed:
ACTION: none

Rules:
- When you receive material:<round>:<batch>, produce and send product:<round>:<product_id>.
- Your target is distributor-0.
"""

_SUPPLY_CHAIN_DIST_PROMPT = """\
You are a distributor in a multi-hop supply-chain simulation.
Your role is: distributor

Receive goods and forward shipments to retailers.

Respond in this exact format:

ACTION: send
TO: <agent-id>
MESSAGE: <message-content>

Or if no action is needed:
ACTION: none

Rules:
- When you receive product:<round>:<product>, forward as shipment:<round>:<product>.
- Your target is retailer-0.
"""

_SUPPLY_CHAIN_RETAILER_PROMPT = """\
You are a retailer in a multi-hop supply-chain simulation.
Your role is: retailer

Receive goods and report delivery back to the supplier.

Respond in this exact format:

ACTION: send
TO: <agent-id>
MESSAGE: <message-content>

Or if no action is needed:
ACTION: none

Rules:
- When you receive shipment:<round>:<product>, report delivered:<round>:<product>.
- Your target is supplier-0.
"""
|
|
449
|
+
|
|
450
|
+
|
|
451
|
+
def shell_supply_chain_factory(
    config: ScenarioConfig,
    plugins: dict[str, Any],
    backend: LLMBackend | None = None,
) -> dict[AgentId, StateMachineAgent]:
    """Create shell agents for the supply-chain scenario.

    Builds exactly one agent per hop (supplier, manufacturer, distributor,
    retailer), each named ``<role>-0``, all sharing the same LLM backend.

    Example::

        agents = shell_supply_chain_factory(config, plugins, backend=MockLLMBackend())
    """
    from nest_shell.llm import MockLLMBackend

    llm = MockLLMBackend() if backend is None else backend
    rounds = config.task.config.get("rounds", 3)

    # Fixed chain: one agent per hop, in pipeline order (dict preserves
    # insertion order, so agents are created supplier-first).
    prompts = {
        "supplier": _SUPPLY_CHAIN_SUPPLIER_PROMPT,
        "manufacturer": _SUPPLY_CHAIN_MFG_PROMPT,
        "distributor": _SUPPLY_CHAIN_DIST_PROMPT,
        "retailer": _SUPPLY_CHAIN_RETAILER_PROMPT,
    }

    agents: dict[AgentId, StateMachineAgent] = {}
    for role_name, prompt in prompts.items():
        agent_id = AgentId(f"{role_name}-0")
        agents[agent_id] = ShellAgent(
            agent_id=agent_id,
            role=role_name,
            backend=llm,
            system_prompt=prompt,
            num_sellers=1,
            rounds=rounds,
            template=_resolve_template(config, role_name, "supply-chain"),
        )

    return agents
|
|
491
|
+
|
|
492
|
+
|
|
493
|
+
# ---------------------------------------------------------------------------
|
|
494
|
+
# Reputation
|
|
495
|
+
# ---------------------------------------------------------------------------
|
|
496
|
+
|
|
497
|
+
# Prompt for honest traders: always deliver, report outcomes to observer-0.
_REPUTATION_HONEST_PROMPT = """\
You are an honest trader in a reputation simulation.
Your role is: honest

Always deliver on trades and report bad actors.

Respond in this exact format:

ACTION: send
TO: <agent-id>
MESSAGE: <message-content>

Or if no action is needed:
ACTION: none

Rules:
- Initiate trades with format: trade:<round>:<your_id>
- Always respond to trades with: deliver:<round>:<your_id>
- Report outcomes to observer-0 with: report:<round>:<agent>:<good_or_bad>
"""

# Prompt for malicious traders: may cheat instead of delivering.
_REPUTATION_MALICIOUS_PROMPT = """\
You are a malicious trader in a reputation simulation.
Your role is: malicious

Sometimes cheat on trades to game the system.

Respond in this exact format:

ACTION: send
TO: <agent-id>
MESSAGE: <message-content>

Or if no action is needed:
ACTION: none

Rules:
- Initiate trades with format: trade:<round>:<your_id>
- Sometimes respond with: cheat:<round>:<your_id> instead of delivering.
- You may also deliver honestly to build reputation.
"""

# Prompt for the single observer agent: scores reports and warns on bad actors.
_REPUTATION_OBSERVER_PROMPT = """\
You are an observer in a reputation simulation.
Your role is: observer

Track reputation scores and broadcast warnings about bad actors.

Respond in this exact format:

ACTION: send
TO: <agent-id>
MESSAGE: <message-content>

Or if no action is needed:
ACTION: none

Rules:
- Collect report:<round>:<agent>:<good_or_bad> messages.
- Track scores: +1 for good, -2 for bad.
- When an agent's score drops to -3 or below, broadcast warning:<round>:<agent>:untrusted.
"""
|
|
559
|
+
|
|
560
|
+
|
|
561
|
+
def shell_reputation_factory(
    config: ScenarioConfig,
    plugins: dict[str, Any],
    backend: LLMBackend | None = None,
) -> dict[AgentId, StateMachineAgent]:
    """Create shell agents for the reputation scenario.

    Builds one observer agent plus a population of honest and malicious
    traders.  Trader counts default to ``malicious_fraction`` of the
    non-observer agents (with at least one malicious trader) and may be
    overridden by explicit ``honest`` / ``malicious`` role entries in the
    agent config.

    Args:
        config: Scenario configuration providing agent counts, optional
            roles, and task settings (``rounds``, ``malicious_fraction``).
        plugins: Plugin mapping; unused here but kept for the common
            factory signature.
        backend: LLM backend to drive the agents; defaults to
            :class:`MockLLMBackend` when ``None``.

    Returns:
        Mapping from agent id to the constructed :class:`ShellAgent`.

    Example::

        agents = shell_reputation_factory(config, plugins, backend=MockLLMBackend())
    """
    from nest_shell.llm import MockLLMBackend

    if backend is None:
        backend = MockLLMBackend()

    task_config = config.task.config
    rounds = task_config.get("rounds", 5)
    malicious_fraction = task_config.get("malicious_fraction", 0.2)

    agents: dict[AgentId, StateMachineAgent] = {}

    # Everyone except the single observer is a trader.
    trader_count = config.agents.count - 1
    malicious_count = max(1, int(trader_count * malicious_fraction))
    honest_count = trader_count - malicious_count

    # Explicit role counts in the config override the fraction-derived split.
    if config.agents.roles:
        for role in config.agents.roles:
            if role.name == "honest":
                honest_count = role.count
            elif role.name == "malicious":
                malicious_count = role.count

    total_traders = honest_count + malicious_count

    observer_id = AgentId("observer-0")
    tpl_obs = _resolve_template(config, "observer", "reputation")
    agents[observer_id] = ShellAgent(
        agent_id=observer_id,
        role="observer",
        backend=backend,
        system_prompt=_REPUTATION_OBSERVER_PROMPT,
        num_sellers=total_traders,
        rounds=rounds,
        template=tpl_obs,
    )

    # Template resolution is loop-invariant: resolve once per role rather
    # than once per agent.
    tpl_honest = _resolve_template(config, "honest", "reputation")
    for i in range(honest_count):
        aid = AgentId(f"honest-{i}")
        agents[aid] = ShellAgent(
            agent_id=aid,
            role="honest",
            backend=backend,
            system_prompt=_REPUTATION_HONEST_PROMPT,
            num_sellers=total_traders,
            rounds=rounds,
            template=tpl_honest,
        )

    tpl_mal = _resolve_template(config, "malicious", "reputation")
    for i in range(malicious_count):
        aid = AgentId(f"malicious-{i}")
        agents[aid] = ShellAgent(
            agent_id=aid,
            role="malicious",
            backend=backend,
            system_prompt=_REPUTATION_MALICIOUS_PROMPT,
            num_sellers=total_traders,
            rounds=rounds,
            template=tpl_mal,
        )

    return agents
|
nest_shell/llm.py
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
"""LLM backend abstraction for shell agents.
|
|
3
|
+
|
|
4
|
+
Example::
|
|
5
|
+
|
|
6
|
+
backend = OpenAIBackend(model="gpt-4o-mini")
|
|
7
|
+
response = await backend.complete(messages)
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
import warnings
|
|
13
|
+
from typing import Protocol, runtime_checkable
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@runtime_checkable
class LLMBackend(Protocol):
    """Structural interface for asynchronous LLM completion backends.

    Any object exposing a matching async ``complete`` method satisfies the
    protocol -- no inheritance required.

    Example::

        class EchoBackend:
            async def complete(self, messages):
                return messages[-1]["content"]
    """

    async def complete(self, messages: list[dict[str, str]]) -> str:
        """Return the assistant's reply text for the given chat messages.

        Example::

            reply = await backend.complete([{"role": "user", "content": "hello"}])
        """
        ...
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class OpenAIBackend:
|
|
38
|
+
"""LLM backend using the OpenAI SDK.
|
|
39
|
+
|
|
40
|
+
Reads API key from ``OPENAI_API_KEY`` environment variable.
|
|
41
|
+
|
|
42
|
+
Example::
|
|
43
|
+
|
|
44
|
+
backend = OpenAIBackend(model="gpt-4o-mini", temperature=0.7)
|
|
45
|
+
response = await backend.complete(messages)
|
|
46
|
+
"""
|
|
47
|
+
|
|
48
|
+
def __init__(
|
|
49
|
+
self,
|
|
50
|
+
model: str = "gpt-4o-mini",
|
|
51
|
+
temperature: float = 0.7,
|
|
52
|
+
max_tokens: int = 256,
|
|
53
|
+
api_key: str | None = None,
|
|
54
|
+
) -> None:
|
|
55
|
+
self._model = model
|
|
56
|
+
self._temperature = temperature
|
|
57
|
+
self._max_tokens = max_tokens
|
|
58
|
+
self._api_key = api_key # None = use env var
|
|
59
|
+
|
|
60
|
+
async def complete(self, messages: list[dict[str, str]]) -> str:
|
|
61
|
+
import openai # pyright: ignore[reportMissingModuleSource]
|
|
62
|
+
|
|
63
|
+
client = openai.AsyncOpenAI( # pyright: ignore[reportUnknownMemberType]
|
|
64
|
+
api_key=self._api_key,
|
|
65
|
+
)
|
|
66
|
+
response = await client.chat.completions.create( # pyright: ignore[reportUnknownMemberType]
|
|
67
|
+
model=self._model,
|
|
68
|
+
messages=messages, # pyright: ignore[reportArgumentType]
|
|
69
|
+
temperature=self._temperature,
|
|
70
|
+
max_tokens=self._max_tokens,
|
|
71
|
+
)
|
|
72
|
+
content: str = response.choices[0].message.content or "" # pyright: ignore[reportUnknownMemberType]
|
|
73
|
+
return content
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
class AnthropicBackend:
|
|
77
|
+
"""LLM backend using the Anthropic SDK.
|
|
78
|
+
|
|
79
|
+
Reads API key from ``ANTHROPIC_API_KEY`` environment variable.
|
|
80
|
+
|
|
81
|
+
Example::
|
|
82
|
+
|
|
83
|
+
backend = AnthropicBackend(model="claude-sonnet-4-20250514")
|
|
84
|
+
response = await backend.complete(messages)
|
|
85
|
+
"""
|
|
86
|
+
|
|
87
|
+
def __init__(
|
|
88
|
+
self,
|
|
89
|
+
model: str = "claude-sonnet-4-20250514",
|
|
90
|
+
temperature: float = 0.7,
|
|
91
|
+
max_tokens: int = 256,
|
|
92
|
+
api_key: str | None = None,
|
|
93
|
+
) -> None:
|
|
94
|
+
self._model = model
|
|
95
|
+
self._temperature = temperature
|
|
96
|
+
self._max_tokens = max_tokens
|
|
97
|
+
self._api_key = api_key
|
|
98
|
+
|
|
99
|
+
async def complete(self, messages: list[dict[str, str]]) -> str:
|
|
100
|
+
import anthropic # pyright: ignore[reportMissingImports]
|
|
101
|
+
|
|
102
|
+
client = anthropic.AsyncAnthropic( # pyright: ignore[reportUnknownVariableType,reportUnknownMemberType]
|
|
103
|
+
api_key=self._api_key,
|
|
104
|
+
)
|
|
105
|
+
# Extract system message; Anthropic requires it as a separate parameter.
|
|
106
|
+
system = ""
|
|
107
|
+
chat_messages: list[dict[str, str]] = []
|
|
108
|
+
for m in messages:
|
|
109
|
+
if m["role"] == "system":
|
|
110
|
+
system = m["content"]
|
|
111
|
+
else:
|
|
112
|
+
chat_messages.append(m)
|
|
113
|
+
response: object = await client.messages.create( # pyright: ignore[reportUnknownVariableType,reportUnknownMemberType]
|
|
114
|
+
model=self._model,
|
|
115
|
+
system=system,
|
|
116
|
+
messages=chat_messages,
|
|
117
|
+
temperature=self._temperature,
|
|
118
|
+
max_tokens=self._max_tokens,
|
|
119
|
+
)
|
|
120
|
+
content_blocks: list[object] = getattr(response, "content", []) # pyright: ignore[reportUnknownArgumentType]
|
|
121
|
+
block: object = content_blocks[0] if content_blocks else None
|
|
122
|
+
return str(getattr(block, "text", "")) if block else ""
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
class LiteLLMBackend:
    """LLM backend that routes through litellm for multi-provider support.

    .. deprecated::
        Use :class:`OpenAIBackend` or :class:`AnthropicBackend` instead.

    Example::

        backend = LiteLLMBackend(model="gpt-4o-mini", temperature=0.7)
        response = await backend.complete(messages)
    """

    def __init__(
        self,
        model: str = "gpt-4o-mini",
        temperature: float = 0.7,
        max_tokens: int = 256,
    ) -> None:
        warnings.warn(
            "LiteLLMBackend is deprecated; use OpenAIBackend or AnthropicBackend instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        self._model = model
        self._temperature = temperature
        self._max_tokens = max_tokens

    async def complete(self, messages: list[dict[str, str]]) -> str:
        import litellm  # pyright: ignore[reportUnknownVariableType]

        response = await litellm.acompletion(  # pyright: ignore[reportUnknownMemberType]
            model=self._model,
            messages=messages,
            temperature=self._temperature,
            max_tokens=self._max_tokens,
        )
        # Defensive extraction: litellm responses are duck-typed objects.
        choices: list[object] = getattr(response, "choices", [])
        if not choices:
            return ""
        message: object = getattr(choices[0], "message", None)
        if not message:
            return ""
        return str(getattr(message, "content", "") or "")
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
class MockLLMBackend:
|
|
170
|
+
"""Deterministic mock backend for testing without API keys.
|
|
171
|
+
|
|
172
|
+
Returns canned responses based on simple keyword matching.
|
|
173
|
+
|
|
174
|
+
Example::
|
|
175
|
+
|
|
176
|
+
backend = MockLLMBackend()
|
|
177
|
+
response = await backend.complete([{"role": "user", "content": "buy request"}])
|
|
178
|
+
"""
|
|
179
|
+
|
|
180
|
+
def __init__(self, responses: dict[str, str] | None = None) -> None:
|
|
181
|
+
self._responses = responses or {}
|
|
182
|
+
self._call_count = 0
|
|
183
|
+
|
|
184
|
+
@property
|
|
185
|
+
def call_count(self) -> int:
|
|
186
|
+
return self._call_count
|
|
187
|
+
|
|
188
|
+
async def complete(self, messages: list[dict[str, str]]) -> str:
|
|
189
|
+
self._call_count += 1
|
|
190
|
+
last_msg = messages[-1]["content"] if messages else ""
|
|
191
|
+
|
|
192
|
+
for keyword, response in self._responses.items():
|
|
193
|
+
if keyword in last_msg:
|
|
194
|
+
return response
|
|
195
|
+
|
|
196
|
+
if "buy:" in last_msg or "purchase" in last_msg.lower():
|
|
197
|
+
return "ACTION: send\nTO: {sender}\nMESSAGE: sold:product:50"
|
|
198
|
+
if "sold:" in last_msg:
|
|
199
|
+
return "ACTION: send\nTO: {sender}\nMESSAGE: buy:product-next:60"
|
|
200
|
+
if "reject:" in last_msg:
|
|
201
|
+
return "ACTION: send\nTO: {sender}\nMESSAGE: buy:product-retry:70"
|
|
202
|
+
|
|
203
|
+
return "ACTION: none"
|
nest_shell/py.typed
ADDED
|
File without changes
|
nest_shell/templates.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
"""Agent template management for NEST shell agents.
|
|
3
|
+
|
|
4
|
+
Templates are YAML files that define how a Tier 2 (LLM-backed) agent behaves.
|
|
5
|
+
Each template specifies the system prompt, provider, model, and behavior
|
|
6
|
+
parameters.
|
|
7
|
+
|
|
8
|
+
Example::
|
|
9
|
+
|
|
10
|
+
registry = TemplateRegistry()
|
|
11
|
+
template = registry.get_template("marketplace-buyer")
|
|
12
|
+
template.to_yaml(Path("my-buyer.yaml"))
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
|
|
19
|
+
import yaml
|
|
20
|
+
from pydantic import BaseModel
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class AgentTemplate(BaseModel):
    """Schema for an agent template loaded from YAML.

    Example::

        template = AgentTemplate(
            name="my-agent",
            system_prompt="You are a helpful agent.",
        )
    """

    # NOTE: field order is significant for model_dump()/to_yaml() output.
    name: str
    description: str = ""
    provider: str = "openai"
    model: str = "gpt-4o-mini"
    system_prompt: str
    temperature: float = 0.7
    max_tokens: int = 256

    @classmethod
    def from_yaml(cls, path: str | Path) -> AgentTemplate:
        """Load an agent template from a YAML file.

        Example::

            template = AgentTemplate.from_yaml("templates/agents/marketplace-buyer.yaml")
        """
        raw = Path(path).read_text()
        data: dict[str, object] = yaml.safe_load(raw)  # type: ignore[assignment]
        return cls.model_validate(data)

    def to_yaml(self, path: str | Path) -> Path:
        """Save the template to a YAML file.

        Example::

            saved = template.to_yaml("my-template.yaml")
        """
        destination = Path(path)
        destination.parent.mkdir(parents=True, exist_ok=True)
        with destination.open("w") as handle:
            # Preserve field order in the output file.
            yaml.dump(self.model_dump(), handle, default_flow_style=False, sort_keys=False)
        return destination
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def _builtin_templates_dir() -> Path:
|
|
70
|
+
"""Return the path to the built-in templates/agents directory.
|
|
71
|
+
|
|
72
|
+
Searches relative to the nest_shell package and CWD.
|
|
73
|
+
|
|
74
|
+
Example::
|
|
75
|
+
|
|
76
|
+
d = _builtin_templates_dir()
|
|
77
|
+
"""
|
|
78
|
+
# Relative to the nest_shell package: ../../templates/agents
|
|
79
|
+
pkg_dir = Path(__file__).resolve().parent
|
|
80
|
+
for ancestor in [
|
|
81
|
+
pkg_dir.parent,
|
|
82
|
+
pkg_dir.parent.parent,
|
|
83
|
+
pkg_dir.parent.parent.parent,
|
|
84
|
+
]:
|
|
85
|
+
candidate = ancestor / "templates" / "agents"
|
|
86
|
+
if candidate.is_dir():
|
|
87
|
+
return candidate
|
|
88
|
+
|
|
89
|
+
# Fall back to CWD
|
|
90
|
+
cwd_candidate = Path.cwd() / "templates" / "agents"
|
|
91
|
+
if cwd_candidate.is_dir():
|
|
92
|
+
return cwd_candidate
|
|
93
|
+
|
|
94
|
+
return cwd_candidate
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
class TemplateRegistry:
    """Discovers built-in and user templates.

    Example::

        registry = TemplateRegistry()
        templates = registry.list_templates()
    """

    def __init__(self, user_dir: str | Path | None = None) -> None:
        self._user_dir = Path(user_dir) if user_dir else None
        self._builtin_dir = _builtin_templates_dir()

    def _search_dirs(self) -> list[Path]:
        """Return directories to search for templates, in priority order.

        Example::

            dirs = registry._search_dirs()
        """
        # User directory first so user templates shadow built-ins.
        candidates = (self._user_dir, self._builtin_dir)
        return [d for d in candidates if d and d.is_dir()]

    def list_templates(self) -> list[AgentTemplate]:
        """List all available templates (user overrides first, then built-in).

        Example::

            templates = registry.list_templates()
        """
        by_name: dict[str, AgentTemplate] = {}
        for directory in self._search_dirs():
            for yaml_file in sorted(directory.glob("*.yaml")):
                try:
                    template = AgentTemplate.from_yaml(yaml_file)
                except Exception:  # noqa: BLE001
                    # Skip unreadable or malformed template files.
                    continue
                # First occurrence wins, preserving discovery order.
                by_name.setdefault(template.name, template)
        return list(by_name.values())

    def get_template(self, name: str) -> AgentTemplate:
        """Get a template by name.

        Raises ``KeyError`` if not found.

        Example::

            tpl = registry.get_template("marketplace-buyer")
        """
        for directory in self._search_dirs():
            path = directory / f"{name}.yaml"
            if path.exists():
                return AgentTemplate.from_yaml(path)
        raise KeyError(f"Template not found: {name!r}")

    def save_template(self, template: AgentTemplate) -> Path:
        """Save a template to the user directory.

        Example::

            path = registry.save_template(template)
        """
        directory = self._user_dir if self._user_dir else Path.cwd() / "templates" / "agents"
        directory.mkdir(parents=True, exist_ok=True)
        return template.to_yaml(directory / f"{template.name}.yaml")

    def duplicate_template(self, name: str, new_name: str) -> AgentTemplate:
        """Duplicate an existing template under a new name.

        Example::

            new_tpl = registry.duplicate_template("marketplace-buyer", "my-buyer")
        """
        duplicated = self.get_template(name).model_copy(update={"name": new_name})
        self.save_template(duplicated)
        return duplicated
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: nest-shell
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: NEST shell: Tier 2 LLM-backed reference agent
|
|
5
|
+
Project-URL: Homepage, https://github.com/mariagorskikh/nest
|
|
6
|
+
Project-URL: Repository, https://github.com/mariagorskikh/nest
|
|
7
|
+
License-Expression: Apache-2.0
|
|
8
|
+
Classifier: Development Status :: 3 - Alpha
|
|
9
|
+
Classifier: License :: OSI Approved :: Apache Software License
|
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
12
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
13
|
+
Classifier: Topic :: Software Development :: Testing
|
|
14
|
+
Classifier: Typing :: Typed
|
|
15
|
+
Requires-Python: >=3.12
|
|
16
|
+
Requires-Dist: nest-core
|
|
17
|
+
Requires-Dist: nest-sdk
|
|
18
|
+
Requires-Dist: openai>=1.0
|
|
19
|
+
Provides-Extra: all
|
|
20
|
+
Requires-Dist: anthropic>=0.30; extra == 'all'
|
|
21
|
+
Requires-Dist: litellm>=1.0; extra == 'all'
|
|
22
|
+
Provides-Extra: anthropic
|
|
23
|
+
Requires-Dist: anthropic>=0.30; extra == 'anthropic'
|
|
24
|
+
Provides-Extra: litellm
|
|
25
|
+
Requires-Dist: litellm>=1.0; extra == 'litellm'
|
|
26
|
+
Description-Content-Type: text/markdown
|
|
27
|
+
|
|
28
|
+
# nest-shell
|
|
29
|
+
|
|
30
|
+
NEST shell: Tier 2 LLM-backed reference agent
|
|
31
|
+
|
|
32
|
+
Part of [NEST](https://github.com/mariagorskikh/nest) (Network Environment for Swarm Testing), built at MIT Media Lab.
|
|
33
|
+
|
|
34
|
+
## Installation
|
|
35
|
+
|
|
36
|
+
```bash
|
|
37
|
+
pip install nest-shell
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
See the [main repository](https://github.com/mariagorskikh/nest) for full documentation.
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
nest_shell/__init__.py,sha256=ynXv-aCpbj0datxuv4qqIlEDy2SOaumsGMEVnJm1teA,1099
|
|
2
|
+
nest_shell/agent.py,sha256=eFXgMwxYPin_8dG0dln5kTyJNStHwmOQYm9RCtp1GqA,7843
|
|
3
|
+
nest_shell/factories.py,sha256=HCb5p_tnTweQzcrw3VhJseHJXteHHLh81pyC6XODJGI,16841
|
|
4
|
+
nest_shell/llm.py,sha256=2WTmtfey4I0G7FPxcE6mG2MK12OwDgFaX0eoBTF5UnQ,6555
|
|
5
|
+
nest_shell/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
6
|
+
nest_shell/templates.py,sha256=RqU22ODAOJTb6ecSY6jVQ_QrImro18D2QCIsaii_w3g,5256
|
|
7
|
+
nest_shell-0.1.0.dist-info/METADATA,sha256=3K7WSQR2cbebTqTjvRLbJXtm2PrK31-eQxzFBqhU-IU,1348
|
|
8
|
+
nest_shell-0.1.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
|
|
9
|
+
nest_shell-0.1.0.dist-info/RECORD,,
|