cogames-agents 0.0.0.7__cp312-cp312-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cogames_agents/__init__.py +0 -0
- cogames_agents/evals/__init__.py +5 -0
- cogames_agents/evals/planky_evals.py +415 -0
- cogames_agents/policy/__init__.py +0 -0
- cogames_agents/policy/evolution/__init__.py +0 -0
- cogames_agents/policy/evolution/cogsguard/__init__.py +0 -0
- cogames_agents/policy/evolution/cogsguard/evolution.py +695 -0
- cogames_agents/policy/evolution/cogsguard/evolutionary_coordinator.py +540 -0
- cogames_agents/policy/nim_agents/__init__.py +20 -0
- cogames_agents/policy/nim_agents/agents.py +98 -0
- cogames_agents/policy/nim_agents/bindings/generated/libnim_agents.dylib +0 -0
- cogames_agents/policy/nim_agents/bindings/generated/nim_agents.py +215 -0
- cogames_agents/policy/nim_agents/cogsguard_agents.nim +555 -0
- cogames_agents/policy/nim_agents/cogsguard_align_all_agents.nim +569 -0
- cogames_agents/policy/nim_agents/common.nim +1054 -0
- cogames_agents/policy/nim_agents/install.sh +1 -0
- cogames_agents/policy/nim_agents/ladybug_agent.nim +954 -0
- cogames_agents/policy/nim_agents/nim_agents.nim +68 -0
- cogames_agents/policy/nim_agents/nim_agents.nims +14 -0
- cogames_agents/policy/nim_agents/nimby.lock +3 -0
- cogames_agents/policy/nim_agents/racecar_agents.nim +844 -0
- cogames_agents/policy/nim_agents/random_agents.nim +68 -0
- cogames_agents/policy/nim_agents/test_agents.py +53 -0
- cogames_agents/policy/nim_agents/thinky_agents.nim +677 -0
- cogames_agents/policy/nim_agents/thinky_eval.py +230 -0
- cogames_agents/policy/scripted_agent/README.md +360 -0
- cogames_agents/policy/scripted_agent/__init__.py +0 -0
- cogames_agents/policy/scripted_agent/baseline_agent.py +1031 -0
- cogames_agents/policy/scripted_agent/cogas/__init__.py +5 -0
- cogames_agents/policy/scripted_agent/cogas/context.py +68 -0
- cogames_agents/policy/scripted_agent/cogas/entity_map.py +152 -0
- cogames_agents/policy/scripted_agent/cogas/goal.py +115 -0
- cogames_agents/policy/scripted_agent/cogas/goals/__init__.py +27 -0
- cogames_agents/policy/scripted_agent/cogas/goals/aligner.py +160 -0
- cogames_agents/policy/scripted_agent/cogas/goals/gear.py +197 -0
- cogames_agents/policy/scripted_agent/cogas/goals/miner.py +441 -0
- cogames_agents/policy/scripted_agent/cogas/goals/scout.py +40 -0
- cogames_agents/policy/scripted_agent/cogas/goals/scrambler.py +174 -0
- cogames_agents/policy/scripted_agent/cogas/goals/shared.py +160 -0
- cogames_agents/policy/scripted_agent/cogas/goals/stem.py +60 -0
- cogames_agents/policy/scripted_agent/cogas/goals/survive.py +100 -0
- cogames_agents/policy/scripted_agent/cogas/navigator.py +401 -0
- cogames_agents/policy/scripted_agent/cogas/obs_parser.py +238 -0
- cogames_agents/policy/scripted_agent/cogas/policy.py +525 -0
- cogames_agents/policy/scripted_agent/cogas/trace.py +69 -0
- cogames_agents/policy/scripted_agent/cogsguard/CLAUDE.md +517 -0
- cogames_agents/policy/scripted_agent/cogsguard/README.md +252 -0
- cogames_agents/policy/scripted_agent/cogsguard/__init__.py +74 -0
- cogames_agents/policy/scripted_agent/cogsguard/aligned_junction_held_investigation.md +152 -0
- cogames_agents/policy/scripted_agent/cogsguard/aligner.py +333 -0
- cogames_agents/policy/scripted_agent/cogsguard/behavior_hooks.py +44 -0
- cogames_agents/policy/scripted_agent/cogsguard/control_agent.py +323 -0
- cogames_agents/policy/scripted_agent/cogsguard/debug_agent.py +533 -0
- cogames_agents/policy/scripted_agent/cogsguard/miner.py +589 -0
- cogames_agents/policy/scripted_agent/cogsguard/options.py +67 -0
- cogames_agents/policy/scripted_agent/cogsguard/parity_metrics.py +36 -0
- cogames_agents/policy/scripted_agent/cogsguard/policy.py +1967 -0
- cogames_agents/policy/scripted_agent/cogsguard/prereq_trace.py +33 -0
- cogames_agents/policy/scripted_agent/cogsguard/role_trace.py +50 -0
- cogames_agents/policy/scripted_agent/cogsguard/roles.py +31 -0
- cogames_agents/policy/scripted_agent/cogsguard/rollout_trace.py +40 -0
- cogames_agents/policy/scripted_agent/cogsguard/scout.py +69 -0
- cogames_agents/policy/scripted_agent/cogsguard/scrambler.py +350 -0
- cogames_agents/policy/scripted_agent/cogsguard/targeted_agent.py +418 -0
- cogames_agents/policy/scripted_agent/cogsguard/teacher.py +224 -0
- cogames_agents/policy/scripted_agent/cogsguard/types.py +381 -0
- cogames_agents/policy/scripted_agent/cogsguard/v2_agent.py +49 -0
- cogames_agents/policy/scripted_agent/common/__init__.py +0 -0
- cogames_agents/policy/scripted_agent/common/geometry.py +24 -0
- cogames_agents/policy/scripted_agent/common/roles.py +34 -0
- cogames_agents/policy/scripted_agent/common/tag_utils.py +48 -0
- cogames_agents/policy/scripted_agent/demo_policy.py +242 -0
- cogames_agents/policy/scripted_agent/pathfinding.py +126 -0
- cogames_agents/policy/scripted_agent/pinky/DESIGN.md +317 -0
- cogames_agents/policy/scripted_agent/pinky/__init__.py +5 -0
- cogames_agents/policy/scripted_agent/pinky/behaviors/__init__.py +17 -0
- cogames_agents/policy/scripted_agent/pinky/behaviors/aligner.py +400 -0
- cogames_agents/policy/scripted_agent/pinky/behaviors/base.py +119 -0
- cogames_agents/policy/scripted_agent/pinky/behaviors/miner.py +632 -0
- cogames_agents/policy/scripted_agent/pinky/behaviors/scout.py +138 -0
- cogames_agents/policy/scripted_agent/pinky/behaviors/scrambler.py +433 -0
- cogames_agents/policy/scripted_agent/pinky/policy.py +570 -0
- cogames_agents/policy/scripted_agent/pinky/services/__init__.py +7 -0
- cogames_agents/policy/scripted_agent/pinky/services/map_tracker.py +808 -0
- cogames_agents/policy/scripted_agent/pinky/services/navigator.py +864 -0
- cogames_agents/policy/scripted_agent/pinky/services/safety.py +189 -0
- cogames_agents/policy/scripted_agent/pinky/state.py +299 -0
- cogames_agents/policy/scripted_agent/pinky/types.py +138 -0
- cogames_agents/policy/scripted_agent/planky/CLAUDE.md +124 -0
- cogames_agents/policy/scripted_agent/planky/IMPROVEMENTS.md +160 -0
- cogames_agents/policy/scripted_agent/planky/NOTES.md +153 -0
- cogames_agents/policy/scripted_agent/planky/PLAN.md +254 -0
- cogames_agents/policy/scripted_agent/planky/README.md +214 -0
- cogames_agents/policy/scripted_agent/planky/STRATEGY.md +100 -0
- cogames_agents/policy/scripted_agent/planky/__init__.py +5 -0
- cogames_agents/policy/scripted_agent/planky/context.py +68 -0
- cogames_agents/policy/scripted_agent/planky/entity_map.py +152 -0
- cogames_agents/policy/scripted_agent/planky/goal.py +107 -0
- cogames_agents/policy/scripted_agent/planky/goals/__init__.py +27 -0
- cogames_agents/policy/scripted_agent/planky/goals/aligner.py +168 -0
- cogames_agents/policy/scripted_agent/planky/goals/gear.py +179 -0
- cogames_agents/policy/scripted_agent/planky/goals/miner.py +416 -0
- cogames_agents/policy/scripted_agent/planky/goals/scout.py +40 -0
- cogames_agents/policy/scripted_agent/planky/goals/scrambler.py +174 -0
- cogames_agents/policy/scripted_agent/planky/goals/shared.py +160 -0
- cogames_agents/policy/scripted_agent/planky/goals/stem.py +49 -0
- cogames_agents/policy/scripted_agent/planky/goals/survive.py +96 -0
- cogames_agents/policy/scripted_agent/planky/navigator.py +388 -0
- cogames_agents/policy/scripted_agent/planky/obs_parser.py +238 -0
- cogames_agents/policy/scripted_agent/planky/policy.py +485 -0
- cogames_agents/policy/scripted_agent/planky/tests/__init__.py +0 -0
- cogames_agents/policy/scripted_agent/planky/tests/conftest.py +66 -0
- cogames_agents/policy/scripted_agent/planky/tests/helpers.py +152 -0
- cogames_agents/policy/scripted_agent/planky/tests/test_aligner.py +24 -0
- cogames_agents/policy/scripted_agent/planky/tests/test_miner.py +30 -0
- cogames_agents/policy/scripted_agent/planky/tests/test_scout.py +15 -0
- cogames_agents/policy/scripted_agent/planky/tests/test_scrambler.py +29 -0
- cogames_agents/policy/scripted_agent/planky/tests/test_stem.py +36 -0
- cogames_agents/policy/scripted_agent/planky/trace.py +69 -0
- cogames_agents/policy/scripted_agent/types.py +239 -0
- cogames_agents/policy/scripted_agent/unclipping_agent.py +461 -0
- cogames_agents/policy/scripted_agent/utils.py +381 -0
- cogames_agents/policy/scripted_registry.py +80 -0
- cogames_agents/py.typed +0 -0
- cogames_agents-0.0.0.7.dist-info/METADATA +98 -0
- cogames_agents-0.0.0.7.dist-info/RECORD +128 -0
- cogames_agents-0.0.0.7.dist-info/WHEEL +6 -0
- cogames_agents-0.0.0.7.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
"""Aligner goals — align neutral junctions."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import TYPE_CHECKING, Optional
|
|
6
|
+
|
|
7
|
+
from cogames_agents.policy.scripted_agent.planky.goal import Goal
|
|
8
|
+
from cogames_agents.policy.scripted_agent.planky.navigator import _manhattan
|
|
9
|
+
from mettagrid.simulator import Action
|
|
10
|
+
|
|
11
|
+
from .gear import GetGearGoal
|
|
12
|
+
|
|
13
|
+
if TYPE_CHECKING:
|
|
14
|
+
from cogames_agents.policy.scripted_agent.planky.context import PlankyContext
|
|
15
|
+
|
|
16
|
+
JUNCTION_AOE_RANGE = 10
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class GetAlignerGearGoal(GetGearGoal):
    """Acquire aligner gear from the aligner station.

    Producing the gear draws carbon 3 / oxygen 1 / germanium 1 / silicon 1
    from the collective stockpile (see ``GetGearGoal`` for the mechanics).
    """

    def __init__(self) -> None:
        cost = {"carbon": 3, "oxygen": 1, "germanium": 1, "silicon": 1}
        super().__init__(
            gear_attr="aligner_gear",
            station_type="aligner_station",
            goal_name="GetAlignerGear",
            gear_cost=cost,
        )
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class AlignJunctionGoal(Goal):
    """Find and align a neutral junction to cogs.

    Tracks attempts per junction to avoid getting stuck on one that
    can't be captured (e.g., already aligned but map hasn't updated).
    Per-target counters live on the context blackboard.
    """

    name = "AlignJunction"
    # Bumps at an adjacent junction before temporarily blacklisting it.
    MAX_ATTEMPTS_PER_TARGET = 5
    # Navigation steps spent on one junction before temporarily blacklisting it.
    MAX_NAV_STEPS_PER_TARGET = 40
    # Steps a blacklisted junction stays off the candidate list.
    COOLDOWN_STEPS = 50

    def is_satisfied(self, ctx: PlankyContext) -> bool:
        """Treat the goal as satisfied (skip it) when it cannot run at all.

        Aligning requires both aligner gear and at least one heart.
        """
        # Can't align without gear and a heart
        if not ctx.state.aligner_gear:
            if ctx.trace:
                ctx.trace.skip(self.name, "no gear")
            return True
        if ctx.state.heart < 1:
            if ctx.trace:
                ctx.trace.skip(self.name, "no heart")
            return True
        return False

    def _explore(self, ctx: PlankyContext) -> Optional[Action]:
        """Explore with a per-agent direction bias so agents fan out."""
        return ctx.navigator.explore(
            ctx.state.position,
            ctx.map,
            direction_bias=["north", "east", "south", "west"][ctx.agent_id % 4],
        )

    def execute(self, ctx: PlankyContext) -> Optional[Action]:
        """Navigate to the nearest neutral junction and bump it to align it.

        Returns the next Action. Targets that time out or absorb too many
        bumps are put on a cooldown (via blackboard) so a different junction
        is tried next; with no target, the agent explores instead.
        """
        nav_key = "_align_nav_steps"
        nav_target_key = "_align_nav_target"
        nav_steps = ctx.blackboard.get(nav_key, 0) + 1
        ctx.blackboard[nav_key] = nav_steps

        target = self._find_best_target(ctx)
        if target is None:
            ctx.blackboard[nav_key] = 0
            return self._explore(ctx)

        # Reset nav counter if target changed
        prev_target = ctx.blackboard.get(nav_target_key)
        if prev_target != target:
            ctx.blackboard[nav_key] = 0
            nav_steps = 0
            ctx.blackboard[nav_target_key] = target

        # Nav timeout — mark target as failed and wander off
        if nav_steps > self.MAX_NAV_STEPS_PER_TARGET:
            failed_key = f"align_failed_{target}"
            ctx.blackboard[failed_key] = ctx.step
            ctx.blackboard[nav_key] = 0
            if ctx.trace:
                ctx.trace.activate(self.name, f"nav timeout on {target}")
            return self._explore(ctx)

        if ctx.trace:
            ctx.trace.nav_target = target

        dist = _manhattan(ctx.state.position, target)
        if dist <= 1:
            # Track attempts on this specific junction
            attempts_key = f"align_attempts_{target}"
            attempts = ctx.blackboard.get(attempts_key, 0) + 1
            ctx.blackboard[attempts_key] = attempts

            if attempts > self.MAX_ATTEMPTS_PER_TARGET:
                # Mark this junction as failed temporarily
                failed_key = f"align_failed_{target}"
                ctx.blackboard[failed_key] = ctx.step
                ctx.blackboard[attempts_key] = 0
                if ctx.trace:
                    ctx.trace.activate(self.name, f"giving up on {target}")
                # Clear and try a different junction next tick
                return self._explore(ctx)

            if ctx.trace:
                ctx.trace.activate(self.name, f"bump {attempts}/{self.MAX_ATTEMPTS_PER_TARGET}")
            return _move_toward(ctx.state.position, target)

        # Not adjacent - reset attempts for this target
        attempts_key = f"align_attempts_{target}"
        ctx.blackboard[attempts_key] = 0
        return ctx.navigator.get_action(ctx.state.position, target, ctx.map, reach_adjacent=True)

    def _find_best_target(self, ctx: PlankyContext) -> tuple[int, int] | None:
        """Find the nearest neutral junction not on failure cooldown.

        Fix: the original ran two byte-identical scans over
        ``ctx.map.find(type_contains="junction")``, appending every candidate
        twice. The duplicate loop (presumably meant to add contested junctions
        with a different filter — TODO confirm intent) is removed; nearest-
        candidate selection is unchanged since duplicates never affected the
        minimum.
        """
        pos = ctx.state.position

        def recently_failed(p: tuple[int, int]) -> bool:
            failed_step = ctx.blackboard.get(f"align_failed_{p}", -9999)
            return ctx.step - failed_step < self.COOLDOWN_STEPS

        # Find neutral junctions (no AOE filter — aligners go where needed)
        candidates: list[tuple[int, tuple[int, int]]] = []
        for jpos, e in ctx.map.find(type_contains="junction"):
            alignment = e.properties.get("alignment")
            if alignment is not None:
                continue  # Not neutral
            if recently_failed(jpos):
                continue
            candidates.append((_manhattan(pos, jpos), jpos))

        if not candidates:
            return None
        candidates.sort()
        return candidates[0][1]
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def _move_toward(current: tuple[int, int], target: tuple[int, int]) -> Action:
    """Step one cell toward ``target``, preferring the axis with the larger gap.

    Positions are (row, col); growing row is south, growing col is east.
    Falls back to north when already on the target.
    """
    row_gap = target[0] - current[0]
    col_gap = target[1] - current[1]
    if abs(row_gap) >= abs(col_gap) and row_gap != 0:
        return Action(name="move_south" if row_gap > 0 else "move_north")
    if col_gap != 0:
        return Action(name="move_east" if col_gap > 0 else "move_west")
    return Action(name="move_north")
|
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
"""GetGearGoal — navigate to a station to acquire gear."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import TYPE_CHECKING, Optional
|
|
6
|
+
|
|
7
|
+
from cogames_agents.policy.scripted_agent.planky.goal import Goal
|
|
8
|
+
from cogames_agents.policy.scripted_agent.planky.navigator import _manhattan
|
|
9
|
+
from mettagrid.simulator import Action
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from cogames_agents.policy.scripted_agent.planky.context import PlankyContext
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class GetGearGoal(Goal):
    """Navigate to a station to acquire gear for a role.

    If the team lacks resources to produce gear, the station won't give any.
    Checks collective resources before attempting, to avoid wasting time bumping
    a station that can't dispense gear.
    """

    # How many bump attempts at dist=1 before exploring for another route
    MAX_BUMPS_AT_STATION = 5
    # How many total steps trying to get gear before giving up temporarily
    MAX_TOTAL_ATTEMPTS = 80
    # How many steps to wait before trying again
    RETRY_INTERVAL = 150

    def __init__(
        self,
        gear_attr: str,
        station_type: str,
        goal_name: str,
        gear_cost: dict[str, int] | None = None,
    ) -> None:
        self.name = goal_name
        self._gear_attr = gear_attr  # e.g. "miner_gear"
        self._station_type = station_type  # e.g. "miner_station"
        self._gear_cost = gear_cost or {}
        # Blackboard keys are namespaced by goal name so multiple GetGearGoal
        # instances (one per role) don't clobber each other's counters.
        self._bb_attempts_key = f"{goal_name}_total_attempts"
        self._bb_giveup_step_key = f"{goal_name}_giveup_step"
        self._bb_bump_count_key = f"{goal_name}_bump_count"
        self._bb_last_dist_key = f"{goal_name}_last_dist"

    # Minimum collective resource reserve — don't consume below this level
    RESOURCE_RESERVE = 3

    def _collective_can_afford(self, ctx: PlankyContext) -> bool:
        """Check if the collective can afford gear while maintaining reserves."""
        if not self._gear_cost:
            return True
        s = ctx.state
        collective = {
            "carbon": s.collective_carbon,
            "oxygen": s.collective_oxygen,
            "germanium": s.collective_germanium,
            "silicon": s.collective_silicon,
        }
        # Must have cost + reserve for each resource
        return all(collective.get(res, 0) >= amt + self.RESOURCE_RESERVE for res, amt in self._gear_cost.items())

    def is_satisfied(self, ctx: PlankyContext) -> bool:
        """Return True when this goal should be skipped this tick.

        Skipped when: gear is already held (also resets the attempt counters
        as a side effect), a recent give-up is still cooling down, or the
        collective can't afford the gear cost plus reserve.
        """
        # Satisfied if we have the gear
        if getattr(ctx.state, self._gear_attr, False):
            # Got gear - reset attempts for next time
            ctx.blackboard[self._bb_attempts_key] = 0
            ctx.blackboard[self._bb_bump_count_key] = 0
            return True
        # Also "satisfied" (skip) if we gave up recently
        giveup_step = ctx.blackboard.get(self._bb_giveup_step_key, -9999)
        if ctx.step - giveup_step < self.RETRY_INTERVAL:
            return True
        # Skip if collective can't afford this gear
        if not self._collective_can_afford(ctx):
            if ctx.trace:
                ctx.trace.skip(self.name, "collective lacks resources")
            return True
        return False

    def execute(self, ctx: PlankyContext) -> Optional[Action]:
        """Head toward the role's station and bump it to receive gear.

        Returns the next Action, or None (yield to the next goal) after
        exceeding MAX_TOTAL_ATTEMPTS. Attempt/bump counters persist on the
        blackboard between calls.
        """
        # Track total attempts regardless of distance
        attempts = ctx.blackboard.get(self._bb_attempts_key, 0) + 1
        ctx.blackboard[self._bb_attempts_key] = attempts

        if attempts > self.MAX_TOTAL_ATTEMPTS:
            # Give up - team probably lacks resources or station unreachable
            ctx.blackboard[self._bb_giveup_step_key] = ctx.step
            ctx.blackboard[self._bb_attempts_key] = 0
            ctx.blackboard[self._bb_bump_count_key] = 0
            if ctx.trace:
                ctx.trace.activate(self.name, "giving up after max attempts")
            return None  # Skip to next goal

        # Find station by type (filter to own team if known)
        pf = {"collective_id": ctx.my_collective_id} if ctx.my_collective_id is not None else None
        result = ctx.map.find_nearest(ctx.state.position, type_contains=self._station_type, property_filter=pf)
        if result is None:
            # Station not discovered yet — navigate toward hub (spawn) where stations are
            from cogames_agents.policy.scripted_agent.planky.policy import SPAWN_POS

            hub_dist = _manhattan(ctx.state.position, SPAWN_POS)
            if ctx.trace:
                ctx.trace.activate(self.name, f"exploring for {self._station_type} (hub dist={hub_dist})")
            if hub_dist > 3:
                # Navigate toward hub
                return ctx.navigator.get_action(ctx.state.position, SPAWN_POS, ctx.map, reach_adjacent=True)
            # At hub — explore nearby to find the station
            return ctx.navigator.explore(ctx.state.position, ctx.map)

        station_pos, _ = result
        dist = _manhattan(ctx.state.position, station_pos)

        if ctx.trace:
            ctx.trace.nav_target = station_pos

        # Track if we're making progress toward the station
        last_dist = ctx.blackboard.get(self._bb_last_dist_key, 999)
        ctx.blackboard[self._bb_last_dist_key] = dist

        if dist <= 1:
            # Adjacent to station — try to bump into it
            bump_count = ctx.blackboard.get(self._bb_bump_count_key, 0) + 1
            ctx.blackboard[self._bb_bump_count_key] = bump_count

            if bump_count > self.MAX_BUMPS_AT_STATION:
                # Stuck at dist=1 - explore to find another path
                ctx.blackboard[self._bb_bump_count_key] = 0
                if ctx.trace:
                    ctx.trace.activate(self.name, "stuck at dist=1, exploring")
                # Clear navigator cache and explore a random direction
                # NOTE(review): pokes navigator internals to force a re-plan —
                # consider exposing a public reset method on the navigator.
                ctx.navigator._cached_path = None
                ctx.navigator._cached_target = None
                return ctx.navigator.explore(ctx.state.position, ctx.map)

            if ctx.trace:
                ctx.trace.activate(self.name, f"bump {bump_count}/{self.MAX_BUMPS_AT_STATION}")
            return _move_toward(ctx.state.position, station_pos)

        # Not adjacent yet - navigate toward station
        ctx.blackboard[self._bb_bump_count_key] = 0

        # If we're not making progress (dist not decreasing), clear cache and try fresh path
        if dist >= last_dist and attempts > 10:
            ctx.navigator._cached_path = None
            ctx.navigator._cached_target = None

        return ctx.navigator.get_action(ctx.state.position, station_pos, ctx.map, reach_adjacent=True)
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def _move_toward(current: tuple[int, int], target: tuple[int, int]) -> Action:
    """Move one step toward target, trying the most direct direction."""
    row_gap = target[0] - current[0]
    col_gap = target[1] - current[1]

    # When exactly adjacent (dist=1), we want to bump INTO the target:
    # pick the direction that would move us onto its cell.
    adjacent_moves = {
        (1, 0): "move_south",
        (-1, 0): "move_north",
        (0, 1): "move_east",
        (0, -1): "move_west",
    }
    bump = adjacent_moves.get((row_gap, col_gap))
    if bump is not None:
        return Action(name=bump)

    # Farther away: walk the axis with the larger remaining distance first.
    if abs(row_gap) >= abs(col_gap) and row_gap != 0:
        return Action(name="move_south" if row_gap > 0 else "move_north")
    if col_gap != 0:
        return Action(name="move_east" if col_gap > 0 else "move_west")

    # On target — shouldn't happen, but bump north as fallback
    return Action(name="move_north")
|