plancraft 0.1.0__tar.gz → 0.1.1__tar.gz

Files changed (38)
  1. {plancraft-0.1.0/plancraft/plancraft.egg-info → plancraft-0.1.1}/PKG-INFO +22 -1
  2. plancraft-0.1.1/README.md +22 -0
  3. plancraft-0.1.1/plancraft/environments/env_real.py +316 -0
  4. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/environments/env_symbolic.py +0 -3
  5. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/generators.py +1 -4
  6. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/oam.py +2 -3
  7. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/oracle.py +2 -5
  8. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/react.py +2 -7
  9. {plancraft-0.1.0 → plancraft-0.1.1/plancraft/plancraft.egg-info}/PKG-INFO +22 -1
  10. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/plancraft.egg-info/requires.txt +1 -0
  11. {plancraft-0.1.0 → plancraft-0.1.1}/pyproject.toml +3 -2
  12. plancraft-0.1.0/README.md +0 -2
  13. plancraft-0.1.0/plancraft/environments/env_real.py +0 -315
  14. {plancraft-0.1.0 → plancraft-0.1.1}/LICENSE +0 -0
  15. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/environments/__init__.py +0 -0
  16. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/environments/actions.py +0 -0
  17. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/environments/items.py +0 -0
  18. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/environments/planner.py +0 -0
  19. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/environments/recipes.py +0 -0
  20. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/environments/sampler.py +0 -0
  21. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/__init__.py +0 -0
  22. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/act.py +0 -0
  23. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/base.py +0 -0
  24. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/bbox_model.py +0 -0
  25. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/dummy.py +0 -0
  26. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/few_shot_images/__init__.py +0 -0
  27. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/prompts.py +0 -0
  28. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/utils.py +0 -0
  29. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/plancraft.egg-info/SOURCES.txt +0 -0
  30. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/plancraft.egg-info/dependency_links.txt +0 -0
  31. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/plancraft.egg-info/top_level.txt +0 -0
  32. {plancraft-0.1.0 → plancraft-0.1.1}/plancraft/train/dataset.py +0 -0
  33. {plancraft-0.1.0 → plancraft-0.1.1}/setup.cfg +0 -0
  34. {plancraft-0.1.0 → plancraft-0.1.1}/tests/test_planner.py +0 -0
  35. {plancraft-0.1.0 → plancraft-0.1.1}/tests/test_real_env.py +0 -0
  36. {plancraft-0.1.0 → plancraft-0.1.1}/tests/test_recipes.py +0 -0
  37. {plancraft-0.1.0 → plancraft-0.1.1}/tests/test_sampler.py +0 -0
  38. {plancraft-0.1.0 → plancraft-0.1.1}/tests/test_symbolic_env.py +0 -0

{plancraft-0.1.0/plancraft/plancraft.egg-info → plancraft-0.1.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: plancraft
- Version: 0.1.0
+ Version: 0.1.1
  Summary: Plancraft: an evaluation dataset for planning with LLM agents
  Requires-Python: >=3.9
  Description-Content-Type: text/markdown
@@ -20,6 +20,7 @@ Requires-Dist: imageio>=2.36.0
  Requires-Dist: inflection>=0.3.1
  Requires-Dist: ipython>=7.5.0
  Requires-Dist: jinja2>=2.11.2
+ Requires-Dist: loguru>=0.7.2
  Requires-Dist: lxml>=4.3.3
  Requires-Dist: matplotlib>=3.9.2
  Requires-Dist: networkx>=3.2.1
@@ -50,4 +51,24 @@ Provides-Extra: full
  Requires-Dist: gym<=0.23.1,>=0.19.0; extra == "full"

  # plancraft
+
+ [![Test](https://github.com/gautierdag/plancraft/actions/workflows/test.yaml/badge.svg)](https://github.com/gautierdag/plancraft/actions/workflows/test.yaml)
+ ![Python Version](https://img.shields.io/badge/python-3.9+-blue)
+ ![Ruff](https://img.shields.io/badge/linter-ruff-blue)
+ [![PyPI Version](https://img.shields.io/pypi/v/plancraft)](https://pypi.org/project/plancraft/)
+
  Plancraft is a minecraft environment and agent that innovates on planning LLM agents with a retriever
+
+ You can install the package by running the following command:
+
+ ```bash
+ pip install plancraft
+ ```
+
+ Should you need the multimodal version of the package, you will also need a custom [fork](https://github.com/gautierdag/minerl.git) of the minerl package. You can install it by running the following command:
+
+ ```bash
+ pip install git+https://github.com/gautierdag/minerl.git
+ ```
+
+ Note that you may need to follow the same installation instructions as in the [minerl documentation](https://minerl.readthedocs.io/en/latest/tutorials/index.html).
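
A minimal sketch (not part of the diff) of how the optional minerl backend can be probed after following the install steps above. It relies on the fallback behaviour of `RealPlancraft` in `env_real.py` further down; the inventory item fields other than `"slot"` are illustrative assumptions.

```python
# Sketch: probe whether the minerl-backed environment is usable.
# When minerl is missing, plancraft 0.1.1 substitutes a stub RealPlancraft
# that logs a warning and raises ImportError on construction (see env_real.py below).
from plancraft.environments.env_real import RealPlancraft

try:
    # "slot" is the only inventory key this diff documents; the other fields
    # here are hypothetical.
    env = RealPlancraft(inventory=[{"slot": 10, "type": "iron_ore", "quantity": 1}])
except ImportError:
    print("minerl not installed: install the fork above or use the symbolic environment")
```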

plancraft-0.1.1/README.md ADDED
@@ -0,0 +1,22 @@
+ # plancraft
+
+ [![Test](https://github.com/gautierdag/plancraft/actions/workflows/test.yaml/badge.svg)](https://github.com/gautierdag/plancraft/actions/workflows/test.yaml)
+ ![Python Version](https://img.shields.io/badge/python-3.9+-blue)
+ ![Ruff](https://img.shields.io/badge/linter-ruff-blue)
+ [![PyPI Version](https://img.shields.io/pypi/v/plancraft)](https://pypi.org/project/plancraft/)
+
+ Plancraft is a minecraft environment and agent that innovates on planning LLM agents with a retriever
+
+ You can install the package by running the following command:
+
+ ```bash
+ pip install plancraft
+ ```
+
+ Should you need the multimodal version of the package, you will also need a custom [fork](https://github.com/gautierdag/minerl.git) of the minerl package. You can install it by running the following command:
+
+ ```bash
+ pip install git+https://github.com/gautierdag/minerl.git
+ ```
+
+ Note that you may need to follow the same installation instructions as in the [minerl documentation](https://minerl.readthedocs.io/en/latest/tutorials/index.html).
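
A behavioural note to go with the README: this release replaces the standard library `logging` with `loguru` (see the new `loguru>=0.7.2` requirement above and the module diffs below), so callers configure plancraft's log output through loguru's global logger. A small sketch, with the sink path and level chosen purely as examples:

```python
# Sketch: routing plancraft's loguru output (loguru is the new logging backend in 0.1.1).
from loguru import logger

logger.remove()  # drop loguru's default stderr sink
logger.add("plancraft.log", level="INFO")  # e.g. capture the missing-minerl warning to a file
```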

plancraft-0.1.1/plancraft/environments/env_real.py ADDED
@@ -0,0 +1,316 @@
+ from typing import Sequence, Union
+
+ import numpy as np
+ import json
+
+ from loguru import logger
+
+ from plancraft.environments.actions import RealAction
+
+ try:
+     from minerl.env import _singleagent
+     from minerl.herobraine.env_specs.human_controls import HumanControlEnvSpec
+     from minerl.herobraine.hero import handlers, mc, spaces
+     from minerl.herobraine.hero.handler import Handler
+     from minerl.herobraine.hero.handlers.agent.action import Action
+     from minerl.herobraine.hero.handlers.agent.start import InventoryAgentStart
+     from minerl.herobraine.hero.handlers.translation import TranslationHandler
+
+     class InventoryCommandAction(Action):
+         """
+         Handler which lets agents programmatically interact with an open container
+
+         Using this - agents can move a chosen quantity of items from one slot to another.
+         """
+
+         def to_string(self):
+             return "inventory_command"
+
+         def xml_template(self) -> str:
+             return str("<InventoryCommands/>")
+
+         def __init__(self):
+             self._command = "inventory_command"
+             # first argument is the slot to take from
+             # second is the slot to put into
+             # third is the count to take
+             super().__init__(
+                 self.command,
+                 spaces.Tuple(
+                     (
+                         spaces.Discrete(46),
+                         spaces.Discrete(46),
+                         spaces.Discrete(64),
+                     )
+                 ),
+             )
+
+         def from_universal(self, x):
+             return np.array([0, 0, 0], dtype=np.int32)
+
+     class SmeltCommandAction(Action):
+         """
+         An action handler for smelting an item
+         We assume smelting is immediate.
+         @TODO: might be interesting to explore using the smelting time as an additional planning parameter.
+
+         Using this agents can smelt items in their inventory.
+         """
+
+         def __init__(self):
+             self._command = "smelt"
+             # first argument is the slot to take from
+             # second is the slot to put into
+             # third is the count to smelt
+             super().__init__(
+                 self.command,
+                 spaces.Tuple(
+                     (
+                         spaces.Discrete(46),
+                         spaces.Discrete(46),
+                         spaces.Discrete(64),
+                     )
+                 ),
+             )
+
+         def to_string(self):
+             return "smelt"
+
+         def xml_template(self) -> str:
+             return str("<SmeltCommands/>")
+
+         def from_universal(self, x):
+             return np.array([0, 0, 0], dtype=np.int32)
+
+     class InventoryResetAction(Action):
+         def __init__(self):
+             self._command = "inventory_reset"
+             super().__init__(self._command, spaces.Text([1]))
+
+         def to_string(self) -> str:
+             return "inventory_reset"
+
+         def to_hero(self, inventory_items: list[dict]):
+             return "{} {}".format(self._command, json.dumps(inventory_items))
+
+         def xml_template(self) -> str:
+             return "<InventoryResetCommands/>"
+
+         def from_universal(self, x):
+             return []
+
+     MINUTE = 20 * 60
+
+     class CustomInventoryAgentStart(InventoryAgentStart):
+         def __init__(self, inventory: list[dict[str, Union[str, int]]]):
+             super().__init__({item["slot"]: item for item in inventory})
+
+     class CraftingTableOnly(Handler):
+         def to_string(self):
+             return "start_with_crafting_table"
+
+         def xml_template(self) -> str:
+             return "<CraftingTableOnly>true</CraftingTableOnly>"
+
+     class InventoryObservation(TranslationHandler):
+         """
+         Handles GUI Workbench Observations for selected items
+         """
+
+         def to_string(self):
+             return "inventory"
+
+         def xml_template(self) -> str:
+             return str("""<ObservationFromFullInventory flat="false"/>""")
+
+         def __init__(self, item_list, _other="other"):
+             item_list = sorted(item_list)
+             super().__init__(
+                 spaces.Dict(
+                     spaces={
+                         k: spaces.Box(
+                             low=0,
+                             high=2304,
+                             shape=(),
+                             dtype=np.int32,
+                             normalizer_scale="log",
+                         )
+                         for k in item_list
+                     }
+                 )
+             )
+             self.num_items = len(item_list)
+             self.items = item_list
+
+         def add_to_mission_spec(self, mission_spec):
+             pass
+
+         def from_hero(self, info):
+             return info["inventory"]
+
+         def from_universal(self, obs):
+             raise NotImplementedError(
+                 "from_universal not implemented in InventoryObservation"
+             )
+
+     class PlancraftBaseEnvSpec(HumanControlEnvSpec):
+         def __init__(
+             self,
+             symbolic_action_space=False,
+             symbolic_observation_space=False,
+             max_episode_steps=2 * MINUTE,
+             inventory: Sequence[dict] = (),
+             preferred_spawn_biome: str = "plains",
+             resolution=[260, 180],
+         ):
+             self.inventory = inventory
+             self.preferred_spawn_biome = preferred_spawn_biome
+             self.symbolic_action_space = symbolic_action_space
+             self.symbolic_observation_space = symbolic_observation_space
+
+             mode = "real"
+             if symbolic_action_space:
+                 mode += "-symbolic-act"
+             else:
+                 mode += "-real-act"
+
+             if symbolic_observation_space:
+                 mode += "-symbolic-obs"
+
+             if symbolic_action_space:
+                 cursor_size = 1
+             else:
+                 cursor_size = 16
+
+             name = f"plancraft-{mode}-v0"
+             super().__init__(
+                 name=name,
+                 max_episode_steps=max_episode_steps,
+                 resolution=resolution,
+                 cursor_size_range=[cursor_size, cursor_size],
+             )
+
+         def create_agent_start(self) -> list[Handler]:
+             base_agent_start_handlers = super().create_agent_start()
+             return base_agent_start_handlers + [
+                 CustomInventoryAgentStart(self.inventory),
+                 handlers.PreferredSpawnBiome(self.preferred_spawn_biome),
+                 handlers.DoneOnDeath(),
+                 CraftingTableOnly(),
+             ]
+
+         def create_observables(self) -> list[TranslationHandler]:
+             if self.symbolic_observation_space:
+                 return [
+                     handlers.POVObservation(self.resolution),
+                     InventoryObservation([item["slot"] for item in self.inventory]),
+                 ]
+             return [handlers.POVObservation(self.resolution)]
+
+         def create_server_world_generators(self) -> list[Handler]:
+             # TODO the original biome forced is not implemented yet. Use this for now.
+             return [handlers.DefaultWorldGenerator(force_reset=True)]
+
+         def create_server_quit_producers(self) -> list[Handler]:
+             return [
+                 handlers.ServerQuitFromTimeUp(
+                     (self.max_episode_steps * mc.MS_PER_STEP)
+                 ),
+                 handlers.ServerQuitWhenAnyAgentFinishes(),
+             ]
+
+         def create_server_initial_conditions(self) -> list[Handler]:
+             return [
+                 handlers.TimeInitialCondition(allow_passage_of_time=False),
+                 handlers.SpawningInitialCondition(allow_spawning=True),
+             ]
+
+         def create_actionables(self) -> list[TranslationHandler]:
+             """
+             Symbolic env can move items around in the inventory using function
+             Real env can use camera/keyboard
+             """
+             # Camera and mouse
+             if self.symbolic_action_space:
+                 return [
+                     InventoryCommandAction(),
+                     SmeltCommandAction(),
+                     InventoryResetAction(),
+                 ]
+             return [
+                 handlers.KeybasedCommandAction(v, v) for k, v in mc.KEYMAP.items()
+             ] + [
+                 handlers.CameraAction(),
+                 SmeltCommandAction(),
+                 InventoryResetAction(),
+             ]
+
+         def is_from_folder(self, folder: str) -> bool:
+             return False
+
+         def create_agent_handlers(self) -> list[Handler]:
+             return []
+
+         def create_mission_handlers(self):
+             return []
+
+         def create_monitors(self):
+             return []
+
+         def create_rewardables(self):
+             return []
+
+         def create_server_decorators(self) -> list[Handler]:
+             return []
+
+         def determine_success_from_rewards(self, rewards: list) -> bool:
+             return False
+
+         def get_docstring(self):
+             return self.__class__.__doc__
+
+     class RealPlancraft(_singleagent._SingleAgentEnv):
+         def __init__(
+             self,
+             inventory: list[dict],
+             preferred_spawn_biome="plains",
+             symbolic_action_space=False,
+             symbolic_observation_space=True,
+             resolution=[512, 512],
+             crop=True,
+         ):
+             # NOTE: crop is only supported for resolution 512x512 (default)
+             self.crop = crop
+             self.resolution = resolution
+             env_spec = PlancraftBaseEnvSpec(
+                 symbolic_action_space=symbolic_action_space,
+                 symbolic_observation_space=symbolic_observation_space,
+                 preferred_spawn_biome=preferred_spawn_biome,
+                 inventory=inventory,
+                 resolution=resolution,
+             )
+             super(RealPlancraft, self).__init__(env_spec=env_spec)
+             self.reset()
+
+         def step(self, action: RealAction | dict):
+             if not isinstance(action, dict):
+                 action = action.to_action_dict()
+             obs, rew, done, info = super().step(action)
+             if "pov" in obs and self.crop and self.resolution == [512, 512]:
+                 # crop at position x=174, y=170 with width=164 and height=173
+                 obs["pov"] = obs["pov"][174 : 174 + 164, 170 : 168 + 173]
+             return obs, rew, done, info
+
+         def fast_reset(self, new_inventory: list[dict]):
+             super().step({"inventory_reset": new_inventory})
+
+
+ except ImportError:
+
+     class RealPlancraft:
+         def __init__(self, *args, **kwargs):
+             logger.warning(
+                 "The 'minerl' package is required to use RealPlancraft. "
+                 "Please install it using 'pip install plancraft[full]' or 'pip install minerl'."
+             )
+             raise ImportError("minerl package not found")
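
To make the module above concrete, here is a hedged usage sketch. The `fast_reset` call and its `"inventory_reset"` key come directly from the code; keying a step's action dict by the other handlers' `to_string()` names (for example `"inventory_command"` with a `(from_slot, to_slot, count)` tuple) is an assumption based on the spaces defined above, not documented API.

```python
# Sketch of driving RealPlancraft with the symbolic action handlers defined above.
from plancraft.environments.env_real import RealPlancraft

env = RealPlancraft(
    inventory=[{"slot": 10}],    # items are keyed by "slot" (see CustomInventoryAgentStart)
    symbolic_action_space=True,  # exposes inventory_command / smelt / inventory_reset
)

# Swap in a fresh inventory without restarting the Minecraft server.
env.fast_reset(new_inventory=[{"slot": 12}])

# Assumed action format: handler name -> value from its space
# (InventoryCommandAction uses a (from_slot, to_slot, count) tuple).
obs, reward, done, info = env.step({"inventory_command": (10, 12, 1)})
```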

{plancraft-0.1.0 → plancraft-0.1.1}/plancraft/environments/env_symbolic.py
@@ -1,4 +1,3 @@
- import logging
  from typing import Optional

  from plancraft.environments.actions import SymbolicAction
@@ -11,8 +10,6 @@ from plancraft.environments.recipes import (
  )
  from plancraft.environments.sampler import MAX_STACK_SIZE

- logger = logging.getLogger(__name__)
-

  class PseudoActionSpace:
      def no_op(self):

{plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/generators.py
@@ -1,9 +1,9 @@
- import logging
  import os
  import time

  import torch
  from dotenv import load_dotenv
+ from loguru import logger
  from openai import OpenAI
  from PIL import Image
  from transformers import (
@@ -23,9 +23,6 @@ from plancraft.models.utils import (
      tokenize,
  )

-
- logger = logging.getLogger(__name__)
-
  load_dotenv()


{plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/oam.py
@@ -1,6 +1,7 @@
- import logging
  from typing import Optional

+ from loguru import logger
+
  import torch
  import torch.nn as nn
  import torchvision.transforms.v2 as v2
@@ -14,8 +15,6 @@ from transformers import (

  from plancraft.models.bbox_model import IntegratedBoundingBoxModel

- logger = logging.getLogger(__name__)
-

  class PlancraftOAMConfig(PretrainedConfig):
      model_type = "plancraft-aom"

{plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/oracle.py
@@ -1,13 +1,12 @@
- import logging
  import copy
  from collections import Counter

  from plancraft.config import EvalConfig
  from plancraft.environments.actions import (
      RealActionInteraction,
+     StopAction,
      SymbolicMoveAction,
      SymbolicSmeltAction,
-     StopAction,
  )
  from plancraft.environments.planner import optimal_planner
  from plancraft.environments.recipes import (
@@ -16,10 +15,8 @@ from plancraft.environments.recipes import (
      SmeltingRecipe,
      id_to_item,
  )
- from plancraft.models.base import ABCModel, History
  from plancraft.environments.sampler import MAX_STACK_SIZE
-
- logger = logging.getLogger(__name__)
+ from plancraft.models.base import ABCModel, History


  def item_set_id_to_type(item_set_ids: set[int]):

{plancraft-0.1.0 → plancraft-0.1.1}/plancraft/models/react.py
@@ -1,21 +1,16 @@
- import logging
-
  from dotenv import load_dotenv

  from plancraft.config import EvalConfig
  from plancraft.environments.actions import (
-     SymbolicAction,
      NoOp,
+     SymbolicAction,
  )
+ from plancraft.models.act import ActModel
  from plancraft.models.utils import (
      convert_observation_to_message,
      parse_content_response,
  )

- from plancraft.models.act import ActModel
-
- logger = logging.getLogger(__name__)
-
  load_dotenv()


{plancraft-0.1.0 → plancraft-0.1.1/plancraft/plancraft.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: plancraft
- Version: 0.1.0
+ Version: 0.1.1
  Summary: Plancraft: an evaluation dataset for planning with LLM agents
  Requires-Python: >=3.9
  Description-Content-Type: text/markdown
@@ -20,6 +20,7 @@ Requires-Dist: imageio>=2.36.0
  Requires-Dist: inflection>=0.3.1
  Requires-Dist: ipython>=7.5.0
  Requires-Dist: jinja2>=2.11.2
+ Requires-Dist: loguru>=0.7.2
  Requires-Dist: lxml>=4.3.3
  Requires-Dist: matplotlib>=3.9.2
  Requires-Dist: networkx>=3.2.1
@@ -50,4 +51,24 @@ Provides-Extra: full
  Requires-Dist: gym<=0.23.1,>=0.19.0; extra == "full"

  # plancraft
+
+ [![Test](https://github.com/gautierdag/plancraft/actions/workflows/test.yaml/badge.svg)](https://github.com/gautierdag/plancraft/actions/workflows/test.yaml)
+ ![Python Version](https://img.shields.io/badge/python-3.9+-blue)
+ ![Ruff](https://img.shields.io/badge/linter-ruff-blue)
+ [![PyPI Version](https://img.shields.io/pypi/v/plancraft)](https://pypi.org/project/plancraft/)
+
  Plancraft is a minecraft environment and agent that innovates on planning LLM agents with a retriever
+
+ You can install the package by running the following command:
+
+ ```bash
+ pip install plancraft
+ ```
+
+ Should you need the multimodal version of the package, you will also need a custom [fork](https://github.com/gautierdag/minerl.git) of the minerl package. You can install it by running the following command:
+
+ ```bash
+ pip install git+https://github.com/gautierdag/minerl.git
+ ```
+
+ Note that you may need to follow the same installation instructions as in the [minerl documentation](https://minerl.readthedocs.io/en/latest/tutorials/index.html).

{plancraft-0.1.0 → plancraft-0.1.1}/plancraft/plancraft.egg-info/requires.txt
@@ -13,6 +13,7 @@ imageio>=2.36.0
  inflection>=0.3.1
  ipython>=7.5.0
  jinja2>=2.11.2
+ loguru>=0.7.2
  lxml>=4.3.3
  matplotlib>=3.9.2
  networkx>=3.2.1

{plancraft-0.1.0 → plancraft-0.1.1}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "plancraft"
- version = "0.1.0"
+ version = "0.1.1"
  description = "Plancraft: an evaluation dataset for planning with LLM agents"
  readme = "README.md"
  requires-python = ">=3.9"
@@ -20,6 +20,7 @@ dependencies = [
      "inflection>=0.3.1",
      "ipython>=7.5.0",
      "jinja2>=2.11.2",
+     "loguru>=0.7.2",
      "lxml>=4.3.3",
      "matplotlib>=3.9.2",
      "networkx>=3.2.1",
@@ -67,4 +68,4 @@ full = [

  [build-system]
  requires = ["setuptools"]
- build-backend = "setuptools.build_meta"
+ build-backend = "setuptools.build_meta"

plancraft-0.1.0/README.md DELETED
@@ -1,2 +0,0 @@
- # plancraft
- Plancraft is a minecraft environment and agent that innovates on planning LLM agents with a retriever

plancraft-0.1.0/plancraft/environments/env_real.py DELETED
@@ -1,315 +0,0 @@
- from typing import Sequence, Union
-
- import numpy as np
- import json
-
-
- try:
-     from minerl.env import _singleagent
-     from minerl.herobraine.env_specs.human_controls import HumanControlEnvSpec
-     from minerl.herobraine.hero import handlers, mc, spaces
-     from minerl.herobraine.hero.handler import Handler
-     from minerl.herobraine.hero.handlers.agent.action import Action
-     from minerl.herobraine.hero.handlers.agent.start import InventoryAgentStart
-     from minerl.herobraine.hero.handlers.translation import TranslationHandler
- except ImportError as e:
-     raise ImportError(
-         "The 'minerl' package is required to use RealPlancraft. "
-         "Please install it using 'pip install plancraft[full]' or 'pip install minerl'."
-     ) from e
-
-
- from plancraft.environments.actions import RealAction
-
-
- class InventoryCommandAction(Action):
-     """
-     Handler which lets agents programmatically interact with an open container
-
-     Using this - agents can move a chosen quantity of items from one slot to another.
-     """
-
-     def to_string(self):
-         return "inventory_command"
-
-     def xml_template(self) -> str:
-         return str("<InventoryCommands/>")
-
-     def __init__(self):
-         self._command = "inventory_command"
-         # first argument is the slot to take from
-         # second is the slot to put into
-         # third is the count to take
-         super().__init__(
-             self.command,
-             spaces.Tuple(
-                 (
-                     spaces.Discrete(46),
-                     spaces.Discrete(46),
-                     spaces.Discrete(64),
-                 )
-             ),
-         )
-
-     def from_universal(self, x):
-         return np.array([0, 0, 0], dtype=np.int32)
-
-
- class SmeltCommandAction(Action):
-     """
-     An action handler for smelting an item
-     We assume smelting is immediate.
-     @TODO: might be interesting to explore using the smelting time as an additional planning parameter.
-
-     Using this agents can smelt items in their inventory.
-     """
-
-     def __init__(self):
-         self._command = "smelt"
-         # first argument is the slot to take from
-         # second is the slot to put into
-         # third is the count to smelt
-         super().__init__(
-             self.command,
-             spaces.Tuple(
-                 (
-                     spaces.Discrete(46),
-                     spaces.Discrete(46),
-                     spaces.Discrete(64),
-                 )
-             ),
-         )
-
-     def to_string(self):
-         return "smelt"
-
-     def xml_template(self) -> str:
-         return str("<SmeltCommands/>")
-
-     def from_universal(self, x):
-         return np.array([0, 0, 0], dtype=np.int32)
-
-
- class InventoryResetAction(Action):
-     def __init__(self):
-         self._command = "inventory_reset"
-         super().__init__(self._command, spaces.Text([1]))
-
-     def to_string(self) -> str:
-         return "inventory_reset"
-
-     def to_hero(self, inventory_items: list[dict]):
-         return "{} {}".format(self._command, json.dumps(inventory_items))
-
-     def xml_template(self) -> str:
-         return "<InventoryResetCommands/>"
-
-     def from_universal(self, x):
-         return []
-
-
- MINUTE = 20 * 60
-
-
- class CustomInventoryAgentStart(InventoryAgentStart):
-     def __init__(self, inventory: list[dict[str, Union[str, int]]]):
-         super().__init__({item["slot"]: item for item in inventory})
-
-
- class CraftingTableOnly(Handler):
-     def to_string(self):
-         return "start_with_crafting_table"
-
-     def xml_template(self) -> str:
-         return "<CraftingTableOnly>true</CraftingTableOnly>"
-
-
- class InventoryObservation(TranslationHandler):
-     """
-     Handles GUI Workbench Observations for selected items
-     """
-
-     def to_string(self):
-         return "inventory"
-
-     def xml_template(self) -> str:
-         return str("""<ObservationFromFullInventory flat="false"/>""")
-
-     def __init__(self, item_list, _other="other"):
-         item_list = sorted(item_list)
-         super().__init__(
-             spaces.Dict(
-                 spaces={
-                     k: spaces.Box(
-                         low=0,
-                         high=2304,
-                         shape=(),
-                         dtype=np.int32,
-                         normalizer_scale="log",
-                     )
-                     for k in item_list
-                 }
-             )
-         )
-         self.num_items = len(item_list)
-         self.items = item_list
-
-     def add_to_mission_spec(self, mission_spec):
-         pass
-
-     def from_hero(self, info):
-         return info["inventory"]
-
-     def from_universal(self, obs):
-         raise NotImplementedError(
-             "from_universal not implemented in InventoryObservation"
-         )
-
-
- class PlancraftBaseEnvSpec(HumanControlEnvSpec):
-     def __init__(
-         self,
-         symbolic_action_space=False,
-         symbolic_observation_space=False,
-         max_episode_steps=2 * MINUTE,
-         inventory: Sequence[dict] = (),
-         preferred_spawn_biome: str = "plains",
-         resolution=[260, 180],
-     ):
-         self.inventory = inventory
-         self.preferred_spawn_biome = preferred_spawn_biome
-         self.symbolic_action_space = symbolic_action_space
-         self.symbolic_observation_space = symbolic_observation_space
-
-         mode = "real"
-         if symbolic_action_space:
-             mode += "-symbolic-act"
-         else:
-             mode += "-real-act"
-
-         if symbolic_observation_space:
-             mode += "-symbolic-obs"
-
-         if symbolic_action_space:
-             cursor_size = 1
-         else:
-             cursor_size = 16
-
-         name = f"plancraft-{mode}-v0"
-         super().__init__(
-             name=name,
-             max_episode_steps=max_episode_steps,
-             resolution=resolution,
-             cursor_size_range=[cursor_size, cursor_size],
-         )
-
-     def create_agent_start(self) -> list[Handler]:
-         base_agent_start_handlers = super().create_agent_start()
-         return base_agent_start_handlers + [
-             CustomInventoryAgentStart(self.inventory),
-             handlers.PreferredSpawnBiome(self.preferred_spawn_biome),
-             handlers.DoneOnDeath(),
-             CraftingTableOnly(),
-         ]
-
-     def create_observables(self) -> list[TranslationHandler]:
-         if self.symbolic_observation_space:
-             return [
-                 handlers.POVObservation(self.resolution),
-                 InventoryObservation([item["slot"] for item in self.inventory]),
-             ]
-         return [handlers.POVObservation(self.resolution)]
-
-     def create_server_world_generators(self) -> list[Handler]:
-         # TODO the original biome forced is not implemented yet. Use this for now.
-         return [handlers.DefaultWorldGenerator(force_reset=True)]
-
-     def create_server_quit_producers(self) -> list[Handler]:
-         return [
-             handlers.ServerQuitFromTimeUp((self.max_episode_steps * mc.MS_PER_STEP)),
-             handlers.ServerQuitWhenAnyAgentFinishes(),
-         ]
-
-     def create_server_initial_conditions(self) -> list[Handler]:
-         return [
-             handlers.TimeInitialCondition(allow_passage_of_time=False),
-             handlers.SpawningInitialCondition(allow_spawning=True),
-         ]
-
-     def create_actionables(self) -> list[TranslationHandler]:
-         """
-         Symbolic env can move items around in the inventory using function
-         Real env can use camera/keyboard
-         """
-         # Camera and mouse
-         if self.symbolic_action_space:
-             return [
-                 InventoryCommandAction(),
-                 SmeltCommandAction(),
-                 InventoryResetAction(),
-             ]
-         return [handlers.KeybasedCommandAction(v, v) for k, v in mc.KEYMAP.items()] + [
-             handlers.CameraAction(),
-             SmeltCommandAction(),
-             InventoryResetAction(),
-         ]
-
-     def is_from_folder(self, folder: str) -> bool:
-         return False
-
-     def create_agent_handlers(self) -> list[Handler]:
-         return []
-
-     def create_mission_handlers(self):
-         return []
-
-     def create_monitors(self):
-         return []
-
-     def create_rewardables(self):
-         return []
-
-     def create_server_decorators(self) -> list[Handler]:
-         return []
-
-     def determine_success_from_rewards(self, rewards: list) -> bool:
-         return False
-
-     def get_docstring(self):
-         return self.__class__.__doc__
-
-
- class RealPlancraft(_singleagent._SingleAgentEnv):
-     def __init__(
-         self,
-         inventory: list[dict],
-         preferred_spawn_biome="plains",
-         symbolic_action_space=False,
-         symbolic_observation_space=True,
-         resolution=[512, 512],
-         crop=True,
-     ):
-         # NOTE: crop is only supported for resolution 512x512 (default)
-         self.crop = crop
-         self.resolution = resolution
-         env_spec = PlancraftBaseEnvSpec(
-             symbolic_action_space=symbolic_action_space,
-             symbolic_observation_space=symbolic_observation_space,
-             preferred_spawn_biome=preferred_spawn_biome,
-             inventory=inventory,
-             resolution=resolution,
-         )
-         super(RealPlancraft, self).__init__(env_spec=env_spec)
-         self.reset()
-
-     def step(self, action: RealAction | dict):
-         if not isinstance(action, dict):
-             action = action.to_action_dict()
-         obs, rew, done, info = super().step(action)
-         if "pov" in obs and self.crop and self.resolution == [512, 512]:
-             # crop at position x=174, y=170 with width=164 and height=173
-             obs["pov"] = obs["pov"][174 : 174 + 164, 170 : 168 + 173]
-         return obs, rew, done, info
-
-     def fast_reset(self, new_inventory: list[dict]):
-         super().step({"inventory_reset": new_inventory})