plancraft 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- plancraft-0.1.2.dist-info/METADATA +74 -0
- plancraft-0.1.2.dist-info/RECORD +5 -0
- {plancraft-0.1.0.dist-info → plancraft-0.1.2.dist-info}/WHEEL +1 -1
- plancraft-0.1.2.dist-info/top_level.txt +1 -0
- environments/__init__.py +0 -0
- environments/actions.py +0 -218
- environments/env_real.py +0 -315
- environments/env_symbolic.py +0 -215
- environments/items.py +0 -10
- environments/planner.py +0 -109
- environments/recipes.py +0 -542
- environments/sampler.py +0 -224
- models/__init__.py +0 -21
- models/act.py +0 -184
- models/base.py +0 -152
- models/bbox_model.py +0 -492
- models/dummy.py +0 -54
- models/few_shot_images/__init__.py +0 -16
- models/generators.py +0 -483
- models/oam.py +0 -284
- models/oracle.py +0 -268
- models/prompts.py +0 -158
- models/react.py +0 -98
- models/utils.py +0 -289
- plancraft-0.1.0.dist-info/METADATA +0 -53
- plancraft-0.1.0.dist-info/RECORD +0 -26
- plancraft-0.1.0.dist-info/top_level.txt +0 -3
- train/dataset.py +0 -187
- {plancraft-0.1.0.dist-info → plancraft-0.1.2.dist-info}/LICENSE +0 -0
models/prompts.py
DELETED
@@ -1,158 +0,0 @@
-from plancraft.models.utils import gold_search_recipe
-
-VALID_ACTIONS = ["move", "smelt", "think", "search", "impossible"]
-
-ACTIONS_DESCRIPTIONS = {
-    "move": {
-        "description": "Transfer a specific quantity of an item from one slot to another",
-        "format": "`move: from [Source] to [Target] with quantity N`",
-    },
-    "smelt": {
-        "description": "Smelt an item in a furnace and moves the output to a specific slot",
-        "format": "`smelt: from [Source] to [Target] with quantity N`",
-    },
-    "think": {
-        "description": "Generate thoughts to help you decide on the next action",
-        "format": "`think: <thought message>`",
-    },
-    "search": {
-        "description": "Search for a recipe to craft a specific item",
-        "format": "`search: <recipe name>`",
-    },
-    "impossible": {
-        "description": "Stop task if it is certain that it is impossible with given inventory",
-        "format": "`impossible: <reason>`",
-    },
-}
-
-BASE_SYSTEM_PROMPT = """You are crafting in Minecraft. You need to decide on the next action.
-
-Crafting Grid: The crafting table is organized into a 3x3 grid. Each slot in the grid has a unique identifier:
-- Top row: [A1] [A2] [A3]
-- Middle row: [B1] [B2] [B3]
-- Bottom row: [C1] [C2] [C3]
-
-The output of the crafting process is placed in a designated output slot labeled [0] You cannot move or smelt items directly into slot [0]
-
-Inventory Slots: The remaining inventory slots (outside of the crafting grid) are used for storing items. These slots are labeled as [I1] to [I36]"""
-
-BASE_SYSTEM_PROMPT_EXAMPLE = """Example:
-- `move: from [I2] to [A1] with quantity 3`
-- `smelt: from [I5] to [I6] with quantity 1`
-
-Constraints:
-- You cannot move or smelt items into [0]
-- If an item is not in slot [0] then the recipe is incorrect
-- You need to move items from [0] to a free inventory slot to complete the crafting process"""
-
-
-def get_system_prompt(actions: list[str]):
-    assert set(actions).issubset(VALID_ACTIONS), f"Invalid actions: {actions}"
-    assert "move" in actions, "move should be one of the actions"
-    assert "smelt" in actions, "smelt should be one of the actions"
-
-    descriptions = ""
-    for action in actions:
-        descriptions += f"\n\t- {action}: {ACTIONS_DESCRIPTIONS[action]['description']}"
-
-    output_format = ""
-    for action in actions:
-        output_format += f"\n\t- {ACTIONS_DESCRIPTIONS[action]['format']}"
-
-    return f"{BASE_SYSTEM_PROMPT}\n\nActions:{descriptions}\n\nFormat{output_format}\n\n{BASE_SYSTEM_PROMPT_EXAMPLE}"
-
-
-CRAFTING_STEPS = [
-    "Craft an item of type: andesite\ninventory:\n - diorite [I18] quantity 1\n - cobblestone [I30] quantity 1",
-    "Craft an item of type: andesite\ninventory:\n - diorite [B1] quantity 1\n - cobblestone [I30] quantity 1",
-    "Craft an item of type: andesite\ninventory:\n - andesite [0] quantity 1\n - diorite [B1] quantity 1\n - cobblestone [B2] quantity 1",
-    "Craft an item of type: iron_ingot\ninventory:\n - iron_ore [I36] quantity 1\n - cobblestone [I30] quantity 1",
-]
-
-BASE_ACTION_STEPS = [
-    "move: from [I18] to [B1] with quantity 1",
-    "move: from [I30] to [B2] with quantity 1",
-    "move: from [0] to [I6] with quantity 1",
-    "smelt: from [I36] to [I35] with quantity 1",
-]
-
-THINK_STEPS = [
-    "think: To solve this task I need to craft andesite using 1 diorite and 1 cobblestone side by side.",
-    "think: Now I need to move the cobblestone into position [B2] to be right of the diorite.",
-    "think: Now I can craft the andesite by moving it from the craft slot [0] to a free inventory slot.",
-    "think: To craft an iron_ingot, I need to smelt iron_ore into an empty slot.",
-]
-
-SEARCH_STEPS = [
-    "search: andesite",
-    None,
-    None,
-    "search: iron_ingot",
-]
-
-
-def get_prompt_example(
-    actions: list[str],
-    use_text_inventory=True,
-    use_multimodal_content_format=False,
-    use_images=False,
-) -> list[dict]:
-    assert set(actions).issubset(VALID_ACTIONS), f"Invalid actions: {actions}"
-    assert "move" in actions, "move should be one of the actions"
-    assert "smelt" in actions, "smelt should be one of the actions"
-
-    if use_images:
-        assert (
-            use_multimodal_content_format
-        ), "use_images requires use_multimodal_content_format"
-
-    example_dialogue = []
-    for i, step in enumerate(CRAFTING_STEPS):
-        text = step
-        if not use_text_inventory:
-            text = text.split("\ninventory:\n")[0]
-
-        example_dialogue.append({"role": "user", "content": text})
-        if "search" in actions and SEARCH_STEPS[i]:
-            example_dialogue.append({"role": "assistant", "content": SEARCH_STEPS[i]})
-            search_target = text.split("seach: ")[-1].strip()
-            search_response = gold_search_recipe(search_target)
-            example_dialogue.append({"role": "user", "content": search_response})
-        if "think" in actions:
-            example_dialogue.append({"role": "assistant", "content": THINK_STEPS[i]})
-            example_dialogue.append({"role": "user", "content": "Ok"})
-        example_dialogue.append({"role": "assistant", "content": BASE_ACTION_STEPS[i]})
-
-    if not use_multimodal_content_format:
-        return example_dialogue
-
-    # convert to multimodal dialogue
-    multimodal_dialogue = []
-    for message in example_dialogue:
-        if "Craft an item" in message["content"]:
-            content_list = [
-                {
-                    "type": "text",
-                    "text": message["content"],
-                }
-            ]
-            if use_images:
-                content_list.append(
-                    {
-                        "type": "image",
-                    }
-                )
-
-            multimodal_dialogue.append(
-                {"role": message["role"], "content": content_list}
-            )
-        else:
-            multimodal_dialogue.append(
-                {
-                    "role": message["role"],
-                    "content": [
-                        {"type": "text", "text": message["content"]},
-                    ],
-                }
-            )
-    return multimodal_dialogue
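For orientation, a minimal sketch of how the two deleted helpers fit together. It is illustrative only and not part of the diff; the import path is an assumption based on the module's own internal imports (`plancraft.models.utils`), and assumes the 0.1.0 wheel resolves that way once installed:

```python
# Illustrative sketch only; the import path is assumed from the module's
# internal imports, not confirmed by this diff.
from plancraft.models.prompts import get_system_prompt, get_prompt_example

actions = ["move", "smelt", "think", "search"]

# System prompt: BASE_SYSTEM_PROMPT plus a description and format line for
# each enabled action, followed by BASE_SYSTEM_PROMPT_EXAMPLE.
system_prompt = get_system_prompt(actions)

# Few-shot dialogue built from CRAFTING_STEPS, interleaving the matching
# search/think/action turns for the enabled actions (text-only here).
dialogue = get_prompt_example(actions, use_text_inventory=True)
for message in dialogue:
    print(f"{message['role']}: {message['content'][:60]}")
```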
models/react.py
DELETED
@@ -1,98 +0,0 @@
-import logging
-
-from dotenv import load_dotenv
-
-from plancraft.config import EvalConfig
-from plancraft.environments.actions import (
-    SymbolicAction,
-    NoOp,
-)
-from plancraft.models.utils import (
-    convert_observation_to_message,
-    parse_content_response,
-)
-
-from plancraft.models.act import ActModel
-
-logger = logging.getLogger(__name__)
-
-load_dotenv()
-
-
-class ReactModel(ActModel):
-    """
-    Model that does action with interleaved thinking step
-    """
-
-    def __init__(self, cfg: EvalConfig):
-        super().__init__(cfg)
-        self.max_invalid_actions = 3
-
-    def step(self, observation: dict) -> SymbolicAction:
-        # override the step method in ActModel to force thinking step
-
-        self.history.add_observation_to_history(observation)
-        observation_message = convert_observation_to_message(
-            observation,
-            objective=self.history.objective,
-            bbox_model=self.bbox_model,
-            oam_model="oam" in self.llm.model_name,
-            use_text_inventory=self.use_text_inventory,
-            use_multimodal_content_format=self.use_multimodal_content_format,
-            use_images=self.use_images,
-        )
-        # add observation to history
-        self.history.add_message_to_history(content=observation_message, role="user")
-
-        i = 0
-        while i < self.max_invalid_actions:
-            message_window, image_window = self.llm.prepare_messages(
-                history=self.history,
-                max_messages_window=self.max_messages_window,
-                system_prompt=self.system_prompt,
-                prompt_images=self.prompt_images,
-            )
-            think_messages, think_token_used = self.llm.generate_unconstrained(
-                batch_messages=[message_window],
-                images=[image_window],
-                start_messages_generation="think:",
-            )
-            self.history.tokens_used += think_token_used
-            think_message = "think: " + think_messages[0].split("\n")[0].strip()
-            self.history.add_message_to_history(content=think_message, role="assistant")
-
-            # retrieve new message window (with thinking prompt)
-            message_window, image_window = self.llm.prepare_messages(
-                history=self.history,
-                max_messages_window=self.max_messages_window,
-                system_prompt=self.system_prompt,
-                prompt_images=self.prompt_images,
-            )
-            action_messages, action_token_used = self.llm.generate_unconstrained(
-                batch_messages=[message_window],
-                images=[image_window],
-                start_messages_generation="",
-            )
-            self.history.tokens_used += action_token_used
-
-            action_message = action_messages[0].split("\n")[0].strip()
-
-            self.history.add_message_to_history(
-                content=action_message, role="assistant"
-            )
-
-            response = parse_content_response(
-                action_message, valid_actions=self.valid_actions
-            )
-            if not isinstance(response, str):
-                # valid action
-                self.history.add_action_to_history(response)
-                return response
-
-            self.history.add_message_to_history(
-                content=response,
-            )
-            i += 1
-
-        # default move action
-        return NoOp()
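The control flow above is a standard ReAct loop: force a `think:` generation, re-prepare the context window, then generate and parse an action, retrying up to `max_invalid_actions` times before falling back to `NoOp()`. A self-contained sketch of that loop with a stubbed LLM; `FakeLLM` and its `generate` method are hypothetical stand-ins, not plancraft APIs:

```python
# Sketch of the two-phase ReAct step; FakeLLM is a hypothetical stand-in.
MAX_INVALID_ACTIONS = 3


class FakeLLM:
    def generate(self, history: list[dict], prefix: str = "") -> str:
        # A real model would condition on `history`; this stub returns canned text.
        if prefix == "think:":
            return " I should move the diorite into the crafting grid."
        return "move: from [I18] to [B1] with quantity 1"


def react_step(llm: FakeLLM, history: list[dict]) -> str | None:
    for _ in range(MAX_INVALID_ACTIONS):
        # Phase 1: force a thought by seeding generation with "think:".
        thought = "think:" + llm.generate(history, prefix="think:").split("\n")[0]
        history.append({"role": "assistant", "content": thought})
        # Phase 2: regenerate with the thought now in context.
        action = llm.generate(history).split("\n")[0].strip()
        history.append({"role": "assistant", "content": action})
        if action.split(":")[0] in ("move", "smelt", "impossible"):
            return action  # the real model parses this into a SymbolicAction
        # Invalid output: feed an error message back and retry.
        history.append({"role": "user", "content": "Invalid action, try again."})
    return None  # ReactModel returns NoOp() at this point


print(react_step(FakeLLM(), [{"role": "user", "content": "Craft andesite"}]))
```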
models/utils.py
DELETED
@@ -1,289 +0,0 @@
-import base64
-import glob
-import io
-import pathlib
-import re
-
-import numpy as np
-import torch
-from PIL import Image
-from transformers import AutoModelForCausalLM, AutoTokenizer
-
-from plancraft.environments.actions import (
-    StopAction,
-    SymbolicAction,
-    SymbolicMoveAction,
-    SymbolicSmeltAction,
-    convert_from_slot_index,
-)
-from plancraft.environments.recipes import RECIPES
-
-
-def numpy_to_base64(img_array: np.ndarray, image_format: str = "PNG") -> str:
-    """
-    Convert a NumPy array to a base64 encoded string.
-
-    Parameters:
-    - img_array: np.ndarray - Input image array.
-    - image_format: str - The format to save the image in (e.g., "PNG", "JPEG").
-
-    Returns:
-    - str - Base64 encoded string of the image.
-    """
-    # Convert NumPy array to image
-    image = Image.fromarray(img_array)
-
-    # Save the image to a bytes buffer
-    buffered = io.BytesIO()
-    image.save(buffered, format=image_format)
-
-    # Encode the bytes to a base64 string
-    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
-
-    return img_str
-
-
-def get_downloaded_models() -> dict:
-    """
-    Get the list of downloaded models on the NFS partition (EIDF).
-    """
-    downloaded_models = {}
-    # known models on NFS partition
-    if pathlib.Path("/nfs").exists():
-        local_models = glob.glob("/nfs/public/hf/models/*/*")
-        downloaded_models = {
-            model.replace("/nfs/public/hf/models/", ""): model for model in local_models
-        }
-    return downloaded_models
-
-
-class TrieNode:
-    def __init__(self):
-        self.children = {}
-        self.is_end_of_sequence = False
-
-
-class Trie:
-    def __init__(self):
-        self.root = TrieNode()
-        self.longest_sequence_length = 0
-
-    def insert(self, sequence: list):
-        node = self.root
-        for num in sequence:
-            if num not in node.children:
-                node.children[num] = TrieNode()
-            node = node.children[num]
-        node.is_end_of_sequence = True
-
-        if len(sequence) > self.longest_sequence_length:
-            self.longest_sequence_length = len(sequence)
-
-    def starts_with(self, prefix: list) -> bool:
-        node = self.root
-        for num in prefix:
-            if num not in node.children:
-                return False
-            node = node.children[num]
-        return True
-
-    def get_next(self, prefix: list) -> list:
-        node = self.root
-        for num in prefix:
-            if num not in node.children:
-                return []
-            node = node.children[num]
-        return list(node.children.keys())
-
-
-def tokenize(
-    model: AutoModelForCausalLM,
-    tokenizer: AutoTokenizer,
-    batch_messages: list[list[dict]],
-    start_messages_generation: list[str],
-    max_tokens=256,
-    images=None,
-) -> dict[str, torch.Tensor]:
-    """
-    Tokenize a list of messages and start the response message
-    """
-    assert len(start_messages_generation) == len(
-        batch_messages
-    ), "Length of start_messages_generation should be equal to batch_messages"
-
-    message_texts = tokenizer.apply_chat_template(
-        batch_messages,
-        add_generation_prompt=True,
-        tokenize=False,
-    )
-    # add the start of the response message for each message
-    message_texts = [
-        messages_text + new_message_start
-        for (messages_text, new_message_start) in zip(
-            message_texts, start_messages_generation
-        )
-    ]
-
-    max_prompt_length = None
-    # need to truncate if max_length is set
-    if model.generation_config.max_length > max_tokens:
-        max_prompt_length = model.generation_config.max_length - max_tokens
-
-    if images:
-        assert len(images) == len(
-            batch_messages
-        ), "Length of images should be equal to batch_messages"
-        tokenized_messages = tokenizer(
-            message_texts,
-            return_tensors="pt",
-            truncation=True,
-            max_length=max_prompt_length,
-            padding=True,
-            images=images,
-        )
-    else:
-        tokenized_messages = tokenizer(
-            message_texts,
-            return_tensors="pt",
-            truncation=True,
-            max_length=max_prompt_length,
-            padding=True,
-        )
-    return tokenized_messages
-
-
-def objective_and_inventory_to_str(objective: str, inventory: list[dict]) -> str:
-    inventory_str = ""
-    for item in inventory:
-        if item["quantity"] > 0:
-            if "index" in item:
-                slot = item["index"]
-            else:
-                slot = item["slot"]
-
-            if isinstance(slot, int):
-                slot = convert_from_slot_index(slot)
-
-            inventory_str += f"\n - {item['type']} {slot} quantity {item['quantity']}"
-
-    return f"{objective}\ninventory:{inventory_str}"
-
-
-def convert_observation_to_message(
-    observation: dict,
-    objective: str,
-    bbox_model=None,
-    oam_model=False,
-    use_text_inventory=True,
-    use_multimodal_content_format=False,
-    use_images=False,
-) -> str | dict:
-    """
-    Convert an observation to a message format
-
-    Parameters:
-    - observation: dict - The observation to convert.
-    - objective: str - The objective of the observation.
-    - bbox_model: Optional - The bounding box model to use.
-    - oam_model: bool - Whether to use the OAM model.
-    - use_text_inventory: bool - Whether to use text inventory.
-    - use_multimodal_content_format: bool - Whether to use multimodal content format.
-    - use_images: bool - Whether to append an image to the message content - must be used with use_multimodal_content_format.
-    """
-    if bbox_model is not None:
-        # convert to tensor
-        inventory = bbox_model.get_inventory(observation["pov"].copy())
-        text_content = objective_and_inventory_to_str(
-            objective, sorted(inventory, key=lambda x: x["slot"])
-        )
-    elif oam_model:
-        text_content = f"{objective}\ninventory:\n"
-    elif not use_text_inventory:
-        text_content = objective
-    else:
-        # if not multimodal, we only have text - we just dump a JSON of the inventory
-        inventory = []
-        for o in observation["inventory"]:
-            if o["quantity"] > 0:
-                inventory.append(
-                    {
-                        "type": o["type"],
-                        "slot": convert_from_slot_index(o["index"]),
-                        "quantity": o["quantity"],
-                    }
-                )
-        text_content = objective_and_inventory_to_str(objective, inventory)
-
-    if not use_multimodal_content_format:
-        return text_content
-
-    content_list = [{"type": "text", "text": text_content}]
-    if use_images:
-        content_list.append({"type": "image"})
-    return {"content": content_list}
-
-
-def gold_search_recipe(recipe_name: str) -> str:
-    """
-    Gold search recipe for the given observation and action
-    """
-    if recipe_name not in RECIPES:
-        return "Could not find a recipe by that name."
-
-    out_string = f"Recipes to craft {recipe_name}:\n"
-    for i, r in enumerate(RECIPES[recipe_name]):
-        if r.recipe_type != "smelting":
-            # sample a valid input grid (note that this is not guaranteed to be the only valid grid)
-            input_crafting_grid = r.sample_input_crafting_grid()
-            recipe_instructions = ""
-            for item in input_crafting_grid:
-                recipe_instructions += (
-                    f"{item['type']} at {convert_from_slot_index(item['slot'])}\n"
-                )
-        else:
-            # smelting recipe
-            recipe_instructions = f"smelt {r.ingredient}\n"
-        out_string += f"recipe {i+1}:\n{recipe_instructions}"
-    return out_string
-
-
-def parse_content_response(
-    content: str, valid_actions: list[str] = ["smelt", "move"]
-) -> str | SymbolicAction | StopAction:
-    """
-    Given a message and set of valid actions, parse the content to return the action
-    or a message if the action is not valid/requires message response
-    """
-
-    action_match = re.search(f"({'|'.join(valid_actions)}):", content)
-    if action_match:
-        action = action_match.group(1)
-        if action == "think":
-            return "Ok"
-        elif action == "impossible":
-            reason = re.search(r"impossible: (.*)", content).group(1)
-            return StopAction(reason=reason)
-        elif action == "search":
-            search_target = re.search(r"search: (\w+)", content).group(1)
-            return gold_search_recipe(search_target)
-        else:
-            try:
-                slot_from = re.search(r" from (\[[ABCI]?\d+\])", content).group(1)
-                slot_to = re.search(r" to (\[[ABCI]?\d+\])", content).group(1)
-                quantity = re.search(r"with quantity (\d+)", content).group(1)
-                if action == "move":
-                    action = SymbolicMoveAction(
-                        slot_from=slot_from,
-                        slot_to=slot_to,
-                        quantity=quantity,
-                    )
-                else:
-                    action = SymbolicSmeltAction(
-                        slot_from=slot_from,
-                        slot_to=slot_to,
-                        quantity=quantity,
-                    )
-                return action
-            except AttributeError as e:
-                return f"Format Error: {e}"
-    return f"Only select actions from the following: {', '.join(valid_actions)}"
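The action grammar that `parse_content_response` accepts can be exercised standalone. In the sketch below the regexes are copied from the deleted module, and a plain dict stands in for the `SymbolicMoveAction`/`SymbolicSmeltAction` classes, which this diff does not show:

```python
# Standalone sketch of the parsing in parse_content_response; a plain dict
# stands in for the pydantic action classes defined in environments/actions.py.
import re

content = "move: from [I2] to [A1] with quantity 3"
valid_actions = ["move", "smelt"]

# Same verb-matching regex as the deleted module.
action_match = re.search(f"({'|'.join(valid_actions)}):", content)
assert action_match is not None, "no recognised action verb"

parsed = {
    "action": action_match.group(1),
    "slot_from": re.search(r" from (\[[ABCI]?\d+\])", content).group(1),
    "slot_to": re.search(r" to (\[[ABCI]?\d+\])", content).group(1),
    "quantity": int(re.search(r"with quantity (\d+)", content).group(1)),
}
print(parsed)
# {'action': 'move', 'slot_from': '[I2]', 'slot_to': '[A1]', 'quantity': 3}
```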
plancraft-0.1.0.dist-info/METADATA
DELETED
@@ -1,53 +0,0 @@
-Metadata-Version: 2.1
-Name: plancraft
-Version: 0.1.0
-Summary: Plancraft: an evaluation dataset for planning with LLM agents
-Requires-Python: >=3.9
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: accelerate >=1.0.1
-Requires-Dist: coloredlogs >=10.0
-Requires-Dist: daemoniker >=0.2.3
-Requires-Dist: datasets >=3.0.2
-Requires-Dist: dill >=0.3.1.1
-Requires-Dist: einops >=0.8.0
-Requires-Dist: flaky >=3.8.1
-Requires-Dist: hf-transfer >=0.1.8
-Requires-Dist: huggingface-hub >=0.26.1
-Requires-Dist: hydra-core >=1.3.2
-Requires-Dist: imagehash >=4.0.0
-Requires-Dist: imageio >=2.36.0
-Requires-Dist: inflection >=0.3.1
-Requires-Dist: ipython >=7.5.0
-Requires-Dist: jinja2 >=2.11.2
-Requires-Dist: lxml >=4.3.3
-Requires-Dist: matplotlib >=3.9.2
-Requires-Dist: networkx >=3.2.1
-Requires-Dist: numpy <1.24,>=1.16.2
-Requires-Dist: openai >=1.52.2
-Requires-Dist: opencv-python >=4.1.0.25
-Requires-Dist: pandas >=2.1.0
-Requires-Dist: peft >=0.13.2
-Requires-Dist: pillow >=8.0.0
-Requires-Dist: psutil >=5.6.2
-Requires-Dist: pydantic >=2.9.2
-Requires-Dist: pyglet >=2.0.18
-Requires-Dist: pyro4 >=4.76
-Requires-Dist: python-dotenv >=1.0.1
-Requires-Dist: pyyaml >=6.0.2
-Requires-Dist: requests >=2.20.0
-Requires-Dist: seaborn >=0.13.2
-Requires-Dist: setuptools >=49.2.0
-Requires-Dist: tinydb >=4.8.2
-Requires-Dist: torch >=2.5.0
-Requires-Dist: torchvision >=0.20.0
-Requires-Dist: tqdm >=4.32.2
-Requires-Dist: transformers >=4.43.3
-Requires-Dist: typing >=3.6.6
-Requires-Dist: wandb >=0.18.5
-Requires-Dist: xmltodict ==0.12.0
-Provides-Extra: full
-Requires-Dist: gym <=0.23.1,>=0.19.0 ; extra == 'full'
-
-# plancraft
-Plancraft is a minecraft environment and agent that innovates on planning LLM agents with a retriever
plancraft-0.1.0.dist-info/RECORD
DELETED
@@ -1,26 +0,0 @@
-environments/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-environments/actions.py,sha256=SeeC9l1cJBs9pdba6BefQ_iQNfFf6FVTWm7HWkacbsY,6262
-environments/env_real.py,sha256=suU6f8tn9FGCnrMgjINJC4wibCpVsbIJmWtyNj__hJ4,9709
-environments/env_symbolic.py,sha256=BSInuOF7qTHzIjmWZ10kx-TsnzDtqqiq7SI3htuMG0M,7830
-environments/items.py,sha256=1R56LyK6tqIssQJMqHst6A9DeEfOX5DN-OBAkumGncw,217
-environments/planner.py,sha256=2B-0aunllmTuiHE7Jn5jHzCg6mMgxZjisiTDdpSKupk,3954
-environments/recipes.py,sha256=nXvOLCRljiZ5IgeevXuosU9IgSs7oQWQJFiuyRNSVFs,19571
-environments/sampler.py,sha256=ZBYoENKdQQ7wbAyVk-c9UNRWKPE0omv9he8c8QZ6wXg,7625
-models/__init__.py,sha256=PasK3jpbhpD0kxF4iHukcccZqvZg6lL240zie3DfLDY,622
-models/act.py,sha256=jdZunT7FcbHvcaJZ_wUDLSuObHjU6JZybghR_B0QJ8Q,6548
-models/base.py,sha256=fFM2BV9PqvIFtUlTz8iz5HPemYRy3S0EituM1XdJJSQ,4927
-models/bbox_model.py,sha256=CoX-odH59S-djkPOH2ViEmbYWo1sefmHiOcBlFWiAkg,16814
-models/dummy.py,sha256=QjxTIiKsWSmhUMAuw7Yy-OKKCLi_x3rwll4hH7ZNXso,1732
-models/generators.py,sha256=kmwhHDGCbhrf_tEY-Z7ALfcNnmsKTPssGWxwdLLZqaA,17386
-models/oam.py,sha256=pKVp1-vbDo6bEZdaLx4_5LPYFHnBxty09YvJMkM-kvw,9969
-models/oracle.py,sha256=5FACRc1CkRb1-LntSfyaOFOfgbvRwWZjnuIi6NWtIkQ,9651
-models/prompts.py,sha256=XwoRqd_5_VfCUXb10dCRFYXgw70mO2VoQocn3Z2zgs0,6165
-models/react.py,sha256=T57-Xh55LobkWLsh7V7PD3E7Merx5zx0wqY9ULIA2WU,3401
-models/utils.py,sha256=osKX0_uux9wzqYzq1ST0Cu5idrAnyfNvXrj0uO1eKo0,9424
-models/few_shot_images/__init__.py,sha256=nIkyB6w3ok-h4lfJKsrcMQzQF624Y9uxYV1FqCu3Lx0,351
-train/dataset.py,sha256=NrZjbIkosui1kaq7AIWSrYvvzrDxu_njH7FmGKY3xnI,5434
-plancraft-0.1.0.dist-info/LICENSE,sha256=YGR8ehDB4t-T-lOQKMfKNR-2zsOU7E3E5NA8t25HKE0,1070
-plancraft-0.1.0.dist-info/METADATA,sha256=fGHIBsicXNJAqBQWERKuMGPwThmE2RbfciQAX2AZy4A,1726
-plancraft-0.1.0.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
-plancraft-0.1.0.dist-info/top_level.txt,sha256=ZT60unZw3qNbZoGMCVc-V_0hI4YIYvTVGGScIgkCa88,26
-plancraft-0.1.0.dist-info/RECORD,,