mettagrid-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mettagrid might be problematic; see the registry listing for details.

@@ -0,0 +1,181 @@
+ from typing import Any, Dict
+
+ import pufferlib
+ import numpy as np
+ from omegaconf import OmegaConf
+
+ from mettagrid.config.game_builder import MettaGridGameBuilder
+ from mettagrid.renderer.raylib_client import MettaRaylibClient
+ from mettagrid.config.sample_config import sample_config
+ from mettagrid.mettagrid_c import MettaGrid
+ from pufferlib.environments.ocean.render import GridRender
+
+ class GridClient:
+     def __init__(self, width, height):
+         self._width = width
+         self._height = height
+
+ class MettaGridEnv(pufferlib.PufferEnv):
+     def __init__(self, render_mode: str, **cfg):
+         super().__init__()
+
+         self._render_mode = render_mode
+         self._cfg = OmegaConf.create(cfg)
+         self.make_env()
+
+         if render_mode == "human":
+             self._renderer = MettaRaylibClient(
+                 self._env.map_width(), self._env.map_height(),
+             )
+         elif render_mode == "raylib":
+             self._renderer = GridRender(
+                 self._env.map_width(), self._env.map_height(),
+                 fps=10
+             )
+
+
+     def make_env(self):
+         game_cfg = OmegaConf.create(sample_config(self._cfg.game))
+         self._game_builder = MettaGridGameBuilder(**game_cfg)
+         level = self._game_builder.level()
+         self._c_env = MettaGrid(game_cfg, level)
+         self._grid_env = self._c_env
+         self._num_agents = self._c_env.num_agents()
+
+         # self._grid_env = PufferGridEnv(self._c_env)
+         env = self._grid_env
+
+         self._env = env
+         #self._env = LastActionTracker(self._grid_env)
+         #self._env = Kinship(**sample_config(self._cfg.kinship), env=self._env)
+         #self._env = RewardTracker(self._env)
+         #self._env = FeatureMasker(self._env, self._cfg.hidden_features)
+         self.done = False
+
+     def reset(self, **kwargs):
+         self.make_env()
+         if hasattr(self, "buf"):
+             self._c_env.set_buffers(
+                 self.buf.observations,
+                 self.buf.terminals,
+                 self.buf.truncations,
+                 self.buf.rewards)
+
+         # obs, infos = self._env.reset(**kwargs)
+         # self._compute_max_energy()
+         # return obs, infos
+         obs, infos = self._c_env.reset()
+         return obs, infos
+
+     def step(self, actions):
+         obs, rewards, terminated, truncated, infos = self._c_env.step(actions.astype(np.int32))
+
+         rewards_sum = rewards.sum()
+         if rewards_sum != 0:
+             reward_mean = rewards_sum / self._num_agents
+             rewards -= reward_mean
+
+         if terminated.all() or truncated.all():
+             self.done = True
+
+             stats = self._c_env.get_episode_stats()
+             episode_rewards = self._c_env.get_episode_rewards()
+             episode_rewards_sum = episode_rewards.sum()
+             episode_rewards_mean = episode_rewards_sum / self._num_agents
+
+             infos = {
+                 "episode/reward.sum": episode_rewards_sum,
+                 "episode/reward.mean": episode_rewards_mean,
+                 "episode/reward.min": episode_rewards.min(),
+                 "episode/reward.max": episode_rewards.max(),
+                 "episode_length": self._c_env.current_timestep(),
+             }
+
+             agent_stats = {}
+             for a_stats in stats["agent_stats"]:
+                 for k, v in a_stats.items():
+                     if k not in agent_stats:
+                         agent_stats[k] = 0
+                     agent_stats[k] += v
+
+             for k, v in agent_stats.items():
+                 infos[f"agent_stats/{k}"] = float(v) / self._num_agents
+
+         return obs, list(rewards), terminated.all(), truncated.all(), infos
+
+     def process_episode_stats(self, episode_stats: Dict[str, Any]):
+         for agent_stats in episode_stats["agent_stats"]:
+             extra_stats = {}
+             for stat_name in agent_stats.keys():
+                 if stat_name.startswith("action_"):
+                     extra_stats[stat_name + "_pct"] = agent_stats[stat_name] / self._grid_env.current_timestep
+
+
+                 # for object in self._game_builder.object_configs.keys():
+                 #     if stat_name.startswith(f"stats_{object}_") and object != "agent":
+                 #         symbol = self._game_builder._objects[object].symbol
+                 #         num_obj = self._griddly_yaml["Environment"]["Levels"][0].count(symbol)
+                 #         if num_obj == 0:
+                 #             num_obj = 1
+                 #         extra_stats[stat_name + "_pct"] = agent_stats[stat_name] / num_obj
+
+             agent_stats.update(extra_stats)
+             agent_stats.update(episode_stats["game_stats"])
+             # agent_stats["level_max_energy"] = self._max_level_energy
+             # agent_stats["level_max_energy_per_agent"] = self._max_level_energy_per_agent
+             # agent_stats["level_max_reward_per_agent"] = self._max_level_reward_per_agent
+
+     def _compute_max_energy(self):
+         pass
+         # num_generators = self._griddly_yaml["Environment"]["Levels"][0].count("g")
+         # num_converters = self._griddly_yaml["Environment"]["Levels"][0].count("c")
+         # max_resources = num_generators * min(
+         #     self._game_builder.object_configs.generator.initial_resources,
+         #     self._max_steps / self._game_builder.object_configs.generator.cooldown)
+
+         # max_conversions = num_converters * (
+         #     self._max_steps / self._game_builder.object_configs.converter.cooldown
+         # )
+         # max_conv_energy = min(max_resources, max_conversions) * \
+         #     np.mean(list(self._game_builder.object_configs.converter.energy_output.values()))
+
+         # initial_energy = self._game_builder.object_configs.agent.initial_energy * self._game_builder.num_agents
+
+         # self._max_level_energy = max_conv_energy + initial_energy
+         # self._max_level_energy_per_agent = self._max_level_energy / self._game_builder.num_agents
+
+         # self._max_level_reward_per_agent = self._max_level_energy_per_agent
+
+
+     @property
+     def _max_steps(self):
+         return self._game_builder.max_steps
+
+     @property
+     def observation_space(self):
+         return self._env.observation_space
+
+     @property
+     def action_space(self):
+         return self._env.action_space
+
+     @property
+     def player_count(self):
+         return self._num_agents
+
+     def render(self, *args, **kwargs):
+         return self._renderer.render(
+             self._c_env.grid_objects(),
+         )
+
+     @property
+     def grid_features(self):
+         return self._env.grid_features()
+
+     @property
+     def global_features(self):
+         return []
+
+     @property
+     def render_mode(self):
+         return self._render_mode
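
For orientation, a minimal driver sketch for the MettaGridEnv class above (not part of the package), assuming the module is importable as mettagrid.mettagrid_env (the wheel's module path is not shown in this diff), that game_cfg is a mapping accepted by sample_config/MettaGridGameBuilder, and that actions are one (action_id, action_arg) pair per agent as suggested by the renderer further down:

# Hypothetical usage sketch; import path, config contents, and action shape are assumptions.
import numpy as np
from mettagrid.mettagrid_env import MettaGridEnv  # assumed module path

game_cfg = {}  # fill with a game section accepted by MettaGridGameBuilder
env = MettaGridEnv(render_mode=None, game=game_cfg)
obs, infos = env.reset()
while not env.done:
    # one (action_id, action_arg) pair per agent; step() casts to int32
    actions = np.zeros((env.player_count, 2), dtype=np.int32)
    obs, rewards, terminated, truncated, infos = env.step(actions)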
mettagrid/objects.pxd ADDED
@@ -0,0 +1,197 @@
+ # distutils: language=c++
+ # cython: warn.undeclared=False
+
+ cimport cython
+
+ from libcpp.vector cimport vector
+ from libcpp.map cimport map
+ from libcpp.string cimport string
+ from puffergrid.grid_env import StatsTracker
+ from libc.stdio cimport printf
+ from puffergrid.observation_encoder cimport ObservationEncoder, ObsType
+ from puffergrid.grid_object cimport GridObject, TypeId, GridCoord, GridLocation, GridObjectId
+ from puffergrid.event cimport EventHandler, EventArg
+
+ cdef enum GridLayer:
+     Agent_Layer = 0
+     Object_Layer = 1
+
+ ctypedef map[string, int] ObjectConfig
+
+ cdef cppclass MettaObject(GridObject):
+     unsigned int hp
+
+     inline void init_mo(ObjectConfig cfg):
+         this.hp = cfg[b"hp"]
+
+     inline char usable(const Agent *actor):
+         return False
+
+     inline char attackable():
+         return False
+
+ cdef cppclass Usable(MettaObject):
+     unsigned int use_cost
+     unsigned int cooldown
+     unsigned char ready
+
+     inline void init_usable(ObjectConfig cfg):
+         this.use_cost = cfg[b"use_cost"]
+         this.cooldown = cfg[b"cooldown"]
+         this.ready = 1
+
+     inline char usable(const Agent *actor):
+         return this.ready and this.use_cost <= actor.energy
+
+ cdef enum ObjectType:
+     AgentT = 0
+     WallT = 1
+     GeneratorT = 2
+     ConverterT = 3
+     AltarT = 4
+     Count = 5
+
+ cdef vector[string] ObjectTypeNames # defined in objects.pyx
+
+ cdef enum InventoryItem:
+     r1 = 0,
+     r2 = 1,
+     r3 = 2,
+     InventoryCount = 3
+
+ cdef vector[string] InventoryItemNames # defined in objects.pyx
+
+
+ cdef cppclass Agent(MettaObject):
+     char frozen
+     unsigned int energy
+     unsigned int orientation
+     char shield
+     vector[unsigned short] inventory
+
+     inline Agent(GridCoord r, GridCoord c, ObjectConfig cfg):
+         GridObject.init(ObjectType.AgentT, GridLocation(r, c, GridLayer.Agent_Layer))
+         MettaObject.init_mo(cfg)
+         this.frozen = False
+         this.energy = cfg[b"initial_energy"]
+         this.orientation = 0
+         this.inventory.resize(InventoryItem.InventoryCount)
+
+     inline void update_inventory(InventoryItem item, short amount):
+         this.inventory[<InventoryItem>item] += amount
+
+     inline void obs(ObsType[:] obs):
+         obs[0] = 1
+         obs[1] = this.hp
+         obs[2] = this.frozen
+         obs[3] = this.energy
+         obs[4] = this.orientation
+         obs[5] = this.shield
+
+         cdef unsigned short idx = 6
+         cdef unsigned short i
+         for i in range(InventoryItem.InventoryCount):
+             obs[idx + i] = this.inventory[i]
+
+     @staticmethod
+     inline vector[string] feature_names():
+         return [
+             "agent", "agent:hp", "agent:frozen", "agent:energy", "agent:orientation",
+             "agent:shield"
+         ] + [
+             "agent:inv:" + n for n in InventoryItemNames]
+
+ cdef cppclass Wall(MettaObject):
+     inline Wall(GridCoord r, GridCoord c, ObjectConfig cfg):
+         GridObject.init(ObjectType.WallT, GridLocation(r, c, GridLayer.Object_Layer))
+         MettaObject.init_mo(cfg)
+
+     inline void obs(ObsType[:] obs):
+         obs[0] = 1
+         obs[1] = hp
+
+     @staticmethod
+     inline vector[string] feature_names():
+         return ["wall", "wall:hp"]
+
+ cdef cppclass Generator(Usable):
+     unsigned int r1
+
+     inline Generator(GridCoord r, GridCoord c, ObjectConfig cfg):
+         GridObject.init(ObjectType.GeneratorT, GridLocation(r, c, GridLayer.Object_Layer))
+         MettaObject.init_mo(cfg)
+         Usable.init_usable(cfg)
+         this.r1 = cfg[b"initial_resources"]
+
+     inline char usable(const Agent *actor):
+         return Usable.usable(actor) and this.r1 > 0
+
+     inline void obs(ObsType[:] obs):
+         obs[0] = 1
+         obs[1] = this.hp
+         obs[2] = this.r1
+         obs[3] = this.ready
+
+
+     @staticmethod
+     inline vector[string] feature_names():
+         return ["generator", "generator:hp", "generator:r1", "generator:ready"]
+
+ cdef cppclass Converter(Usable):
+     InventoryItem input_resource
+     InventoryItem output_resource
+     short output_energy
+
+     inline Converter(GridCoord r, GridCoord c, ObjectConfig cfg):
+         GridObject.init(ObjectType.ConverterT, GridLocation(r, c, GridLayer.Object_Layer))
+         MettaObject.init_mo(cfg)
+         Usable.init_usable(cfg)
+         this.input_resource = InventoryItem.r1
+         this.output_resource = InventoryItem.r2
+         this.output_energy = cfg[b"energy_output.r1"]
+
+     inline char usable(const Agent *actor):
+         return Usable.usable(actor) and actor.inventory[this.input_resource] > 0
+
+     inline obs(ObsType[:] obs):
+         obs[0] = 1
+         obs[1] = hp
+         obs[2] = input_resource
+         obs[3] = output_resource
+         obs[4] = output_energy
+         obs[5] = ready
+
+     @staticmethod
+     inline vector[string] feature_names():
+         return ["converter", "converter:hp", "converter:input_resource", "converter:output_resource", "converter:output_energy", "converter:ready"]
+
+ cdef cppclass Altar(Usable):
+     inline Altar(GridCoord r, GridCoord c, ObjectConfig cfg):
+         GridObject.init(ObjectType.AltarT, GridLocation(r, c, GridLayer.Object_Layer))
+         MettaObject.init_mo(cfg)
+         Usable.init_usable(cfg)
+
+     inline void obs(ObsType[:] obs):
+         obs[0] = 1
+         obs[1] = hp
+         obs[2] = ready
+
+     @staticmethod
+     inline vector[string] feature_names():
+         return ["altar", "altar:hp", "altar:ready"]
+
+ cdef map[TypeId, GridLayer] ObjectLayers
+
+ cdef class ResetHandler(EventHandler):
+     cdef inline void handle_event(self, GridObjectId obj_id, EventArg arg):
+         cdef Usable *usable = <Usable*>self.env._grid.object(obj_id)
+         usable.ready = True
+         self.env._stats.game_incr("resets." + ObjectTypeNames[usable._type_id])
+
+ cdef enum Events:
+     Reset = 0
+
+ cdef class MettaObservationEncoder(ObservationEncoder):
+     cdef vector[short] _offsets
+     cdef vector[string] _feature_names
+
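
The cfg[b"..."] lookups in the constructors above define the per-object settings an ObjectConfig must carry; one plausible shape of that data, with key names taken directly from those lookups and values purely illustrative:

# Illustrative values only; key names mirror the cfg[b"..."] lookups in objects.pxd.
object_configs = {
    "agent":     {"hp": 1,  "initial_energy": 100},
    "wall":      {"hp": 10},
    "generator": {"hp": 30, "use_cost": 0,   "cooldown": 5, "initial_resources": 30},
    "converter": {"hp": 30, "use_cost": 0,   "cooldown": 3, "energy_output.r1": 100},
    "altar":     {"hp": 30, "use_cost": 100, "cooldown": 2},
}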
mettagrid/objects.pyx ADDED
@@ -0,0 +1,67 @@
+ # distutils: language=c++
+
+ from libc.stdio cimport printf
+ from libcpp.string cimport string
+ from libcpp.vector cimport vector
+ from puffergrid.grid_object cimport GridObject, GridObjectId
+
+ cdef vector[string] ObjectTypeNames = [
+     "agent",
+     "wall",
+     "generator",
+     "converter",
+     "altar"
+ ]
+
+ cdef vector[string] InventoryItemNames = [
+     "r1",
+     "r2",
+     "r3"
+ ]
+
+ ObjectLayers = {
+     ObjectType.AgentT: GridLayer.Agent_Layer,
+     ObjectType.WallT: GridLayer.Object_Layer,
+     ObjectType.GeneratorT: GridLayer.Object_Layer,
+     ObjectType.ConverterT: GridLayer.Object_Layer,
+     ObjectType.AltarT: GridLayer.Object_Layer,
+ }
+
+ cdef class MettaObservationEncoder(ObservationEncoder):
+     def __init__(self) -> None:
+         self._offsets.resize(ObjectType.Count)
+
+         features = []
+         self._offsets[ObjectType.AgentT] = 0
+         features.extend(Agent.feature_names())
+
+         self._offsets[ObjectType.WallT] = len(features)
+         features.extend(Wall.feature_names())
+
+         self._offsets[ObjectType.GeneratorT] = len(features)
+         features.extend(Generator.feature_names())
+
+         self._offsets[ObjectType.ConverterT] = len(features)
+         features.extend(Converter.feature_names())
+
+         self._offsets[ObjectType.AltarT] = len(features)
+         features.extend(Altar.feature_names())
+
+         self._feature_names = features
+
+     cdef encode(self, GridObject *obj, ObsType[:] obs):
+         if obj._type_id == ObjectType.AgentT:
+             (<Agent*>obj).obs(obs[self._offsets[ObjectType.AgentT]:])
+         elif obj._type_id == ObjectType.WallT:
+             (<Wall*>obj).obs(obs[self._offsets[ObjectType.WallT]:])
+         elif obj._type_id == ObjectType.GeneratorT:
+             (<Generator*>obj).obs(obs[self._offsets[ObjectType.GeneratorT]:])
+         elif obj._type_id == ObjectType.ConverterT:
+             (<Converter*>obj).obs(obs[self._offsets[ObjectType.ConverterT]:])
+         elif obj._type_id == ObjectType.AltarT:
+             (<Altar*>obj).obs(obs[self._offsets[ObjectType.AltarT]:])
+         else:
+             printf("Encoding object of unknown type: %d\n", obj._type_id)
+
+     cdef vector[string] feature_names(self):
+         return self._feature_names
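
MettaObservationEncoder lays every type's features out in one flat vector, giving each type a fixed starting offset; a pure-Python mirror of that bookkeeping, using the feature_names() lists declared in objects.pxd:

# Pure-Python illustration of the offset layout built in MettaObservationEncoder.__init__.
feature_lists = {
    "agent": ["agent", "agent:hp", "agent:frozen", "agent:energy", "agent:orientation",
              "agent:shield", "agent:inv:r1", "agent:inv:r2", "agent:inv:r3"],
    "wall": ["wall", "wall:hp"],
    "generator": ["generator", "generator:hp", "generator:r1", "generator:ready"],
    "converter": ["converter", "converter:hp", "converter:input_resource",
                  "converter:output_resource", "converter:output_energy", "converter:ready"],
    "altar": ["altar", "altar:hp", "altar:ready"],
}

offsets, features = {}, []
for type_name, names in feature_lists.items():
    offsets[type_name] = len(features)  # each type writes its obs starting here
    features.extend(names)
# offsets == {"agent": 0, "wall": 9, "generator": 11, "converter": 15, "altar": 21}
# len(features) == 24 feature channels in total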
Binary file
Binary file
Binary file
Binary file
Binary file
Binary file
Binary file
Binary file
@@ -0,0 +1,180 @@
+ from pdb import set_trace as T
+ import numpy as np
+ import os
+
+ import pettingzoo
+ import gymnasium
+
+ import pufferlib
+ from pufferlib.environments.ocean import render
+
+ class MettaRaylibClient:
+     def __init__(self, width, height, tile_size=32):
+         self.width = width
+         self.height = height
+         self.tile_size = tile_size
+
+
+         sprite_sheet_path = os.path.join(
+             *self.__module__.split('.')[:-1], './puffer_chars.png')
+         self.asset_map = {
+             1: (0, 0, 128, 128),
+             3: (128, 0, 128, 128),
+             4: (256, 0, 128, 128),
+             # 5: (384, 0, 128, 128),
+             5: (512, 0, 128, 128), #star
+         }
+
+         from raylib import rl, colors
+         rl.InitWindow(width*tile_size, height*tile_size,
+             "PufferLib Ray Grid".encode())
+         rl.SetTargetFPS(10)
+         self.puffer = rl.LoadTexture(sprite_sheet_path.encode())
+         self.rl = rl
+         self.colors = colors
+
+         import pyray as ray
+         camera = ray.Camera2D()
+         camera.target = ray.Vector2(0.0, 0.0)
+         camera.rotation = 0.0
+         camera.zoom = 1.0
+         self.camera = camera
+
+         from cffi import FFI
+         self.ffi = FFI()
+
+     def _cdata_to_numpy(self):
+         image = self.rl.LoadImageFromScreen()
+         width, height, channels = image.width, image.height, 4
+         cdata = self.ffi.buffer(image.data, width*height*channels)
+         return np.frombuffer(cdata, dtype=np.uint8
+             ).reshape((height, width, channels))[:, :, :3]
+
+     def render(self, grid):
+         rl = self.rl
+         colors = self.colors
+         ay, ax = None, None
+
+         ts = self.tile_size
+
+         pos = rl.GetMousePosition()
+         raw_mouse_x = pos.x
+         raw_mouse_y = pos.y
+         mouse_x = int(raw_mouse_x // ts)
+         mouse_y = int(raw_mouse_y // ts)
+         ay = int(np.clip((pos.y - ts*self.height//2) / 50, -3, 3)) + 3
+         ax = int(np.clip((pos.x - ts*self.width//2) / 50, -3, 3)) + 3
+
+         if rl.IsKeyDown(rl.KEY_ESCAPE):
+             exit(0)
+
+         action_id = 0
+         action_arg = 0
+
+         if rl.IsKeyDown(rl.KEY_E):
+             action_id = 0
+             action_arg = 0
+         elif rl.IsKeyDown(rl.KEY_Q):
+             action_id = 0
+             action_arg = 1
+
+         elif rl.IsKeyDown(rl.KEY_W):
+             action_id = 1
+             action_arg = 0
+         elif rl.IsKeyDown(rl.KEY_S):
+             action_id = 1
+             action_arg = 1
+         elif rl.IsKeyDown(rl.KEY_A):
+             action_id = 1
+             action_arg = 2
+         elif rl.IsKeyDown(rl.KEY_R):
+             action_id = 1
+             action_arg = 3
+
+         # if rl.IsKeyDown(rl.KEY_LEFT_SHIFT):
+         #     target_heros = 2
+
+         action = (action_id, action_arg)
+
+         rl.BeginDrawing()
+         rl.BeginMode2D(self.camera)
+         rl.ClearBackground([6, 24, 24, 255])
+         for y in range(self.height):
+             for x in range(self.width):
+                 tile = grid[y, x]
+                 tx = x*ts
+                 ty = y*ts
+                 if tile == 0:
+                     continue
+                 elif tile == 2:
+                     # Wall
+                     rl.DrawRectangle(x*ts, y*ts, ts, ts, [0, 0, 0, 255])
+                     continue
+                 else:
+                     # Player
+                     source_rect = self.asset_map[tile]
+                     dest_rect = (tx, ty, ts, ts)
+                     rl.DrawTexturePro(self.puffer, source_rect, dest_rect,
+                         (0, 0), 0, colors.WHITE)
+
+         # Draw circle at mouse x, y
+         rl.DrawCircle(ts*mouse_x + ts//2, ts*mouse_y + ts//8, ts//8, [255, 0, 0, 255])
+
+         rl.EndMode2D()
+
+         # Draw HUD
+         # player = entities[0]
+         # hud_y = self.height*ts - 2*ts
+         # draw_bars(rl, player, 2*ts, hud_y, 10*ts, 24, draw_text=True)
+
+         # off_color = [255, 255, 255, 255]
+         # on_color = [0, 255, 0, 255]
+
+         # q_color = on_color if skill_q else off_color
+         # w_color = on_color if skill_w else off_color
+         # e_color = on_color if skill_e else off_color
+
+         # q_cd = player.q_timer
+         # w_cd = player.w_timer
+         # e_cd = player.e_timer
+
+         # rl.DrawText(f'Q: {q_cd}'.encode(), 13*ts, hud_y - 20, 40, q_color)
+         # rl.DrawText(f'W: {w_cd}'.encode(), 17*ts, hud_y - 20, 40, w_color)
+         # rl.DrawText(f'E: {e_cd}'.encode(), 21*ts, hud_y - 20, 40, e_color)
+         # rl.DrawText(f'Stun: {player.stun_timer}'.encode(), 25*ts, hud_y - 20, 20, e_color)
+         # rl.DrawText(f'Move: {player.move_timer}'.encode(), 25*ts, hud_y, 20, e_color)
+
+         rl.EndDrawing()
+         return self._cdata_to_numpy(), action
+
+ def draw_bars(rl, entity, x, y, width, height=4, draw_text=False):
+     health_bar = entity.health / entity.max_health
+     mana_bar = entity.mana / entity.max_mana
+     if entity.max_health == 0:
+         health_bar = 2
+     if entity.max_mana == 0:
+         mana_bar = 2
+     rl.DrawRectangle(x, y, width, height, [255, 0, 0, 255])
+     rl.DrawRectangle(x, y, int(width*health_bar), height, [0, 255, 0, 255])
+
+     if entity.entity_type == 0:
+         rl.DrawRectangle(x, y - height - 2, width, height, [255, 0, 0, 255])
+         rl.DrawRectangle(x, y - height - 2, int(width*mana_bar), height, [0, 255, 255, 255])
+
+     if draw_text:
+         health = int(entity.health)
+         mana = int(entity.mana)
+         max_health = int(entity.max_health)
+         max_mana = int(entity.max_mana)
+         rl.DrawText(f'Health: {health}/{max_health}'.encode(),
+             x+8, y+2, 20, [255, 255, 255, 255])
+         rl.DrawText(f'Mana: {mana}/{max_mana}'.encode(),
+             x+8, y+2 - height - 2, 20, [255, 255, 255, 255])
+
+         #rl.DrawRectangle(x, y - 2*height - 4, int(width*mana_bar), height, [255, 255, 0, 255])
+         rl.DrawText(f'Experience: {entity.xp}'.encode(),
+             x+8, y - 2*height - 4, 20, [255, 255, 255, 255])
+
+     elif entity.entity_type == 0:
+         rl.DrawText(f'Level: {entity.level}'.encode(),
+             x+4, y -2*height - 12, 12, [255, 255, 255, 255])
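
The keyboard handling in MettaRaylibClient.render() above reduces to a small key-to-action table; summarized here for reference (key names from the rl.IsKeyDown checks; the semantics of each pair are defined by the C++ grid env and are not documented in this release):

# Summary of the key checks in render(); holding no key yields the default (0, 0), ESC exits.
KEY_TO_ACTION = {
    "E": (0, 0),
    "Q": (0, 1),
    "W": (1, 0),
    "S": (1, 1),
    "A": (1, 2),
    "R": (1, 3),
}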
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 David Bloomin
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,23 @@
+ Metadata-Version: 2.1
+ Name: mettagrid
+ Version: 0.0.1
+ Summary: A fast grid-based open-ended MARL environment
+ Home-page: https://daveey.github.io
+ License: MIT
+ Keywords: puffergrid,gridworld,minigrid,rl,reinforcement-learning,environment,gym
+ Author: David Bloomin
+ Author-email: daveey@gmail.com
+ Requires-Python: >=3.10,<4.0
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Requires-Dist: cython (>=3.0.11,<4.0.0)
+ Requires-Dist: numpy (>=1.21.0,<2.0.0)
+ Project-URL: Repository, https://github.com/Metta-AI/mettagrid
+ Description-Content-Type: text/markdown
+
+ # mettagrid
+ A fast grid-based open-ended MARL environment
+