synth-ai 0.2.0__py3-none-any.whl → 0.2.1.dev0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (266) hide show
  1. synth_ai/__init__.py +28 -2
  2. synth_ai/core/system.py +4 -0
  3. synth_ai/environments/__init__.py +35 -0
  4. synth_ai/environments/environment/__init__.py +1 -0
  5. synth_ai/environments/environment/artifacts/__init__.py +1 -0
  6. synth_ai/environments/environment/artifacts/base.py +50 -0
  7. synth_ai/environments/environment/core.py +22 -0
  8. synth_ai/environments/environment/db/__init__.py +1 -0
  9. synth_ai/environments/environment/db/sqlite.py +45 -0
  10. synth_ai/environments/environment/registry.py +24 -0
  11. synth_ai/environments/environment/resources/sqlite.py +46 -0
  12. synth_ai/environments/environment/results.py +1 -0
  13. synth_ai/environments/environment/rewards/__init__.py +1 -0
  14. synth_ai/environments/environment/rewards/core.py +28 -0
  15. synth_ai/environments/environment/shared_engine.py +26 -0
  16. synth_ai/environments/environment/tools/__init__.py +34 -0
  17. synth_ai/environments/examples/__init__.py +1 -0
  18. synth_ai/environments/examples/crafter_classic/__init__.py +8 -0
  19. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_comprehensive_evaluation.py +58 -0
  20. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_browser.py +152 -0
  21. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_framework.py +1194 -0
  22. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_quick_evaluation.py +51 -0
  23. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_react_agent.py +872 -0
  24. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_trace_evaluation.py +1412 -0
  25. synth_ai/environments/examples/crafter_classic/agent_demos/test_crafter_react_agent.py +1110 -0
  26. synth_ai/environments/examples/crafter_classic/config_logging.py +111 -0
  27. synth_ai/environments/examples/crafter_classic/engine.py +502 -0
  28. synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +63 -0
  29. synth_ai/environments/examples/crafter_classic/engine_helpers/action_map.py +5 -0
  30. synth_ai/environments/examples/crafter_classic/engine_helpers/serialization.py +74 -0
  31. synth_ai/environments/examples/crafter_classic/environment.py +255 -0
  32. synth_ai/environments/examples/crafter_classic/taskset.py +228 -0
  33. synth_ai/environments/examples/enron/agent_demos/test_synth_react.py +535 -0
  34. synth_ai/environments/examples/enron/art_helpers/email_search_tools.py +156 -0
  35. synth_ai/environments/examples/enron/art_helpers/local_email_db.py +280 -0
  36. synth_ai/environments/examples/enron/art_helpers/types_enron.py +24 -0
  37. synth_ai/environments/examples/enron/engine.py +291 -0
  38. synth_ai/environments/examples/enron/environment.py +165 -0
  39. synth_ai/environments/examples/enron/taskset.py +112 -0
  40. synth_ai/environments/examples/enron/units/keyword_stats.py +111 -0
  41. synth_ai/environments/examples/enron/units/test_email_index.py +8 -0
  42. synth_ai/environments/examples/minigrid/__init__.py +48 -0
  43. synth_ai/environments/examples/minigrid/agent_demos/minigrid_evaluation_framework.py +1188 -0
  44. synth_ai/environments/examples/minigrid/agent_demos/minigrid_quick_evaluation.py +47 -0
  45. synth_ai/environments/examples/minigrid/agent_demos/minigrid_react_agent.py +562 -0
  46. synth_ai/environments/examples/minigrid/agent_demos/minigrid_trace_evaluation.py +220 -0
  47. synth_ai/environments/examples/minigrid/agent_demos/test_minigrid_react_agent.py +393 -0
  48. synth_ai/environments/examples/minigrid/engine.py +589 -0
  49. synth_ai/environments/examples/minigrid/environment.py +274 -0
  50. synth_ai/environments/examples/minigrid/environment_mapping.py +242 -0
  51. synth_ai/environments/examples/minigrid/puzzle_loader.py +416 -0
  52. synth_ai/environments/examples/minigrid/taskset.py +583 -0
  53. synth_ai/environments/examples/minigrid/units/test_action_behavior.py +226 -0
  54. synth_ai/environments/examples/minigrid/units/test_debug_messages.py +83 -0
  55. synth_ai/environments/examples/minigrid/units/test_exploration.py +120 -0
  56. synth_ai/environments/examples/minigrid/units/test_minigrid_engine.py +214 -0
  57. synth_ai/environments/examples/minigrid/units/test_minigrid_environment.py +238 -0
  58. synth_ai/environments/examples/minigrid/units/test_minigrid_environment_mapping.py +301 -0
  59. synth_ai/environments/examples/minigrid/units/test_minigrid_taskset.py +210 -0
  60. synth_ai/environments/examples/nethack/__init__.py +7 -0
  61. synth_ai/environments/examples/nethack/achievements.py +337 -0
  62. synth_ai/environments/examples/nethack/agent_demos/nethack_evaluation_framework.py +981 -0
  63. synth_ai/environments/examples/nethack/agent_demos/nethack_quick_evaluation.py +74 -0
  64. synth_ai/environments/examples/nethack/agent_demos/nethack_react_agent.py +832 -0
  65. synth_ai/environments/examples/nethack/agent_demos/test_nethack_react_agent.py +1112 -0
  66. synth_ai/environments/examples/nethack/engine.py +738 -0
  67. synth_ai/environments/examples/nethack/environment.py +255 -0
  68. synth_ai/environments/examples/nethack/helpers/__init__.py +42 -0
  69. synth_ai/environments/examples/nethack/helpers/action_mapping.py +301 -0
  70. synth_ai/environments/examples/nethack/helpers/nle_wrapper.py +401 -0
  71. synth_ai/environments/examples/nethack/helpers/observation_utils.py +433 -0
  72. synth_ai/environments/examples/nethack/helpers/recording_wrapper.py +201 -0
  73. synth_ai/environments/examples/nethack/helpers/trajectory_recorder.py +268 -0
  74. synth_ai/environments/examples/nethack/helpers/visualization/replay_viewer.py +308 -0
  75. synth_ai/environments/examples/nethack/helpers/visualization/visualizer.py +430 -0
  76. synth_ai/environments/examples/nethack/taskset.py +323 -0
  77. synth_ai/environments/examples/nethack/units/test_nethack_engine.py +277 -0
  78. synth_ai/environments/examples/nethack/units/test_nethack_environment.py +281 -0
  79. synth_ai/environments/examples/nethack/units/test_nethack_taskset.py +213 -0
  80. synth_ai/environments/examples/nethack/units/test_recording.py +307 -0
  81. synth_ai/environments/examples/red/__init__.py +7 -0
  82. synth_ai/environments/examples/red/agent_demos/__init__.py +1 -0
  83. synth_ai/environments/examples/red/agent_demos/test_synth_react.py +1471 -0
  84. synth_ai/environments/examples/red/config_logging.py +110 -0
  85. synth_ai/environments/examples/red/engine.py +693 -0
  86. synth_ai/environments/examples/red/engine_helpers/__init__.py +1 -0
  87. synth_ai/environments/examples/red/engine_helpers/memory_map.py +28 -0
  88. synth_ai/environments/examples/red/engine_helpers/reward_components.py +275 -0
  89. synth_ai/environments/examples/red/engine_helpers/reward_library/__init__.py +142 -0
  90. synth_ai/environments/examples/red/engine_helpers/reward_library/adaptive_rewards.py +56 -0
  91. synth_ai/environments/examples/red/engine_helpers/reward_library/battle_rewards.py +283 -0
  92. synth_ai/environments/examples/red/engine_helpers/reward_library/composite_rewards.py +149 -0
  93. synth_ai/environments/examples/red/engine_helpers/reward_library/economy_rewards.py +137 -0
  94. synth_ai/environments/examples/red/engine_helpers/reward_library/efficiency_rewards.py +56 -0
  95. synth_ai/environments/examples/red/engine_helpers/reward_library/exploration_rewards.py +330 -0
  96. synth_ai/environments/examples/red/engine_helpers/reward_library/novelty_rewards.py +120 -0
  97. synth_ai/environments/examples/red/engine_helpers/reward_library/pallet_town_rewards.py +558 -0
  98. synth_ai/environments/examples/red/engine_helpers/reward_library/pokemon_rewards.py +312 -0
  99. synth_ai/environments/examples/red/engine_helpers/reward_library/social_rewards.py +147 -0
  100. synth_ai/environments/examples/red/engine_helpers/reward_library/story_rewards.py +246 -0
  101. synth_ai/environments/examples/red/engine_helpers/screen_analysis.py +367 -0
  102. synth_ai/environments/examples/red/engine_helpers/state_extraction.py +139 -0
  103. synth_ai/environments/examples/red/environment.py +235 -0
  104. synth_ai/environments/examples/red/taskset.py +77 -0
  105. synth_ai/environments/examples/red/test_fixes.py +125 -0
  106. synth_ai/environments/examples/red/test_fixes_mock.py +148 -0
  107. synth_ai/environments/examples/red/units/__init__.py +1 -0
  108. synth_ai/environments/examples/red/units/test_basic_functionality.py +97 -0
  109. synth_ai/environments/examples/red/units/test_button_press_requirements.py +217 -0
  110. synth_ai/environments/examples/red/units/test_engine.py +192 -0
  111. synth_ai/environments/examples/red/units/test_environment.py +455 -0
  112. synth_ai/environments/examples/red/units/test_exploration_strategy.py +227 -0
  113. synth_ai/environments/examples/red/units/test_integration.py +217 -0
  114. synth_ai/environments/examples/red/units/test_memory_extraction.py +111 -0
  115. synth_ai/environments/examples/red/units/test_menu_bug_reproduction.py +1100 -0
  116. synth_ai/environments/examples/red/units/test_movement_debug.py +255 -0
  117. synth_ai/environments/examples/red/units/test_pokemon_mcts_debug.py +163 -0
  118. synth_ai/environments/examples/red/units/test_pokemon_mcts_verbose.py +117 -0
  119. synth_ai/environments/examples/red/units/test_red_basic.py +145 -0
  120. synth_ai/environments/examples/red/units/test_red_comprehensive.py +323 -0
  121. synth_ai/environments/examples/red/units/test_retry_movement.py +195 -0
  122. synth_ai/environments/examples/red/units/test_reward_components.py +186 -0
  123. synth_ai/environments/examples/red/units/test_rom_integration.py +260 -0
  124. synth_ai/environments/examples/red/units/test_taskset.py +116 -0
  125. synth_ai/environments/examples/red/units/test_tree.py +448 -0
  126. synth_ai/environments/examples/sokoban/__init__.py +1 -0
  127. synth_ai/environments/examples/sokoban/agent_demos/sokoban_full_eval.py +900 -0
  128. synth_ai/environments/examples/sokoban/agent_demos/test_dspy_react.py +1 -0
  129. synth_ai/environments/examples/sokoban/agent_demos/test_sokoban_react_agent.py +498 -0
  130. synth_ai/environments/examples/sokoban/agent_demos/test_synth_lats.py +1 -0
  131. synth_ai/environments/examples/sokoban/agent_demos/test_synth_react_locally.py +748 -0
  132. synth_ai/environments/examples/sokoban/agent_demos/test_synth_react_service.py +296 -0
  133. synth_ai/environments/examples/sokoban/engine.py +675 -0
  134. synth_ai/environments/examples/sokoban/engine_helpers/__init__.py +1 -0
  135. synth_ai/environments/examples/sokoban/engine_helpers/room_utils.py +656 -0
  136. synth_ai/environments/examples/sokoban/engine_helpers/vendored/__init__.py +17 -0
  137. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/__init__.py +3 -0
  138. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/boxoban_env.py +129 -0
  139. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/render_utils.py +370 -0
  140. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/room_utils.py +331 -0
  141. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env.py +305 -0
  142. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_fixed_targets.py +66 -0
  143. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_pull.py +114 -0
  144. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_two_player.py +122 -0
  145. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_variations.py +394 -0
  146. synth_ai/environments/examples/sokoban/environment.py +228 -0
  147. synth_ai/environments/examples/sokoban/generate_verified_puzzles.py +438 -0
  148. synth_ai/environments/examples/sokoban/puzzle_loader.py +311 -0
  149. synth_ai/environments/examples/sokoban/taskset.py +425 -0
  150. synth_ai/environments/examples/sokoban/units/astar_common.py +94 -0
  151. synth_ai/environments/examples/sokoban/units/test_building_task_set.py +49 -0
  152. synth_ai/environments/examples/sokoban/units/test_false_positive.py +120 -0
  153. synth_ai/environments/examples/sokoban/units/test_simple_run_through_environment.py +119 -0
  154. synth_ai/environments/examples/sokoban/units/test_sokoban_environment.py +98 -0
  155. synth_ai/environments/examples/sokoban/units/test_tree.py +364 -0
  156. synth_ai/environments/examples/tictactoe/__init__.py +1 -0
  157. synth_ai/environments/examples/tictactoe/agent_demos/test_synth_react.py +266 -0
  158. synth_ai/environments/examples/tictactoe/agent_demos/test_tictactoe_react_agent.py +470 -0
  159. synth_ai/environments/examples/tictactoe/engine.py +368 -0
  160. synth_ai/environments/examples/tictactoe/environment.py +239 -0
  161. synth_ai/environments/examples/tictactoe/taskset.py +214 -0
  162. synth_ai/environments/examples/tictactoe/units/test_tictactoe_engine.py +393 -0
  163. synth_ai/environments/examples/tictactoe/units/test_tictactoe_environment.py +493 -0
  164. synth_ai/environments/examples/tictactoe/units/test_tictactoe_taskset.py +191 -0
  165. synth_ai/environments/examples/verilog/__init__.py +10 -0
  166. synth_ai/environments/examples/verilog/agent_demos/test_synth_react.py +520 -0
  167. synth_ai/environments/examples/verilog/engine.py +328 -0
  168. synth_ai/environments/examples/verilog/environment.py +349 -0
  169. synth_ai/environments/examples/verilog/taskset.py +418 -0
  170. synth_ai/environments/examples/verilog/units/test_verilog_engine.py +466 -0
  171. synth_ai/environments/examples/verilog/units/test_verilog_environment.py +585 -0
  172. synth_ai/environments/examples/verilog/units/test_verilog_integration.py +383 -0
  173. synth_ai/environments/examples/verilog/units/test_verilog_taskset.py +457 -0
  174. synth_ai/environments/reproducibility/core.py +42 -0
  175. synth_ai/environments/reproducibility/tree.py +364 -0
  176. synth_ai/environments/service/app.py +78 -0
  177. synth_ai/environments/service/core_routes.py +775 -0
  178. synth_ai/environments/service/external_registry.py +57 -0
  179. synth_ai/environments/service/registry.py +9 -0
  180. synth_ai/environments/stateful/__init__.py +1 -0
  181. synth_ai/environments/stateful/core.py +28 -0
  182. synth_ai/environments/stateful/engine.py +21 -0
  183. synth_ai/environments/stateful/state.py +7 -0
  184. synth_ai/environments/tasks/api.py +19 -0
  185. synth_ai/environments/tasks/core.py +78 -0
  186. synth_ai/environments/tasks/filters.py +39 -0
  187. synth_ai/environments/tasks/utils.py +89 -0
  188. synth_ai/environments/v0_observability/history.py +3 -0
  189. synth_ai/environments/v0_observability/log.py +2 -0
  190. synth_ai/lm/caching/constants.py +1 -0
  191. synth_ai/{zyk/lms → lm}/caching/ephemeral.py +4 -8
  192. synth_ai/{zyk/lms → lm}/caching/handler.py +15 -15
  193. synth_ai/{zyk/lms → lm}/caching/initialize.py +2 -4
  194. synth_ai/{zyk/lms → lm}/caching/persistent.py +4 -10
  195. synth_ai/{zyk/lms → lm}/config.py +2 -1
  196. synth_ai/{zyk/lms → lm}/constants.py +2 -2
  197. synth_ai/{zyk/lms → lm}/core/all.py +10 -10
  198. synth_ai/{zyk/lms → lm}/core/main.py +57 -33
  199. synth_ai/{zyk/lms → lm}/core/vendor_clients.py +12 -10
  200. synth_ai/lm/cost/monitor.py +1 -0
  201. synth_ai/lm/cost/statefulness.py +1 -0
  202. synth_ai/lm/provider_support/__init__.py +8 -0
  203. synth_ai/lm/provider_support/anthropic.py +945 -0
  204. synth_ai/lm/provider_support/openai.py +1115 -0
  205. synth_ai/lm/provider_support/suppress_logging.py +31 -0
  206. synth_ai/{zyk/lms → lm}/structured_outputs/handler.py +58 -80
  207. synth_ai/{zyk/lms → lm}/structured_outputs/inject.py +6 -20
  208. synth_ai/{zyk/lms → lm}/structured_outputs/rehabilitate.py +6 -12
  209. synth_ai/{zyk/lms → lm}/vendors/core/anthropic_api.py +21 -30
  210. synth_ai/{zyk/lms → lm}/vendors/core/gemini_api.py +35 -32
  211. synth_ai/{zyk/lms → lm}/vendors/core/mistral_api.py +19 -28
  212. synth_ai/{zyk/lms → lm}/vendors/core/openai_api.py +26 -36
  213. synth_ai/{zyk/lms → lm}/vendors/openai_standard.py +29 -33
  214. synth_ai/{zyk/lms → lm}/vendors/retries.py +1 -1
  215. synth_ai/lm/vendors/supported/__init__.py +0 -0
  216. synth_ai/{zyk/lms → lm}/vendors/supported/custom_endpoint.py +131 -118
  217. synth_ai/{zyk/lms → lm}/vendors/supported/deepseek.py +4 -8
  218. synth_ai/{zyk/lms → lm}/vendors/supported/grok.py +6 -8
  219. synth_ai/{zyk/lms → lm}/vendors/supported/groq.py +1 -1
  220. synth_ai/{zyk/lms → lm}/vendors/supported/ollama.py +2 -2
  221. synth_ai/{zyk/lms → lm}/vendors/supported/openrouter.py +18 -16
  222. synth_ai/{zyk/lms → lm}/vendors/supported/together.py +1 -1
  223. synth_ai/tracing/__init__.py +0 -0
  224. synth_ai/tracing/abstractions.py +224 -0
  225. synth_ai/tracing/base_client.py +91 -0
  226. synth_ai/tracing/client_manager.py +131 -0
  227. synth_ai/tracing/config.py +140 -0
  228. synth_ai/tracing/context.py +146 -0
  229. synth_ai/tracing/decorators.py +679 -0
  230. synth_ai/tracing/events/__init__.py +0 -0
  231. synth_ai/tracing/events/manage.py +147 -0
  232. synth_ai/tracing/events/scope.py +86 -0
  233. synth_ai/tracing/events/store.py +227 -0
  234. synth_ai/tracing/immediate_client.py +152 -0
  235. synth_ai/tracing/local.py +18 -0
  236. synth_ai/tracing/log_client_base.py +74 -0
  237. synth_ai/tracing/retry_queue.py +187 -0
  238. synth_ai/tracing/trackers.py +515 -0
  239. synth_ai/tracing/upload.py +504 -0
  240. synth_ai/tracing/utils.py +9 -0
  241. synth_ai/zyk/__init__.py +28 -2
  242. synth_ai-0.2.1.dev0.dist-info/METADATA +349 -0
  243. synth_ai-0.2.1.dev0.dist-info/RECORD +261 -0
  244. {synth_ai-0.2.0.dist-info → synth_ai-0.2.1.dev0.dist-info}/WHEEL +1 -1
  245. synth_ai/zyk/lms/caching/constants.py +0 -1
  246. synth_ai/zyk/lms/cost/monitor.py +0 -1
  247. synth_ai/zyk/lms/cost/statefulness.py +0 -1
  248. synth_ai-0.2.0.dist-info/METADATA +0 -36
  249. synth_ai-0.2.0.dist-info/RECORD +0 -50
  250. /synth_ai/{zyk/lms/__init__.py → environments/reproducibility/helpers.py} +0 -0
  251. /synth_ai/{zyk/lms/caching → lm}/__init__.py +0 -0
  252. /synth_ai/{zyk/lms/core → lm/caching}/__init__.py +0 -0
  253. /synth_ai/{zyk/lms → lm}/caching/dbs.py +0 -0
  254. /synth_ai/{zyk/lms/cost → lm/core}/__init__.py +0 -0
  255. /synth_ai/{zyk/lms → lm}/core/exceptions.py +0 -0
  256. /synth_ai/{zyk/lms/structured_outputs → lm/cost}/__init__.py +0 -0
  257. /synth_ai/{zyk/lms/vendors → lm/structured_outputs}/__init__.py +0 -0
  258. /synth_ai/{zyk/lms → lm}/tools/__init__.py +0 -0
  259. /synth_ai/{zyk/lms → lm}/tools/base.py +0 -0
  260. /synth_ai/{zyk/lms/vendors/core → lm/vendors}/__init__.py +0 -0
  261. /synth_ai/{zyk/lms → lm}/vendors/base.py +0 -0
  262. /synth_ai/{zyk/lms/vendors/local → lm/vendors/core}/__init__.py +0 -0
  263. /synth_ai/{zyk/lms/vendors/supported → lm/vendors/local}/__init__.py +0 -0
  264. /synth_ai/{zyk/lms → lm}/vendors/local/ollama.py +0 -0
  265. {synth_ai-0.2.0.dist-info → synth_ai-0.2.1.dev0.dist-info/licenses}/LICENSE +0 -0
  266. {synth_ai-0.2.0.dist-info → synth_ai-0.2.1.dev0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,981 @@
1
+ """
2
+ NetHack Evaluation Framework
3
+ ============================
4
+ Provides detailed metrics, trajectory analysis, and achievement statistics for NetHack.
5
+ Mirrors the Crafter evaluation structure but adapted for NetHack specifics.
6
+ """
7
+
8
+ import asyncio
9
+ import json
10
+ import time
11
+ import math
12
+ import uuid
13
+ import os
14
+ from dataclasses import dataclass, asdict
15
+ from typing import Dict, List, Optional, Set, Tuple, Any
16
+ from collections import defaultdict
17
+
18
+ import pandas as pd
19
+ from tqdm import tqdm
20
+
21
+ # NetHack specific imports
22
+ from src.synth_env.examples.nethack.environment import NetHackEnvironment
23
+ from src.synth_env.examples.nethack.taskset import (
24
+ NetHackTaskInstance,
25
+ NetHackTaskInstanceMetadata,
26
+ )
27
+ from src.synth_env.examples.nethack.agent_demos.test_synth_react import (
28
+ NetHackReActAgent,
29
+ )
30
+ from src.synth_env.examples.nethack.achievements import NetHackAchievements
31
+ from src.synth_env.examples.nethack.engine import NetHackObservationCallable
32
+ from src.synth_env.tasks.core import Impetus, Intent
33
+ from synth_ai.zyk import LM
34
+
35
# Load achievements mapping for BALROG scoring.
# NOTE(review): this runs at import time and will raise FileNotFoundError /
# KeyError if helpers/achievements.json is missing or lacks a "3.4.3" key —
# confirm the file ships with the package.
_achievements_path = os.path.join(os.path.dirname(__file__), "..", "helpers", "achievements.json")
with open(_achievements_path, "r") as f:
    BALROG_ACHIEVEMENTS = json.load(f)["3.4.3"]

# Achievement categories based on difficulty/complexity.
# Used below to break down per-trajectory achievement counts (basic /
# intermediate / advanced) in progress displays.
ACHIEVEMENT_CATEGORIES = {
    "basic": [
        "first_kill",
        "first_spell_cast",
        "first_prayer",
        "survived_100_turns",
        "reached_dlvl_2",
        "reached_dlvl_5",
        "killed_10_monsters",
    ],
    "intermediate": [
        "reached_dlvl_10",
        "reached_dlvl_20",
        "killed_50_monsters",
        "killed_100_monsters",
        "collected_1000_gold",
        "reached_level_5",
        "reached_level_10",
        "reached_minetown",
    ],
    "advanced": [
        "reached_dlvl_30",
        "reached_castle",
        "got_quest",
        "completed_quest",
        "reached_level_20",
        "collected_10000_gold",
        "found_artifact",
        "reached_mines_end",
    ],
}

# Get all achievement names by instantiating a fresh NetHackAchievements and
# listing its keys (a new instance is assumed to expose the full key set —
# TODO confirm get_unlocked_achievements() returns all keys, not only unlocked ones).
_sample_achievements = NetHackAchievements()
ALL_ACHIEVEMENTS = list(_sample_achievements.get_unlocked_achievements().keys())

# Terminal outcomes a trajectory can end with.
TERMINATION_REASONS = ["timeout", "death", "agent_quit", "environment_error"]

# SOTA scores (NetHack doesn't have published Hafner scores, only BALROG).
# All values below are placeholders pending real leaderboard numbers.
BALROG_SOTA_SCORES = {
    "balrog_leaderboard": {
        # TODO: Add real BALROG leaderboard scores when available
        "Claude 3.5 Sonnet": 25.0,  # Placeholder
        "GPT-4o": 20.0,  # Placeholder
        "GPT-4o-mini": 15.0,  # Placeholder
        "Gemini 1.5 Flash": 12.0,  # Placeholder
    }
}

# Model name mapping for SOTA percentage calculations: maps API model
# identifiers to the display names used in BALROG_SOTA_SCORES.
MODEL_NAME_TO_SOTA = {
    "claude-3-5-sonnet-latest": "Claude 3.5 Sonnet",
    "gpt-4o": "GPT-4o",
    "gpt-4o-mini": "GPT-4o-mini",
    "gemini-1.5-flash": "Gemini 1.5 Flash",
    "gemini-1.5-flash-latest": "Gemini 1.5 Flash",
}
98
+
99
+
100
def hafner_score(success_rates_percent: List[float]) -> float:
    """Compute the Hafner adjusted score (log-mean) for NetHack.

    Given per-achievement success rates expressed in percent, returns
    exp(mean(log(1 + s))) - 1 — a geometric-style aggregate that rewards
    breadth of achievements rather than a single high rate.

    Returns 0.0 for an empty input.
    """
    if not success_rates_percent:
        return 0.0
    count = len(success_rates_percent)
    log_total = 0.0
    for rate in success_rates_percent:
        log_total += math.log(1 + rate)
    return math.exp(log_total / count) - 1
107
+
108
+
109
def balrog_score_simple(percent: float) -> float:
    """Pass *percent* through unchanged: BALROG scores are already on a 0-100 scale."""
    score = percent
    return score
112
+
113
+
114
@dataclass
class TrajectoryResult:
    """Results from a single NetHack trajectory/episode."""

    # Identity of the run
    trajectory_id: str
    model_name: str
    difficulty: str
    seed: int

    # Core metrics
    success: bool
    total_steps: int
    total_turns: int
    total_reward: float

    # Achievement tracking
    achievements_unlocked: Set[str]  # names unlocked at any point in the episode
    achievement_turn_unlocked: Dict[str, int]  # achievement name -> turn first unlocked

    # Multi-action metrics (if applicable): how many actions the agent
    # emitted per decision turn
    actions_per_turn: List[int]
    avg_actions_per_turn: float

    # Termination analysis
    termination_reason: str  # expected to be one of TERMINATION_REASONS
    final_depth: Optional[int]
    final_level: Optional[int]
    final_gold: Optional[int]

    # BALROG scoring (progress percentage, 0-100)
    balrog_percent: float

    # Trajectory data for detailed analysis; None when detailed
    # collection was disabled
    turn_by_turn_data: Optional[List[Dict[str, Any]]] = None
148
+
149
+
150
@dataclass
class AggregateResults:
    """Aggregate results across multiple NetHack trajectories."""

    # Identity of the aggregation
    model_name: str
    difficulty: str
    num_trajectories: int

    # Success metrics (means over all trajectories)
    success_rate: float
    avg_total_steps: float
    avg_total_turns: float
    avg_total_reward: float

    # Achievement metrics
    unique_achievements_unlocked: Set[str]  # union across all trajectories
    total_achievement_count: int
    avg_achievements_per_trajectory: float
    achievement_unlock_rates: Dict[str, float]  # achievement name -> unlock rate
    hafner_score: float  # log-mean aggregate of unlock rates (see hafner_score())
    balrog_score_avg: float
    balrog_score_best: float

    # Multi-action metrics
    avg_actions_per_turn_overall: float
    actions_per_turn_distribution: Dict[int, int]  # actions-per-turn -> occurrence count

    # Termination analysis
    termination_breakdown: Dict[str, float]  # termination reason -> fraction of runs
    avg_final_depth: Optional[float]
    avg_final_level: Optional[float]
    avg_final_gold: Optional[float]
182
+
183
+
184
+ class NetHackEvalFramework:
185
+ """Standardized evaluation framework for NetHack environments."""
186
+
187
    def __init__(self):
        # Collected TrajectoryResult records for every evaluated episode.
        self.trajectory_results: List[TrajectoryResult] = []
189
+
190
+ async def run_single_trajectory(
191
+ self,
192
+ model_name: str,
193
+ difficulty: str,
194
+ seed: int,
195
+ max_turns: int = 200,
196
+ collect_detailed_data: bool = True,
197
+ ) -> TrajectoryResult:
198
+ """Run a single NetHack trajectory and collect detailed metrics."""
199
+
200
+ # Create task instance
201
+ metadata = NetHackTaskInstanceMetadata(
202
+ character_role="knight", # Default role
203
+ starting_level=1,
204
+ target_depth=5 if difficulty == "easy" else 10,
205
+ time_limit=max_turns * 10, # Generous time limit
206
+ difficulty=difficulty,
207
+ special_objectives=[
208
+ "Survive for as long as possible",
209
+ "Collect gold",
210
+ "Kill monsters",
211
+ ],
212
+ seed=seed,
213
+ )
214
+ instance = NetHackTaskInstance(
215
+ id=uuid.uuid4(),
216
+ impetus=Impetus(
217
+ instructions=f"Explore the NetHack dungeon on {difficulty} difficulty. Survive as long as possible, kill monsters, collect items, and descend to deeper levels."
218
+ ),
219
+ intent=Intent(rubric={}, gold_trajectories=None, gold_state_diff={}),
220
+ metadata=metadata,
221
+ is_reproducible=True,
222
+ initial_engine_snapshot=None,
223
+ )
224
+
225
+ # Setup environment and agent
226
+ obs_callback = NetHackObservationCallable()
227
+ env = NetHackEnvironment(instance, custom_step_obs=obs_callback)
228
+
229
+ llm = LM(model_name=model_name, formatting_model_name=model_name, temperature=0.0)
230
+ agent = NetHackReActAgent(llm, max_turns=max_turns)
231
+
232
+ # Set system prompt for agent
233
+ task_instructions = instance.impetus.instructions
234
+ agent.system_prompt = agent._create_system_prompt(task_instructions)
235
+
236
+ # Initialize tracking
237
+ trajectory_id = str(uuid.uuid4())
238
+ achievements = NetHackAchievements()
239
+ achievements_unlocked = set()
240
+ achievement_turn_unlocked = {}
241
+ actions_per_turn = []
242
+ turn_by_turn_data = [] if collect_detailed_data else None
243
+
244
+ # Progress tracking for BALROG score
245
+ class BalrogProgress:
246
+ def __init__(self):
247
+ self.percent = 0.0
248
+ self.end_reason = None
249
+
250
+ def update(self, depth: int, level: int, done: bool = False, end_reason: str = ""):
251
+ # Simple progress based on depth and level
252
+ depth_score = min(depth * 2, 50) # Max 50 from depth
253
+ level_score = min(level * 3, 50) # Max 50 from level
254
+ self.percent = max(depth_score, level_score)
255
+ if done:
256
+ self.end_reason = end_reason
257
+
258
+ balrog_progress = BalrogProgress()
259
+
260
+ # Run episode
261
+ obs_payload = await env.initialize()
262
+ turn_count = 0
263
+ termination_reason = "unknown"
264
+
265
+ # Create progress bar
266
+ pbar = tqdm(
267
+ total=max_turns,
268
+ desc=f"{model_name} ({difficulty}) Seed {seed}",
269
+ unit="turn",
270
+ leave=False,
271
+ ncols=100,
272
+ )
273
+
274
+ try:
275
+ while turn_count < max_turns:
276
+ turn_count += 1
277
+ pbar.update(1)
278
+
279
+ # Extract stats from observation for progress tracking
280
+ if "formatted_obs" in obs_payload:
281
+ current_formatted_obs = obs_payload["formatted_obs"]
282
+ elif "message" in obs_payload:
283
+ # Format the observation for the agent
284
+ current_formatted_obs = f"""
285
+ === NetHack Observation ===
286
+ Message: {obs_payload.get("message", "")}
287
+ Map:
288
+ {obs_payload.get("ascii_map", "")}
289
+
290
+ Stats: {obs_payload.get("player_stats", {})}
291
+ Inventory: {obs_payload.get("inventory", [])}
292
+ In Menu: {obs_payload.get("in_menu", False)}
293
+ """
294
+ else:
295
+ # Fallback to string representation
296
+ current_formatted_obs = str(obs_payload)
297
+
298
+ # Update achievements (simplified - would need real obs parsing)
299
+ prev_achievements = achievements_unlocked.copy()
300
+
301
+ # Extract game state for BALROG scoring
302
+ try:
303
+ # Parse the actual game state from obs
304
+ player_stats = obs_payload.get("player_stats", {})
305
+ current_depth = player_stats.get("depth", 1)
306
+ current_level = player_stats.get("experience_level", 1)
307
+ balrog_progress.update(current_depth, current_level)
308
+ except:
309
+ current_depth = 1
310
+ current_level = 1
311
+ balrog_progress.update(current_depth, current_level)
312
+
313
+ # Update progress bar
314
+ easy_count = len(
315
+ [a for a in achievements_unlocked if a in ACHIEVEMENT_CATEGORIES["basic"]]
316
+ )
317
+ inter_count = len(
318
+ [
319
+ a
320
+ for a in achievements_unlocked
321
+ if a in ACHIEVEMENT_CATEGORIES["intermediate"]
322
+ ]
323
+ )
324
+ adv_count = len(
325
+ [a for a in achievements_unlocked if a in ACHIEVEMENT_CATEGORIES["advanced"]]
326
+ )
327
+ total_count = len(achievements_unlocked)
328
+ achievement_display = f"{total_count}({easy_count}/{inter_count}/{adv_count})"
329
+
330
+ pbar.set_postfix(
331
+ {
332
+ "achievements": achievement_display,
333
+ "balrog": f"{balrog_progress.percent:.1f}%",
334
+ }
335
+ )
336
+
337
+ # Agent decision
338
+ decision = await agent.decide(current_formatted_obs)
339
+
340
+ # Check for termination - NetHack agent uses different format
341
+ if isinstance(decision, dict):
342
+ # Handle tool call format: {'name': 'tool_name', 'parameters': {...}}
343
+ if decision.get("name") == "terminate":
344
+ termination_reason = "agent_quit"
345
+ break
346
+
347
+ # Extract actions from NetHack agent response
348
+ if "parameters" in decision and isinstance(decision["parameters"], dict):
349
+ params = decision["parameters"]
350
+ if "actions" in params:
351
+ actions = params["actions"]
352
+ elif "action" in params:
353
+ actions = [params["action"]]
354
+ else:
355
+ actions = ["wait"] # Default action
356
+ elif "actions" in decision:
357
+ actions = decision["actions"]
358
+ elif "action" in decision:
359
+ actions = [decision["action"]]
360
+ else:
361
+ actions = ["wait"] # Default action
362
+ else:
363
+ # If decision is not a dict, assume it's a single action or termination
364
+ if decision == -1 or decision == [-1]:
365
+ termination_reason = "agent_quit"
366
+ break
367
+ elif isinstance(decision, list):
368
+ actions = decision
369
+ else:
370
+ actions = [str(decision)]
371
+
372
+ if not isinstance(actions, list):
373
+ actions = [str(actions)]
374
+
375
+ actions_per_turn.append(len(actions))
376
+
377
+ # Collect turn data
378
+ if collect_detailed_data:
379
+ turn_data = {
380
+ "turn": turn_count,
381
+ "actions_planned": len(actions),
382
+ "achievements_at_start": list(achievements_unlocked),
383
+ "balrog_percent": balrog_progress.percent,
384
+ }
385
+ turn_by_turn_data.append(turn_data)
386
+
387
+ # Execute actions
388
+ for action in actions:
389
+ obs_payload = await env.step(action)
390
+
391
+ # Check for REAL environment errors (not NetHack game messages)
392
+ if "error" in obs_payload:
393
+ error_msg = obs_payload["error"]
394
+ # NetHack game messages like "No stairs here" are normal, not environment errors
395
+ if error_msg and not any(
396
+ phrase in error_msg.lower()
397
+ for phrase in [
398
+ "no stairs",
399
+ "can't go",
400
+ "there is nothing",
401
+ "you can't",
402
+ "you don't",
403
+ "you aren't",
404
+ "you have no",
405
+ "invalid action",
406
+ "stairs here to",
407
+ "can't",
408
+ "there's nothing",
409
+ "no door",
410
+ ]
411
+ ):
412
+ print(f" ⚠️ Real environment error: {error_msg}")
413
+ termination_reason = "environment_error"
414
+ break
415
+ # This is just a NetHack game message, continue playing
416
+
417
+ # Check termination status
418
+ private_state = obs_payload.get("private")
419
+ if private_state:
420
+ if getattr(private_state, "terminated", False) or getattr(
421
+ private_state, "truncated", False
422
+ ):
423
+ termination_reason = (
424
+ "timeout" if getattr(private_state, "truncated", False) else "death"
425
+ )
426
+ balrog_progress.update(
427
+ current_depth,
428
+ current_level,
429
+ done=True,
430
+ end_reason=termination_reason,
431
+ )
432
+ break
433
+
434
+ if termination_reason in ["environment_error", "timeout", "death"]:
435
+ break
436
+
437
+ # Final metrics
438
+ if termination_reason == "unknown":
439
+ termination_reason = "timeout"
440
+
441
+ final_private = obs_payload.get("private")
442
+ final_public = obs_payload.get("public")
443
+
444
+ total_steps = getattr(final_public, "step_count", turn_count)
445
+ total_reward = getattr(final_private, "total_reward", 0.0)
446
+
447
+ # Final stats from player_stats
448
+ player_stats = obs_payload.get("player_stats", {})
449
+ final_depth = player_stats.get("depth", current_depth)
450
+ final_level = player_stats.get("experience_level", current_level)
451
+ final_gold = player_stats.get("gold", 0)
452
+
453
+ # Success determination
454
+ success = len(achievements_unlocked) > 0 or balrog_progress.percent > 5.0
455
+
456
+ avg_actions_per_turn = (
457
+ sum(actions_per_turn) / len(actions_per_turn) if actions_per_turn else 0.0
458
+ )
459
+
460
+ return TrajectoryResult(
461
+ trajectory_id=trajectory_id,
462
+ model_name=model_name,
463
+ difficulty=difficulty,
464
+ seed=seed,
465
+ success=success,
466
+ total_steps=total_steps,
467
+ total_turns=turn_count,
468
+ total_reward=total_reward,
469
+ achievements_unlocked=achievements_unlocked,
470
+ achievement_turn_unlocked=achievement_turn_unlocked,
471
+ actions_per_turn=actions_per_turn,
472
+ avg_actions_per_turn=avg_actions_per_turn,
473
+ termination_reason=termination_reason,
474
+ final_depth=final_depth,
475
+ final_level=final_level,
476
+ final_gold=final_gold,
477
+ balrog_percent=balrog_progress.percent,
478
+ turn_by_turn_data=turn_by_turn_data,
479
+ )
480
+ finally:
481
+ pbar.close()
482
+
483
async def run_evaluation(
    self,
    model_names: List[str],
    difficulties: Optional[List[str]] = None,
    num_trajectories_per_condition: int = 3,
    max_turns: int = 200,
    collect_detailed_data: bool = True,
) -> Dict[str, Any]:
    """Run comprehensive evaluation across models and difficulties.

    Args:
        model_names: Model identifiers to evaluate.
        difficulties: Difficulty settings to run; defaults to ["easy", "hard"].
            (A ``None`` sentinel replaces the old mutable list default.)
        num_trajectories_per_condition: Trajectories per (model, difficulty) pair.
        max_turns: Turn cap for each trajectory.
        collect_detailed_data: Whether to record turn-by-turn data.

    Returns:
        The comprehensive report dict from ``_generate_comprehensive_report``.
    """
    # Fix: avoid the shared mutable-default-argument pitfall.
    if difficulties is None:
        difficulties = ["easy", "hard"]

    # Fix (F541): no placeholders, so a plain string literal is used.
    print("🎯 Starting NetHack Evaluation")
    print(f" Models: {model_names}")
    print(f" Difficulties: {difficulties}")
    print(f" Trajectories per condition: {num_trajectories_per_condition}")
    print(f" Max turns per trajectory: {max_turns}")

    all_results = []

    for model_name in model_names:
        for difficulty in difficulties:
            print(f"\n🔄 Running {model_name} on {difficulty} difficulty...")

            # Launch all trajectories for this condition concurrently.
            # Seeds are offset per difficulty so conditions never share a seed.
            base_seed = 1000 if difficulty == "easy" else 2000
            trajectory_tasks = [
                self.run_single_trajectory(
                    model_name=model_name,
                    difficulty=difficulty,
                    seed=base_seed + i,
                    max_turns=max_turns,
                    collect_detailed_data=collect_detailed_data,
                )
                for i in range(num_trajectories_per_condition)
            ]

            condition_results = await asyncio.gather(*trajectory_tasks)
            all_results.extend(condition_results)

    self.trajectory_results = all_results
    return self._generate_comprehensive_report()
524
+
525
def _compute_aggregate_metrics(
    self, model_name: str, difficulty: str, trajectories: List[TrajectoryResult]
) -> AggregateResults:
    """Aggregate per-trajectory results for one (model, difficulty) condition."""

    n = len(trajectories)
    if n == 0:
        # No data for this condition: return an all-zero/empty aggregate.
        return AggregateResults(
            model_name=model_name,
            difficulty=difficulty,
            num_trajectories=0,
            success_rate=0.0,
            avg_total_steps=0.0,
            avg_total_turns=0.0,
            avg_total_reward=0.0,
            unique_achievements_unlocked=set(),
            total_achievement_count=0,
            avg_achievements_per_trajectory=0.0,
            achievement_unlock_rates={},
            hafner_score=0.0,
            balrog_score_avg=0.0,
            balrog_score_best=0.0,
            avg_actions_per_turn_overall=0.0,
            actions_per_turn_distribution={},
            termination_breakdown={},
            avg_final_depth=None,
            avg_final_level=None,
            avg_final_gold=None,
        )

    # Simple per-condition averages.
    success_rate = sum(1 for t in trajectories if t.success) / n
    avg_steps = sum(t.total_steps for t in trajectories) / n
    avg_turns = sum(t.total_turns for t in trajectories) / n
    avg_reward = sum(t.total_reward for t in trajectories) / n

    # Achievement tallies: union, total count, and per-achievement unlock counts.
    unique_achievements = set()
    unlock_counts = defaultdict(int)
    total_achievements = 0
    for t in trajectories:
        unique_achievements.update(t.achievements_unlocked)
        total_achievements += len(t.achievements_unlocked)
        for name in t.achievements_unlocked:
            unlock_counts[name] += 1

    unlock_rates = {name: cnt / n for name, cnt in unlock_counts.items()}
    avg_achievements = total_achievements / n

    # Hafner score expects a percentage rate for every known achievement,
    # including ones that were never unlocked in this condition.
    rate_percents = [
        (unlock_counts.get(name, 0) / n) * 100.0 for name in ALL_ACHIEVEMENTS
    ]
    hafner_adjusted = hafner_score(rate_percents)

    # BALROG dungeon-progress scores (average and single best episode).
    balrog_values = [t.balrog_percent for t in trajectories]
    balrog_avg = sum(balrog_values) / len(balrog_values) if balrog_values else 0.0
    balrog_best = max(balrog_values) if balrog_values else 0.0

    # Multi-action behaviour: distribution of actions issued per turn.
    per_turn_counts = [c for t in trajectories for c in t.actions_per_turn]
    actions_dist = defaultdict(int)
    for c in per_turn_counts:
        actions_dist[c] += 1
    avg_actions_overall = (
        sum(per_turn_counts) / len(per_turn_counts) if per_turn_counts else 0.0
    )

    # How trajectories ended, expressed as fractions of the condition.
    term_counts = defaultdict(int)
    for t in trajectories:
        term_counts[t.termination_reason] += 1
    term_breakdown = {reason: cnt / n for reason, cnt in term_counts.items()}

    # Final player stats, averaged over trajectories that reported them.
    def mean_or_none(values):
        # Average of the collected values, or None when nothing was recorded.
        return sum(values) / len(values) if values else None

    avg_depth = mean_or_none(
        [t.final_depth for t in trajectories if t.final_depth is not None]
    )
    avg_level = mean_or_none(
        [t.final_level for t in trajectories if t.final_level is not None]
    )
    avg_gold = mean_or_none(
        [t.final_gold for t in trajectories if t.final_gold is not None]
    )

    return AggregateResults(
        model_name=model_name,
        difficulty=difficulty,
        num_trajectories=n,
        success_rate=success_rate,
        avg_total_steps=avg_steps,
        avg_total_turns=avg_turns,
        avg_total_reward=avg_reward,
        unique_achievements_unlocked=unique_achievements,
        total_achievement_count=total_achievements,
        avg_achievements_per_trajectory=avg_achievements,
        achievement_unlock_rates=unlock_rates,
        hafner_score=hafner_adjusted,
        balrog_score_avg=balrog_avg,
        balrog_score_best=balrog_best,
        avg_actions_per_turn_overall=avg_actions_overall,
        actions_per_turn_distribution=dict(actions_dist),
        termination_breakdown=term_breakdown,
        avg_final_depth=avg_depth,
        avg_final_level=avg_level,
        avg_final_gold=avg_gold,
    )
641
+
642
def _generate_comprehensive_report(self) -> Dict[str, Any]:
    """Assemble the full evaluation report: tables, breakdowns, and raw data."""

    # Bucket trajectories by (model, difficulty).
    grouped_results = defaultdict(lambda: defaultdict(list))
    for traj in self.trajectory_results:
        grouped_results[traj.model_name][traj.difficulty].append(traj)

    # One aggregate per condition.
    aggregate_results = [
        self._compute_aggregate_metrics(model_name, difficulty, trajectories)
        for model_name, by_difficulty in grouped_results.items()
        for difficulty, trajectories in by_difficulty.items()
    ]

    # Assemble every table/analysis into a single report payload.
    return {
        "evaluation_summary": self._generate_summary_table(aggregate_results),
        "achievement_percentage_table": self._generate_achievement_percentage_table(
            grouped_results
        ),
        "termination_breakdown_table": self._generate_termination_breakdown_table(
            aggregate_results
        ),
        "trajectory_by_trajectory_breakdown": self._generate_trajectory_breakdown(),
        "sota_comparison": self._generate_sota_comparison(aggregate_results),
        "raw_aggregate_results": [asdict(agg) for agg in aggregate_results],
        "raw_trajectory_results": [asdict(traj) for traj in self.trajectory_results],
    }
673
+
674
+ def _generate_summary_table(self, aggregate_results: List[AggregateResults]) -> pd.DataFrame:
675
+ """Generate main summary table with key metrics."""
676
+
677
+ data = []
678
+ for agg in aggregate_results:
679
+ data.append(
680
+ {
681
+ "Model": agg.model_name,
682
+ "Difficulty": agg.difficulty,
683
+ "Success Rate": f"{agg.success_rate:.1%}",
684
+ "Hafner Score": f"{agg.hafner_score:.1f}%",
685
+ "BALROG Avg": f"{agg.balrog_score_avg:.1f}%",
686
+ "BALROG Best": f"{agg.balrog_score_best:.1f}%",
687
+ "Avg Steps": f"{agg.avg_total_steps:.1f}",
688
+ "Avg Turns": f"{agg.avg_total_turns:.1f}",
689
+ "Avg Reward": f"{agg.avg_total_reward:.3f}",
690
+ "Unique Achievements": len(agg.unique_achievements_unlocked),
691
+ "Avg Achievements/Traj": f"{agg.avg_achievements_per_trajectory:.2f}",
692
+ "Avg Actions/Turn": f"{agg.avg_actions_per_turn_overall:.1f}",
693
+ }
694
+ )
695
+
696
+ return pd.DataFrame(data)
697
+
698
def _generate_achievement_percentage_table(
    self, grouped_results: Dict[str, Dict[str, List[TrajectoryResult]]]
) -> pd.DataFrame:
    """Generate a table of per-achievement unlock percentages.

    Each row is one (model, difficulty) condition; each achievement column
    holds the fraction of that condition's trajectories that unlocked it,
    formatted as a percentage string.

    Args:
        grouped_results: Trajectories bucketed as model -> difficulty -> list.

    Returns:
        DataFrame with "Model", "Difficulty", then achievement columns ordered
        by category (basic, intermediate, advanced).
    """
    data = []

    for model_name, difficulties in grouped_results.items():
        for difficulty, trajectories in difficulties.items():
            if not trajectories:
                continue

            num_trajectories = len(trajectories)
            row = {"Model": model_name, "Difficulty": difficulty}

            # Tally how many trajectories unlocked each achievement.
            achievement_counts = defaultdict(int)
            for traj in trajectories:
                for ach in traj.achievements_unlocked:
                    achievement_counts[ach] += 1

            for achievement in ALL_ACHIEVEMENTS:
                percentage = achievement_counts[achievement] / num_trajectories
                row[achievement] = f"{percentage:.1%}"

            data.append(row)

    df = pd.DataFrame(data)

    # Robustness fix: with no rows the DataFrame has no columns at all, and
    # the column re-selection below would raise a KeyError on "Model".
    if df.empty:
        return df

    # Reorder columns: Model, Difficulty, then achievements grouped by category.
    base_cols = ["Model", "Difficulty"]
    achievement_cols = []
    for category in ["basic", "intermediate", "advanced"]:
        for ach in ACHIEVEMENT_CATEGORIES[category]:
            if ach in df.columns:
                achievement_cols.append(ach)

    return df[base_cols + achievement_cols]
738
+
739
def _generate_termination_breakdown_table(
    self, aggregate_results: List[AggregateResults]
) -> pd.DataFrame:
    """Tabulate how often each termination reason occurred, per condition."""

    rows = []
    for agg in aggregate_results:
        # One percentage column per known termination reason, appended after
        # the identifying columns.
        reason_cols = {
            f"{reason.title()} %": f"{agg.termination_breakdown.get(reason, 0.0):.1%}"
            for reason in TERMINATION_REASONS
        }
        rows.append({"Model": agg.model_name, "Difficulty": agg.difficulty, **reason_cols})

    return pd.DataFrame(rows)
758
+
759
def _generate_trajectory_breakdown(self) -> pd.DataFrame:
    """Produce one detailed row per trajectory, with per-category achievement counts."""

    def count_in_category(unlocked, category):
        # How many of the unlocked achievements belong to the given category.
        return sum(1 for a in unlocked if a in ACHIEVEMENT_CATEGORIES[category])

    rows = []
    for traj in self.trajectory_results:
        unlocked = traj.achievements_unlocked
        rows.append(
            {
                "Trajectory ID": traj.trajectory_id[:8],
                "Model": traj.model_name,
                "Difficulty": traj.difficulty,
                "Seed": traj.seed,
                "Success": "✓" if traj.success else "✗",
                "Steps": traj.total_steps,
                "Turns": traj.total_turns,
                "Reward": f"{traj.total_reward:.3f}",
                "Total Achievements": len(unlocked),
                "Basic": count_in_category(unlocked, "basic"),
                "Intermediate": count_in_category(unlocked, "intermediate"),
                "Advanced": count_in_category(unlocked, "advanced"),
                "BALROG Score": f"{traj.balrog_percent:.1f}%",
                "Termination": traj.termination_reason,
                "Final Depth": traj.final_depth,
                "Achievements": ", ".join(sorted(unlocked)) if unlocked else "None",
            }
        )

    return pd.DataFrame(rows)
803
+
804
def _generate_sota_comparison(
    self, aggregate_results: List[AggregateResults]
) -> Dict[str, pd.DataFrame]:
    """Compare our results against published SOTA numbers.

    Hafner (multi-episode, log-adjusted) and BALROG (single-episode percent)
    results are kept in separate tables because the methodologies differ.
    """
    our_hafner_data = []
    our_balrog_data = []

    for agg in aggregate_results:
        system_label = f"{agg.model_name} (multi-action)"

        # Hafner-methodology row.
        our_hafner_data.append(
            {
                "System": system_label,
                "Hafner Score": f"{agg.hafner_score:.1f}%",
                "Category": "Current Evaluation (Hafner)",
            }
        )

        # BALROG-methodology row.
        balrog_row = {
            "System": system_label,
            "BALROG Score (Avg)": f"{agg.balrog_score_avg:.1f}%",
            "BALROG Score (Best)": f"{agg.balrog_score_best:.1f}%",
            "Category": "Current Evaluation (BALROG)",
        }

        # If the model maps onto a BALROG leaderboard entry, also report our
        # scores as a percentage of that SOTA reference.
        sota_name = MODEL_NAME_TO_SOTA.get(agg.model_name)
        if sota_name is not None and sota_name in BALROG_SOTA_SCORES["balrog_leaderboard"]:
            balrog_sota_score = BALROG_SOTA_SCORES["balrog_leaderboard"][sota_name]
            pct_avg = (agg.balrog_score_avg / balrog_sota_score) * 100
            pct_best = (agg.balrog_score_best / balrog_sota_score) * 100
            balrog_row["% of BALROG SOTA (Avg)"] = f"{pct_avg:.1f}%"
            balrog_row["% of BALROG SOTA (Best)"] = f"{pct_best:.1f}%"
            balrog_row["BALROG SOTA Reference"] = f"{sota_name} ({balrog_sota_score:.1f}%)"

        our_balrog_data.append(balrog_row)

    return {
        "our_hafner_results": pd.DataFrame(our_hafner_data),
        "our_balrog_results": pd.DataFrame(our_balrog_data),
        "methodology_note": "⚠️ CRITICAL: Hafner scores (log-adjusted multi-episode) and BALROG scores (simple single-episode percentage) use different methodologies and are NOT directly comparable!",
    }
853
+
854
def print_report(self, report: Dict[str, Any]):
    """Pretty-print the evaluation report to stdout."""

    divider = "=" * 80
    print("\n" + divider)
    print("🎯 NETHACK EVALUATION REPORT")
    print(divider)

    # --- Summary table ---------------------------------------------------
    print("\n📊 EVALUATION SUMMARY")
    summary_df = report["evaluation_summary"]
    # Shorten long column headers so the table stays narrow.
    short_names = {col: col[:12] for col in summary_df.columns if len(col) > 12}
    summary_df = summary_df.rename(columns=short_names)
    print(summary_df.to_string(index=False, max_colwidth=12))

    # --- Achievement unlock rates ----------------------------------------
    print("\n🏆 ACHIEVEMENT UNLOCK RATES")
    print("Format: unlocked/total (percentage)")

    # Re-bucket trajectories for the vertical achievement summary.
    grouped_results = defaultdict(lambda: defaultdict(list))
    for traj in self.trajectory_results:
        grouped_results[traj.model_name][traj.difficulty].append(traj)

    achievement_summary = self._generate_achievement_summary_table(grouped_results)

    # Show achievements grouped by category for readability.
    for category in ["Basic", "Intermediate", "Advanced"]:
        category_data = achievement_summary[achievement_summary["Category"] == category]
        if category_data.empty:
            continue
        print(f"\n{category.upper()} ACHIEVEMENTS:")
        print(category_data.drop("Category", axis=1).to_string(index=False))

    # --- Trajectory breakdown (sample rows only, to save space) ----------
    traj_df = report["trajectory_by_trajectory_breakdown"]
    print(f"\n📋 TRAJECTORY BREAKDOWN ({len(traj_df)} total trajectories)")
    print("Sample trajectories:")
    sample_cols = [
        "Model",
        "Difficulty",
        "Success",
        "Steps",
        "Total Achievements",
        "BALROG Score",
        "Termination",
    ]
    print(traj_df[sample_cols].head(5).to_string(index=False, max_colwidth=12))
    if len(traj_df) > 5:
        print(f"... and {len(traj_df) - 5} more trajectories")

    # --- SOTA comparison --------------------------------------------------
    sota_comparison = report["sota_comparison"]
    print("\n🏆 SOTA COMPARISON")
    print(sota_comparison["methodology_note"])

    print("\n📊 HAFNER METHODOLOGY RESULTS (Multi-episode log-adjusted)")
    print(sota_comparison["our_hafner_results"].to_string(index=False, max_colwidth=20))

    print("\n📊 BALROG METHODOLOGY RESULTS (Single-episode percentage)")
    balrog_clean = sota_comparison["our_balrog_results"].copy()
    # Compact the SOTA-comparison column headers when they are present.
    header_map = {
        "% of BALROG SOTA (Avg)": "% SOTA Avg",
        "% of BALROG SOTA (Best)": "% SOTA Best",
    }
    balrog_clean = balrog_clean.rename(
        columns={old: new for old, new in header_map.items() if old in balrog_clean.columns}
    )
    print(balrog_clean.to_string(index=False, max_colwidth=20))

    print("\n" + divider)
927
+
928
def _generate_achievement_summary_table(
    self, grouped_results: Dict[str, Dict[str, List[TrajectoryResult]]]
) -> pd.DataFrame:
    """Build a vertical per-achievement table.

    One row per achievement, one column per (model, difficulty) condition
    showing "unlocked/total (percentage)" — easier to read than a wide table.
    """
    rows = []

    for category_name, achievements in ACHIEVEMENT_CATEGORIES.items():
        for achievement in achievements:
            row = {
                "Category": category_name.capitalize(),
                "Achievement": achievement.replace("_", " ").title(),
            }

            # Fill one cell per (model, difficulty) condition.
            for model_name, by_difficulty in grouped_results.items():
                for difficulty, trajectories in by_difficulty.items():
                    if not trajectories:
                        continue

                    total = len(trajectories)
                    unlocked = sum(
                        1 for traj in trajectories
                        if achievement in traj.achievements_unlocked
                    )
                    rate = unlocked / total if total > 0 else 0.0
                    row[f"{model_name} ({difficulty})"] = f"{unlocked}/{total} ({rate:.1%})"

            rows.append(row)

    return pd.DataFrame(rows)
961
+
962
+
963
+ # Convenience function for quick evaluations
964
async def run_nethack_eval(
    model_names: List[str],
    difficulties: Optional[List[str]] = None,
    num_trajectories: int = 3,
    max_turns: int = 200,
) -> Dict[str, Any]:
    """Quick evaluation runner with automatic report generation.

    Args:
        model_names: Model identifiers to evaluate.
        difficulties: Difficulty settings to run; defaults to ["easy", "hard"].
        num_trajectories: Trajectories per (model, difficulty) condition.
        max_turns: Turn cap for each trajectory.

    Returns:
        The comprehensive report dict (also printed to stdout).
    """
    # Fix: None sentinel avoids the shared mutable default-argument pitfall.
    if difficulties is None:
        difficulties = ["easy", "hard"]

    framework = NetHackEvalFramework()
    report = await framework.run_evaluation(
        model_names=model_names,
        difficulties=difficulties,
        num_trajectories_per_condition=num_trajectories,
        max_turns=max_turns,
    )

    framework.print_report(report)
    return report