synth-ai 0.1.9__py3-none-any.whl → 0.2.1.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- synth_ai/__init__.py +28 -2
- synth_ai/core/system.py +4 -0
- synth_ai/environments/__init__.py +35 -0
- synth_ai/environments/environment/__init__.py +1 -0
- synth_ai/environments/environment/artifacts/__init__.py +1 -0
- synth_ai/environments/environment/artifacts/base.py +50 -0
- synth_ai/environments/environment/core.py +22 -0
- synth_ai/environments/environment/db/__init__.py +1 -0
- synth_ai/environments/environment/db/sqlite.py +45 -0
- synth_ai/environments/environment/registry.py +24 -0
- synth_ai/environments/environment/resources/sqlite.py +46 -0
- synth_ai/environments/environment/results.py +1 -0
- synth_ai/environments/environment/rewards/__init__.py +1 -0
- synth_ai/environments/environment/rewards/core.py +28 -0
- synth_ai/environments/environment/shared_engine.py +26 -0
- synth_ai/environments/environment/tools/__init__.py +34 -0
- synth_ai/environments/examples/__init__.py +1 -0
- synth_ai/environments/examples/crafter_classic/__init__.py +8 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_comprehensive_evaluation.py +58 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_browser.py +152 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_framework.py +1194 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_quick_evaluation.py +51 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_react_agent.py +872 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_trace_evaluation.py +1412 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/test_crafter_react_agent.py +1110 -0
- synth_ai/environments/examples/crafter_classic/config_logging.py +111 -0
- synth_ai/environments/examples/crafter_classic/engine.py +502 -0
- synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +63 -0
- synth_ai/environments/examples/crafter_classic/engine_helpers/action_map.py +5 -0
- synth_ai/environments/examples/crafter_classic/engine_helpers/serialization.py +74 -0
- synth_ai/environments/examples/crafter_classic/environment.py +255 -0
- synth_ai/environments/examples/crafter_classic/taskset.py +228 -0
- synth_ai/environments/examples/enron/agent_demos/test_synth_react.py +535 -0
- synth_ai/environments/examples/enron/art_helpers/email_search_tools.py +156 -0
- synth_ai/environments/examples/enron/art_helpers/local_email_db.py +280 -0
- synth_ai/environments/examples/enron/art_helpers/types_enron.py +24 -0
- synth_ai/environments/examples/enron/engine.py +291 -0
- synth_ai/environments/examples/enron/environment.py +165 -0
- synth_ai/environments/examples/enron/taskset.py +112 -0
- synth_ai/environments/examples/enron/units/keyword_stats.py +111 -0
- synth_ai/environments/examples/enron/units/test_email_index.py +8 -0
- synth_ai/environments/examples/minigrid/__init__.py +48 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_evaluation_framework.py +1188 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_quick_evaluation.py +47 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_react_agent.py +562 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_trace_evaluation.py +220 -0
- synth_ai/environments/examples/minigrid/agent_demos/test_minigrid_react_agent.py +393 -0
- synth_ai/environments/examples/minigrid/engine.py +589 -0
- synth_ai/environments/examples/minigrid/environment.py +274 -0
- synth_ai/environments/examples/minigrid/environment_mapping.py +242 -0
- synth_ai/environments/examples/minigrid/puzzle_loader.py +416 -0
- synth_ai/environments/examples/minigrid/taskset.py +583 -0
- synth_ai/environments/examples/minigrid/units/test_action_behavior.py +226 -0
- synth_ai/environments/examples/minigrid/units/test_debug_messages.py +83 -0
- synth_ai/environments/examples/minigrid/units/test_exploration.py +120 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_engine.py +214 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_environment.py +238 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_environment_mapping.py +301 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_taskset.py +210 -0
- synth_ai/environments/examples/nethack/__init__.py +7 -0
- synth_ai/environments/examples/nethack/achievements.py +337 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_evaluation_framework.py +981 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_quick_evaluation.py +74 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_react_agent.py +832 -0
- synth_ai/environments/examples/nethack/agent_demos/test_nethack_react_agent.py +1112 -0
- synth_ai/environments/examples/nethack/engine.py +738 -0
- synth_ai/environments/examples/nethack/environment.py +255 -0
- synth_ai/environments/examples/nethack/helpers/__init__.py +42 -0
- synth_ai/environments/examples/nethack/helpers/action_mapping.py +301 -0
- synth_ai/environments/examples/nethack/helpers/nle_wrapper.py +401 -0
- synth_ai/environments/examples/nethack/helpers/observation_utils.py +433 -0
- synth_ai/environments/examples/nethack/helpers/recording_wrapper.py +201 -0
- synth_ai/environments/examples/nethack/helpers/trajectory_recorder.py +268 -0
- synth_ai/environments/examples/nethack/helpers/visualization/replay_viewer.py +308 -0
- synth_ai/environments/examples/nethack/helpers/visualization/visualizer.py +430 -0
- synth_ai/environments/examples/nethack/taskset.py +323 -0
- synth_ai/environments/examples/nethack/units/test_nethack_engine.py +277 -0
- synth_ai/environments/examples/nethack/units/test_nethack_environment.py +281 -0
- synth_ai/environments/examples/nethack/units/test_nethack_taskset.py +213 -0
- synth_ai/environments/examples/nethack/units/test_recording.py +307 -0
- synth_ai/environments/examples/red/__init__.py +7 -0
- synth_ai/environments/examples/red/agent_demos/__init__.py +1 -0
- synth_ai/environments/examples/red/agent_demos/test_synth_react.py +1471 -0
- synth_ai/environments/examples/red/config_logging.py +110 -0
- synth_ai/environments/examples/red/engine.py +693 -0
- synth_ai/environments/examples/red/engine_helpers/__init__.py +1 -0
- synth_ai/environments/examples/red/engine_helpers/memory_map.py +28 -0
- synth_ai/environments/examples/red/engine_helpers/reward_components.py +275 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/__init__.py +142 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/adaptive_rewards.py +56 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/battle_rewards.py +283 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/composite_rewards.py +149 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/economy_rewards.py +137 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/efficiency_rewards.py +56 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/exploration_rewards.py +330 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/novelty_rewards.py +120 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/pallet_town_rewards.py +558 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/pokemon_rewards.py +312 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/social_rewards.py +147 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/story_rewards.py +246 -0
- synth_ai/environments/examples/red/engine_helpers/screen_analysis.py +367 -0
- synth_ai/environments/examples/red/engine_helpers/state_extraction.py +139 -0
- synth_ai/environments/examples/red/environment.py +235 -0
- synth_ai/environments/examples/red/taskset.py +77 -0
- synth_ai/environments/examples/red/test_fixes.py +125 -0
- synth_ai/environments/examples/red/test_fixes_mock.py +148 -0
- synth_ai/environments/examples/red/units/__init__.py +1 -0
- synth_ai/environments/examples/red/units/test_basic_functionality.py +97 -0
- synth_ai/environments/examples/red/units/test_button_press_requirements.py +217 -0
- synth_ai/environments/examples/red/units/test_engine.py +192 -0
- synth_ai/environments/examples/red/units/test_environment.py +455 -0
- synth_ai/environments/examples/red/units/test_exploration_strategy.py +227 -0
- synth_ai/environments/examples/red/units/test_integration.py +217 -0
- synth_ai/environments/examples/red/units/test_memory_extraction.py +111 -0
- synth_ai/environments/examples/red/units/test_menu_bug_reproduction.py +1100 -0
- synth_ai/environments/examples/red/units/test_movement_debug.py +255 -0
- synth_ai/environments/examples/red/units/test_pokemon_mcts_debug.py +163 -0
- synth_ai/environments/examples/red/units/test_pokemon_mcts_verbose.py +117 -0
- synth_ai/environments/examples/red/units/test_red_basic.py +145 -0
- synth_ai/environments/examples/red/units/test_red_comprehensive.py +323 -0
- synth_ai/environments/examples/red/units/test_retry_movement.py +195 -0
- synth_ai/environments/examples/red/units/test_reward_components.py +186 -0
- synth_ai/environments/examples/red/units/test_rom_integration.py +260 -0
- synth_ai/environments/examples/red/units/test_taskset.py +116 -0
- synth_ai/environments/examples/red/units/test_tree.py +448 -0
- synth_ai/environments/examples/sokoban/__init__.py +1 -0
- synth_ai/environments/examples/sokoban/agent_demos/sokoban_full_eval.py +900 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_dspy_react.py +1 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_sokoban_react_agent.py +498 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_synth_lats.py +1 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_synth_react_locally.py +748 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_synth_react_service.py +296 -0
- synth_ai/environments/examples/sokoban/engine.py +675 -0
- synth_ai/environments/examples/sokoban/engine_helpers/__init__.py +1 -0
- synth_ai/environments/examples/sokoban/engine_helpers/room_utils.py +656 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/__init__.py +17 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/__init__.py +3 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/boxoban_env.py +129 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/render_utils.py +370 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/room_utils.py +331 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env.py +305 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_fixed_targets.py +66 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_pull.py +114 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_two_player.py +122 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_variations.py +394 -0
- synth_ai/environments/examples/sokoban/environment.py +228 -0
- synth_ai/environments/examples/sokoban/generate_verified_puzzles.py +438 -0
- synth_ai/environments/examples/sokoban/puzzle_loader.py +311 -0
- synth_ai/environments/examples/sokoban/taskset.py +425 -0
- synth_ai/environments/examples/sokoban/units/astar_common.py +94 -0
- synth_ai/environments/examples/sokoban/units/test_building_task_set.py +49 -0
- synth_ai/environments/examples/sokoban/units/test_false_positive.py +120 -0
- synth_ai/environments/examples/sokoban/units/test_simple_run_through_environment.py +119 -0
- synth_ai/environments/examples/sokoban/units/test_sokoban_environment.py +98 -0
- synth_ai/environments/examples/sokoban/units/test_tree.py +364 -0
- synth_ai/environments/examples/tictactoe/__init__.py +1 -0
- synth_ai/environments/examples/tictactoe/agent_demos/test_synth_react.py +266 -0
- synth_ai/environments/examples/tictactoe/agent_demos/test_tictactoe_react_agent.py +470 -0
- synth_ai/environments/examples/tictactoe/engine.py +368 -0
- synth_ai/environments/examples/tictactoe/environment.py +239 -0
- synth_ai/environments/examples/tictactoe/taskset.py +214 -0
- synth_ai/environments/examples/tictactoe/units/test_tictactoe_engine.py +393 -0
- synth_ai/environments/examples/tictactoe/units/test_tictactoe_environment.py +493 -0
- synth_ai/environments/examples/tictactoe/units/test_tictactoe_taskset.py +191 -0
- synth_ai/environments/examples/verilog/__init__.py +10 -0
- synth_ai/environments/examples/verilog/agent_demos/test_synth_react.py +520 -0
- synth_ai/environments/examples/verilog/engine.py +328 -0
- synth_ai/environments/examples/verilog/environment.py +349 -0
- synth_ai/environments/examples/verilog/taskset.py +418 -0
- synth_ai/environments/examples/verilog/units/test_verilog_engine.py +466 -0
- synth_ai/environments/examples/verilog/units/test_verilog_environment.py +585 -0
- synth_ai/environments/examples/verilog/units/test_verilog_integration.py +383 -0
- synth_ai/environments/examples/verilog/units/test_verilog_taskset.py +457 -0
- synth_ai/environments/reproducibility/core.py +42 -0
- synth_ai/environments/reproducibility/tree.py +364 -0
- synth_ai/environments/service/app.py +78 -0
- synth_ai/environments/service/core_routes.py +775 -0
- synth_ai/environments/service/external_registry.py +57 -0
- synth_ai/environments/service/registry.py +9 -0
- synth_ai/environments/stateful/__init__.py +1 -0
- synth_ai/environments/stateful/core.py +28 -0
- synth_ai/environments/stateful/engine.py +21 -0
- synth_ai/environments/stateful/state.py +7 -0
- synth_ai/environments/tasks/api.py +19 -0
- synth_ai/environments/tasks/core.py +78 -0
- synth_ai/environments/tasks/filters.py +39 -0
- synth_ai/environments/tasks/utils.py +89 -0
- synth_ai/environments/v0_observability/history.py +3 -0
- synth_ai/environments/v0_observability/log.py +2 -0
- synth_ai/lm/caching/constants.py +1 -0
- synth_ai/{zyk/lms → lm}/caching/ephemeral.py +4 -8
- synth_ai/{zyk/lms → lm}/caching/handler.py +15 -15
- synth_ai/{zyk/lms → lm}/caching/initialize.py +2 -4
- synth_ai/{zyk/lms → lm}/caching/persistent.py +4 -10
- synth_ai/{zyk/lms → lm}/config.py +2 -1
- synth_ai/{zyk/lms → lm}/constants.py +2 -2
- synth_ai/{zyk/lms → lm}/core/all.py +10 -10
- synth_ai/{zyk/lms → lm}/core/main.py +57 -33
- synth_ai/{zyk/lms → lm}/core/vendor_clients.py +12 -10
- synth_ai/lm/cost/monitor.py +1 -0
- synth_ai/lm/cost/statefulness.py +1 -0
- synth_ai/lm/provider_support/__init__.py +8 -0
- synth_ai/lm/provider_support/anthropic.py +945 -0
- synth_ai/lm/provider_support/openai.py +1115 -0
- synth_ai/lm/provider_support/suppress_logging.py +31 -0
- synth_ai/{zyk/lms → lm}/structured_outputs/handler.py +58 -80
- synth_ai/{zyk/lms → lm}/structured_outputs/inject.py +6 -20
- synth_ai/{zyk/lms → lm}/structured_outputs/rehabilitate.py +6 -12
- synth_ai/{zyk/lms → lm}/vendors/core/anthropic_api.py +21 -30
- synth_ai/{zyk/lms → lm}/vendors/core/gemini_api.py +37 -32
- synth_ai/{zyk/lms → lm}/vendors/core/mistral_api.py +19 -28
- synth_ai/{zyk/lms → lm}/vendors/core/openai_api.py +26 -36
- synth_ai/{zyk/lms → lm}/vendors/openai_standard.py +29 -33
- synth_ai/{zyk/lms → lm}/vendors/retries.py +1 -1
- synth_ai/lm/vendors/supported/__init__.py +0 -0
- synth_ai/{zyk/lms → lm}/vendors/supported/custom_endpoint.py +131 -118
- synth_ai/{zyk/lms → lm}/vendors/supported/deepseek.py +4 -8
- synth_ai/{zyk/lms → lm}/vendors/supported/grok.py +6 -8
- synth_ai/{zyk/lms → lm}/vendors/supported/groq.py +1 -1
- synth_ai/{zyk/lms → lm}/vendors/supported/ollama.py +2 -2
- synth_ai/{zyk/lms → lm}/vendors/supported/openrouter.py +18 -16
- synth_ai/{zyk/lms → lm}/vendors/supported/together.py +1 -1
- synth_ai/tracing/__init__.py +0 -0
- synth_ai/tracing/abstractions.py +224 -0
- synth_ai/tracing/base_client.py +91 -0
- synth_ai/tracing/client_manager.py +131 -0
- synth_ai/tracing/config.py +140 -0
- synth_ai/tracing/context.py +146 -0
- synth_ai/tracing/decorators.py +679 -0
- synth_ai/tracing/events/__init__.py +0 -0
- synth_ai/tracing/events/manage.py +147 -0
- synth_ai/tracing/events/scope.py +86 -0
- synth_ai/tracing/events/store.py +227 -0
- synth_ai/tracing/immediate_client.py +152 -0
- synth_ai/tracing/local.py +18 -0
- synth_ai/tracing/log_client_base.py +74 -0
- synth_ai/tracing/retry_queue.py +187 -0
- synth_ai/tracing/trackers.py +515 -0
- synth_ai/tracing/upload.py +504 -0
- synth_ai/tracing/utils.py +9 -0
- synth_ai/zyk/__init__.py +28 -2
- synth_ai-0.2.1.dev0.dist-info/METADATA +349 -0
- synth_ai-0.2.1.dev0.dist-info/RECORD +261 -0
- synth_ai/zyk/lms/caching/constants.py +0 -1
- synth_ai/zyk/lms/cost/monitor.py +0 -1
- synth_ai/zyk/lms/cost/statefulness.py +0 -1
- synth_ai-0.1.9.dist-info/METADATA +0 -37
- synth_ai-0.1.9.dist-info/RECORD +0 -50
- /synth_ai/{zyk/lms/__init__.py → environments/reproducibility/helpers.py} +0 -0
- /synth_ai/{zyk/lms/caching → lm}/__init__.py +0 -0
- /synth_ai/{zyk/lms/core → lm/caching}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/caching/dbs.py +0 -0
- /synth_ai/{zyk/lms/cost → lm/core}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/core/exceptions.py +0 -0
- /synth_ai/{zyk/lms/structured_outputs → lm/cost}/__init__.py +0 -0
- /synth_ai/{zyk/lms/vendors → lm/structured_outputs}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/tools/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/tools/base.py +0 -0
- /synth_ai/{zyk/lms/vendors/core → lm/vendors}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/vendors/base.py +0 -0
- /synth_ai/{zyk/lms/vendors/local → lm/vendors/core}/__init__.py +0 -0
- /synth_ai/{zyk/lms/vendors/supported → lm/vendors/local}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/vendors/local/ollama.py +0 -0
- {synth_ai-0.1.9.dist-info → synth_ai-0.2.1.dev0.dist-info}/WHEEL +0 -0
- {synth_ai-0.1.9.dist-info → synth_ai-0.2.1.dev0.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.1.9.dist-info → synth_ai-0.2.1.dev0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1194 @@
|
|
1
|
+
"""
|
2
|
+
Standardized Crafter Evaluation Framework
|
3
|
+
Provides detailed metrics, trajectory analysis, and achievement statistics.
|
4
|
+
"""
|
5
|
+
|
6
|
+
import asyncio
|
7
|
+
import json
|
8
|
+
import time
|
9
|
+
import math
|
10
|
+
from dataclasses import dataclass, asdict
|
11
|
+
from typing import Dict, List, Optional, Set, Tuple, Any
|
12
|
+
from collections import defaultdict
|
13
|
+
import uuid
|
14
|
+
|
15
|
+
import pandas as pd
|
16
|
+
from tqdm import tqdm
|
17
|
+
|
18
|
+
# Achievement categories based on difficulty/complexity.
# The three buckets together cover all 22 Crafter achievements (8 + 7 + 7),
# matching the denominator used by balrog_score() below.
ACHIEVEMENT_CATEGORIES = {
    "easy": [
        "collect_wood",
        "collect_stone",
        "collect_sapling",
        "place_stone",
        "place_table",
        "wake_up",
        "eat_plant",
        "collect_drink",
    ],
    "medium": [
        "make_wood_pickaxe",
        "make_wood_sword",
        "place_furnace",
        "place_plant",
        "collect_coal",
        "collect_iron",
        "eat_cow",
    ],
    "hard": [
        "make_stone_pickaxe",
        "make_stone_sword",
        "make_iron_pickaxe",
        "make_iron_sword",
        "collect_diamond",
        "defeat_skeleton",
        "defeat_zombie",
    ],
}

# Flat list of every achievement name, in category order (easy, medium, hard).
ALL_ACHIEVEMENTS = [ach for category in ACHIEVEMENT_CATEGORIES.values() for ach in category]

# Canonical episode-termination labels used in TrajectoryResult.termination_reason.
TERMINATION_REASONS = ["timeout", "death", "agent_quit", "environment_error"]
|
53
|
+
|
54
|
+
# SOTA Benchmarks for comparison
# ⚠️ IMPORTANT: These use different scoring methodologies and are NOT directly comparable!

HAFNER_SOTA_SCORES = {
    # Official Hafner scores use log-adjusted multi-episode success rates
    # (see crafter_score() below for the formula).
    "rl_baselines_hafner": {
        "Achievement Distillation + EnvGen (COLM 2024)": 35.3,
        "PPO + EnvGen": 32.2,
        "Curious Replay": 19.4,
        "Human experts": 50.5,
        "SPRING (GPT-4 planner)": 27.3,
        "Plan2Explore (unsupervised)": 2.1,
    }
}
|
68
|
+
|
69
|
+
BALROG_SOTA_SCORES = {
    # BALROG scores use simple percentage: achievements_unlocked/22 * 100
    # (see balrog_score() below). Values are leaderboard percentages.
    "balrog_leaderboard": {
        "Claude 3.5 Sonnet": 37.3,
        "Gemini 1.5 Pro": 36.4,
        "GPT-4o": 33.6,
        "Claude 3 Opus": 33.1,
        "GPT-4 Turbo": 32.7,
        "Gemini 1.5 Flash": 31.7,
        "Claude 3.5 Haiku": 31.2,
        "GPT-4o-mini": 30.2,
        "Llama 3.1 405B": 28.6,
        "Gemini 1.0 Pro": 27.7,
        "Claude 3 Haiku": 27.3,
        "Llama 3.1 70B": 26.4,
        "GPT-3.5 Turbo": 26.2,
        "Llama 3.1 8B": 25.5,
        "Gemini 1.5 Flash-8B": 25.0,
        "Llama 3 70B": 22.7,
        "Llama 3 8B": 20.0,
        "Gemini 1.0 Pro Vision": 17.3,
        "GPT-3.5 Turbo Instruct": 16.4,
    }
}
|
93
|
+
|
94
|
+
# Model name mapping for SOTA percentage calculations:
# maps API model identifiers (as passed to run_evaluation) to the display
# names used as keys in BALROG_SOTA_SCORES["balrog_leaderboard"].
MODEL_NAME_TO_SOTA = {
    "claude-3-5-sonnet-latest": "Claude 3.5 Sonnet",
    "claude-3-5-sonnet-20241022": "Claude 3.5 Sonnet",
    "claude-3-5-haiku-latest": "Claude 3.5 Haiku",
    "claude-3-5-haiku-20241022": "Claude 3.5 Haiku",
    "claude-3-opus-20240229": "Claude 3 Opus",
    "claude-3-haiku-20240307": "Claude 3 Haiku",
    "gpt-4o": "GPT-4o",
    "gpt-4o-2024-11-20": "GPT-4o",
    "gpt-4o-mini": "GPT-4o-mini",
    "gpt-4o-mini-2024-07-18": "GPT-4o-mini",
    "gpt-4-turbo": "GPT-4 Turbo",
    "gpt-4-turbo-2024-04-09": "GPT-4 Turbo",
    "gpt-3.5-turbo": "GPT-3.5 Turbo",
    "gpt-3.5-turbo-instruct": "GPT-3.5 Turbo Instruct",
    "gemini-1.5-pro-latest": "Gemini 1.5 Pro",
    "gemini-1.5-flash-latest": "Gemini 1.5 Flash",
    "gemini-1.0-pro": "Gemini 1.0 Pro",
}
|
114
|
+
|
115
|
+
|
116
|
+
def crafter_score(success_rates_percent: List[float]) -> float:
    """Compute the Hafner log-adjusted Crafter score.

    Args:
        success_rates_percent: Per-achievement success rates on a 0-100 scale.

    Returns:
        The adjusted score as a percentage (0-100), computed as
        exp(mean(log(1 + s_i))) - 1 over all achievement rates s_i.
        The log transform keeps a handful of easy achievements from
        dominating the aggregate.
    """
    if not success_rates_percent:
        # No achievement data at all -> score of zero by convention.
        return 0.0

    # Accumulate log(1 + s) terms, then average and invert the transform.
    log_sum = 0.0
    for rate in success_rates_percent:
        log_sum += math.log(1 + rate)
    mean_log = log_sum / len(success_rates_percent)
    return math.exp(mean_log) - 1
|
134
|
+
|
135
|
+
|
136
|
+
def balrog_score(achievements_unlocked: int, total_achievements: int = 22) -> float:
    """
    Compute BALROG-style Crafter score (simple percentage).

    Args:
        achievements_unlocked: Number of achievements unlocked in episode
        total_achievements: Total possible achievements (22 in Crafter)

    Returns:
        Simple percentage score (0-100)

    Formula: (achievements_unlocked / total_achievements) * 100
    """
    # Guard against a degenerate denominator: return 0.0 instead of raising
    # ZeroDivisionError when total_achievements is zero or negative.
    if total_achievements <= 0:
        return 0.0
    return (achievements_unlocked / total_achievements) * 100.0
|
150
|
+
|
151
|
+
|
152
|
+
@dataclass
class TrajectoryResult:
    """Results from a single trajectory/episode of one model on one seed."""

    trajectory_id: str  # UUID string, generated per episode
    model_name: str  # API model identifier used for this run
    difficulty: str  # task difficulty label (e.g. "easy", "hard")
    seed: int  # environment seed for reproducibility

    # Core metrics
    success: bool  # True if any achievement unlocked or episode reached termination
    total_steps: int  # environment steps taken
    total_turns: int  # Number of agent decision turns
    total_reward: float  # cumulative episode reward

    # Time metrics
    total_duration_sec: float  # Episode wall-clock duration in seconds

    # Achievement tracking
    achievements_unlocked: Set[str]  # all achievement names unlocked this episode
    achievement_turn_unlocked: Dict[str, int]  # achievement -> turn when unlocked

    # Multi-action metrics
    actions_per_turn: List[int]  # Number of actions per turn
    avg_actions_per_turn: float  # mean of actions_per_turn (0.0 if no turns)

    # Termination analysis (reasons match TERMINATION_REASONS)
    termination_reason: str  # "timeout", "death", "agent_quit", "environment_error"
    final_health: Optional[float]  # None when stats unavailable at episode end
    final_food: Optional[int]
    final_drink: Optional[int]

    # Trajectory data for detailed analysis; None when detailed collection is off
    turn_by_turn_data: Optional[List[Dict[str, Any]]] = None
|
186
|
+
|
187
|
+
|
188
|
+
@dataclass
class AggregateResults:
    """Aggregate results across multiple trajectories of one (model, difficulty) condition."""

    model_name: str
    difficulty: str
    num_trajectories: int  # how many episodes were aggregated

    # Success metrics (averages are across trajectories)
    success_rate: float
    avg_total_steps: float
    avg_total_turns: float
    avg_total_reward: float

    # Achievement metrics
    unique_achievements_unlocked: Set[str]  # union over all trajectories
    total_achievement_count: int
    avg_achievements_per_trajectory: float
    achievement_unlock_rates: Dict[str, float]  # achievement -> % of trajectories that unlocked it
    hafner_score: float  # Official Hafner adjusted score (log-mean; see crafter_score)
    balrog_score_avg: float  # Average BALROG-style score across trajectories
    balrog_score_best: float  # Best single BALROG-style score

    # Multi-action metrics
    avg_actions_per_turn_overall: float
    actions_per_turn_distribution: Dict[int, int]  # num_actions -> count

    # Termination analysis
    termination_breakdown: Dict[str, float]  # reason -> percentage
    avg_final_health: Optional[float]  # None when no trajectory reported the stat
    avg_final_food: Optional[float]
    avg_final_drink: Optional[float]

    # Rollout duration stats (seconds)
    median_duration_sec: float
    p90_duration_sec: float
    max_duration_sec: float
|
225
|
+
|
226
|
+
|
227
|
+
class CrafterEvalFramework:
|
228
|
+
"""Standardized evaluation framework for Crafter environments."""
|
229
|
+
|
230
|
+
def __init__(self):
    # Accumulates per-episode results; overwritten by each run_evaluation()
    # call and consumed by _generate_comprehensive_report().
    self.trajectory_results: List[TrajectoryResult] = []
|
232
|
+
|
233
|
+
async def run_single_trajectory(
    self,
    model_name: str,
    difficulty: str,
    seed: int,
    max_turns: int = 30,
    collect_detailed_data: bool = True,
) -> TrajectoryResult:
    """Run a single trajectory and collect detailed metrics.

    Args:
        model_name: API model identifier for the LM-backed agent.
        difficulty: Task difficulty label (e.g. "easy", "hard").
        seed: Environment seed for reproducibility.
        max_turns: Maximum number of agent decision turns.
        collect_detailed_data: When True, record per-turn diagnostics in
            TrajectoryResult.turn_by_turn_data.

    Returns:
        A fully populated TrajectoryResult for this episode.
    """
    # FIX: the original imported from "src.synth_env.*", a development-repo
    # path that is not shipped in the package; these modules live under
    # synth_ai.environments.* in the published distribution.
    from synth_ai.environments.examples.crafter_classic.agent_demos.crafter_react_agent import (
        ReActAgent,
        CrafterHistoryObservationCallable,
        CrafterMove,
    )
    from synth_ai.environments.examples.crafter_classic.environment import (
        CrafterClassicEnvironment,
    )
    from synth_ai.environments.examples.crafter_classic.taskset import (
        CrafterTaskInstance,
        CrafterTaskInstanceMetadata,
    )
    from synth_ai.environments.tasks.core import Impetus, Intent
    from synth_ai.zyk import LM

    # Create task instance
    metadata = CrafterTaskInstanceMetadata(
        difficulty=difficulty,
        seed=seed,
        num_trees_radius=0,
        num_cows_radius=0,
        num_hostiles_radius=0,
    )
    instance = CrafterTaskInstance(
        id=uuid.uuid4(),
        impetus=Impetus(
            instructions=f"Survive and unlock achievements in a {difficulty} environment."
        ),
        intent=Intent(rubric={}, gold_trajectories=None, gold_state_diff={}),
        metadata=metadata,
        is_reproducible=True,
        initial_engine_snapshot=None,
    )

    # Setup environment and agent
    hist_cb = CrafterHistoryObservationCallable(max_history=1)
    env = CrafterClassicEnvironment(instance, custom_step_obs=hist_cb)

    # temperature=0.0 keeps the agent as deterministic as the backend allows.
    llm = LM(model_name=model_name, formatting_model_name=model_name, temperature=0.0)
    agent = ReActAgent(llm, max_turns=max_turns)

    # Initialize tracking
    trajectory_id = str(uuid.uuid4())
    achievements_unlocked = set()
    achievement_turn_unlocked = {}
    actions_per_turn = []
    turn_by_turn_data = [] if collect_detailed_data else None

    # Run episode
    start_time = time.perf_counter()
    obs_payload = await env.initialize()
    turn_count = 0
    termination_reason = "unknown"

    # Create progress bar for this trajectory
    pbar = tqdm(
        total=max_turns,
        desc=f"{model_name} ({difficulty}) Seed {seed}",
        unit="turn",
        leave=False,
        ncols=100,
    )

    try:
        while turn_count < max_turns:
            turn_count += 1
            pbar.update(1)
            # Calculate achievement breakdown by difficulty for the progress display
            easy_count = len(
                [a for a in achievements_unlocked if a in ACHIEVEMENT_CATEGORIES["easy"]]
            )
            medium_count = len(
                [a for a in achievements_unlocked if a in ACHIEVEMENT_CATEGORIES["medium"]]
            )
            hard_count = len(
                [a for a in achievements_unlocked if a in ACHIEVEMENT_CATEGORIES["hard"]]
            )
            total_count = len(achievements_unlocked)

            achievement_display = f"{total_count}({easy_count}/{medium_count}/{hard_count})"

            pbar.set_postfix(
                {
                    "achievements": achievement_display,
                    "steps": obs_payload.get("public", {}).num_steps_taken
                    if hasattr(obs_payload.get("public", {}), "num_steps_taken")
                    else 0,
                }
            )

            current_formatted_obs = obs_payload["formatted_obs"]

            # Track achievements at start of turn
            current_achievements = set()
            if "public" in obs_payload and hasattr(
                obs_payload["public"], "achievements_status"
            ):
                current_achievements = {
                    ach
                    for ach, status in obs_payload["public"].achievements_status.items()
                    if status
                }

            # Check for new achievements and record the turn they first appeared
            new_achievements = current_achievements - achievements_unlocked
            for ach in new_achievements:
                achievements_unlocked.add(ach)
                achievement_turn_unlocked[ach] = turn_count
                agent.current_achievements.add(ach)

            # Agent decision
            action_sequence = await agent.decide(current_formatted_obs, obs_payload)

            if action_sequence == [-1]:  # Agent terminated
                termination_reason = "agent_quit"
                break

            actions_per_turn.append(len(action_sequence))

            # Collect turn data
            if collect_detailed_data:
                turn_data = {
                    "turn": turn_count,
                    "actions_planned": len(action_sequence),
                    "achievements_at_start": list(current_achievements),
                    "new_achievements_this_turn": list(new_achievements),
                    "steps_before_turn": obs_payload.get("public", {}).num_steps_taken
                    if hasattr(obs_payload.get("public", {}), "num_steps_taken")
                    else 0,
                }
                turn_by_turn_data.append(turn_data)

            # Execute actions one at a time so termination is detected promptly
            # (the index from the original enumerate() was unused, so it is dropped)
            for act_idx in action_sequence:
                obs_payload = await env.step([[CrafterMove(act_idx)]])

                if "error" in obs_payload:
                    termination_reason = "environment_error"
                    break

                if obs_payload["private"].terminated or obs_payload["private"].truncated:
                    termination_reason = (
                        "timeout" if obs_payload["private"].truncated else "death"
                    )
                    break

            # Propagate terminal conditions out of the turn loop
            if termination_reason in ["environment_error", "timeout", "death"]:
                break

        # Final metrics: exhausting max_turns without a terminal event is a timeout
        if termination_reason == "unknown":
            termination_reason = "timeout"

        final_private = obs_payload.get("private")
        final_public = obs_payload.get("public")

        total_steps = (
            final_public.num_steps_taken if hasattr(final_public, "num_steps_taken") else 0
        )
        total_reward = (
            final_private.total_reward_episode
            if hasattr(final_private, "total_reward_episode")
            else 0.0
        )

        # Health/survival stats (None when the engine did not expose them)
        final_health = None
        final_food = None
        final_drink = None
        if hasattr(final_private, "player_internal_stats"):
            stats = final_private.player_internal_stats
            final_health = stats.get("health")
            final_food = stats.get("food")
            final_drink = stats.get("drink")

        # Success determination: any achievement, or a clean environment termination
        success = len(achievements_unlocked) > 0 or (
            hasattr(final_private, "terminated") and final_private.terminated
        )

        avg_actions_per_turn = (
            sum(actions_per_turn) / len(actions_per_turn) if actions_per_turn else 0.0
        )

        return TrajectoryResult(
            trajectory_id=trajectory_id,
            model_name=model_name,
            difficulty=difficulty,
            seed=seed,
            success=success,
            total_steps=total_steps,
            total_turns=turn_count,
            total_reward=total_reward,
            total_duration_sec=time.perf_counter() - start_time,
            achievements_unlocked=achievements_unlocked,
            achievement_turn_unlocked=achievement_turn_unlocked,
            actions_per_turn=actions_per_turn,
            avg_actions_per_turn=avg_actions_per_turn,
            termination_reason=termination_reason,
            final_health=final_health,
            final_food=final_food,
            final_drink=final_drink,
            turn_by_turn_data=turn_by_turn_data,
        )
    finally:
        # Always close the progress bar, even on error or agent quit.
        pbar.close()
|
448
|
+
|
449
|
+
async def run_evaluation(
    self,
    model_names: List[str],
    difficulties: "Optional[List[str]]" = None,
    num_trajectories_per_condition: int = 3,
    max_turns: int = 30,
    collect_detailed_data: bool = True,
) -> Dict[str, Any]:
    """Run comprehensive evaluation across models and difficulties.

    Args:
        model_names: Model identifiers to evaluate.
        difficulties: Difficulty settings to run; defaults to ["easy", "hard"].
        num_trajectories_per_condition: Trajectories per (model, difficulty) pair.
        max_turns: Turn cap for each trajectory.
        collect_detailed_data: Whether to record per-turn data in each trajectory.

    Returns:
        The comprehensive report dict from _generate_comprehensive_report().
    """
    # Fix: the previous signature used a mutable list literal as the default
    # argument (shared across calls); use a None sentinel instead.
    if difficulties is None:
        difficulties = ["easy", "hard"]

    print("🎯 Starting Crafter Evaluation")
    print(f"   Models: {model_names}")
    print(f"   Difficulties: {difficulties}")
    print(f"   Trajectories per condition: {num_trajectories_per_condition}")
    print(f"   Max turns per trajectory: {max_turns}")

    all_results = []

    for model_name in model_names:
        for difficulty in difficulties:
            print(f"\n🔄 Running {model_name} on {difficulty} difficulty...")

            # Launch every trajectory of this condition concurrently.
            trajectory_tasks = []
            for i in range(num_trajectories_per_condition):
                # Disjoint seed ranges keep easy/hard runs reproducible
                # and non-overlapping.
                seed = 1000 + i if difficulty == "easy" else 2000 + i
                trajectory_tasks.append(
                    self.run_single_trajectory(
                        model_name=model_name,
                        difficulty=difficulty,
                        seed=seed,
                        max_turns=max_turns,
                        collect_detailed_data=collect_detailed_data,
                    )
                )

            condition_results = await asyncio.gather(*trajectory_tasks)
            all_results.extend(condition_results)

    # Stash raw results on the instance so report helpers can read them.
    self.trajectory_results = all_results
    return self._generate_comprehensive_report()
|
490
|
+
|
491
|
+
def _generate_comprehensive_report(self) -> Dict[str, Any]:
    """Generate comprehensive evaluation report with all metrics and tables."""
    # Bucket trajectories by (model, difficulty) condition.
    by_condition = defaultdict(lambda: defaultdict(list))
    for traj in self.trajectory_results:
        by_condition[traj.model_name][traj.difficulty].append(traj)

    # One aggregate record per condition.
    aggregates = [
        self._compute_aggregate_metrics(model, diff, trajs)
        for model, diffs in by_condition.items()
        for diff, trajs in diffs.items()
    ]

    # Assemble every table and analysis under its report key.
    return {
        "evaluation_summary": self._generate_summary_table(aggregates),
        "achievement_percentage_table": self._generate_achievement_percentage_table(
            by_condition
        ),
        "termination_breakdown_table": self._generate_termination_breakdown_table(
            aggregates
        ),
        "multi_action_analysis": self._generate_multi_action_analysis(aggregates),
        "trajectory_by_trajectory_breakdown": self._generate_trajectory_breakdown(),
        "model_comparison_tables": self._generate_model_comparison_tables(aggregates),
        "sota_comparison": self._generate_sota_comparison(aggregates),
        "raw_aggregate_results": [asdict(a) for a in aggregates],
        "raw_trajectory_results": [asdict(t) for t in self.trajectory_results],
    }
|
524
|
+
|
525
|
+
def _compute_aggregate_metrics(
    self, model_name: str, difficulty: str, trajectories: List[TrajectoryResult]
) -> AggregateResults:
    """Compute aggregate metrics for a model-difficulty condition.

    Args:
        model_name: Name of the evaluated model.
        difficulty: Difficulty setting the trajectories ran under.
        trajectories: Per-trajectory results for this condition.

    Returns:
        An AggregateResults record summarizing the condition; a zeroed
        record when no trajectories are given.
    """

    num_trajectories = len(trajectories)
    if num_trajectories == 0:
        # Zeroed record keeps downstream tables free of div-by-zero guards.
        return AggregateResults(
            model_name=model_name,
            difficulty=difficulty,
            num_trajectories=0,
            success_rate=0.0,
            avg_total_steps=0.0,
            avg_total_turns=0.0,
            avg_total_reward=0.0,
            unique_achievements_unlocked=set(),
            total_achievement_count=0,
            avg_achievements_per_trajectory=0.0,
            achievement_unlock_rates={},
            hafner_score=0.0,
            balrog_score_avg=0.0,
            balrog_score_best=0.0,
            avg_actions_per_turn_overall=0.0,
            actions_per_turn_distribution={},
            termination_breakdown={},
            avg_final_health=None,
            avg_final_food=None,
            avg_final_drink=None,
            median_duration_sec=0.0,
            p90_duration_sec=0.0,
            max_duration_sec=0.0,
        )

    # Success metrics
    success_rate = sum(1 for t in trajectories if t.success) / num_trajectories
    avg_total_steps = sum(t.total_steps for t in trajectories) / num_trajectories
    avg_total_turns = sum(t.total_turns for t in trajectories) / num_trajectories
    avg_total_reward = sum(t.total_reward for t in trajectories) / num_trajectories

    # Achievement analysis
    all_achievements = set()
    total_achievement_count = 0
    achievement_counts = defaultdict(int)

    for traj in trajectories:
        all_achievements.update(traj.achievements_unlocked)
        total_achievement_count += len(traj.achievements_unlocked)
        for ach in traj.achievements_unlocked:
            achievement_counts[ach] += 1

    achievement_unlock_rates = {
        ach: count / num_trajectories for ach, count in achievement_counts.items()
    }
    avg_achievements_per_trajectory = total_achievement_count / num_trajectories

    # Hafner adjusted score over ALL achievements: achievements never
    # unlocked contribute a 0% rate (crafter_score expects percentages).
    all_achievement_rates = [
        achievement_counts.get(achievement, 0) / num_trajectories * 100.0
        for achievement in ALL_ACHIEVEMENTS
    ]
    hafner_adjusted_score = crafter_score(all_achievement_rates)

    # BALROG scores (one per trajectory, from its unlocked-achievement count)
    balrog_scores = [balrog_score(len(traj.achievements_unlocked)) for traj in trajectories]
    balrog_score_avg = sum(balrog_scores) / len(balrog_scores) if balrog_scores else 0.0
    balrog_score_best = max(balrog_scores) if balrog_scores else 0.0

    # Multi-action analysis
    all_actions_per_turn = []
    actions_per_turn_dist = defaultdict(int)
    for traj in trajectories:
        all_actions_per_turn.extend(traj.actions_per_turn)
        for count in traj.actions_per_turn:
            actions_per_turn_dist[count] += 1

    avg_actions_per_turn_overall = (
        sum(all_actions_per_turn) / len(all_actions_per_turn) if all_actions_per_turn else 0.0
    )

    # Termination analysis
    termination_counts = defaultdict(int)
    for traj in trajectories:
        termination_counts[traj.termination_reason] += 1
    termination_breakdown = {
        reason: count / num_trajectories for reason, count in termination_counts.items()
    }

    # Survival stats (None-valued entries are excluded from the averages)
    health_values = [t.final_health for t in trajectories if t.final_health is not None]
    food_values = [t.final_food for t in trajectories if t.final_food is not None]
    drink_values = [t.final_drink for t in trajectories if t.final_drink is not None]

    avg_final_health = sum(health_values) / len(health_values) if health_values else None
    avg_final_food = sum(food_values) / len(food_values) if food_values else None
    avg_final_drink = sum(drink_values) / len(drink_values) if drink_values else None

    # Duration stats. num_trajectories > 0 here, so durations is non-empty.
    durations = sorted(t.total_duration_sec for t in trajectories)
    mid = len(durations) // 2
    # Bug fix: the previous code used durations[len // 2], which is the
    # upper-middle element — not the median — for even-length samples.
    if len(durations) % 2:
        median_duration_sec = durations[mid]
    else:
        median_duration_sec = (durations[mid - 1] + durations[mid]) / 2
    # int() floors, so the P90 index is always < len(durations).
    p90_duration_sec = durations[int(len(durations) * 0.9)]
    max_duration_sec = durations[-1]

    return AggregateResults(
        model_name=model_name,
        difficulty=difficulty,
        num_trajectories=num_trajectories,
        success_rate=success_rate,
        avg_total_steps=avg_total_steps,
        avg_total_turns=avg_total_turns,
        avg_total_reward=avg_total_reward,
        unique_achievements_unlocked=all_achievements,
        total_achievement_count=total_achievement_count,
        avg_achievements_per_trajectory=avg_achievements_per_trajectory,
        achievement_unlock_rates=achievement_unlock_rates,
        avg_actions_per_turn_overall=avg_actions_per_turn_overall,
        actions_per_turn_distribution=dict(actions_per_turn_dist),
        termination_breakdown=termination_breakdown,
        avg_final_health=avg_final_health,
        avg_final_food=avg_final_food,
        avg_final_drink=avg_final_drink,
        hafner_score=hafner_adjusted_score,
        balrog_score_avg=balrog_score_avg,
        balrog_score_best=balrog_score_best,
        median_duration_sec=median_duration_sec,
        p90_duration_sec=p90_duration_sec,
        max_duration_sec=max_duration_sec,
    )
|
654
|
+
|
655
|
+
def _generate_summary_table(self, aggregate_results: List[AggregateResults]) -> pd.DataFrame:
|
656
|
+
"""Generate main summary table with key metrics."""
|
657
|
+
|
658
|
+
data = []
|
659
|
+
for agg in aggregate_results:
|
660
|
+
data.append(
|
661
|
+
{
|
662
|
+
"Model": agg.model_name,
|
663
|
+
"Difficulty": agg.difficulty,
|
664
|
+
"Success Rate": f"{agg.success_rate:.1%}",
|
665
|
+
"Hafner Score": f"{agg.hafner_score:.1f}%",
|
666
|
+
"BALROG Avg": f"{agg.balrog_score_avg:.1f}%",
|
667
|
+
"BALROG Best": f"{agg.balrog_score_best:.1f}%",
|
668
|
+
"Avg Steps": f"{agg.avg_total_steps:.1f}",
|
669
|
+
"Avg Turns": f"{agg.avg_total_turns:.1f}",
|
670
|
+
"Avg Reward": f"{agg.avg_total_reward:.3f}",
|
671
|
+
"Unique Achievements": len(agg.unique_achievements_unlocked),
|
672
|
+
"Avg Achievements/Traj": f"{agg.avg_achievements_per_trajectory:.2f}",
|
673
|
+
"Avg Actions/Turn": f"{agg.avg_actions_per_turn_overall:.1f}",
|
674
|
+
"Q2 Secs": f"{agg.median_duration_sec:.1f}",
|
675
|
+
"P90 Secs": f"{agg.p90_duration_sec:.1f}",
|
676
|
+
"Max Secs": f"{agg.max_duration_sec:.1f}",
|
677
|
+
}
|
678
|
+
)
|
679
|
+
|
680
|
+
return pd.DataFrame(data)
|
681
|
+
|
682
|
+
def _generate_achievement_percentage_table(
|
683
|
+
self, grouped_results: Dict[str, Dict[str, List[TrajectoryResult]]]
|
684
|
+
) -> pd.DataFrame:
|
685
|
+
"""Generate table showing percentage of trajectories achieving each achievement."""
|
686
|
+
|
687
|
+
data = []
|
688
|
+
|
689
|
+
for model_name, difficulties in grouped_results.items():
|
690
|
+
for difficulty, trajectories in difficulties.items():
|
691
|
+
if not trajectories:
|
692
|
+
continue
|
693
|
+
|
694
|
+
num_trajectories = len(trajectories)
|
695
|
+
row = {"Model": model_name, "Difficulty": difficulty}
|
696
|
+
|
697
|
+
# Count achievements
|
698
|
+
achievement_counts = defaultdict(int)
|
699
|
+
for traj in trajectories:
|
700
|
+
for ach in traj.achievements_unlocked:
|
701
|
+
achievement_counts[ach] += 1
|
702
|
+
|
703
|
+
# Add percentage for each achievement
|
704
|
+
for achievement in ALL_ACHIEVEMENTS:
|
705
|
+
count = achievement_counts[achievement]
|
706
|
+
percentage = count / num_trajectories if num_trajectories > 0 else 0.0
|
707
|
+
row[achievement] = f"{percentage:.1%}"
|
708
|
+
|
709
|
+
data.append(row)
|
710
|
+
|
711
|
+
df = pd.DataFrame(data)
|
712
|
+
|
713
|
+
# Reorder columns: Model, Difficulty, then achievements by category
|
714
|
+
base_cols = ["Model", "Difficulty"]
|
715
|
+
achievement_cols = []
|
716
|
+
for category in ["easy", "medium", "hard"]:
|
717
|
+
for ach in ACHIEVEMENT_CATEGORIES[category]:
|
718
|
+
if ach in df.columns:
|
719
|
+
achievement_cols.append(ach)
|
720
|
+
|
721
|
+
return df[base_cols + achievement_cols]
|
722
|
+
|
723
|
+
def _generate_termination_breakdown_table(
|
724
|
+
self, aggregate_results: List[AggregateResults]
|
725
|
+
) -> pd.DataFrame:
|
726
|
+
"""Generate table showing termination reason percentages."""
|
727
|
+
|
728
|
+
data = []
|
729
|
+
for agg in aggregate_results:
|
730
|
+
row = {
|
731
|
+
"Model": agg.model_name,
|
732
|
+
"Difficulty": agg.difficulty,
|
733
|
+
}
|
734
|
+
|
735
|
+
for reason in TERMINATION_REASONS:
|
736
|
+
percentage = agg.termination_breakdown.get(reason, 0.0)
|
737
|
+
row[f"{reason.title()} %"] = f"{percentage:.1%}"
|
738
|
+
|
739
|
+
data.append(row)
|
740
|
+
|
741
|
+
return pd.DataFrame(data)
|
742
|
+
|
743
|
+
def _generate_multi_action_analysis(
|
744
|
+
self, aggregate_results: List[AggregateResults]
|
745
|
+
) -> Dict[str, pd.DataFrame]:
|
746
|
+
"""Generate analysis of multi-action tool calls."""
|
747
|
+
|
748
|
+
# Summary table
|
749
|
+
summary_data = []
|
750
|
+
for agg in aggregate_results:
|
751
|
+
summary_data.append(
|
752
|
+
{
|
753
|
+
"Model": agg.model_name,
|
754
|
+
"Difficulty": agg.difficulty,
|
755
|
+
"Avg Actions/Turn": f"{agg.avg_actions_per_turn_overall:.2f}",
|
756
|
+
"Most Common": max(
|
757
|
+
agg.actions_per_turn_distribution.items(), key=lambda x: x[1]
|
758
|
+
)[0]
|
759
|
+
if agg.actions_per_turn_distribution
|
760
|
+
else 0,
|
761
|
+
"Distribution": str(dict(sorted(agg.actions_per_turn_distribution.items()))),
|
762
|
+
}
|
763
|
+
)
|
764
|
+
|
765
|
+
summary_df = pd.DataFrame(summary_data)
|
766
|
+
|
767
|
+
# Detailed distribution table
|
768
|
+
all_action_counts = set()
|
769
|
+
for agg in aggregate_results:
|
770
|
+
all_action_counts.update(agg.actions_per_turn_distribution.keys())
|
771
|
+
|
772
|
+
dist_data = []
|
773
|
+
for agg in aggregate_results:
|
774
|
+
row = {"Model": agg.model_name, "Difficulty": agg.difficulty}
|
775
|
+
total_turns = sum(agg.actions_per_turn_distribution.values())
|
776
|
+
|
777
|
+
for count in sorted(all_action_counts):
|
778
|
+
turns_with_count = agg.actions_per_turn_distribution.get(count, 0)
|
779
|
+
percentage = turns_with_count / total_turns if total_turns > 0 else 0.0
|
780
|
+
row[f"{count} Actions"] = f"{percentage:.1%}"
|
781
|
+
|
782
|
+
dist_data.append(row)
|
783
|
+
|
784
|
+
distribution_df = pd.DataFrame(dist_data)
|
785
|
+
|
786
|
+
return {"summary": summary_df, "distribution": distribution_df}
|
787
|
+
|
788
|
+
def _generate_trajectory_breakdown(self) -> pd.DataFrame:
|
789
|
+
"""Generate detailed trajectory-by-trajectory breakdown."""
|
790
|
+
|
791
|
+
data = []
|
792
|
+
for traj in self.trajectory_results:
|
793
|
+
# Achievement category breakdown
|
794
|
+
easy_achievements = len(
|
795
|
+
[a for a in traj.achievements_unlocked if a in ACHIEVEMENT_CATEGORIES["easy"]]
|
796
|
+
)
|
797
|
+
medium_achievements = len(
|
798
|
+
[a for a in traj.achievements_unlocked if a in ACHIEVEMENT_CATEGORIES["medium"]]
|
799
|
+
)
|
800
|
+
hard_achievements = len(
|
801
|
+
[a for a in traj.achievements_unlocked if a in ACHIEVEMENT_CATEGORIES["hard"]]
|
802
|
+
)
|
803
|
+
|
804
|
+
data.append(
|
805
|
+
{
|
806
|
+
"Trajectory ID": traj.trajectory_id[:8], # Short ID
|
807
|
+
"Model": traj.model_name,
|
808
|
+
"Difficulty": traj.difficulty,
|
809
|
+
"Seed": traj.seed,
|
810
|
+
"Success": "✓" if traj.success else "✗",
|
811
|
+
"Steps": traj.total_steps,
|
812
|
+
"Turns": traj.total_turns,
|
813
|
+
"Reward": f"{traj.total_reward:.3f}",
|
814
|
+
"Total Achievements": len(traj.achievements_unlocked),
|
815
|
+
"Easy": easy_achievements,
|
816
|
+
"Medium": medium_achievements,
|
817
|
+
"Hard": hard_achievements,
|
818
|
+
"Avg Actions/Turn": f"{traj.avg_actions_per_turn:.1f}",
|
819
|
+
"Termination": traj.termination_reason,
|
820
|
+
"Final Health": traj.final_health,
|
821
|
+
"Achievements": ", ".join(sorted(traj.achievements_unlocked))
|
822
|
+
if traj.achievements_unlocked
|
823
|
+
else "None",
|
824
|
+
}
|
825
|
+
)
|
826
|
+
|
827
|
+
return pd.DataFrame(data)
|
828
|
+
|
829
|
+
def _generate_model_comparison_tables(
|
830
|
+
self, aggregate_results: List[AggregateResults]
|
831
|
+
) -> Dict[str, Any]:
|
832
|
+
"""Generate model-to-model comparison tables and deltas."""
|
833
|
+
|
834
|
+
if len(set(agg.model_name for agg in aggregate_results)) < 2:
|
835
|
+
return {"note": "Need at least 2 models for comparison"}
|
836
|
+
|
837
|
+
# Group by difficulty for comparison
|
838
|
+
by_difficulty = defaultdict(list)
|
839
|
+
for agg in aggregate_results:
|
840
|
+
by_difficulty[agg.difficulty].append(agg)
|
841
|
+
|
842
|
+
comparison_tables = {}
|
843
|
+
|
844
|
+
for difficulty, agg_list in by_difficulty.items():
|
845
|
+
if len(agg_list) < 2:
|
846
|
+
continue
|
847
|
+
|
848
|
+
# Sort by model name for consistent ordering
|
849
|
+
agg_list.sort(key=lambda x: x.model_name)
|
850
|
+
|
851
|
+
# Create comparison table
|
852
|
+
comparison_data = []
|
853
|
+
for agg in agg_list:
|
854
|
+
comparison_data.append(
|
855
|
+
{
|
856
|
+
"Model": agg.model_name,
|
857
|
+
"Success Rate": agg.success_rate,
|
858
|
+
"Avg Steps": agg.avg_total_steps,
|
859
|
+
"Avg Achievements": agg.avg_achievements_per_trajectory,
|
860
|
+
"Avg Actions/Turn": agg.avg_actions_per_turn_overall,
|
861
|
+
}
|
862
|
+
)
|
863
|
+
|
864
|
+
comparison_df = pd.DataFrame(comparison_data)
|
865
|
+
|
866
|
+
# Create delta table (difference from first model)
|
867
|
+
if len(agg_list) > 1:
|
868
|
+
baseline = agg_list[0]
|
869
|
+
delta_data = []
|
870
|
+
|
871
|
+
for agg in agg_list[1:]:
|
872
|
+
delta_data.append(
|
873
|
+
{
|
874
|
+
"Model vs Baseline": f"{agg.model_name} vs {baseline.model_name}",
|
875
|
+
"Success Rate Δ": f"{agg.success_rate - baseline.success_rate:+.1%}",
|
876
|
+
"Avg Steps Δ": f"{agg.avg_total_steps - baseline.avg_total_steps:+.1f}",
|
877
|
+
"Avg Achievements Δ": f"{agg.avg_achievements_per_trajectory - baseline.avg_achievements_per_trajectory:+.2f}",
|
878
|
+
"Avg Actions/Turn Δ": f"{agg.avg_actions_per_turn_overall - baseline.avg_actions_per_turn_overall:+.2f}",
|
879
|
+
}
|
880
|
+
)
|
881
|
+
|
882
|
+
delta_df = pd.DataFrame(delta_data) if delta_data else None
|
883
|
+
else:
|
884
|
+
delta_df = None
|
885
|
+
|
886
|
+
comparison_tables[difficulty] = {
|
887
|
+
"comparison": comparison_df,
|
888
|
+
"deltas": delta_df,
|
889
|
+
}
|
890
|
+
|
891
|
+
return comparison_tables
|
892
|
+
|
893
|
+
def _generate_achievement_summary_table(
    self, grouped_results: Dict[str, Dict[str, List[TrajectoryResult]]]
) -> pd.DataFrame:
    """Build a vertical achievement summary table that's easier to read.

    One row per achievement; one column per (model, difficulty) condition,
    formatted as "unlocked/total (percentage)".
    """
    rows = []

    for category_name, achievements in ACHIEVEMENT_CATEGORIES.items():
        for achievement in achievements:
            entry = {
                "Category": category_name.capitalize(),
                "Achievement": achievement.replace("_", " ").title(),
            }

            # One column for every condition that actually has trajectories.
            for model_name, difficulties in grouped_results.items():
                for difficulty, trajectories in difficulties.items():
                    if not trajectories:
                        continue

                    total = len(trajectories)
                    hits = sum(
                        1 for traj in trajectories
                        if achievement in traj.achievements_unlocked
                    )
                    rate = hits / total if total > 0 else 0.0
                    entry[f"{model_name} ({difficulty})"] = f"{hits}/{total} ({rate:.1%})"

            rows.append(entry)

    return pd.DataFrame(rows)
|
926
|
+
|
927
|
+
def _generate_sota_comparison(
    self, aggregate_results: List[AggregateResults]
) -> Dict[str, pd.DataFrame]:
    """Generate comparison tables with SOTA benchmarks, separating Hafner and BALROG methodologies.

    Returns a dict with:
        - "our_hafner_results": DataFrame of this run's Hafner scores.
        - "our_balrog_results": DataFrame of this run's BALROG scores, plus
          %-of-SOTA columns when the model maps to a leaderboard entry.
        - "balrog_nearby_comparisons": one DataFrame per evaluated model,
          placing its average BALROG score among its nearest leaderboard
          neighbors (up to 2 above and 2 below).
        - "methodology_note": warning string that the two score types are
          not directly comparable.
    """

    # ⚠️ CRITICAL: Hafner and BALROG scores use different methodologies and are NOT comparable!

    # Create our results table for both methodologies
    our_hafner_data = []
    our_balrog_data = []

    for agg in aggregate_results:
        # Hafner results
        hafner_row = {
            "System": f"{agg.model_name} (multi-action)",
            "Hafner Score": f"{agg.hafner_score:.1f}%",
            "Category": "Current Evaluation (Hafner)",
        }
        our_hafner_data.append(hafner_row)

        # BALROG results
        balrog_row = {
            "System": f"{agg.model_name} (multi-action)",
            "BALROG Score (Avg)": f"{agg.balrog_score_avg:.1f}%",
            "BALROG Score (Best)": f"{agg.balrog_score_best:.1f}%",
            "Category": "Current Evaluation (BALROG)",
        }

        # Add percentage comparison to BALROG SOTA if we can map the model name.
        # NOTE(review): assumes BALROG_SOTA_SCORES["balrog_leaderboard"] maps
        # system name -> percentage score (module-level constant) — confirm there.
        if agg.model_name in MODEL_NAME_TO_SOTA:
            sota_name = MODEL_NAME_TO_SOTA[agg.model_name]
            if sota_name in BALROG_SOTA_SCORES["balrog_leaderboard"]:
                balrog_sota_score = BALROG_SOTA_SCORES["balrog_leaderboard"][sota_name]
                percentage_of_balrog_sota_avg = (agg.balrog_score_avg / balrog_sota_score) * 100
                percentage_of_balrog_sota_best = (
                    agg.balrog_score_best / balrog_sota_score
                ) * 100
                balrog_row["% of BALROG SOTA (Avg)"] = f"{percentage_of_balrog_sota_avg:.1f}%"
                balrog_row["% of BALROG SOTA (Best)"] = f"{percentage_of_balrog_sota_best:.1f}%"
                balrog_row["BALROG SOTA Reference"] = f"{sota_name} ({balrog_sota_score:.1f}%)"

        our_balrog_data.append(balrog_row)

    our_hafner_df = pd.DataFrame(our_hafner_data)
    our_balrog_df = pd.DataFrame(our_balrog_data)

    # Create nearby comparisons for BALROG methodology only (since that's what we can compare to)
    balrog_nearby_comparisons = []
    all_balrog_scores = []

    # Add BALROG leaderboard scores
    for system, score in BALROG_SOTA_SCORES["balrog_leaderboard"].items():
        all_balrog_scores.append(
            {"System": system, "Score": score, "Category": "BALROG Leaderboard"}
        )

    # Sort BALROG scores descending — the insertion scan below relies on this order.
    all_balrog_scores.sort(key=lambda x: x["Score"], reverse=True)

    # For each of our models, find nearby BALROG scores
    for agg in aggregate_results:
        # Use average BALROG score for comparison
        model_balrog_score = agg.balrog_score_avg

        # Find the position where this model would rank: first index whose
        # score it beats, or len(list) when it trails every entry.
        insert_pos = 0
        for i, sota_entry in enumerate(all_balrog_scores):
            if model_balrog_score > sota_entry["Score"]:
                insert_pos = i
                break
            insert_pos = i + 1

        # Get 2 scores above and 2 scores below (if available)
        start_idx = max(0, insert_pos - 2)
        end_idx = min(len(all_balrog_scores), insert_pos + 3)

        nearby_scores = all_balrog_scores[start_idx:end_idx]

        # Create comparison table for this model
        comparison_data = []

        # Add scores above (entries ranked better than ours)
        for sota_entry in nearby_scores[: insert_pos - start_idx]:
            comparison_data.append(
                {
                    "System": sota_entry["System"],
                    "BALROG Score": f"{sota_entry['Score']:.1f}%",
                    "Category": sota_entry["Category"],
                }
            )

        # Add our model
        row = {
            "System": f"{agg.model_name} (multi-action)",
            "BALROG Score": f"{agg.balrog_score_avg:.1f}%",
            "Category": "Current Evaluation",
        }

        # Add percentage of BALROG SOTA if we can map the model name
        if agg.model_name in MODEL_NAME_TO_SOTA:
            sota_name = MODEL_NAME_TO_SOTA[agg.model_name]
            if sota_name in BALROG_SOTA_SCORES["balrog_leaderboard"]:
                balrog_sota_score = BALROG_SOTA_SCORES["balrog_leaderboard"][sota_name]
                percentage_of_balrog_sota = (agg.balrog_score_avg / balrog_sota_score) * 100
                row["% of BALROG SOTA"] = f"{percentage_of_balrog_sota:.1f}%"
                row["BALROG SOTA Reference"] = f"{sota_name} ({balrog_sota_score:.1f}%)"

        comparison_data.append(row)

        # Add scores below (entries ranked worse than ours)
        for sota_entry in nearby_scores[insert_pos - start_idx :]:
            comparison_data.append(
                {
                    "System": sota_entry["System"],
                    "BALROG Score": f"{sota_entry['Score']:.1f}%",
                    "Category": sota_entry["Category"],
                }
            )

        balrog_nearby_comparisons.append(
            {"model": agg.model_name, "comparison": pd.DataFrame(comparison_data)}
        )

    return {
        "our_hafner_results": our_hafner_df,
        "our_balrog_results": our_balrog_df,
        "balrog_nearby_comparisons": balrog_nearby_comparisons,
        "methodology_note": "⚠️ CRITICAL: Hafner scores (log-adjusted multi-episode) and BALROG scores (simple single-episode percentage) use different methodologies and are NOT directly comparable!",
    }
|
1056
|
+
|
1057
|
+
def print_report(self, report: Dict[str, Any]):
    """Print a formatted evaluation report to stdout.

    Expects `report` to be the dict produced by
    _generate_comprehensive_report(); reads self.trajectory_results to
    rebuild the per-condition grouping for the achievement summary.
    """

    print("\n" + "=" * 80)
    print("🎯 CRAFTER EVALUATION REPORT")
    print("=" * 80)

    # Summary table
    print("\n📊 EVALUATION SUMMARY")
    summary_df = report["evaluation_summary"]
    # Clean formatting for summary table. rename() returns a new frame, so
    # the DataFrame stored in `report` is left untouched.
    for col in summary_df.columns:
        if len(col) > 12:  # Truncate long column names
            summary_df = summary_df.rename(columns={col: col[:12]})
    print(summary_df.to_string(index=False, max_colwidth=12))

    # Create and show vertical achievement table
    print("\n🏆 ACHIEVEMENT UNLOCK RATES")
    print("Format: unlocked/total (percentage)")

    # Group results for achievement summary
    grouped_results = defaultdict(lambda: defaultdict(list))
    for traj in self.trajectory_results:
        grouped_results[traj.model_name][traj.difficulty].append(traj)

    achievement_summary = self._generate_achievement_summary_table(grouped_results)

    # Print by category for better readability
    for category in ["Easy", "Medium", "Hard"]:
        category_data = achievement_summary[achievement_summary["Category"] == category]
        if not category_data.empty:
            print(f"\n{category.upper()} ACHIEVEMENTS:")
            category_display = category_data.drop("Category", axis=1)
            print(category_display.to_string(index=False))

    # # Termination breakdown
    # print("\n⚰️ TERMINATION BREAKDOWN")
    # print(report["termination_breakdown_table"].to_string(index=False))

    # # Multi-action analysis
    # print("\n⚡ MULTI-ACTION ANALYSIS")
    # multi_action = report["multi_action_analysis"]

    # # Clean summary table
    # summary_clean = multi_action["summary"].copy()
    # summary_clean = summary_clean.drop(columns=["Distribution"], errors='ignore')  # Remove cluttered distribution column
    # print("Summary:")
    # print(summary_clean.to_string(index=False, max_colwidth=15))

    # # Show distribution in cleaner format
    # print("\nAction Count Distribution:")
    # dist_clean = multi_action["distribution"].copy()
    # # Only show columns with meaningful data
    # cols_to_show = ["Model", "Difficulty"] + [col for col in dist_clean.columns if "Actions" in col and not dist_clean[col].str.contains("0.0%").all()]
    # if len(cols_to_show) > 8:  # Limit to prevent overflow
    #     cols_to_show = cols_to_show[:8]
    # print(dist_clean[cols_to_show].to_string(index=False, max_colwidth=10))

    # Model comparisons — skipped when the generator returned its
    # "need at least 2 models" note instead of per-difficulty tables.
    if "note" not in report["model_comparison_tables"]:
        print("\n🔄 MODEL COMPARISONS")
        for difficulty, tables in report["model_comparison_tables"].items():
            print(f"\n{difficulty.upper()} Difficulty:")
            print(tables["comparison"].to_string(index=False))
            if tables["deltas"] is not None:
                print(f"\nDeltas vs Baseline:")
                print(tables["deltas"].to_string(index=False))

    # Trajectory breakdown (summary stats only for space)
    traj_df = report["trajectory_by_trajectory_breakdown"]
    print(f"\n📋 TRAJECTORY BREAKDOWN ({len(traj_df)} total trajectories)")
    print("Sample trajectories:")
    sample_cols = [
        "Model",
        "Difficulty",
        "Success",
        "Steps",
        "Total Achievements",
        "Termination",
    ]
    sample_df = traj_df[sample_cols].head(5)  # Show fewer rows for cleaner display
    print(sample_df.to_string(index=False, max_colwidth=12))
    if len(traj_df) > 5:
        print(f"... and {len(traj_df) - 5} more trajectories")

    # SOTA comparison
    sota_comparison = report["sota_comparison"]
    print("\n🏆 SOTA COMPARISON")
    print(sota_comparison["methodology_note"])

    print("\n📊 HAFNER METHODOLOGY RESULTS (Multi-episode log-adjusted)")
    hafner_df = sota_comparison["our_hafner_results"]
    print(hafner_df.to_string(index=False, max_colwidth=20))

    print("\n📊 BALROG METHODOLOGY RESULTS (Single-episode percentage)")
    balrog_df = sota_comparison["our_balrog_results"]
    # Clean up column names for better display (operate on a copy so the
    # report dict's DataFrame is not mutated).
    balrog_clean = balrog_df.copy()
    if "% of BALROG SOTA (Avg)" in balrog_clean.columns:
        balrog_clean = balrog_clean.rename(columns={"% of BALROG SOTA (Avg)": "% SOTA Avg"})
    if "% of BALROG SOTA (Best)" in balrog_clean.columns:
        balrog_clean = balrog_clean.rename(columns={"% of BALROG SOTA (Best)": "% SOTA Best"})
    print(balrog_clean.to_string(index=False, max_colwidth=20))

    print("\n🎯 BALROG vs Nearby SOTA Benchmarks (Apples-to-Apples)")
    for comparison in sota_comparison["balrog_nearby_comparisons"]:
        print(f"\n{comparison['model']} vs Nearby BALROG Scores:")
        comp_df = comparison["comparison"]
        # Clean up long reference columns
        comp_clean = comp_df.copy()
        if "BALROG SOTA Reference" in comp_clean.columns:
            comp_clean = comp_clean.drop(
                columns=["BALROG SOTA Reference"]
            )  # Too long for display
        print(comp_clean.to_string(index=False, max_colwidth=18))

    print("\n" + "=" * 80)
|
1174
|
+
|
1175
|
+
|
1176
|
+
# Convenience function for quick evaluations
|
1177
|
+
async def run_crafter_eval(
    model_names: List[str],
    difficulties: "Optional[List[str]]" = None,
    num_trajectories: int = 3,
    max_turns: int = 30,
) -> Dict[str, Any]:
    """Quick evaluation runner with automatic report generation.

    Args:
        model_names: Models to evaluate.
        difficulties: Difficulty settings; defaults to ["easy", "hard"].
        num_trajectories: Trajectories per (model, difficulty) condition.
        max_turns: Turn cap per trajectory.

    Returns:
        The report dict from CrafterEvalFramework.run_evaluation, after
        printing it via framework.print_report.
    """
    # Fix: replace the shared mutable list default with a None sentinel.
    if difficulties is None:
        difficulties = ["easy", "hard"]

    framework = CrafterEvalFramework()
    report = await framework.run_evaluation(
        model_names=model_names,
        difficulties=difficulties,
        num_trajectories_per_condition=num_trajectories,
        max_turns=max_turns,
    )

    framework.print_report(report)
    return report
|