synth-ai 0.2.0__py3-none-any.whl → 0.2.1.dev0__py3-none-any.whl
This diff shows the content changes between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- synth_ai/__init__.py +28 -2
- synth_ai/core/system.py +4 -0
- synth_ai/environments/__init__.py +35 -0
- synth_ai/environments/environment/__init__.py +1 -0
- synth_ai/environments/environment/artifacts/__init__.py +1 -0
- synth_ai/environments/environment/artifacts/base.py +50 -0
- synth_ai/environments/environment/core.py +22 -0
- synth_ai/environments/environment/db/__init__.py +1 -0
- synth_ai/environments/environment/db/sqlite.py +45 -0
- synth_ai/environments/environment/registry.py +24 -0
- synth_ai/environments/environment/resources/sqlite.py +46 -0
- synth_ai/environments/environment/results.py +1 -0
- synth_ai/environments/environment/rewards/__init__.py +1 -0
- synth_ai/environments/environment/rewards/core.py +28 -0
- synth_ai/environments/environment/shared_engine.py +26 -0
- synth_ai/environments/environment/tools/__init__.py +34 -0
- synth_ai/environments/examples/__init__.py +1 -0
- synth_ai/environments/examples/crafter_classic/__init__.py +8 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_comprehensive_evaluation.py +58 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_browser.py +152 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_framework.py +1194 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_quick_evaluation.py +51 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_react_agent.py +872 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_trace_evaluation.py +1412 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/test_crafter_react_agent.py +1110 -0
- synth_ai/environments/examples/crafter_classic/config_logging.py +111 -0
- synth_ai/environments/examples/crafter_classic/engine.py +502 -0
- synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +63 -0
- synth_ai/environments/examples/crafter_classic/engine_helpers/action_map.py +5 -0
- synth_ai/environments/examples/crafter_classic/engine_helpers/serialization.py +74 -0
- synth_ai/environments/examples/crafter_classic/environment.py +255 -0
- synth_ai/environments/examples/crafter_classic/taskset.py +228 -0
- synth_ai/environments/examples/enron/agent_demos/test_synth_react.py +535 -0
- synth_ai/environments/examples/enron/art_helpers/email_search_tools.py +156 -0
- synth_ai/environments/examples/enron/art_helpers/local_email_db.py +280 -0
- synth_ai/environments/examples/enron/art_helpers/types_enron.py +24 -0
- synth_ai/environments/examples/enron/engine.py +291 -0
- synth_ai/environments/examples/enron/environment.py +165 -0
- synth_ai/environments/examples/enron/taskset.py +112 -0
- synth_ai/environments/examples/enron/units/keyword_stats.py +111 -0
- synth_ai/environments/examples/enron/units/test_email_index.py +8 -0
- synth_ai/environments/examples/minigrid/__init__.py +48 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_evaluation_framework.py +1188 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_quick_evaluation.py +47 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_react_agent.py +562 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_trace_evaluation.py +220 -0
- synth_ai/environments/examples/minigrid/agent_demos/test_minigrid_react_agent.py +393 -0
- synth_ai/environments/examples/minigrid/engine.py +589 -0
- synth_ai/environments/examples/minigrid/environment.py +274 -0
- synth_ai/environments/examples/minigrid/environment_mapping.py +242 -0
- synth_ai/environments/examples/minigrid/puzzle_loader.py +416 -0
- synth_ai/environments/examples/minigrid/taskset.py +583 -0
- synth_ai/environments/examples/minigrid/units/test_action_behavior.py +226 -0
- synth_ai/environments/examples/minigrid/units/test_debug_messages.py +83 -0
- synth_ai/environments/examples/minigrid/units/test_exploration.py +120 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_engine.py +214 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_environment.py +238 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_environment_mapping.py +301 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_taskset.py +210 -0
- synth_ai/environments/examples/nethack/__init__.py +7 -0
- synth_ai/environments/examples/nethack/achievements.py +337 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_evaluation_framework.py +981 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_quick_evaluation.py +74 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_react_agent.py +832 -0
- synth_ai/environments/examples/nethack/agent_demos/test_nethack_react_agent.py +1112 -0
- synth_ai/environments/examples/nethack/engine.py +738 -0
- synth_ai/environments/examples/nethack/environment.py +255 -0
- synth_ai/environments/examples/nethack/helpers/__init__.py +42 -0
- synth_ai/environments/examples/nethack/helpers/action_mapping.py +301 -0
- synth_ai/environments/examples/nethack/helpers/nle_wrapper.py +401 -0
- synth_ai/environments/examples/nethack/helpers/observation_utils.py +433 -0
- synth_ai/environments/examples/nethack/helpers/recording_wrapper.py +201 -0
- synth_ai/environments/examples/nethack/helpers/trajectory_recorder.py +268 -0
- synth_ai/environments/examples/nethack/helpers/visualization/replay_viewer.py +308 -0
- synth_ai/environments/examples/nethack/helpers/visualization/visualizer.py +430 -0
- synth_ai/environments/examples/nethack/taskset.py +323 -0
- synth_ai/environments/examples/nethack/units/test_nethack_engine.py +277 -0
- synth_ai/environments/examples/nethack/units/test_nethack_environment.py +281 -0
- synth_ai/environments/examples/nethack/units/test_nethack_taskset.py +213 -0
- synth_ai/environments/examples/nethack/units/test_recording.py +307 -0
- synth_ai/environments/examples/red/__init__.py +7 -0
- synth_ai/environments/examples/red/agent_demos/__init__.py +1 -0
- synth_ai/environments/examples/red/agent_demos/test_synth_react.py +1471 -0
- synth_ai/environments/examples/red/config_logging.py +110 -0
- synth_ai/environments/examples/red/engine.py +693 -0
- synth_ai/environments/examples/red/engine_helpers/__init__.py +1 -0
- synth_ai/environments/examples/red/engine_helpers/memory_map.py +28 -0
- synth_ai/environments/examples/red/engine_helpers/reward_components.py +275 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/__init__.py +142 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/adaptive_rewards.py +56 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/battle_rewards.py +283 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/composite_rewards.py +149 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/economy_rewards.py +137 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/efficiency_rewards.py +56 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/exploration_rewards.py +330 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/novelty_rewards.py +120 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/pallet_town_rewards.py +558 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/pokemon_rewards.py +312 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/social_rewards.py +147 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/story_rewards.py +246 -0
- synth_ai/environments/examples/red/engine_helpers/screen_analysis.py +367 -0
- synth_ai/environments/examples/red/engine_helpers/state_extraction.py +139 -0
- synth_ai/environments/examples/red/environment.py +235 -0
- synth_ai/environments/examples/red/taskset.py +77 -0
- synth_ai/environments/examples/red/test_fixes.py +125 -0
- synth_ai/environments/examples/red/test_fixes_mock.py +148 -0
- synth_ai/environments/examples/red/units/__init__.py +1 -0
- synth_ai/environments/examples/red/units/test_basic_functionality.py +97 -0
- synth_ai/environments/examples/red/units/test_button_press_requirements.py +217 -0
- synth_ai/environments/examples/red/units/test_engine.py +192 -0
- synth_ai/environments/examples/red/units/test_environment.py +455 -0
- synth_ai/environments/examples/red/units/test_exploration_strategy.py +227 -0
- synth_ai/environments/examples/red/units/test_integration.py +217 -0
- synth_ai/environments/examples/red/units/test_memory_extraction.py +111 -0
- synth_ai/environments/examples/red/units/test_menu_bug_reproduction.py +1100 -0
- synth_ai/environments/examples/red/units/test_movement_debug.py +255 -0
- synth_ai/environments/examples/red/units/test_pokemon_mcts_debug.py +163 -0
- synth_ai/environments/examples/red/units/test_pokemon_mcts_verbose.py +117 -0
- synth_ai/environments/examples/red/units/test_red_basic.py +145 -0
- synth_ai/environments/examples/red/units/test_red_comprehensive.py +323 -0
- synth_ai/environments/examples/red/units/test_retry_movement.py +195 -0
- synth_ai/environments/examples/red/units/test_reward_components.py +186 -0
- synth_ai/environments/examples/red/units/test_rom_integration.py +260 -0
- synth_ai/environments/examples/red/units/test_taskset.py +116 -0
- synth_ai/environments/examples/red/units/test_tree.py +448 -0
- synth_ai/environments/examples/sokoban/__init__.py +1 -0
- synth_ai/environments/examples/sokoban/agent_demos/sokoban_full_eval.py +900 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_dspy_react.py +1 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_sokoban_react_agent.py +498 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_synth_lats.py +1 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_synth_react_locally.py +748 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_synth_react_service.py +296 -0
- synth_ai/environments/examples/sokoban/engine.py +675 -0
- synth_ai/environments/examples/sokoban/engine_helpers/__init__.py +1 -0
- synth_ai/environments/examples/sokoban/engine_helpers/room_utils.py +656 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/__init__.py +17 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/__init__.py +3 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/boxoban_env.py +129 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/render_utils.py +370 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/room_utils.py +331 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env.py +305 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_fixed_targets.py +66 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_pull.py +114 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_two_player.py +122 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_variations.py +394 -0
- synth_ai/environments/examples/sokoban/environment.py +228 -0
- synth_ai/environments/examples/sokoban/generate_verified_puzzles.py +438 -0
- synth_ai/environments/examples/sokoban/puzzle_loader.py +311 -0
- synth_ai/environments/examples/sokoban/taskset.py +425 -0
- synth_ai/environments/examples/sokoban/units/astar_common.py +94 -0
- synth_ai/environments/examples/sokoban/units/test_building_task_set.py +49 -0
- synth_ai/environments/examples/sokoban/units/test_false_positive.py +120 -0
- synth_ai/environments/examples/sokoban/units/test_simple_run_through_environment.py +119 -0
- synth_ai/environments/examples/sokoban/units/test_sokoban_environment.py +98 -0
- synth_ai/environments/examples/sokoban/units/test_tree.py +364 -0
- synth_ai/environments/examples/tictactoe/__init__.py +1 -0
- synth_ai/environments/examples/tictactoe/agent_demos/test_synth_react.py +266 -0
- synth_ai/environments/examples/tictactoe/agent_demos/test_tictactoe_react_agent.py +470 -0
- synth_ai/environments/examples/tictactoe/engine.py +368 -0
- synth_ai/environments/examples/tictactoe/environment.py +239 -0
- synth_ai/environments/examples/tictactoe/taskset.py +214 -0
- synth_ai/environments/examples/tictactoe/units/test_tictactoe_engine.py +393 -0
- synth_ai/environments/examples/tictactoe/units/test_tictactoe_environment.py +493 -0
- synth_ai/environments/examples/tictactoe/units/test_tictactoe_taskset.py +191 -0
- synth_ai/environments/examples/verilog/__init__.py +10 -0
- synth_ai/environments/examples/verilog/agent_demos/test_synth_react.py +520 -0
- synth_ai/environments/examples/verilog/engine.py +328 -0
- synth_ai/environments/examples/verilog/environment.py +349 -0
- synth_ai/environments/examples/verilog/taskset.py +418 -0
- synth_ai/environments/examples/verilog/units/test_verilog_engine.py +466 -0
- synth_ai/environments/examples/verilog/units/test_verilog_environment.py +585 -0
- synth_ai/environments/examples/verilog/units/test_verilog_integration.py +383 -0
- synth_ai/environments/examples/verilog/units/test_verilog_taskset.py +457 -0
- synth_ai/environments/reproducibility/core.py +42 -0
- synth_ai/environments/reproducibility/tree.py +364 -0
- synth_ai/environments/service/app.py +78 -0
- synth_ai/environments/service/core_routes.py +775 -0
- synth_ai/environments/service/external_registry.py +57 -0
- synth_ai/environments/service/registry.py +9 -0
- synth_ai/environments/stateful/__init__.py +1 -0
- synth_ai/environments/stateful/core.py +28 -0
- synth_ai/environments/stateful/engine.py +21 -0
- synth_ai/environments/stateful/state.py +7 -0
- synth_ai/environments/tasks/api.py +19 -0
- synth_ai/environments/tasks/core.py +78 -0
- synth_ai/environments/tasks/filters.py +39 -0
- synth_ai/environments/tasks/utils.py +89 -0
- synth_ai/environments/v0_observability/history.py +3 -0
- synth_ai/environments/v0_observability/log.py +2 -0
- synth_ai/lm/caching/constants.py +1 -0
- synth_ai/{zyk/lms → lm}/caching/ephemeral.py +4 -8
- synth_ai/{zyk/lms → lm}/caching/handler.py +15 -15
- synth_ai/{zyk/lms → lm}/caching/initialize.py +2 -4
- synth_ai/{zyk/lms → lm}/caching/persistent.py +4 -10
- synth_ai/{zyk/lms → lm}/config.py +2 -1
- synth_ai/{zyk/lms → lm}/constants.py +2 -2
- synth_ai/{zyk/lms → lm}/core/all.py +10 -10
- synth_ai/{zyk/lms → lm}/core/main.py +57 -33
- synth_ai/{zyk/lms → lm}/core/vendor_clients.py +12 -10
- synth_ai/lm/cost/monitor.py +1 -0
- synth_ai/lm/cost/statefulness.py +1 -0
- synth_ai/lm/provider_support/__init__.py +8 -0
- synth_ai/lm/provider_support/anthropic.py +945 -0
- synth_ai/lm/provider_support/openai.py +1115 -0
- synth_ai/lm/provider_support/suppress_logging.py +31 -0
- synth_ai/{zyk/lms → lm}/structured_outputs/handler.py +58 -80
- synth_ai/{zyk/lms → lm}/structured_outputs/inject.py +6 -20
- synth_ai/{zyk/lms → lm}/structured_outputs/rehabilitate.py +6 -12
- synth_ai/{zyk/lms → lm}/vendors/core/anthropic_api.py +21 -30
- synth_ai/{zyk/lms → lm}/vendors/core/gemini_api.py +35 -32
- synth_ai/{zyk/lms → lm}/vendors/core/mistral_api.py +19 -28
- synth_ai/{zyk/lms → lm}/vendors/core/openai_api.py +26 -36
- synth_ai/{zyk/lms → lm}/vendors/openai_standard.py +29 -33
- synth_ai/{zyk/lms → lm}/vendors/retries.py +1 -1
- synth_ai/lm/vendors/supported/__init__.py +0 -0
- synth_ai/{zyk/lms → lm}/vendors/supported/custom_endpoint.py +131 -118
- synth_ai/{zyk/lms → lm}/vendors/supported/deepseek.py +4 -8
- synth_ai/{zyk/lms → lm}/vendors/supported/grok.py +6 -8
- synth_ai/{zyk/lms → lm}/vendors/supported/groq.py +1 -1
- synth_ai/{zyk/lms → lm}/vendors/supported/ollama.py +2 -2
- synth_ai/{zyk/lms → lm}/vendors/supported/openrouter.py +18 -16
- synth_ai/{zyk/lms → lm}/vendors/supported/together.py +1 -1
- synth_ai/tracing/__init__.py +0 -0
- synth_ai/tracing/abstractions.py +224 -0
- synth_ai/tracing/base_client.py +91 -0
- synth_ai/tracing/client_manager.py +131 -0
- synth_ai/tracing/config.py +140 -0
- synth_ai/tracing/context.py +146 -0
- synth_ai/tracing/decorators.py +679 -0
- synth_ai/tracing/events/__init__.py +0 -0
- synth_ai/tracing/events/manage.py +147 -0
- synth_ai/tracing/events/scope.py +86 -0
- synth_ai/tracing/events/store.py +227 -0
- synth_ai/tracing/immediate_client.py +152 -0
- synth_ai/tracing/local.py +18 -0
- synth_ai/tracing/log_client_base.py +74 -0
- synth_ai/tracing/retry_queue.py +187 -0
- synth_ai/tracing/trackers.py +515 -0
- synth_ai/tracing/upload.py +504 -0
- synth_ai/tracing/utils.py +9 -0
- synth_ai/zyk/__init__.py +28 -2
- synth_ai-0.2.1.dev0.dist-info/METADATA +349 -0
- synth_ai-0.2.1.dev0.dist-info/RECORD +261 -0
- {synth_ai-0.2.0.dist-info → synth_ai-0.2.1.dev0.dist-info}/WHEEL +1 -1
- synth_ai/zyk/lms/caching/constants.py +0 -1
- synth_ai/zyk/lms/cost/monitor.py +0 -1
- synth_ai/zyk/lms/cost/statefulness.py +0 -1
- synth_ai-0.2.0.dist-info/METADATA +0 -36
- synth_ai-0.2.0.dist-info/RECORD +0 -50
- /synth_ai/{zyk/lms/__init__.py → environments/reproducibility/helpers.py} +0 -0
- /synth_ai/{zyk/lms/caching → lm}/__init__.py +0 -0
- /synth_ai/{zyk/lms/core → lm/caching}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/caching/dbs.py +0 -0
- /synth_ai/{zyk/lms/cost → lm/core}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/core/exceptions.py +0 -0
- /synth_ai/{zyk/lms/structured_outputs → lm/cost}/__init__.py +0 -0
- /synth_ai/{zyk/lms/vendors → lm/structured_outputs}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/tools/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/tools/base.py +0 -0
- /synth_ai/{zyk/lms/vendors/core → lm/vendors}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/vendors/base.py +0 -0
- /synth_ai/{zyk/lms/vendors/local → lm/vendors/core}/__init__.py +0 -0
- /synth_ai/{zyk/lms/vendors/supported → lm/vendors/local}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/vendors/local/ollama.py +0 -0
- {synth_ai-0.2.0.dist-info → synth_ai-0.2.1.dev0.dist-info/licenses}/LICENSE +0 -0
- {synth_ai-0.2.0.dist-info → synth_ai-0.2.1.dev0.dist-info}/top_level.txt +0 -0
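Most of the per-file entries above are a package rename that moves the LM client code from `synth_ai/zyk/lms/` to `synth_ai/lm/`, with `synth_ai/__init__.py` and `synth_ai/zyk/__init__.py` both updated in the same release; the `synth_ai/environments/`, `synth_ai/tracing/`, and `synth_ai/lm/provider_support/` trees are new additions rather than renames. A minimal sketch of what the move looks like in import terms, using only module paths taken from the rename list; whether the old `synth_ai.zyk.lms` paths remain importable afterwards is not shown in this diff:

```python
# Sketch only: import paths implied by the rename list above (0.2.0 -> 0.2.1.dev0).
import synth_ai.lm.core.main                # was synth_ai.zyk.lms.core.main
import synth_ai.lm.caching.handler          # was synth_ai.zyk.lms.caching.handler
import synth_ai.lm.vendors.openai_standard  # was synth_ai.zyk.lms.vendors.openai_standard

# The newly added example code in this release still uses the zyk entry point:
from synth_ai.zyk import LM
```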
synth_ai/environments/examples/minigrid/agent_demos/minigrid_evaluation_framework.py
@@ -0,0 +1,1188 @@
|
|
1
|
+
"""
|
2
|
+
MiniGrid Evaluation Framework
|
3
|
+
Provides detailed metrics, trajectory analysis, and achievement statistics for MiniGrid environments.
|
4
|
+
"""
|
5
|
+
|
6
|
+
import asyncio
|
7
|
+
import json
|
8
|
+
import time
|
9
|
+
import math
|
10
|
+
from dataclasses import dataclass, asdict
|
11
|
+
from typing import Dict, List, Optional, Set, Tuple, Any
|
12
|
+
from collections import defaultdict
|
13
|
+
import uuid
|
14
|
+
import os
|
15
|
+
from pathlib import Path
|
16
|
+
|
17
|
+
import pandas as pd
|
18
|
+
from tqdm import tqdm
|
19
|
+
|
20
|
+
# Synth-SDK tracing imports
|
21
|
+
from synth_sdk.tracing.context import trace_context
|
22
|
+
from synth_sdk.tracing.events.store import event_store
|
23
|
+
|
24
|
+
# MiniGrid-specific achievements based on task complexity
|
25
|
+
MINIGRID_ACHIEVEMENTS = {
|
26
|
+
"basic": [
|
27
|
+
"reach_goal", # Complete any goal-reaching task
|
28
|
+
"first_pickup", # Pick up first object
|
29
|
+
"first_door_open", # Open first door
|
30
|
+
"first_key_use", # Use key to unlock door
|
31
|
+
"navigate_empty_room", # Complete Empty room tasks
|
32
|
+
"complete_5_tasks", # Complete 5 different tasks
|
33
|
+
],
|
34
|
+
"intermediate": [
|
35
|
+
"door_key_master", # Complete DoorKey tasks consistently
|
36
|
+
"multi_room_navigator", # Complete MultiRoom tasks
|
37
|
+
"unlock_pickup_combo", # Complete UnlockPickup tasks
|
38
|
+
"four_rooms_explorer", # Complete FourRooms tasks
|
39
|
+
"complete_20_tasks", # Complete 20 different tasks
|
40
|
+
"efficiency_expert", # Complete task in <50% of max steps
|
41
|
+
],
|
42
|
+
"advanced": [
|
43
|
+
"lava_crosser", # Complete LavaCrossing tasks
|
44
|
+
"large_room_master", # Complete 16x16+ room tasks
|
45
|
+
"complex_multi_room", # Complete N6+ MultiRoom tasks
|
46
|
+
"speed_runner", # Complete task in <25% of max steps
|
47
|
+
"complete_50_tasks", # Complete 50 different tasks
|
48
|
+
"perfect_navigator", # 90%+ success rate across all task types
|
49
|
+
],
|
50
|
+
}
|
51
|
+
|
52
|
+
ALL_ACHIEVEMENTS = [ach for category in MINIGRID_ACHIEVEMENTS.values() for ach in category]
|
53
|
+
|
54
|
+
TERMINATION_REASONS = [
|
55
|
+
"timeout",
|
56
|
+
"goal_reached",
|
57
|
+
"agent_quit",
|
58
|
+
"environment_error",
|
59
|
+
"lava_death",
|
60
|
+
]
|
61
|
+
|
62
|
+
# Task difficulty mapping
|
63
|
+
MINIGRID_DIFFICULTY_MAPPING = {
|
64
|
+
"easy": [
|
65
|
+
"MiniGrid-Empty-5x5-v0",
|
66
|
+
"MiniGrid-Empty-6x6-v0",
|
67
|
+
"MiniGrid-Empty-8x8-v0",
|
68
|
+
"MiniGrid-FourRooms-v0",
|
69
|
+
],
|
70
|
+
"medium": [
|
71
|
+
"MiniGrid-DoorKey-5x5-v0",
|
72
|
+
"MiniGrid-DoorKey-6x6-v0",
|
73
|
+
"MiniGrid-DoorKey-8x8-v0",
|
74
|
+
"MiniGrid-Unlock-v0",
|
75
|
+
"MiniGrid-UnlockPickup-v0",
|
76
|
+
],
|
77
|
+
"hard": [
|
78
|
+
"MiniGrid-DoorKey-16x16-v0",
|
79
|
+
"MiniGrid-MultiRoom-N2-S4-v0",
|
80
|
+
"MiniGrid-MultiRoom-N4-S5-v0",
|
81
|
+
"MiniGrid-MultiRoom-N6-v0",
|
82
|
+
"MiniGrid-LavaGapS5-v0",
|
83
|
+
"MiniGrid-LavaGapS6-v0",
|
84
|
+
"MiniGrid-LavaGapS7-v0",
|
85
|
+
"MiniGrid-LavaCrossingS9N1-v0",
|
86
|
+
"MiniGrid-LavaCrossingS9N2-v0",
|
87
|
+
"MiniGrid-LavaCrossingS9N3-v0",
|
88
|
+
],
|
89
|
+
}
|
90
|
+
|
91
|
+
|
92
|
+
def minigrid_composite_score(
|
93
|
+
achievements_unlocked: int,
|
94
|
+
task_completion_rate: float,
|
95
|
+
avg_efficiency: float,
|
96
|
+
exploration_coverage: float,
|
97
|
+
) -> float:
|
98
|
+
"""
|
99
|
+
MiniGrid composite scoring based on:
|
100
|
+
- Achievement unlocking (30%)
|
101
|
+
- Task completion rate (40%)
|
102
|
+
- Movement efficiency (20%)
|
103
|
+
- Exploration coverage (10%)
|
104
|
+
"""
|
105
|
+
achievement_score = (achievements_unlocked / len(ALL_ACHIEVEMENTS)) * 30
|
106
|
+
completion_score = task_completion_rate * 40
|
107
|
+
efficiency_score = avg_efficiency * 20
|
108
|
+
exploration_score = exploration_coverage * 10
|
109
|
+
return achievement_score + completion_score + efficiency_score + exploration_score
|
110
|
+
|
111
|
+
|
112
|
+
def minigrid_navigation_score(
|
113
|
+
success_rate: float, efficiency_ratio: float, wall_collision_rate: float
|
114
|
+
) -> float:
|
115
|
+
"""Navigation-specific score focusing on pathfinding ability."""
|
116
|
+
# Penalize wall collisions
|
117
|
+
collision_penalty = min(wall_collision_rate * 10, 20) # Cap at 20% penalty
|
118
|
+
base_score = (success_rate * 70) + (efficiency_ratio * 30)
|
119
|
+
return max(0, base_score - collision_penalty)
|
120
|
+
|
121
|
+
|
122
|
+
@dataclass
|
123
|
+
class MiniGridTrajectoryResult:
|
124
|
+
"""Results from a single MiniGrid trajectory/episode."""
|
125
|
+
|
126
|
+
trajectory_id: str
|
127
|
+
model_name: str
|
128
|
+
difficulty: str
|
129
|
+
task_type: str # "Empty", "DoorKey", "MultiRoom", etc.
|
130
|
+
seed: int
|
131
|
+
|
132
|
+
# Core metrics
|
133
|
+
success: bool
|
134
|
+
total_steps: int
|
135
|
+
total_turns: int # Number of agent decision turns
|
136
|
+
total_reward: float
|
137
|
+
|
138
|
+
# MiniGrid-specific fields
|
139
|
+
grid_size: Tuple[int, int] # (width, height)
|
140
|
+
steps_to_goal: int # Actual steps taken
|
141
|
+
optimal_steps: Optional[int] # Theoretical minimum steps
|
142
|
+
efficiency_ratio: float # optimal_steps / steps_to_goal (higher is better)
|
143
|
+
objects_interacted: List[str] # ["door", "key", "goal"]
|
144
|
+
rooms_visited: int # Number of different rooms visited
|
145
|
+
|
146
|
+
# Navigation metrics
|
147
|
+
backtrack_count: int # Number of revisited positions
|
148
|
+
wall_collision_count: int # Number of invalid moves
|
149
|
+
exploration_coverage: float # % of accessible area explored
|
150
|
+
|
151
|
+
# Achievement tracking
|
152
|
+
achievements_unlocked: Set[str]
|
153
|
+
achievement_turn_unlocked: Dict[str, int] # achievement -> turn when unlocked
|
154
|
+
|
155
|
+
# Multi-action metrics
|
156
|
+
actions_per_turn: List[int] # Number of actions per turn
|
157
|
+
avg_actions_per_turn: float
|
158
|
+
|
159
|
+
# Termination analysis
|
160
|
+
termination_reason: (
|
161
|
+
str # "timeout", "goal_reached", "agent_quit", "environment_error", "lava_death"
|
162
|
+
)
|
163
|
+
final_position: Optional[Tuple[int, int]]
|
164
|
+
final_direction: Optional[int]
|
165
|
+
|
166
|
+
# Trajectory data for detailed analysis
|
167
|
+
turn_by_turn_data: Optional[List[Dict[str, Any]]] = None
|
168
|
+
|
169
|
+
|
170
|
+
@dataclass
|
171
|
+
class MiniGridAggregateResults:
|
172
|
+
"""Aggregate results across multiple MiniGrid trajectories."""
|
173
|
+
|
174
|
+
model_name: str
|
175
|
+
difficulty: str
|
176
|
+
num_trajectories: int
|
177
|
+
|
178
|
+
# Success metrics
|
179
|
+
success_rate: float
|
180
|
+
avg_total_steps: float
|
181
|
+
avg_total_turns: float
|
182
|
+
avg_total_reward: float
|
183
|
+
|
184
|
+
# MiniGrid-specific metrics
|
185
|
+
task_completion_rates: Dict[str, float] # task_type -> completion rate
|
186
|
+
avg_efficiency_ratio: float
|
187
|
+
avg_exploration_coverage: float
|
188
|
+
avg_wall_collisions: float
|
189
|
+
avg_backtrack_count: float
|
190
|
+
|
191
|
+
# Achievement metrics
|
192
|
+
unique_achievements_unlocked: Set[str]
|
193
|
+
total_achievement_count: int
|
194
|
+
avg_achievements_per_trajectory: float
|
195
|
+
achievement_unlock_rates: Dict[str, float] # achievement -> % of trajectories that unlocked it
|
196
|
+
|
197
|
+
# MiniGrid-specific scores
|
198
|
+
composite_score_avg: float # Average composite score across trajectories
|
199
|
+
composite_score_best: float # Best single composite score
|
200
|
+
navigation_score_avg: float # Average navigation score
|
201
|
+
navigation_score_best: float # Best navigation score
|
202
|
+
|
203
|
+
# Multi-action metrics
|
204
|
+
avg_actions_per_turn_overall: float
|
205
|
+
actions_per_turn_distribution: Dict[int, int] # num_actions -> count
|
206
|
+
|
207
|
+
# Termination analysis
|
208
|
+
termination_breakdown: Dict[str, float] # reason -> percentage
|
209
|
+
avg_final_position: Optional[Tuple[float, float]]
|
210
|
+
|
211
|
+
|
212
|
+
def get_pure_success_scores(
|
213
|
+
aggregate_results: List[MiniGridAggregateResults],
|
214
|
+
) -> Dict[str, float]:
|
215
|
+
"""
|
216
|
+
Extract pure success scores - the percentage of tasks completed successfully.
|
217
|
+
|
218
|
+
This is the simplest, most direct metric: did the agent reach the goal?
|
219
|
+
|
220
|
+
Returns:
|
221
|
+
Dict mapping "model_name (difficulty)" to success rate percentage (0-100)
|
222
|
+
"""
|
223
|
+
success_scores = {}
|
224
|
+
for agg in aggregate_results:
|
225
|
+
key = f"{agg.model_name} ({agg.difficulty})"
|
226
|
+
success_scores[key] = agg.success_rate * 100.0 # Convert to percentage
|
227
|
+
|
228
|
+
return success_scores
|
229
|
+
|
230
|
+
|
231
|
+
def print_pure_success_summary(aggregate_results: List[MiniGridAggregateResults]):
|
232
|
+
"""Print a clean summary focused on pure success rates."""
|
233
|
+
print("\n🎯 PURE SUCCESS RATES (Task Completion)")
|
234
|
+
print("=" * 50)
|
235
|
+
|
236
|
+
success_scores = get_pure_success_scores(aggregate_results)
|
237
|
+
|
238
|
+
# Sort by success rate (highest first)
|
239
|
+
sorted_results = sorted(aggregate_results, key=lambda x: x.success_rate, reverse=True)
|
240
|
+
|
241
|
+
for agg in sorted_results:
|
242
|
+
success_pct = agg.success_rate * 100.0
|
243
|
+
print(f"{agg.model_name:25} ({agg.difficulty:6}): {success_pct:5.1f}%")
|
244
|
+
|
245
|
+
print("=" * 50)
|
246
|
+
print("✓ Success = Agent reached the goal")
|
247
|
+
print("✗ Failure = Timeout, quit, or error")
|
248
|
+
|
249
|
+
|
250
|
+
def get_success_rate(report: Dict[str, Any], model_name: str, difficulty: str = None) -> float:
|
251
|
+
"""
|
252
|
+
Quick helper to get the success rate for a specific model.
|
253
|
+
|
254
|
+
Args:
|
255
|
+
report: Evaluation report from run_minigrid_eval()
|
256
|
+
model_name: Name of the model
|
257
|
+
difficulty: Specific difficulty, or None for all difficulties
|
258
|
+
|
259
|
+
Returns:
|
260
|
+
Success rate as percentage (0-100)
|
261
|
+
"""
|
262
|
+
if "pure_success_scores" not in report:
|
263
|
+
return 0.0
|
264
|
+
|
265
|
+
success_scores = report["pure_success_scores"]
|
266
|
+
|
267
|
+
if difficulty:
|
268
|
+
key = f"{model_name} ({difficulty})"
|
269
|
+
return success_scores.get(key, 0.0)
|
270
|
+
else:
|
271
|
+
# Return average across all difficulties for this model
|
272
|
+
matching_scores = [
|
273
|
+
score for key, score in success_scores.items() if key.startswith(model_name)
|
274
|
+
]
|
275
|
+
return sum(matching_scores) / len(matching_scores) if matching_scores else 0.0
|
276
|
+
|
277
|
+
|
278
|
+
class MiniGridEvalFramework:
|
279
|
+
"""Evaluation framework for MiniGrid environments."""
|
280
|
+
|
281
|
+
def __init__(self):
|
282
|
+
self.trajectory_results: List[MiniGridTrajectoryResult] = []
|
283
|
+
|
284
|
+
async def run_single_trajectory(
|
285
|
+
self,
|
286
|
+
model_name: str,
|
287
|
+
difficulty: str,
|
288
|
+
task_type: str,
|
289
|
+
seed: int,
|
290
|
+
max_turns: int = 30,
|
291
|
+
collect_detailed_data: bool = True,
|
292
|
+
) -> MiniGridTrajectoryResult:
|
293
|
+
"""Run a single trajectory and collect detailed metrics."""
|
294
|
+
import sys
|
295
|
+
import os
|
296
|
+
|
297
|
+
# Add the agent_demos directory to path
|
298
|
+
agent_demos_dir = os.path.dirname(os.path.abspath(__file__))
|
299
|
+
sys.path.insert(0, agent_demos_dir)
|
300
|
+
# Add the minigrid directory to path
|
301
|
+
minigrid_dir = os.path.dirname(agent_demos_dir)
|
302
|
+
sys.path.insert(0, minigrid_dir)
|
303
|
+
|
304
|
+
from test_synth_react import MiniGridReActAgent
|
305
|
+
from environment import MiniGridEnvironment
|
306
|
+
from taskset import MiniGridTaskInstance, MiniGridTaskInstanceMetadata
|
307
|
+
from synth_ai.environments.tasks.core import Impetus, Intent
|
308
|
+
from synth_ai.zyk import LM
|
309
|
+
|
310
|
+
# Create task instance based on task type
|
311
|
+
# Extract grid size from task name
|
312
|
+
grid_size = (6, 6) # Default
|
313
|
+
if "5x5" in task_type:
|
314
|
+
grid_size = (5, 5)
|
315
|
+
elif "6x6" in task_type:
|
316
|
+
grid_size = (6, 6)
|
317
|
+
elif "8x8" in task_type:
|
318
|
+
grid_size = (8, 8)
|
319
|
+
elif "16x16" in task_type:
|
320
|
+
grid_size = (16, 16)
|
321
|
+
|
322
|
+
# Determine features
|
323
|
+
has_key = "DoorKey" in task_type or "Unlock" in task_type
|
324
|
+
has_door = "Door" in task_type or "Room" in task_type
|
325
|
+
has_lava = "Lava" in task_type
|
326
|
+
|
327
|
+
metadata = MiniGridTaskInstanceMetadata(
|
328
|
+
env_name=task_type,
|
329
|
+
grid_size=grid_size,
|
330
|
+
difficulty=difficulty,
|
331
|
+
has_key=has_key,
|
332
|
+
has_door=has_door,
|
333
|
+
has_lava=has_lava,
|
334
|
+
num_objects=1 if has_key or has_door else 0,
|
335
|
+
seed=seed,
|
336
|
+
)
|
337
|
+
|
338
|
+
instance = MiniGridTaskInstance(
|
339
|
+
id=uuid.uuid4(),
|
340
|
+
impetus=Impetus(instructions=f"Navigate and complete the {task_type} environment."),
|
341
|
+
intent=Intent(rubric={}, gold_trajectories=None, gold_state_diff={}),
|
342
|
+
metadata=metadata,
|
343
|
+
is_reproducible=True,
|
344
|
+
initial_engine_snapshot=None,
|
345
|
+
)
|
346
|
+
|
347
|
+
# Setup environment and agent
|
348
|
+
env = MiniGridEnvironment(instance)
|
349
|
+
|
350
|
+
llm = LM(model_name=model_name, formatting_model_name=model_name, temperature=0.0)
|
351
|
+
agent = MiniGridReActAgent(llm, max_turns=max_turns, verbose=True)
|
352
|
+
|
353
|
+
# Initialize tracking
|
354
|
+
trajectory_id = str(uuid.uuid4())
|
355
|
+
achievements_unlocked = set()
|
356
|
+
achievement_turn_unlocked = {}
|
357
|
+
actions_per_turn = []
|
358
|
+
turn_by_turn_data = [] if collect_detailed_data else None
|
359
|
+
|
360
|
+
# Navigation tracking
|
361
|
+
positions_visited = set()
|
362
|
+
wall_collisions = 0
|
363
|
+
backtrack_count = 0
|
364
|
+
objects_interacted = []
|
365
|
+
|
366
|
+
# Wrap in trace context for synth-sdk tracing
|
367
|
+
with trace_context(
|
368
|
+
system_name="minigrid_evaluation",
|
369
|
+
system_id="minigrid_evaluation",
|
370
|
+
system_instance_id=trajectory_id,
|
371
|
+
):
|
372
|
+
# Run episode
|
373
|
+
obs_payload = await env.initialize()
|
374
|
+
turn_count = 0
|
375
|
+
termination_reason = "unknown"
|
376
|
+
|
377
|
+
# Extract grid size from initial observation
|
378
|
+
grid_size = self._extract_grid_size(obs_payload)
|
379
|
+
|
380
|
+
# Create progress bar for this trajectory
|
381
|
+
pbar = tqdm(
|
382
|
+
total=max_turns,
|
383
|
+
desc=f"{model_name} ({difficulty}) {task_type} Seed {seed}",
|
384
|
+
unit="turn",
|
385
|
+
leave=False,
|
386
|
+
ncols=100,
|
387
|
+
)
|
388
|
+
|
389
|
+
try:
|
390
|
+
while turn_count < max_turns:
|
391
|
+
turn_count += 1
|
392
|
+
pbar.update(1)
|
393
|
+
|
394
|
+
# Track achievements
|
395
|
+
easy_count = len(
|
396
|
+
[a for a in achievements_unlocked if a in MINIGRID_ACHIEVEMENTS["basic"]]
|
397
|
+
)
|
398
|
+
medium_count = len(
|
399
|
+
[
|
400
|
+
a
|
401
|
+
for a in achievements_unlocked
|
402
|
+
if a in MINIGRID_ACHIEVEMENTS["intermediate"]
|
403
|
+
]
|
404
|
+
)
|
405
|
+
hard_count = len(
|
406
|
+
[a for a in achievements_unlocked if a in MINIGRID_ACHIEVEMENTS["advanced"]]
|
407
|
+
)
|
408
|
+
total_count = len(achievements_unlocked)
|
409
|
+
|
410
|
+
achievement_display = f"{total_count}({easy_count}/{medium_count}/{hard_count})"
|
411
|
+
|
412
|
+
pbar.set_postfix(
|
413
|
+
{
|
414
|
+
"achievements": achievement_display,
|
415
|
+
"steps": obs_payload.get("public", {}).step_count
|
416
|
+
if hasattr(obs_payload.get("public", {}), "step_count")
|
417
|
+
else 0,
|
418
|
+
}
|
419
|
+
)
|
420
|
+
|
421
|
+
current_formatted_obs = obs_payload.get("formatted_obs", "")
|
422
|
+
|
423
|
+
# Track current position
|
424
|
+
current_position = self._extract_position(obs_payload)
|
425
|
+
if current_position:
|
426
|
+
if current_position in positions_visited:
|
427
|
+
backtrack_count += 1
|
428
|
+
positions_visited.add(current_position)
|
429
|
+
|
430
|
+
# Check for new achievements
|
431
|
+
new_achievements = self._check_achievements(
|
432
|
+
obs_payload, achievements_unlocked, turn_count, task_type
|
433
|
+
)
|
434
|
+
for ach in new_achievements:
|
435
|
+
achievements_unlocked.add(ach)
|
436
|
+
achievement_turn_unlocked[ach] = turn_count
|
437
|
+
|
438
|
+
# Agent decision
|
439
|
+
task_description = f"Complete the {task_type} task"
|
440
|
+
action_decision = await agent.decide(
|
441
|
+
current_formatted_obs, task_description, turn_count
|
442
|
+
)
|
443
|
+
|
444
|
+
if action_decision["name"] == "terminate":
|
445
|
+
termination_reason = "agent_quit"
|
446
|
+
break
|
447
|
+
|
448
|
+
# Convert to environment action format
|
449
|
+
env_action = self._convert_action_format(action_decision)
|
450
|
+
actions_per_turn.append(1) # MiniGrid typically uses single actions
|
451
|
+
|
452
|
+
# Collect turn data
|
453
|
+
if collect_detailed_data:
|
454
|
+
turn_data = {
|
455
|
+
"turn": turn_count,
|
456
|
+
"action_planned": action_decision,
|
457
|
+
"achievements_at_start": list(achievements_unlocked),
|
458
|
+
"new_achievements_this_turn": list(new_achievements),
|
459
|
+
"position": current_position,
|
460
|
+
"steps_before_turn": obs_payload.get("public", {}).step_count
|
461
|
+
if hasattr(obs_payload.get("public", {}), "step_count")
|
462
|
+
else 0,
|
463
|
+
}
|
464
|
+
turn_by_turn_data.append(turn_data)
|
465
|
+
|
466
|
+
# Execute action
|
467
|
+
obs_payload = await env.step(env_action)
|
468
|
+
|
469
|
+
# Check for wall collision
|
470
|
+
if "blocked" in obs_payload.get("formatted_obs", "").lower():
|
471
|
+
wall_collisions += 1
|
472
|
+
|
473
|
+
# Check for object interaction
|
474
|
+
objects_interacted.extend(self._extract_object_interactions(obs_payload))
|
475
|
+
|
476
|
+
if "error" in obs_payload:
|
477
|
+
termination_reason = "environment_error"
|
478
|
+
break
|
479
|
+
|
480
|
+
# Fix the terminated/truncated check
|
481
|
+
private_data = obs_payload.get("private", {})
|
482
|
+
if (hasattr(private_data, "terminated") and private_data.terminated) or (
|
483
|
+
hasattr(private_data, "truncated") and private_data.truncated
|
484
|
+
):
|
485
|
+
if "lava" in obs_payload.get("formatted_obs", "").lower():
|
486
|
+
termination_reason = "lava_death"
|
487
|
+
elif hasattr(private_data, "terminated") and private_data.terminated:
|
488
|
+
termination_reason = "goal_reached"
|
489
|
+
else:
|
490
|
+
termination_reason = "timeout"
|
491
|
+
break
|
492
|
+
|
493
|
+
# Final metrics
|
494
|
+
if termination_reason == "unknown":
|
495
|
+
termination_reason = "timeout"
|
496
|
+
|
497
|
+
final_private = obs_payload.get("private", {})
|
498
|
+
final_public = obs_payload.get("public", {})
|
499
|
+
|
500
|
+
total_steps = getattr(final_public, "step_count", 0)
|
501
|
+
total_reward = getattr(final_private, "total_reward", 0.0)
|
502
|
+
|
503
|
+
# Calculate efficiency
|
504
|
+
optimal_steps = self._estimate_optimal_steps(task_type, grid_size)
|
505
|
+
efficiency_ratio = optimal_steps / max(total_steps, 1) if optimal_steps else 1.0
|
506
|
+
|
507
|
+
# Calculate exploration coverage
|
508
|
+
total_accessible_cells = self._estimate_accessible_cells(grid_size, task_type)
|
509
|
+
exploration_coverage = len(positions_visited) / max(total_accessible_cells, 1)
|
510
|
+
|
511
|
+
# Success determination
|
512
|
+
success = termination_reason == "goal_reached"
|
513
|
+
|
514
|
+
# Final position and direction
|
515
|
+
final_position = self._extract_position(obs_payload)
|
516
|
+
final_direction = self._extract_direction(obs_payload)
|
517
|
+
|
518
|
+
avg_actions_per_turn = (
|
519
|
+
sum(actions_per_turn) / len(actions_per_turn) if actions_per_turn else 0.0
|
520
|
+
)
|
521
|
+
|
522
|
+
return MiniGridTrajectoryResult(
|
523
|
+
trajectory_id=trajectory_id,
|
524
|
+
model_name=model_name,
|
525
|
+
difficulty=difficulty,
|
526
|
+
task_type=task_type,
|
527
|
+
seed=seed,
|
528
|
+
success=success,
|
529
|
+
total_steps=total_steps,
|
530
|
+
total_turns=turn_count,
|
531
|
+
total_reward=total_reward,
|
532
|
+
grid_size=grid_size,
|
533
|
+
steps_to_goal=total_steps,
|
534
|
+
optimal_steps=optimal_steps,
|
535
|
+
efficiency_ratio=efficiency_ratio,
|
536
|
+
objects_interacted=list(set(objects_interacted)),
|
537
|
+
rooms_visited=1, # TODO: Implement room detection
|
538
|
+
backtrack_count=backtrack_count,
|
539
|
+
wall_collision_count=wall_collisions,
|
540
|
+
exploration_coverage=exploration_coverage,
|
541
|
+
achievements_unlocked=achievements_unlocked,
|
542
|
+
achievement_turn_unlocked=achievement_turn_unlocked,
|
543
|
+
actions_per_turn=actions_per_turn,
|
544
|
+
avg_actions_per_turn=avg_actions_per_turn,
|
545
|
+
termination_reason=termination_reason,
|
546
|
+
final_position=final_position,
|
547
|
+
final_direction=final_direction,
|
548
|
+
turn_by_turn_data=turn_by_turn_data,
|
549
|
+
)
|
550
|
+
finally:
|
551
|
+
pbar.close()
|
552
|
+
|
553
|
+
async def run_evaluation(
|
554
|
+
self,
|
555
|
+
model_names: List[str],
|
556
|
+
difficulties: List[str] = ["easy", "medium"],
|
557
|
+
task_types: List[str] = None,
|
558
|
+
num_trajectories_per_condition: int = 3,
|
559
|
+
max_turns: int = 30,
|
560
|
+
collect_detailed_data: bool = True,
|
561
|
+
) -> Dict[str, Any]:
|
562
|
+
"""Run comprehensive evaluation across models and difficulties."""
|
563
|
+
|
564
|
+
if task_types is None:
|
565
|
+
task_types = ["MiniGrid-Empty-6x6-v0", "MiniGrid-DoorKey-5x5-v0"]
|
566
|
+
|
567
|
+
print(f"🎯 Starting MiniGrid Evaluation")
|
568
|
+
print(f" Models: {model_names}")
|
569
|
+
print(f" Difficulties: {difficulties}")
|
570
|
+
print(f" Task Types: {task_types}")
|
571
|
+
print(f" Trajectories per condition: {num_trajectories_per_condition}")
|
572
|
+
print(f" Max turns per trajectory: {max_turns}")
|
573
|
+
|
574
|
+
all_results = []
|
575
|
+
|
576
|
+
for model_name in model_names:
|
577
|
+
for difficulty in difficulties:
|
578
|
+
for task_type in task_types:
|
579
|
+
print(f"\n🔄 Running {model_name} on {difficulty} difficulty, {task_type}...")
|
580
|
+
|
581
|
+
# Run trajectories for this condition
|
582
|
+
trajectory_tasks = []
|
583
|
+
for i in range(num_trajectories_per_condition):
|
584
|
+
seed = hash(f"{difficulty}_{task_type}_{i}") % 10000
|
585
|
+
trajectory_tasks.append(
|
586
|
+
self.run_single_trajectory(
|
587
|
+
model_name=model_name,
|
588
|
+
difficulty=difficulty,
|
589
|
+
task_type=task_type,
|
590
|
+
seed=seed,
|
591
|
+
max_turns=max_turns,
|
592
|
+
collect_detailed_data=collect_detailed_data,
|
593
|
+
)
|
594
|
+
)
|
595
|
+
|
596
|
+
condition_results = await asyncio.gather(*trajectory_tasks)
|
597
|
+
all_results.extend(condition_results)
|
598
|
+
|
599
|
+
self.trajectory_results = all_results
|
600
|
+
|
601
|
+
# Save synth-sdk traces after evaluation
|
602
|
+
self._save_traces()
|
603
|
+
|
604
|
+
return self._generate_comprehensive_report()
|
605
|
+
|
606
|
+
def _extract_grid_size(self, obs_payload: Dict[str, Any]) -> Tuple[int, int]:
|
607
|
+
"""Extract grid size from observation."""
|
608
|
+
# Try to extract from public state
|
609
|
+
public = obs_payload.get("public", {})
|
610
|
+
if hasattr(public, "grid_array"):
|
611
|
+
grid = public.grid_array
|
612
|
+
return (grid.shape[1], grid.shape[0]) # (width, height)
|
613
|
+
|
614
|
+
# Default fallback
|
615
|
+
return (6, 6)
|
616
|
+
|
617
|
+
def _extract_position(self, obs_payload: Dict[str, Any]) -> Optional[Tuple[int, int]]:
|
618
|
+
"""Extract agent position from observation."""
|
619
|
+
public = obs_payload.get("public", {})
|
620
|
+
if hasattr(public, "agent_pos"):
|
621
|
+
return public.agent_pos
|
622
|
+
return None
|
623
|
+
|
624
|
+
def _extract_direction(self, obs_payload: Dict[str, Any]) -> Optional[int]:
|
625
|
+
"""Extract agent direction from observation."""
|
626
|
+
public = obs_payload.get("public", {})
|
627
|
+
if hasattr(public, "agent_dir"):
|
628
|
+
return public.agent_dir
|
629
|
+
return None
|
630
|
+
|
631
|
+
def _extract_object_interactions(self, obs_payload: Dict[str, Any]) -> List[str]:
|
632
|
+
"""Extract object interactions from observation."""
|
633
|
+
interactions = []
|
634
|
+
formatted_obs = obs_payload.get("formatted_obs", "").lower()
|
635
|
+
|
636
|
+
if "pickup" in formatted_obs:
|
637
|
+
interactions.append("pickup")
|
638
|
+
if "door" in formatted_obs:
|
639
|
+
interactions.append("door")
|
640
|
+
if "key" in formatted_obs:
|
641
|
+
interactions.append("key")
|
642
|
+
if "goal" in formatted_obs:
|
643
|
+
interactions.append("goal")
|
644
|
+
|
645
|
+
return interactions
|
646
|
+
|
647
|
+
def _check_achievements(
|
648
|
+
self,
|
649
|
+
obs_payload: Dict[str, Any],
|
650
|
+
current_achievements: Set[str],
|
651
|
+
turn: int,
|
652
|
+
task_type: str,
|
653
|
+
) -> Set[str]:
|
654
|
+
"""Check for new achievements based on current state."""
|
655
|
+
new_achievements = set()
|
656
|
+
formatted_obs = obs_payload.get("formatted_obs", "").lower()
|
657
|
+
|
658
|
+
# Basic achievements
|
659
|
+
if "reach_goal" not in current_achievements and "goal" in formatted_obs:
|
660
|
+
new_achievements.add("reach_goal")
|
661
|
+
|
662
|
+
if "first_pickup" not in current_achievements and "pickup" in formatted_obs:
|
663
|
+
new_achievements.add("first_pickup")
|
664
|
+
|
665
|
+
if (
|
666
|
+
"first_door_open" not in current_achievements
|
667
|
+
and "door" in formatted_obs
|
668
|
+
and "open" in formatted_obs
|
669
|
+
):
|
670
|
+
new_achievements.add("first_door_open")
|
671
|
+
|
672
|
+
if "first_key_use" not in current_achievements and "key" in formatted_obs:
|
673
|
+
new_achievements.add("first_key_use")
|
674
|
+
|
675
|
+
# Task-specific achievements
|
676
|
+
if "navigate_empty_room" not in current_achievements and "empty" in task_type.lower():
|
677
|
+
private_data = obs_payload.get("private", {})
|
678
|
+
if hasattr(private_data, "terminated") and private_data.terminated:
|
679
|
+
new_achievements.add("navigate_empty_room")
|
680
|
+
|
681
|
+
# Count-based achievements
|
682
|
+
task_completions = len([a for a in current_achievements if "complete" not in a])
|
683
|
+
if task_completions >= 5 and "complete_5_tasks" not in current_achievements:
|
684
|
+
new_achievements.add("complete_5_tasks")
|
685
|
+
|
686
|
+
return new_achievements
|
687
|
+
|
688
|
+
def _convert_action_format(self, action_decision: Dict[str, Any]) -> Dict[str, Any]:
|
689
|
+
"""Convert agent action decision to environment format."""
|
690
|
+
if action_decision["name"] == "minigrid_act":
|
691
|
+
action = action_decision["parameters"]["action"]
|
692
|
+
return {"tool": "minigrid_act", "args": {"action": action}}
|
693
|
+
|
694
|
+
# Fail fast if not minigrid_act
|
695
|
+
raise ValueError(f"Expected minigrid_act tool, got {action_decision['name']}")
|
696
|
+
|
697
|
+
def _estimate_optimal_steps(self, task_type: str, grid_size: Tuple[int, int]) -> Optional[int]:
|
698
|
+
"""Estimate optimal steps for a task type."""
|
699
|
+
width, height = grid_size
|
700
|
+
|
701
|
+
if "empty" in task_type.lower():
|
702
|
+
# Manhattan distance estimate
|
703
|
+
return width + height - 2
|
704
|
+
elif "doorkey" in task_type.lower():
|
705
|
+
# Need to find key, then door, then goal
|
706
|
+
return (width + height) * 2
|
707
|
+
else:
|
708
|
+
# Conservative estimate
|
709
|
+
return width * height // 2
|
710
|
+
|
711
|
+
def _estimate_accessible_cells(self, grid_size: Tuple[int, int], task_type: str) -> int:
|
712
|
+
"""Estimate number of accessible cells."""
|
713
|
+
width, height = grid_size
|
714
|
+
total_cells = width * height
|
715
|
+
|
716
|
+
# Account for walls (rough estimate)
|
717
|
+
if "empty" in task_type.lower():
|
718
|
+
return int(total_cells * 0.8) # 80% accessible
|
719
|
+
else:
|
720
|
+
return int(total_cells * 0.6) # 60% accessible with obstacles
|
721
|
+
|
722
|
+
def _generate_comprehensive_report(self) -> Dict[str, Any]:
|
723
|
+
"""Generate comprehensive evaluation report with all metrics and tables."""
|
724
|
+
|
725
|
+
# Group results by model and difficulty
|
726
|
+
grouped_results = defaultdict(lambda: defaultdict(list))
|
727
|
+
for result in self.trajectory_results:
|
728
|
+
grouped_results[result.model_name][result.difficulty].append(result)
|
729
|
+
|
730
|
+
# Generate aggregate results
|
731
|
+
aggregate_results = []
|
732
|
+
for model_name, difficulties in grouped_results.items():
|
733
|
+
for difficulty, trajectories in difficulties.items():
|
734
|
+
agg = self._compute_aggregate_metrics(model_name, difficulty, trajectories)
|
735
|
+
aggregate_results.append(agg)
|
736
|
+
|
737
|
+
# Generate all tables and analyses
|
738
|
+
report = {
|
739
|
+
"evaluation_summary": self._generate_summary_table(aggregate_results),
|
740
|
+
"achievement_percentage_table": self._generate_achievement_percentage_table(
|
741
|
+
grouped_results
|
742
|
+
),
|
743
|
+
"task_completion_breakdown": self._generate_task_completion_table(aggregate_results),
|
744
|
+
"navigation_analysis": self._generate_navigation_analysis(aggregate_results),
|
745
|
+
"trajectory_by_trajectory_breakdown": self._generate_trajectory_breakdown(),
|
746
|
+
"raw_aggregate_results": [asdict(agg) for agg in aggregate_results],
|
747
|
+
"raw_trajectory_results": [asdict(traj) for traj in self.trajectory_results],
|
748
|
+
}
|
749
|
+
|
750
|
+
return report
|
751
|
+
|
752
|
+
def _compute_aggregate_metrics(
|
753
|
+
self,
|
754
|
+
model_name: str,
|
755
|
+
difficulty: str,
|
756
|
+
trajectories: List[MiniGridTrajectoryResult],
|
757
|
+
) -> MiniGridAggregateResults:
|
758
|
+
"""Compute aggregate metrics for a model-difficulty condition."""
|
759
|
+
|
760
|
+
num_trajectories = len(trajectories)
|
761
|
+
if num_trajectories == 0:
|
762
|
+
return MiniGridAggregateResults(
|
763
|
+
model_name=model_name,
|
764
|
+
difficulty=difficulty,
|
765
|
+
num_trajectories=0,
|
766
|
+
success_rate=0.0,
|
767
|
+
avg_total_steps=0.0,
|
768
|
+
avg_total_turns=0.0,
|
769
|
+
avg_total_reward=0.0,
|
770
|
+
task_completion_rates={},
|
771
|
+
avg_efficiency_ratio=0.0,
|
772
|
+
avg_exploration_coverage=0.0,
|
773
|
+
avg_wall_collisions=0.0,
|
774
|
+
avg_backtrack_count=0.0,
|
775
|
+
unique_achievements_unlocked=set(),
|
776
|
+
total_achievement_count=0,
|
777
|
+
avg_achievements_per_trajectory=0.0,
|
778
|
+
achievement_unlock_rates={},
|
779
|
+
composite_score_avg=0.0,
|
780
|
+
composite_score_best=0.0,
|
781
|
+
navigation_score_avg=0.0,
|
782
|
+
navigation_score_best=0.0,
|
783
|
+
avg_actions_per_turn_overall=0.0,
|
784
|
+
actions_per_turn_distribution={},
|
785
|
+
termination_breakdown={},
|
786
|
+
avg_final_position=None,
|
787
|
+
)
|
788
|
+
|
789
|
+
# Success metrics
|
790
|
+
success_rate = sum(1 for t in trajectories if t.success) / num_trajectories
|
791
|
+
avg_total_steps = sum(t.total_steps for t in trajectories) / num_trajectories
|
792
|
+
avg_total_turns = sum(t.total_turns for t in trajectories) / num_trajectories
|
793
|
+
avg_total_reward = sum(t.total_reward for t in trajectories) / num_trajectories
|
794
|
+
|
795
|
+
# MiniGrid-specific metrics
|
796
|
+
task_completion_rates = {}
|
797
|
+
task_counts = defaultdict(int)
|
798
|
+
task_successes = defaultdict(int)
|
799
|
+
|
800
|
+
for traj in trajectories:
|
801
|
+
task_counts[traj.task_type] += 1
|
802
|
+
if traj.success:
|
803
|
+
task_successes[traj.task_type] += 1
|
804
|
+
|
805
|
+
for task_type in task_counts:
|
806
|
+
task_completion_rates[task_type] = task_successes[task_type] / task_counts[task_type]
|
807
|
+
|
808
|
+
avg_efficiency_ratio = sum(t.efficiency_ratio for t in trajectories) / num_trajectories
|
809
|
+
avg_exploration_coverage = (
|
810
|
+
sum(t.exploration_coverage for t in trajectories) / num_trajectories
|
811
|
+
)
|
812
|
+
avg_wall_collisions = sum(t.wall_collision_count for t in trajectories) / num_trajectories
|
813
|
+
avg_backtrack_count = sum(t.backtrack_count for t in trajectories) / num_trajectories
|
814
|
+
|
815
|
+
# Achievement analysis
|
816
|
+
all_achievements = set()
|
817
|
+
total_achievement_count = 0
|
818
|
+
achievement_counts = defaultdict(int)
|
819
|
+
|
820
|
+
for traj in trajectories:
|
821
|
+
all_achievements.update(traj.achievements_unlocked)
|
822
|
+
total_achievement_count += len(traj.achievements_unlocked)
|
823
|
+
for ach in traj.achievements_unlocked:
|
824
|
+
achievement_counts[ach] += 1
|
825
|
+
|
826
|
+
achievement_unlock_rates = {
|
827
|
+
ach: count / num_trajectories for ach, count in achievement_counts.items()
|
828
|
+
}
|
829
|
+
avg_achievements_per_trajectory = total_achievement_count / num_trajectories
|
830
|
+
|
831
|
+
# Compute MiniGrid-specific scores
|
832
|
+
composite_scores = [
|
833
|
+
minigrid_composite_score(
|
834
|
+
len(traj.achievements_unlocked),
|
835
|
+
1.0 if traj.success else 0.0,
|
836
|
+
traj.efficiency_ratio,
|
837
|
+
traj.exploration_coverage,
|
838
|
+
)
|
839
|
+
for traj in trajectories
|
840
|
+
]
|
841
|
+
composite_score_avg = (
|
842
|
+
sum(composite_scores) / len(composite_scores) if composite_scores else 0.0
|
843
|
+
)
|
844
|
+
composite_score_best = max(composite_scores) if composite_scores else 0.0
|
845
|
+
|
846
|
+
# Navigation scores
|
847
|
+
navigation_scores = [
|
848
|
+
minigrid_navigation_score(
|
849
|
+
1.0 if traj.success else 0.0,
|
850
|
+
traj.efficiency_ratio,
|
851
|
+
traj.wall_collision_count / max(traj.total_turns, 1),
|
852
|
+
)
|
853
|
+
for traj in trajectories
|
854
|
+
]
|
855
|
+
navigation_score_avg = (
|
856
|
+
sum(navigation_scores) / len(navigation_scores) if navigation_scores else 0.0
|
857
|
+
)
|
858
|
+
navigation_score_best = max(navigation_scores) if navigation_scores else 0.0
|
859
|
+
|
860
|
+
# Multi-action analysis
|
861
|
+
all_actions_per_turn = []
|
862
|
+
actions_per_turn_dist = defaultdict(int)
|
863
|
+
for traj in trajectories:
|
864
|
+
all_actions_per_turn.extend(traj.actions_per_turn)
|
865
|
+
for count in traj.actions_per_turn:
|
866
|
+
actions_per_turn_dist[count] += 1
|
867
|
+
|
868
|
+
avg_actions_per_turn_overall = (
|
869
|
+
sum(all_actions_per_turn) / len(all_actions_per_turn) if all_actions_per_turn else 0.0
|
870
|
+
)
|
871
|
+
|
872
|
+
# Termination analysis
|
873
|
+
termination_counts = defaultdict(int)
|
874
|
+
for traj in trajectories:
|
875
|
+
termination_counts[traj.termination_reason] += 1
|
876
|
+
termination_breakdown = {
|
877
|
+
reason: count / num_trajectories for reason, count in termination_counts.items()
|
878
|
+
}
|
879
|
+
|
880
|
+
# Average final position
|
881
|
+
final_positions = [t.final_position for t in trajectories if t.final_position is not None]
|
882
|
+
avg_final_position = None
|
883
|
+
if final_positions:
|
884
|
+
avg_x = sum(pos[0] for pos in final_positions) / len(final_positions)
|
885
|
+
avg_y = sum(pos[1] for pos in final_positions) / len(final_positions)
|
886
|
+
avg_final_position = (avg_x, avg_y)
|
887
|
+
|
888
|
+
return MiniGridAggregateResults(
|
889
|
+
model_name=model_name,
|
890
|
+
difficulty=difficulty,
|
891
|
+
num_trajectories=num_trajectories,
|
892
|
+
success_rate=success_rate,
|
893
|
+
avg_total_steps=avg_total_steps,
|
894
|
+
avg_total_turns=avg_total_turns,
|
895
|
+
avg_total_reward=avg_total_reward,
|
896
|
+
task_completion_rates=task_completion_rates,
|
897
|
+
avg_efficiency_ratio=avg_efficiency_ratio,
|
898
|
+
avg_exploration_coverage=avg_exploration_coverage,
|
899
|
+
avg_wall_collisions=avg_wall_collisions,
|
900
|
+
avg_backtrack_count=avg_backtrack_count,
|
901
|
+
unique_achievements_unlocked=all_achievements,
|
902
|
+
total_achievement_count=total_achievement_count,
|
903
|
+
avg_achievements_per_trajectory=avg_achievements_per_trajectory,
|
904
|
+
achievement_unlock_rates=achievement_unlock_rates,
|
905
|
+
avg_actions_per_turn_overall=avg_actions_per_turn_overall,
|
906
|
+
actions_per_turn_distribution=dict(actions_per_turn_dist),
|
907
|
+
termination_breakdown=termination_breakdown,
|
908
|
+
avg_final_position=avg_final_position,
|
909
|
+
composite_score_avg=composite_score_avg,
|
910
|
+
composite_score_best=composite_score_best,
|
911
|
+
navigation_score_avg=navigation_score_avg,
|
912
|
+
navigation_score_best=navigation_score_best,
|
913
|
+
)
|
914
|
+
|
915
|
+
def _generate_summary_table(
|
916
|
+
self, aggregate_results: List[MiniGridAggregateResults]
|
917
|
+
) -> pd.DataFrame:
|
918
|
+
"""Generate main summary table with key metrics."""
|
919
|
+
|
920
|
+
data = []
|
921
|
+
for agg in aggregate_results:
|
922
|
+
data.append(
|
923
|
+
{
|
924
|
+
"Model": agg.model_name,
|
925
|
+
"Difficulty": agg.difficulty,
|
926
|
+
"✓ Success Rate": f"{agg.success_rate:.1%}", # Made more prominent with checkmark
|
927
|
+
"Composite Score": f"{agg.composite_score_avg:.1f}",
|
928
|
+
"Navigation Score": f"{agg.navigation_score_avg:.1f}",
|
929
|
+
"Avg Steps": f"{agg.avg_total_steps:.1f}",
|
930
|
+
"Avg Turns": f"{agg.avg_total_turns:.1f}",
|
931
|
+
"Efficiency": f"{agg.avg_efficiency_ratio:.2f}",
|
932
|
+
"Exploration": f"{agg.avg_exploration_coverage:.1%}",
|
933
|
+
"Wall Collisions": f"{agg.avg_wall_collisions:.1f}",
|
934
|
+
"Achievements": len(agg.unique_achievements_unlocked),
|
935
|
+
"Avg Actions/Turn": f"{agg.avg_actions_per_turn_overall:.1f}",
|
936
|
+
}
|
937
|
+
)
|
938
|
+
|
939
|
+
return pd.DataFrame(data)
|
940
|
+
|
941
|
+
    def _generate_achievement_percentage_table(
        self, grouped_results: Dict[str, Dict[str, List[MiniGridTrajectoryResult]]]
    ) -> pd.DataFrame:
        """Generate table showing percentage of trajectories achieving each achievement."""

        data = []

        for model_name, difficulties in grouped_results.items():
            for difficulty, trajectories in difficulties.items():
                if not trajectories:
                    continue

                num_trajectories = len(trajectories)
                row = {"Model": model_name, "Difficulty": difficulty}

                # Count achievements
                achievement_counts = defaultdict(int)
                for traj in trajectories:
                    for ach in traj.achievements_unlocked:
                        achievement_counts[ach] += 1

                # Add percentage for each achievement
                for achievement in ALL_ACHIEVEMENTS:
                    count = achievement_counts[achievement]
                    percentage = count / num_trajectories if num_trajectories > 0 else 0.0
                    row[achievement] = f"{percentage:.1%}"

                data.append(row)

        df = pd.DataFrame(data)

        # Reorder columns: Model, Difficulty, then achievements by category
        base_cols = ["Model", "Difficulty"]
        achievement_cols = []
        for category in ["basic", "intermediate", "advanced"]:
            for ach in MINIGRID_ACHIEVEMENTS[category]:
                if ach in df.columns:
                    achievement_cols.append(ach)

        return df[base_cols + achievement_cols]

    def _generate_task_completion_table(
        self, aggregate_results: List[MiniGridAggregateResults]
    ) -> pd.DataFrame:
        """Generate table showing completion rates by task type."""

        data = []
        for agg in aggregate_results:
            row = {
                "Model": agg.model_name,
                "Difficulty": agg.difficulty,
            }

            for task_type, completion_rate in agg.task_completion_rates.items():
                row[task_type] = f"{completion_rate:.1%}"

            data.append(row)

        return pd.DataFrame(data)

    def _generate_navigation_analysis(
        self, aggregate_results: List[MiniGridAggregateResults]
    ) -> pd.DataFrame:
        """Generate analysis of navigation metrics."""

        data = []
        for agg in aggregate_results:
            data.append(
                {
                    "Model": agg.model_name,
                    "Difficulty": agg.difficulty,
                    "Efficiency Ratio": f"{agg.avg_efficiency_ratio:.3f}",
                    "Exploration Coverage": f"{agg.avg_exploration_coverage:.1%}",
                    "Wall Collisions": f"{agg.avg_wall_collisions:.1f}",
                    "Backtrack Count": f"{agg.avg_backtrack_count:.1f}",
                    "Navigation Score": f"{agg.navigation_score_avg:.1f}",
                    "Final Position": f"({agg.avg_final_position[0]:.1f}, {agg.avg_final_position[1]:.1f})"
                    if agg.avg_final_position
                    else "N/A",
                }
            )

        return pd.DataFrame(data)

    def _generate_trajectory_breakdown(self) -> pd.DataFrame:
        """Generate detailed trajectory-by-trajectory breakdown."""

        data = []
        for traj in self.trajectory_results:
            # Achievement category breakdown
            easy_achievements = len(
                [a for a in traj.achievements_unlocked if a in MINIGRID_ACHIEVEMENTS["basic"]]
            )
            medium_achievements = len(
                [
                    a
                    for a in traj.achievements_unlocked
                    if a in MINIGRID_ACHIEVEMENTS["intermediate"]
                ]
            )
            hard_achievements = len(
                [a for a in traj.achievements_unlocked if a in MINIGRID_ACHIEVEMENTS["advanced"]]
            )

            data.append(
                {
                    "Trajectory ID": traj.trajectory_id[:8],  # Short ID
                    "Model": traj.model_name,
                    "Difficulty": traj.difficulty,
                    "Task Type": traj.task_type,
                    "Seed": traj.seed,
                    "Success": "✓" if traj.success else "✗",
                    "Steps": traj.total_steps,
                    "Turns": traj.total_turns,
                    "Efficiency": f"{traj.efficiency_ratio:.3f}",
                    "Exploration": f"{traj.exploration_coverage:.1%}",
                    "Wall Collisions": traj.wall_collision_count,
                    "Total Achievements": len(traj.achievements_unlocked),
                    "Basic": easy_achievements,
                    "Intermediate": medium_achievements,
                    "Advanced": hard_achievements,
                    "Termination": traj.termination_reason,
                    "Final Position": f"({traj.final_position[0]}, {traj.final_position[1]})"
                    if traj.final_position
                    else "N/A",
                    "Achievements": ", ".join(sorted(traj.achievements_unlocked))
                    if traj.achievements_unlocked
                    else "None",
                }
            )

        return pd.DataFrame(data)
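Since the trajectory breakdown encodes success as the literal strings "✓" and "✗", failed runs can be isolated with ordinary boolean indexing. A small sketch, assuming `traj_df` is the DataFrame returned above (e.g. report["trajectory_by_trajectory_breakdown"]):

    # Keep only the trajectories that did not reach the goal and show why they ended.
    failed = traj_df[traj_df["Success"] == "✗"]
    print(failed[["Model", "Difficulty", "Task Type", "Termination"]].to_string(index=False))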
    def print_report(self, report: Dict[str, Any]):
        """Print a formatted evaluation report."""

        print("\n" + "=" * 80)
        print("🎯 MINIGRID EVALUATION REPORT")
        print("=" * 80)

        # Pure success summary first - the most important metric
        aggregate_results = [
            MiniGridAggregateResults(**agg) for agg in report["raw_aggregate_results"]
        ]
        print_pure_success_summary(aggregate_results)

        # Summary table
        print("\n📊 EVALUATION SUMMARY")
        summary_df = report["evaluation_summary"]
        print(summary_df.to_string(index=False, max_colwidth=12))

        # Achievement breakdown
        print("\n🏆 ACHIEVEMENT UNLOCK RATES")
        achievement_df = report["achievement_percentage_table"]
        if not achievement_df.empty:
            print("Format: percentage of trajectories that unlocked each achievement")

            # Print by category for better readability
            for category in ["basic", "intermediate", "advanced"]:
                category_cols = ["Model", "Difficulty"] + [
                    col for col in achievement_df.columns if col in MINIGRID_ACHIEVEMENTS[category]
                ]
                if len(category_cols) > 2:
                    category_data = achievement_df[category_cols]
                    if not category_data.empty:
                        print(f"\n{category.upper()} ACHIEVEMENTS:")
                        print(category_data.to_string(index=False))

        # Task completion breakdown
        print("\n📋 TASK COMPLETION RATES")
        task_df = report["task_completion_breakdown"]
        print(task_df.to_string(index=False))

        # Navigation analysis
        print("\n🧭 NAVIGATION ANALYSIS")
        nav_df = report["navigation_analysis"]
        print(nav_df.to_string(index=False))

        # Trajectory breakdown (summary stats only for space)
        traj_df = report["trajectory_by_trajectory_breakdown"]
        print(f"\n📋 TRAJECTORY BREAKDOWN ({len(traj_df)} total trajectories)")
        print("Sample trajectories:")
        sample_cols = [
            "Model",
            "Difficulty",
            "Task Type",
            "Success",
            "Steps",
            "Total Achievements",
            "Termination",
        ]
        sample_df = traj_df[sample_cols].head(5)
        print(sample_df.to_string(index=False, max_colwidth=12))
        if len(traj_df) > 5:
            print(f"... and {len(traj_df) - 5} more trajectories")

        print("\n" + "=" * 80)
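`print_report` writes directly to stdout. If a plain-text copy of the report is wanted alongside the DataFrames, the standard-library `contextlib.redirect_stdout` can capture it; a small sketch under that assumption, where `framework` and `report` are the objects used above:

    import contextlib
    import io
    from pathlib import Path

    buffer = io.StringIO()
    with contextlib.redirect_stdout(buffer):
        framework.print_report(report)

    # Persist the captured report text next to the run artifacts.
    Path("minigrid_report.txt").write_text(buffer.getvalue())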
    def _save_traces(self):
        """Save synth-sdk traces to disk."""
        # Get all traces from event store
        traces = event_store.get_system_traces()

        if not traces:
            print("⚠️ No traces found in event store")
            return

        # Create traces directory
        traces_dir = Path("src/evals/minigrid") / f"run_{int(time.time())}" / "traces"
        traces_dir.mkdir(parents=True, exist_ok=True)

        print(f"💾 Saving {len(traces)} traces to {traces_dir}")

        for trace in traces:
            trace_file = traces_dir / f"minigrid_trace_{trace.system_instance_id}.json"
            with open(trace_file, "w") as f:
                json.dump(trace.to_dict(), f, indent=2)

        print(f"✅ Traces saved. To view: ./run_viewer.sh {traces_dir.parent}")
        return traces_dir
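The traces written by `_save_traces` are plain JSON files named `minigrid_trace_<system_instance_id>.json`, so they can be re-read for ad-hoc inspection without the viewer script. A minimal sketch, assuming `traces_dir` is the directory returned by `_save_traces`; the helper name is illustrative only:

    import json
    from pathlib import Path
    from typing import Any, Dict, List

    def load_traces(traces_dir: Path) -> List[Dict[str, Any]]:
        # Read every saved trace back into a list of plain dicts.
        return [
            json.loads(path.read_text())
            for path in sorted(traces_dir.glob("minigrid_trace_*.json"))
        ]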
# Convenience function for quick evaluations
async def run_minigrid_eval(
    model_names: List[str],
    difficulties: List[str] = ["easy", "medium"],
    task_types: List[str] = None,
    num_trajectories: int = 3,
    max_turns: int = 30,
) -> Dict[str, Any]:
    """Quick evaluation runner with automatic report generation."""

    framework = MiniGridEvalFramework()
    report = await framework.run_evaluation(
        model_names=model_names,
        difficulties=difficulties,
        task_types=task_types,
        num_trajectories_per_condition=num_trajectories,
        max_turns=max_turns,
    )

    framework.print_report(report)

    # Add pure success scores to the report for easy access
    aggregate_results = [MiniGridAggregateResults(**agg) for agg in report["raw_aggregate_results"]]
    report["pure_success_scores"] = get_pure_success_scores(aggregate_results)

    return report
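Because `run_minigrid_eval` is a coroutine, it is typically driven with `asyncio.run`. A hedged usage sketch; the model name is a placeholder, not an identifier the package ships with:

    import asyncio

    # Placeholder model identifier; substitute whatever your LM client serves.
    report = asyncio.run(
        run_minigrid_eval(
            model_names=["gpt-4o-mini"],
            difficulties=["easy"],
            num_trajectories=2,
            max_turns=20,
        )
    )
    print(report["pure_success_scores"])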