synth-ai 0.2.4.dev8__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff shows the changes between publicly available package versions released to one of the supported registries, as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of synth-ai might be problematic.
- synth_ai/__init__.py +1 -1
- synth_ai/cli/__init__.py +6 -0
- synth_ai/cli/demo.py +68 -9
- synth_ai/cli/rl_demo.py +137 -0
- synth_ai/cli/root.py +65 -0
- synth_ai/demos/core/__init__.py +1 -0
- synth_ai/demos/core/cli.py +685 -0
- synth_ai/demos/demo_task_apps/__init__.py +1 -0
- synth_ai/demos/demo_task_apps/core.py +374 -0
- synth_ai/demos/demo_task_apps/math/__init__.py +1 -0
- synth_ai/demos/demo_task_apps/math/app.py +37 -0
- synth_ai/demos/demo_task_apps/math/config.toml +44 -0
- synth_ai/demos/demo_task_apps/math/deploy_modal.py +60 -0
- synth_ai/demos/demo_task_apps/math/deploy_task_app.sh +22 -0
- synth_ai/environments/examples/bandit/__init__.py +33 -0
- synth_ai/environments/examples/bandit/engine.py +294 -0
- synth_ai/environments/examples/bandit/environment.py +194 -0
- synth_ai/environments/examples/bandit/taskset.py +200 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/analyze_semantic_words_markdown.py +250 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_comprehensive_evaluation.py +59 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_browser.py +152 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_config.toml +24 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_framework.py +1194 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/crafter_synth_config.toml +56 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/filter_config_modal.toml +32 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/filter_traces_sft_turso.py +724 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/kick_off_ft_modal.py +384 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_action_results.py +53 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_agent_actions.py +178 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_latest_run.py +222 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_lm_traces.py +183 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_no_rewards.py +210 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_trace_issue.py +206 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/check_db_schema.py +49 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/check_latest_results.py +64 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/debug_agent_responses.py +88 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/quick_trace_check.py +77 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/compare_experiments.py +324 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/filter_traces_sft_turso.py +580 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/kick_off_ft_oai.py +362 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/multi_model_config.toml +49 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/analyze_enhanced_hooks.py +332 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/analyze_hook_events.py +97 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/analyze_hook_results.py +217 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/check_hook_storage.py +87 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/check_seeds.py +88 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/compare_seed_performance.py +195 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/custom_eval_pipelines.py +400 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/plot_hook_frequency.py +195 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/seed_analysis_summary.py +56 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/run_rollouts_for_models_and_compare_v3.py +858 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_quick_evaluation.py +52 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_react_agent.py +874 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_trace_evaluation.py +1412 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/example_v3_usage.py +216 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/compare_traces.py +296 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_comprehensive_evaluation.py +58 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_env_serialization.py +464 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_evaluation_browser.py +152 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_quick_evaluation.py +51 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_trace_evaluation.py +1412 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/debug_player_loss.py +112 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/diagnose_service.py +203 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/diagnose_slowness.py +305 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/eval_by_difficulty.py +126 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/eval_example.py +94 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/explore_saved_states.py +142 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/filter_traces_sft.py +26 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/filter_traces_sft_OLD.py +984 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/generate_ft_data_gemini.py +724 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/generate_ft_data_modal.py +386 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/generate_ft_metadata.py +205 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/kick_off_ft_gemini.py +150 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/kick_off_ft_modal.py +283 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/prepare_vertex_ft.py +280 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/profile_env_slowness.py +456 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/replicate_issue.py +166 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/run_and_eval.py +102 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/run_comparison.py +128 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/run_qwen_rollouts.py +655 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/trace_eval_OLD.py +202 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/old/validate_openai_format.py +166 -0
- synth_ai/environments/examples/crafter_classic/environment.py +41 -2
- synth_ai/environments/examples/crafter_custom/agent_demos/__init__.py +1 -0
- synth_ai/environments/examples/crafter_custom/agent_demos/trace_eval.py +202 -0
- synth_ai/environments/examples/crafter_custom/old/analyze_diamond_issue.py +159 -0
- synth_ai/environments/examples/crafter_custom/old/analyze_diamond_spawning.py +158 -0
- synth_ai/environments/examples/crafter_custom/old/compare_worlds.py +71 -0
- synth_ai/environments/examples/crafter_custom/old/dataset_stats.py +105 -0
- synth_ai/environments/examples/crafter_custom/old/diamond_spawning_summary.py +119 -0
- synth_ai/environments/examples/crafter_custom/old/example_dataset_usage.py +52 -0
- synth_ai/environments/examples/enron/units/keyword_stats.py +112 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_evaluation_framework.py +1188 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_quick_evaluation.py +48 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_react_agent.py +562 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_trace_evaluation.py +221 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_evaluation_framework.py +981 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_quick_evaluation.py +74 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_react_agent.py +831 -0
- synth_ai/environments/examples/red/agent_demos/__init__.py +1 -0
- synth_ai/environments/examples/red/units/__init__.py +1 -0
- synth_ai/environments/examples/sokoban/agent_demos/sokoban_full_eval.py +899 -0
- synth_ai/environments/examples/sokoban/units/astar_common.py +95 -0
- synth_ai/environments/service/app.py +8 -0
- synth_ai/install_sqld.sh +40 -0
- synth_ai-0.2.5.dist-info/METADATA +106 -0
- {synth_ai-0.2.4.dev8.dist-info → synth_ai-0.2.5.dist-info}/RECORD +111 -12
- {synth_ai-0.2.4.dev8.dist-info → synth_ai-0.2.5.dist-info}/entry_points.txt +1 -0
- synth_ai-0.2.4.dev8.dist-info/METADATA +0 -635
- {synth_ai-0.2.4.dev8.dist-info → synth_ai-0.2.5.dist-info}/WHEEL +0 -0
- {synth_ai-0.2.4.dev8.dist-info → synth_ai-0.2.5.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.2.4.dev8.dist-info → synth_ai-0.2.5.dist-info}/top_level.txt +0 -0
synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/custom_eval_pipelines.py
@@ -0,0 +1,400 @@
+#!/usr/bin/env python3
+"""
+Custom evaluation pipelines for analyzing Crafter traces using Gemini-1.5-flash.
+"""
+
+import json
+import duckdb
+from typing import List, Dict, Any, Optional
+import asyncio
+import os
+import sys
+import random
+
+# Add the synth_ai path to import base classes
+sys.path.append(os.path.join(os.path.dirname(__file__), '../../../../../'))
+from synth_ai.evals.base import Judgement, BaseEval
+from synth_ai.lm.core.main_v2 import LM
+
+class MisunderstoodCrafterRulesEval(BaseEval):
+    """Evaluate if the agent misunderstood Crafter game rules."""
+
+    def __init__(self, model_name: str = "gpt-4o-mini"):
+        self.model_name = model_name
+        self.lm = LM(
+            model_name=model_name,
+            formatting_model_name="gpt-4o-mini",
+            temperature=0.3 # Increased temperature for more variety
+        )
+
+    async def run(self, session_data: Dict[str, Any]) -> List[Judgement]:
+        """Analyze if the agent misunderstood Crafter rules using LLM."""
+
+        # Extract relevant data from session
+        actions = session_data.get("actions", [])
+        invalid_actions = session_data.get("invalid_actions", [])
+        achievements = session_data.get("achievements", [])
+        inventory_changes = session_data.get("inventory_changes", [])
+        total_steps = session_data.get("total_steps", 0)
+
+        # Add some randomization to the prompt
+        random_seed = random.randint(1, 1000)
+
+        # Create analysis prompt with more specific instructions
+        prompt = f"""
+You are an expert evaluator analyzing a Crafter game session to determine if the agent misunderstood the game rules.
+
+CRAFTER GAME RULES:
+- The agent can move in 4 directions: up, down, left, right
+- The agent can perform actions like: collect, craft, place, eat, sleep
+- Valid actions depend on what's nearby and what's in inventory
+- The goal is to achieve various crafting milestones
+
+SESSION DATA (Seed: {random_seed}):
+Total steps: {total_steps}
+Actions taken: {actions[:30]} # First 30 actions
+Invalid actions: {invalid_actions[:20]} # First 20 invalid actions
+Achievements unlocked: {achievements}
+Inventory changes: {inventory_changes[:10]} # First 10 inventory changes
+
+ANALYSIS TASK:
+Analyze if the agent misunderstood Crafter rules. Look for:
+1. Repeated invalid actions that suggest rule confusion
+2. Actions that don't make sense given the game state
+3. Missing obvious valid actions
+4. Inefficient action patterns
+5. Specific rule violations (movement, crafting, collection)
+
+Provide your analysis in this EXACT JSON format (no additional text):
+{{
+    "score": <float 0-1, where 1=severe misunderstanding>,
+    "reasoning": "<detailed explanation>",
+    "evidence": ["<specific example 1>", "<specific example 2>", ...],
+    "rule_violations": ["<specific rule misunderstood 1>", ...]
+}}
+
+Focus on concrete evidence from the session data. Be specific about what rules the agent seems to misunderstand.
+"""
+
+        try:
+            # Use the existing LM infrastructure
+            response = await self.lm.respond_async(
+                system_message="You are an expert evaluator analyzing AI agent behavior in games. Respond only with valid JSON.",
+                user_message=prompt
+            )
+
+            print(f"DEBUG - Raw LLM response: {response.raw_response[:200]}...")
+
+            # Parse JSON response
+            try:
+                result = json.loads(response.raw_response)
+            except json.JSONDecodeError:
+                # Try to extract JSON from response
+                import re
+                json_match = re.search(r'\{.*\}', response.raw_response, re.DOTALL)
+                if json_match:
+                    result = json.loads(json_match.group())
+                else:
+                    result = {
+                        "score": 0.5,
+                        "reasoning": "Could not parse LLM response",
+                        "evidence": ["Response parsing failed"],
+                        "rule_violations": []
+                    }
+
+            return [Judgement(
+                criteria="misunderstood_crafter_rules",
+                score=result.get("score", 0.5),
+                reasoning=result.get("reasoning", "No reasoning provided"),
+                evidence=result.get("evidence", [])
+            )]
+
+        except Exception as e:
+            return [Judgement(
+                criteria="misunderstood_crafter_rules",
+                score=0.5,
+                reasoning=f"Evaluation failed: {str(e)}",
+                evidence=[f"Error: {str(e)}"]
+            )]
+
+class WastedTimeEval(BaseEval):
+    """Evaluate if the agent wasted time in inefficient actions."""
+
+    def __init__(self, model_name: str = "gpt-4o-mini"):
+        self.model_name = model_name
+        self.lm = LM(
+            model_name=model_name,
+            formatting_model_name="gpt-4o-mini",
+            temperature=0.3 # Increased temperature for more variety
+        )
+
+    async def run(self, session_data: Dict[str, Any]) -> List[Judgement]:
+        """Analyze if the agent wasted time inefficiently using LLM."""
+
+        # Extract relevant data from session
+        actions = session_data.get("actions", [])
+        invalid_actions = session_data.get("invalid_actions", [])
+        achievements = session_data.get("achievements", [])
+        inventory_changes = session_data.get("inventory_changes", [])
+        total_steps = session_data.get("total_steps", 0)
+
+        # Add some randomization to the prompt
+        random_seed = random.randint(1, 1000)
+
+        # Create analysis prompt with more specific instructions
+        prompt = f"""
+You are an expert evaluator analyzing a Crafter game session to determine if the agent wasted time inefficiently.
+
+EFFICIENCY CRITERIA:
+- Repeated failed actions that could be avoided
+- Unnecessary movement patterns
+- Inefficient resource gathering
+- Poor prioritization of goals
+- Actions that don't contribute to achievements
+- Time spent on non-productive activities
+
+SESSION DATA (Seed: {random_seed}):
+Total steps: {total_steps}
+Actions taken: {actions[:40]} # First 40 actions
+Invalid actions: {invalid_actions[:25]} # First 25 invalid actions
+Achievements unlocked: {achievements}
+Inventory changes: {inventory_changes[:15]} # First 15 inventory changes
+
+ANALYSIS TASK:
+Analyze if the agent wasted time inefficiently. Look for:
+1. Repeated invalid actions that waste steps
+2. Inefficient movement patterns
+3. Poor resource gathering strategies
+4. Actions that don't advance toward goals
+5. Missed opportunities for better actions
+6. Time spent on non-productive activities
+
+Provide your analysis in this EXACT JSON format (no additional text):
+{{
+    "score": <float 0-1, where 1=severe time wasting>,
+    "reasoning": "<detailed explanation>",
+    "evidence": ["<specific example 1>", "<specific example 2>", ...],
+    "inefficiencies": ["<specific inefficiency 1>", ...],
+    "efficiency_score": <float 0-1, where 1=very efficient>
+}}
+
+Focus on concrete evidence from the session data. Be specific about how the agent wasted time.
+"""
+
+        try:
+            # Use the existing LM infrastructure
+            response = await self.lm.respond_async(
+                system_message="You are an expert evaluator analyzing AI agent efficiency in games. Respond only with valid JSON.",
+                user_message=prompt
+            )
+
+            print(f"DEBUG - Raw LLM response: {response.raw_response[:200]}...")
+
+            # Parse JSON response
+            try:
+                result = json.loads(response.raw_response)
+            except json.JSONDecodeError:
+                # Try to extract JSON from response
+                import re
+                json_match = re.search(r'\{.*\}', response.raw_response, re.DOTALL)
+                if json_match:
+                    result = json.loads(json_match.group())
+                else:
+                    result = {
+                        "score": 0.5,
+                        "reasoning": "Could not parse LLM response",
+                        "evidence": ["Response parsing failed"],
+                        "inefficiencies": [],
+                        "efficiency_score": 0.5
+                    }
+
+            return [Judgement(
+                criteria="wasted_time",
+                score=result.get("score", 0.5),
+                reasoning=result.get("reasoning", "No reasoning provided"),
+                evidence=result.get("evidence", [])
+            )]
+
+        except Exception as e:
+            return [Judgement(
+                criteria="wasted_time",
+                score=0.5,
+                reasoning=f"Evaluation failed: {str(e)}",
+                evidence=[f"Error: {str(e)}"]
+            )]
+
+class CrafterTraceAnalyzer:
+    """Main analyzer for Crafter traces."""
+
+    def __init__(self, experiment_id: str, db_path: str = "crafter_traces.duckdb", model_name: str = "gpt-4o-mini"):
+        self.experiment_id = experiment_id
+        self.db_path = db_path
+        self.model_name = model_name
+
+        # Initialize evaluators
+        self.misunderstood_rules_eval = MisunderstoodCrafterRulesEval(model_name)
+        self.wasted_time_eval = WastedTimeEval(model_name)
+
+    def extract_session_data(self, session_id: str) -> Dict[str, Any]:
+        """Extract session data from DuckDB."""
+        conn = duckdb.connect(self.db_path)
+
+        # Get session metadata
+        result = conn.execute("""
+            SELECT metadata FROM session_traces
+            WHERE session_id = ? AND experiment_id = ?
+        """, [session_id, self.experiment_id]).fetchall()
+
+        session_data = {}
+        if result:
+            metadata = json.loads(result[0][0]) if isinstance(result[0][0], str) else result[0][0]
+
+            # Extract achievements
+            for item in metadata:
+                if item.get('metadata_type') == 'SessionMetadum':
+                    data = item.get('data', {})
+                    if 'achievements' in data:
+                        achievements = data['achievements']
+                        session_data['achievements'] = [k for k, v in achievements.items() if v]
+                    if 'num_achievements' in data:
+                        session_data['num_achievements'] = data['num_achievements']
+                    if 'total_reward' in data:
+                        session_data['total_reward'] = data['total_reward']
+                    if 'rollout_length' in data:
+                        session_data['total_steps'] = data['rollout_length']
+
+        # Get events for action analysis
+        result = conn.execute("""
+            SELECT event_type, metadata, event_metadata
+            FROM events
+            WHERE session_id = ?
+            ORDER BY event_time
+        """, [session_id]).fetchall()
+
+        actions = []
+        invalid_actions = []
+        inventory_changes = []
+
+        for event_type, metadata, event_metadata in result:
+            if event_type == 'runtime':
+                # Parse action from metadata
+                try:
+                    meta_dict = json.loads(metadata) if isinstance(metadata, str) else metadata
+                    if 'action' in meta_dict:
+                        actions.append(meta_dict['action'])
+                except:
+                    pass
+
+                # Check for invalid actions in event_metadata
+                if event_metadata:
+                    try:
+                        hook_list = json.loads(event_metadata) if isinstance(event_metadata, str) else event_metadata
+                        for hook_str in hook_list:
+                            if isinstance(hook_str, str):
+                                import re
+                                if "'hook_name': 'invalid_action'" in hook_str:
+                                    # Extract the actual action name from the description
+                                    action_match = re.search(r"'action':\s*'([^']+)'", hook_str)
+                                    if action_match:
+                                        invalid_actions.append(action_match.group(1))
+                                    else:
+                                        invalid_actions.append("unknown")
+                    except:
+                        pass
+
+            elif event_type == 'environment':
+                # Check for inventory changes
+                if event_metadata:
+                    try:
+                        hook_list = json.loads(event_metadata) if isinstance(event_metadata, str) else event_metadata
+                        for hook_str in hook_list:
+                            if isinstance(hook_str, str):
+                                if "'hook_name': 'inventory_increase'" in hook_str:
+                                    inventory_changes.append("inventory_increase")
+                    except:
+                        pass
+
+        session_data.update({
+            'actions': actions,
+            'invalid_actions': invalid_actions,
+            'inventory_changes': inventory_changes
+        })
+
+        conn.close()
+        return session_data
+
+    async def evaluate_session(self, session_id: str) -> Dict[str, List[Judgement]]:
+        """Evaluate a single session."""
+        session_data = self.extract_session_data(session_id)
+
+        # Run evaluations
+        misunderstood_rules = await self.misunderstood_rules_eval.run(session_data)
+        wasted_time = await self.wasted_time_eval.run(session_data)
+
+        return {
+            'misunderstood_rules': misunderstood_rules,
+            'wasted_time': wasted_time
+        }
+
+    async def evaluate_experiment(self) -> Dict[str, Any]:
+        """Evaluate all sessions in the experiment."""
+        conn = duckdb.connect(self.db_path)
+
+        # Get all session IDs for this experiment
+        result = conn.execute("""
+            SELECT session_id FROM session_traces
+            WHERE experiment_id = ?
+            ORDER BY session_id
+        """, [self.experiment_id]).fetchall()
+
+        session_ids = [row[0] for row in result]
+        conn.close()
+
+        print(f"Evaluating {len(session_ids)} sessions in parallel...")
+
+        # Create all evaluation tasks
+        tasks = [self.evaluate_session(session_id) for session_id in session_ids]
+
+        # Run all evaluations in parallel
+        all_results_list = await asyncio.gather(*tasks)
+
+        # Convert to dictionary
+        all_results = {}
+        for session_id, results in zip(session_ids, all_results_list):
+            all_results[session_id] = results
+
+        return all_results
+
+async def main():
+    """Example usage."""
+    import sys
+
+    if len(sys.argv) > 1:
+        experiment_id = sys.argv[1]
+    else:
+        print("Usage: python custom_eval_pipelines.py <experiment_id>")
+        print("Example: python custom_eval_pipelines.py 77022cce-4bda-4415-9bce-0095e4ef2237")
+        return
+
+    # Use Gemini for evaluation
+    analyzer = CrafterTraceAnalyzer(experiment_id, model_name="gemini-1.5-flash")
+    results = await analyzer.evaluate_experiment()
+
+    # Print results
+    print("\n" + "="*80)
+    print("EVALUATION RESULTS")
+    print("="*80)
+
+    for session_id, session_results in results.items():
+        print(f"\nSession: {session_id}")
+        print("-" * 60)
+
+        for eval_type, judgements in session_results.items():
+            print(f"\n{eval_type.upper()}:")
+            for judgement in judgements:
+                print(f"  Score: {judgement.score:.3f}")
+                print(f"  Reasoning: {judgement.reasoning}")
+                print(f"  Evidence: {judgement.evidence}")
+
+if __name__ == "__main__":
+    asyncio.run(main())
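The hunk above establishes the extension pattern behind the new eval additions in this release: a BaseEval subclass exposes an async run(session_data) method that returns a list of Judgement objects. As a minimal sketch of that pattern, assuming only what the code above shows (the synth_ai.evals.base import path and the Judgement keyword arguments, plus the assumption that BaseEval needs no constructor arguments), such an eval need not call an LLM at all; the class name InvalidActionRateEval and its scoring heuristic are illustrative, not part of the package.

from typing import Any, Dict, List

from synth_ai.evals.base import BaseEval, Judgement  # same import path as used in the file above


class InvalidActionRateEval(BaseEval):
    """Illustrative eval: score a session by its invalid-action rate, with no LLM call."""

    async def run(self, session_data: Dict[str, Any]) -> List[Judgement]:
        total_steps = session_data.get("total_steps", 0)
        invalid_actions = session_data.get("invalid_actions", [])
        # Fraction of steps that produced an invalid action; higher means more wasted steps.
        rate = len(invalid_actions) / total_steps if total_steps else 0.0
        return [Judgement(
            criteria="invalid_action_rate",
            score=min(rate, 1.0),
            reasoning=f"{len(invalid_actions)} invalid actions over {total_steps} steps",
            evidence=invalid_actions[:10],
        )]

An eval like this would plug into CrafterTraceAnalyzer.evaluate_session alongside the two LLM-backed evals above, since it consumes the same session_data dictionary produced by extract_session_data.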
synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/plot_hook_frequency.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python3
+"""
+Plot the frequency of achievements and invalid actions over time (by step number).
+Terminal-only version.
+"""
+
+import duckdb
+import json
+import re
+from collections import defaultdict
+from typing import Dict, List, Tuple
+
+def extract_step_from_metadata(metadata: str) -> int:
+    """Extract step number from event metadata."""
+    try:
+        metadata_dict = json.loads(metadata) if isinstance(metadata, str) else metadata
+        return metadata_dict.get('turn', 0)
+    except:
+        return 0
+
+def parse_hook_metadata(event_metadata: str) -> List[Dict]:
+    """Parse hook metadata from event_metadata string with better error handling."""
+    hooks = []
+    try:
+        # The metadata is stored as a list of strings, each containing a hook dict
+        hook_list = json.loads(event_metadata) if isinstance(event_metadata, str) else event_metadata
+
+        for hook_str in hook_list:
+            if isinstance(hook_str, str):
+                # Use regex to extract hook_name more reliably
+                hook_name_match = re.search(r"'hook_name':\s*'([^']+)'", hook_str)
+                if hook_name_match:
+                    hook_name = hook_name_match.group(1)
+                    hooks.append({'hook_name': hook_name})
+            else:
+                hooks.append(hook_str)
+    except Exception as e:
+        # Try alternative parsing if JSON fails
+        try:
+            # Look for hook_name patterns in the string
+            hook_names = re.findall(r"'hook_name':\s*'([^']+)'", event_metadata)
+            for hook_name in hook_names:
+                hooks.append({'hook_name': hook_name})
+        except:
+            pass
+
+    return hooks
+
+def analyze_hook_frequency(experiment_id: str):
+    """Analyze hook frequency over time."""
+    conn = duckdb.connect("crafter_traces.duckdb")
+
+    print(f"ANALYZING HOOK FREQUENCY OVER TIME")
+    print("=" * 80)
+    print(f"Experiment ID: {experiment_id}")
+    print()
+
+    # Get events with hook metadata
+    result = conn.execute("""
+        SELECT e.session_id, e.event_type, e.event_metadata, e.metadata
+        FROM events e
+        JOIN session_traces st ON e.session_id = st.session_id
+        WHERE st.experiment_id = ? AND e.event_metadata IS NOT NULL
+        ORDER BY e.event_time
+    """, [experiment_id]).fetchall()
+
+    # Track hook frequency by step
+    step_achievements = defaultdict(int)
+    step_invalid_actions = defaultdict(int)
+    step_inventory_increases = defaultdict(int)
+
+    # Track by session for more detailed analysis
+    session_data = defaultdict(lambda: {
+        'achievements': defaultdict(int),
+        'invalid_actions': defaultdict(int),
+        'inventory_increases': defaultdict(int)
+    })
+
+    for row in result:
+        session_id, event_type, event_metadata, metadata = row
+
+        # Extract step number
+        step = extract_step_from_metadata(metadata)
+
+        # Parse hook metadata
+        hooks = parse_hook_metadata(event_metadata)
+
+        for hook in hooks:
+            hook_name = hook.get('hook_name', 'unknown')
+
+            if hook_name == 'easy_achievement' or hook_name == 'medium_achievement' or hook_name == 'hard_achievement':
+                step_achievements[step] += 1
+                session_data[session_id]['achievements'][step] += 1
+            elif hook_name == 'invalid_action':
+                step_invalid_actions[step] += 1
+                session_data[session_id]['invalid_actions'][step] += 1
+            elif hook_name == 'inventory_increase':
+                step_inventory_increases[step] += 1
+                session_data[session_id]['inventory_increases'][step] += 1
+
+    # Prepare data for plotting
+    max_step = max(
+        max(step_achievements.keys()) if step_achievements else 0,
+        max(step_invalid_actions.keys()) if step_invalid_actions else 0,
+        max(step_inventory_increases.keys()) if step_inventory_increases else 0
+    )
+
+    steps = list(range(max_step + 1))
+    achievement_freq = [step_achievements[step] for step in steps]
+    invalid_action_freq = [step_invalid_actions[step] for step in steps]
+    inventory_freq = [step_inventory_increases[step] for step in steps]
+
+    # Print summary statistics
+    print("SUMMARY STATISTICS")
+    print("-" * 50)
+    print(f"Total steps analyzed: {max_step + 1}")
+    print(f"Total achievements: {sum(achievement_freq)}")
+    print(f"Total invalid actions: {sum(invalid_action_freq)}")
+    print(f"Total inventory increases: {sum(inventory_freq)}")
+    print()
+
+    print("ACHIEVEMENT ANALYSIS")
+    print("-" * 50)
+    achievement_steps = [step for step, freq in step_achievements.items() if freq > 0]
+    if achievement_steps:
+        print(f"Achievements occur at steps: {sorted(achievement_steps)}")
+        print(f"Most common achievement step: {max(step_achievements.items(), key=lambda x: x[1])}")
+    else:
+        print("No achievements found")
+    print()
+
+    print("INVALID ACTION ANALYSIS")
+    print("-" * 50)
+    invalid_steps = [step for step, freq in step_invalid_actions.items() if freq > 0]
+    if invalid_steps:
+        print(f"Invalid actions occur at steps: {sorted(invalid_steps)}")
+        print(f"Most common invalid action step: {max(step_invalid_actions.items(), key=lambda x: x[1])}")
+    else:
+        print("No invalid actions found")
+    print()
+
+    print("INVENTORY ANALYSIS")
+    print("-" * 50)
+    inventory_steps = [step for step, freq in step_inventory_increases.items() if freq > 0]
+    if inventory_steps:
+        print(f"Inventory increases occur at steps: {sorted(inventory_steps)}")
+        print(f"Most common inventory increase step: {max(step_inventory_increases.items(), key=lambda x: x[1])}")
+    else:
+        print("No inventory increases found")
+
+    # Create ASCII chart
+    print("\nASCII FREQUENCY CHART")
+    print("=" * 80)
+    print("Step | Achievements | Invalid Actions | Inventory")
+    print("-" * 80)
+
+    for step in steps:
+        achievements = step_achievements[step]
+        invalid_actions = step_invalid_actions[step]
+        inventory = step_inventory_increases[step]
+
+        if achievements > 0 or invalid_actions > 0 or inventory > 0:
+            print(f"{step:4d} | {achievements:11d} | {invalid_actions:14d} | {inventory:9d}")
+
+    # Session-by-session breakdown
+    print("\nSESSION-BY-SESSION BREAKDOWN")
+    print("-" * 50)
+    for session_id, data in session_data.items():
+        print(f"\nSession: {session_id}")
+        if data['achievements']:
+            print(f"  Achievements: {dict(data['achievements'])}")
+        if data['invalid_actions']:
+            print(f"  Invalid actions: {dict(data['invalid_actions'])}")
+        if data['inventory_increases']:
+            print(f"  Inventory increases: {dict(data['inventory_increases'])}")
+
+    conn.close()
+
+    return {
+        'steps': steps,
+        'achievement_freq': achievement_freq,
+        'invalid_action_freq': invalid_action_freq,
+        'inventory_freq': inventory_freq,
+        'session_data': session_data
+    }
+
+if __name__ == "__main__":
+    import sys
+
+    if len(sys.argv) > 1:
+        experiment_id = sys.argv[1]
+        analyze_hook_frequency(experiment_id)
+    else:
+        print("Usage: python plot_hook_frequency.py <experiment_id>")
+        print("Example: python plot_hook_frequency.py 77022cce-4bda-4415-9bce-0095e4ef2237")
synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/seed_analysis_summary.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+"""
+Summary analysis of seed performance comparison between GPT-4.1-NANO and GPT-4.1-MINI.
+"""
+
+def print_summary():
+    print("KEY FINDINGS: SEED PERFORMANCE COMPARISON")
+    print("=" * 60)
+
+    print("\nOVERALL PERFORMANCE:")
+    print("  • GPT-4.1-MINI: 12 total achievements across 9 instances")
+    print("  • GPT-4.1-NANO: 2 total achievements across 2 instances")
+    print("  • MINI wins: 9 out of 10 instances (90% win rate)")
+    print("  • NANO wins: 0 out of 10 instances")
+    print("  • Ties: 1 instance (instance 9)")
+
+    print("\nINSTANCE-BY-INSTANCE BREAKDOWN:")
+    print("  • Instance 1 (Seed 43): MINI wins (collect_wood vs 0)")
+    print("  • Instance 2 (Seed 44): MINI wins (collect_wood vs 0)")
+    print("  • Instance 3 (Seed 45): MINI wins (collect_sapling vs 0)")
+    print("  • Instance 4 (Seed 46): MINI wins (collect_wood vs 0)")
+    print("  • Instance 5 (Seed 47): MINI wins (collect_wood vs 0)")
+    print("  • Instance 6 (Seed 48): MINI wins (collect_sapling + eat_cow vs collect_sapling)")
+    print("  • Instance 7 (Seed 49): MINI wins (collect_sapling + collect_wood vs 0)")
+    print("  • Instance 8 (Seed 50): MINI wins (collect_wood vs 0)")
+    print("  • Instance 9 (Seed 51): TIE (0 vs 0)")
+    print("  • Instance 10 (Seed 52): MINI wins (collect_sapling + collect_wood vs collect_wood)")
+
+    print("\nACHIEVEMENT TYPE ANALYSIS:")
+    print("  • collect_wood: MINI 7, NANO 1 (MINI dominates)")
+    print("  • collect_sapling: MINI 4, NANO 1 (MINI dominates)")
+    print("  • eat_cow: MINI 1, NANO 0 (MINI only)")
+    print("  • All other achievements: 0 for both models")
+
+    print("\nPATTERNS OBSERVED:")
+    print("  1. MINI consistently outperforms NANO across almost all seeds")
+    print("  2. MINI achieves more complex combinations (e.g., collect_sapling + eat_cow)")
+    print("  3. NANO struggles with basic achievements (only 2 total vs MINI's 12)")
+    print("  4. Both models struggle with advanced achievements (iron, diamond, etc.)")
+    print("  5. MINI shows better exploration and resource gathering capabilities")
+
+    print("\nIMPLICATIONS:")
+    print("  • MINI demonstrates significantly better reasoning and planning")
+    print("  • MINI's larger context window may enable better multi-step planning")
+    print("  • NANO may be hitting context limits or reasoning limitations")
+    print("  • Both models struggle with complex crafting and combat achievements")
+    print("  • The performance gap is consistent across different environment seeds")
+
+    print("\nRANDOMNESS ANALYSIS:")
+    print("  • Seeds 43-52 were tested (10 different environments)")
+    print("  • MINI wins 9/10 = 90% win rate")
+    print("  • This suggests the performance difference is robust, not random")
+    print("  • Only instance 9 was a tie, suggesting MINI's advantage is consistent")
+
+if __name__ == "__main__":
+    print_summary()
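Both custom_eval_pipelines.py and plot_hook_frequency.py above read the same crafter_traces.duckdb layout: a session_traces table keyed by session_id and experiment_id, and an events table carrying event_type, event_time, metadata, and event_metadata. A short sketch for inspecting that database directly, assuming only the tables and columns referenced in the queries above (the experiment ID below is a placeholder), might look like:

import duckdb

# Assumes the trace layout used above: session_traces(session_id, experiment_id, metadata, ...)
# and events(session_id, event_type, event_time, metadata, event_metadata, ...).
conn = duckdb.connect("crafter_traces.duckdb")

# Count sessions per experiment.
for experiment_id, n_sessions in conn.execute(
    "SELECT experiment_id, COUNT(*) FROM session_traces GROUP BY experiment_id"
).fetchall():
    print(f"{experiment_id}: {n_sessions} sessions")

# Event counts by type for a single experiment.
rows = conn.execute(
    """
    SELECT e.event_type, COUNT(*)
    FROM events e
    JOIN session_traces st ON e.session_id = st.session_id
    WHERE st.experiment_id = ?
    GROUP BY e.event_type
    """,
    ["<experiment_id>"],
).fetchall()
print(rows)

conn.close()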