kaggle-environments: 1.23.3-py3-none-any.whl → 1.23.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kaggle-environments might be problematic.

Files changed (46)
  1. kaggle_environments/envs/open_spiel_env/games/repeated_poker/repeated_poker.js +2 -2
  2. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/components/getRepeatedPokerStateForStep.js +6 -6
  3. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/images/poker_chip_1.svg +22 -0
  4. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/images/poker_chip_10.svg +22 -0
  5. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/images/poker_chip_100.svg +48 -0
  6. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/images/poker_chip_25.svg +22 -0
  7. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/images/poker_chip_5.svg +22 -0
  8. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/repeated_poker_renderer.js +550 -331
  9. kaggle_environments/envs/werewolf/README.md +190 -0
  10. kaggle_environments/envs/werewolf/harness/__init__.py +0 -0
  11. kaggle_environments/envs/werewolf/harness/base.py +767 -0
  12. kaggle_environments/envs/werewolf/harness/litellm_models.yaml +51 -0
  13. kaggle_environments/envs/werewolf/harness/test_base.py +35 -0
  14. kaggle_environments/envs/werewolf/runner.py +146 -0
  15. kaggle_environments/envs/werewolf/scripts/__init__.py +0 -0
  16. kaggle_environments/envs/werewolf/scripts/add_audio.py +425 -0
  17. kaggle_environments/envs/werewolf/scripts/configs/audio/standard.yaml +24 -0
  18. kaggle_environments/envs/werewolf/scripts/configs/run/block_basic.yaml +102 -0
  19. kaggle_environments/envs/werewolf/scripts/configs/run/comprehensive.yaml +100 -0
  20. kaggle_environments/envs/werewolf/scripts/configs/run/roundrobin_discussion_DisableDoctorSelfSave_DisableDoctorConsecutiveSave_large.yaml +104 -0
  21. kaggle_environments/envs/werewolf/scripts/configs/run/roundrobin_discussion_large.yaml +103 -0
  22. kaggle_environments/envs/werewolf/scripts/configs/run/roundrobin_discussion_small.yaml +103 -0
  23. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard.yaml +103 -0
  24. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_DisableDoctorSelfSave_DisableDoctorConsecutiveSave.yaml +104 -0
  25. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_DisableDoctorSelfSave_SeerRevealTeam.yaml +105 -0
  26. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_DisableDoctorSelfSave_SeerRevealTeam_NightEliminationNoReveal_DayExileNoReveal.yaml +105 -0
  27. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_DisableDoctorSelfSave_SeerRevealTeam_NightEliminationRevealTeam_DayExileRevealTeam.yaml +105 -0
  28. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_disable_doctor_self_save.yaml +103 -0
  29. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_parallel_voting.yaml +103 -0
  30. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_parallel_voting_no_tie_exile.yaml +103 -0
  31. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_parallel_voting_roundbiddiscussion.yaml +105 -0
  32. kaggle_environments/envs/werewolf/scripts/configs/run/run_config.yaml +58 -0
  33. kaggle_environments/envs/werewolf/scripts/configs/run/vertex_api_example_config.yaml +115 -0
  34. kaggle_environments/envs/werewolf/scripts/measure_cost.py +251 -0
  35. kaggle_environments/envs/werewolf/scripts/plot_existing_trajectories.py +135 -0
  36. kaggle_environments/envs/werewolf/scripts/rerender_html.py +87 -0
  37. kaggle_environments/envs/werewolf/scripts/run.py +93 -0
  38. kaggle_environments/envs/werewolf/scripts/run_block.py +237 -0
  39. kaggle_environments/envs/werewolf/scripts/run_pairwise_matrix.py +222 -0
  40. kaggle_environments/envs/werewolf/scripts/self_play.py +196 -0
  41. kaggle_environments/envs/werewolf/scripts/utils.py +47 -0
  42. {kaggle_environments-1.23.3.dist-info → kaggle_environments-1.23.4.dist-info}/METADATA +1 -1
  43. {kaggle_environments-1.23.3.dist-info → kaggle_environments-1.23.4.dist-info}/RECORD +46 -8
  44. {kaggle_environments-1.23.3.dist-info → kaggle_environments-1.23.4.dist-info}/WHEEL +0 -0
  45. {kaggle_environments-1.23.3.dist-info → kaggle_environments-1.23.4.dist-info}/entry_points.txt +0 -0
  46. {kaggle_environments-1.23.3.dist-info → kaggle_environments-1.23.4.dist-info}/licenses/LICENSE +0 -0
kaggle_environments/envs/werewolf/harness/litellm_models.yaml
@@ -0,0 +1,51 @@
+ openrouter/deepseek/deepseek-chat-v3.1:
+   input_cost_per_token: 2e-7
+   output_cost_per_token: 8e-7
+ openrouter/openai/gpt-4o-mini:
+   input_cost_per_token: 1.5e-7
+   output_cost_per_token: 6e-7
+ openrouter/qwen/qwen3-235b-a22b-2507:
+   input_cost_per_token: 7.8e-8
+   output_cost_per_token: 3.12e-7
+ openrouter/z-ai/glm-4.5:
+   input_cost_per_token: 2e-7
+   output_cost_per_token: 8e-7
+ openrouter/openai/gpt-oss-120b:
+   input_cost_per_token: 7.2e-8
+   output_cost_per_token: 2.8e-7
+ openrouter/openai/gpt-oss-20b:
+   input_cost_per_token: 4e-8
+   output_cost_per_token: 1.5e-7
+ openrouter/qwen/qwen3-30b-a3b:
+   input_cost_per_token: 1e-7
+   output_cost_per_token: 3e-7
+ openrouter/openai/gpt-5:
+   input_cost_per_token: 1.25e-6
+   output_cost_per_token: 1e-5
+ openrouter/openai/gpt-4.1:
+   input_cost_per_token: 2e-6
+   output_cost_per_token: 8e-6
+ openrouter/anthropic/claude-sonnet-4:
+   input_cost_per_token: 3e-6
+   output_cost_per_token: 1.5e-5
+ openrouter/x-ai/grok-4:
+   input_cost_per_token: 3e-6
+   output_cost_per_token: 1.5e-5
+ openrouter/google/gemini-2.5-flash-lite:
+   input_cost_per_token: 1e-7
+   output_cost_per_token: 4e-7
+ openrouter/google/gemini-2.5-pro:
+   input_cost_per_token: 1.25e-6
+   output_cost_per_token: 1e-5
+ openrouter/google/gemini-2.5-flash:
+   input_cost_per_token: 3e-7
+   output_cost_per_token: 2.5e-6
+ vertex_ai/gemini-2.5-pro:
+   input_cost_per_token: 1.25e-6
+   output_cost_per_token: 1e-5
+ vertex_ai/gemini-2.5-flash:
+   input_cost_per_token: 3e-7
+   output_cost_per_token: 2.5e-6
+ vertex_ai/gemini-2.5-flash-lite:
+   input_cost_per_token: 1e-7
+   output_cost_per_token: 4e-7
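The new harness/litellm_models.yaml maps each litellm model identifier to per-token USD rates (scientific notation, e.g. 2e-7 is $0.0000002 per token). As a rough illustration of how such a table can be consumed, the sketch below loads the YAML and prices a single call; the `estimate_cost` helper and the token counts are hypothetical, not code from measure_cost.py.

```python
# Minimal sketch (not part of the package): estimate a call's cost from the
# per-token rates in litellm_models.yaml. Token counts are illustrative.
import yaml

with open("litellm_models.yaml") as f:
    rates = yaml.safe_load(f)


def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    r = rates[model]
    return prompt_tokens * r["input_cost_per_token"] + completion_tokens * r["output_cost_per_token"]


# e.g. 1,200 prompt tokens and 300 completion tokens on gpt-4o-mini
print(estimate_cost("openrouter/openai/gpt-4o-mini", 1200, 300))  # ~0.00036 USD
```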
kaggle_environments/envs/werewolf/harness/test_base.py
@@ -0,0 +1,35 @@
+ import json
+ import os
+
+ import litellm
+ import pytest
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+
+ @pytest.mark.skip("Require the key to run test.")
+ def test_vertex_ai():
+     model = "vertex_ai/deepseek-ai/deepseek-r1-0528-maas"
+     file_path = os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
+     with open(file_path, "r") as file:
+         vertex_credentials = json.load(file)
+
+     vertex_credentials_json = json.dumps(vertex_credentials)
+
+     response = litellm.completion(
+         model=model,
+         messages=[{"role": "user", "content": "hi"}],
+         temperature=0.7,
+         vertex_ai_project=os.environ["VERTEXAI_PROJECT"],
+         vertex_ai_location=os.environ["VERTEXAI_LOCATION"],
+         vertex_credentials=vertex_credentials_json,
+     )
+     print(response)
+
+
+ @pytest.mark.skip("Require the key to run test.")
+ def test_together():
+     model = "together_ai/deepseek-ai/DeepSeek-R1"
+     response = litellm.completion(model=model, messages=[{"role": "user", "content": "hi"}])
+     print(response)
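Both tests above are skipped unconditionally because they need live API credentials. One possible variant, not what the package ships, is to gate the skip on the environment instead, so the tests run automatically wherever the keys are configured:

```python
# Hypothetical variant: skip only when the Vertex AI credentials are absent.
import os

import pytest

requires_vertex = pytest.mark.skipif(
    "GOOGLE_APPLICATION_CREDENTIALS" not in os.environ,
    reason="Vertex AI credentials are not configured.",
)


@requires_vertex
def test_vertex_ai():
    ...  # same body as above
```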
kaggle_environments/envs/werewolf/runner.py
@@ -0,0 +1,146 @@
+ import logging
+ import os
+ import random
+ import subprocess
+ import time
+ from datetime import datetime
+
+ from kaggle_environments import PROJECT_ROOT, make
+
+ logger = logging.getLogger(__name__)
+
+
+ class LogExecutionTime:
+     """
+     A context manager to log the execution time of a code block.
+     The elapsed time is stored in the `elapsed_time` attribute.
+
+     Example:
+         logger = logging.getLogger(__name__)
+         with LogExecutionTime(logger, "My Task") as timer:
+             # Code to be timed
+             time.sleep(1)
+         print(f"Task took {timer.elapsed_time:.2f} seconds.")
+         print(f"Formatted time: {timer.elapsed_time_formatted()}")
+     """
+
+     def __init__(self, logger_obj: logging.Logger, task_str: str):
+         """
+         Initializes the context manager.
+
+         Args:
+             logger_obj: The logger instance to use for output.
+             task_str: A descriptive string for the task being timed.
+         """
+         self.logger = logger_obj
+         self.task_str = task_str
+         self.start_time = None
+         self.elapsed_time = 0.0
+
+     def __enter__(self):
+         """Records the start time when entering the context."""
+         self.start_time = time.time()
+         self.logger.info(f"Starting: {self.task_str}")
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         """Calculates and logs the elapsed time upon exiting the context."""
+         end_time = time.time()
+         self.elapsed_time = end_time - self.start_time
+         self.logger.info(f"Finished: {self.task_str} in {self.elapsed_time_formatted()}.")
+
+     def elapsed_time_formatted(self) -> str:
+         """Returns the elapsed time as a formatted string (HH:MM:SS)."""
+         return time.strftime("%H:%M:%S", time.gmtime(self.elapsed_time))
+
+
+ def append_timestamp_to_dir(dir_path, append=True):
+     if not append:
+         return dir_path
+     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+     out = dir_path + f"_{timestamp}"
+     return out
+
+
+ def shuffle_roles_inplace(config):
+     agents = config["agents"]
+     roles = [agent["role"] for agent in agents]
+     random.shuffle(roles)
+     for new_role, agent in zip(roles, agents):
+         agent["role"] = new_role
+
+
+ def run_werewolf(output_dir, base_name, config, agents, debug):
+     """
+     Runs a game of Werewolf, saves the replay, and logs the execution time.
+
+     Args:
+         output_dir (str): The directory where the output files will be saved.
+         base_name (str): The base name for the output files (HTML, JSON).
+         config (dict): The configuration for the Werewolf environment.
+         agents (list): A list of agents to participate in the game.
+         debug (bool): A flag to enable or disable debug mode.
+     """
+     start_time = time.time()
+     logger.info(f"Results saved to {output_dir}.")
+     os.makedirs(output_dir, exist_ok=True)
+     html_file = os.path.join(output_dir, f"{base_name}.html")
+     json_file = os.path.join(output_dir, f"{base_name}.json")
+
+     with LogExecutionTime(logger_obj=logger, task_str="env run") as timer:
+         env = make("werewolf", debug=debug, configuration=config)
+         env.run(agents)
+
+     env.info["total_run_time"] = timer.elapsed_time
+     env.info["total_run_time_formatted"] = timer.elapsed_time_formatted()
+
+     logger.info("Game finished")
+     env_out = env.render(mode="html")
+     with open(html_file, "w") as out:
+         out.write(env_out)
+     logger.info(f"HTML replay written to {html_file}")
+     env_out = env.render(mode="json")
+     with open(json_file, "w") as out:
+         out.write(env_out)
+     logger.info(f"JSON replay written to {json_file}")
+     end_time = time.time()
+     elapsed_time = end_time - start_time
+     formatted_time = time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
+     logger.info(f"Script finished in {formatted_time}.")
+     return env
+
+
+ def setup_logger(output_dir, base_name):
+     """
+     Sets up a logger to output to both the console and a log file.
+
+     Args:
+         output_dir (str): The directory where the log file will be saved.
+         base_name (str): The base name for the log file.
+     """
+     log_file = os.path.join(output_dir, f"{base_name}.log")
+     os.makedirs(output_dir, exist_ok=True)
+     handlers = [logging.StreamHandler(), logging.FileHandler(log_file, mode="w")]
+     logging.basicConfig(
+         level=logging.INFO,
+         format="%(asctime)s - %(levelname)s - %(message)s",
+         handlers=handlers,
+     )
+
+
+ def log_git_hash():
+     try:
+         result = subprocess.run(
+             ["git", "rev-parse", "HEAD"],
+             cwd=PROJECT_ROOT,
+             capture_output=True,
+             text=True,
+             check=False,  # Don't raise exception on non-zero exit code
+         )
+         if result.returncode == 0:
+             git_hash = result.stdout.strip()
+             logger.info(f"Running from git commit: {git_hash}")
+         else:
+             logger.info("Not a git repository or git command failed.")
+     except FileNotFoundError:
+         logger.info("Git command not found.")
kaggle_environments/envs/werewolf/scripts/__init__.py — file without changes
kaggle_environments/envs/werewolf/scripts/add_audio.py
@@ -0,0 +1,425 @@
+ import argparse
+ import hashlib
+ import http.server
+ import json
+ import logging
+ import os
+ import socketserver
+ import wave
+
+ import yaml
+ from dotenv import load_dotenv
+ from google import genai
+ from google.api_core.exceptions import GoogleAPICallError
+ from google.cloud import texttospeech
+ from google.genai import types
+
+ from kaggle_environments.envs.werewolf.game.consts import EventName
+ from kaggle_environments.envs.werewolf.runner import setup_logger
+
+ logger = logging.getLogger(__name__)
+
+
+ def load_config(config_path):
+     """Loads the configuration from a YAML file."""
+     with open(config_path, "r") as f:
+         return yaml.safe_load(f)
+
+
+ def wave_file(filename, pcm, channels=1, rate=24000, sample_width=2):
+     """Saves PCM audio data to a WAV file."""
+     with wave.open(filename, "wb") as wf:
+         wf.setnchannels(channels)
+         wf.setsampwidth(sample_width)
+         wf.setframerate(rate)
+         wf.writeframes(pcm)
+
+
+ def get_tts_audio_genai(client, text: str, voice_name: str) -> bytes | None:
+     """Fetches TTS audio from Gemini API."""
+     if not text or not client:
+         return None
+     try:
+         response = client.models.generate_content(
+             model="gemini-2.5-flash-preview-tts",
+             contents=text,
+             config=types.GenerateContentConfig(
+                 response_modalities=["AUDIO"],
+                 speech_config=types.SpeechConfig(
+                     voice_config=types.VoiceConfig(
+                         prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name=voice_name)
+                     )
+                 ),
+             ),
+         )
+         return response.candidates[0].content.parts[0].inline_data.data
+     except (GoogleAPICallError, ValueError) as e:
+         logger.error(f" - Error generating audio for '{text[:30]}...': {e}")
+         return None
+
+
+ def get_tts_audio_vertex(
+     client, text: str, voice_name: str, model_name: str = "gemini-2.5-flash-preview-tts"
+ ) -> bytes | None:
+     """Fetches TTS audio from Vertex AI API."""
+     if not text or not client:
+         return None
+     try:
+         synthesis_input = texttospeech.SynthesisInput(text=text)
+
+         voice = texttospeech.VoiceSelectionParams(language_code="en-US", name=voice_name, model_name=model_name)
+
+         audio_config = texttospeech.AudioConfig(audio_encoding=texttospeech.AudioEncoding.MP3, sample_rate_hertz=24000)
+
+         response = client.synthesize_speech(
+             input=synthesis_input,
+             voice=voice,
+             audio_config=audio_config,
+         )
+         return response.audio_content
+     except (GoogleAPICallError, ValueError) as e:
+         logger.error(f" - Error generating audio using Vertex AI for '{text[:30]}...': {e}")
+         return None
+
+
+ def extract_game_data_from_json(replay_json):
+     """Extracts dialogue and events from a replay JSON object."""
+     logger.info("Extracting game data from replay...")
+     unique_speaker_messages = set()
+     dynamic_moderator_messages = set()
+     moderator_log_steps = replay_json.get("info", {}).get("MODERATOR_OBSERVATION", [])
+
+     for step_log in moderator_log_steps:
+         for data_entry in step_log:
+             # We must read from 'json_str' to match the werewolf.js renderer
+             json_str = data_entry.get("json_str")
+             data_type = data_entry.get("data_type")  # We still need this for filtering
+
+             try:
+                 # Parse the event data from the json_str, just like the JS does
+                 event = json.loads(json_str)
+                 data = event.get("data", {})  # Get the data payload from inside the parsed event
+                 event_name = event.get("event_name")
+                 description = event.get("description", "")
+                 day_count = event.get("day")
+
+             except json.JSONDecodeError as e:
+                 logger.warning(f" - Skipping log entry, failed to parse json_str: {e}")
+                 continue
+
+             # This logic below remains the same, but it now correctly uses
+             # the 'data' payload from the parsed 'json_str'.
+             if data_type == "ChatDataEntry":
+                 if data.get("actor_id") and data.get("message"):
+                     unique_speaker_messages.add((data["actor_id"], data["message"]))
+             elif data_type == "DayExileVoteDataEntry":
+                 if data.get("actor_id") and data.get("target_id"):
+                     dynamic_moderator_messages.add(f"{data['actor_id']} votes to exile {data['target_id']}.")
+             elif data_type == "WerewolfNightVoteDataEntry":
+                 if data.get("actor_id") and data.get("target_id"):
+                     dynamic_moderator_messages.add(f"{data['actor_id']} votes to eliminate {data['target_id']}.")
+             elif data_type == "SeerInspectActionDataEntry":
+                 if data.get("actor_id") and data.get("target_id"):
+                     dynamic_moderator_messages.add(f"{data['actor_id']} inspects {data['target_id']}.")
+             elif data_type == "DoctorHealActionDataEntry":
+                 if data.get("actor_id") and data.get("target_id"):
+                     dynamic_moderator_messages.add(f"{data['actor_id']} heals {data['target_id']}.")
+             elif data_type == "DayExileElectedDataEntry":
+                 if all(k in data for k in ["elected_player_id", "elected_player_role_name"]):
+                     dynamic_moderator_messages.add(
+                         f"{data['elected_player_id']} was exiled by vote. Their role was a {data['elected_player_role_name']}."
+                     )
+             elif data_type == "WerewolfNightEliminationDataEntry":
+                 if all(k in data for k in ["eliminated_player_id", "eliminated_player_role_name"]):
+                     dynamic_moderator_messages.add(
+                         f"{data['eliminated_player_id']} was eliminated. Their role was a {data['eliminated_player_role_name']}."
+                     )
+             elif data_type == "DoctorSaveDataEntry":
+                 if "saved_player_id" in data:
+                     dynamic_moderator_messages.add(f"{data['saved_player_id']} was attacked but saved by a Doctor!")
+             elif data_type == "SeerInspectResultDataEntry":
+                 if data.get("role"):
+                     dynamic_moderator_messages.add(
+                         f"{data['actor_id']} saw {data['target_id']}'s role is {data['role']}."
+                     )
+                 elif data.get("team"):
+                     dynamic_moderator_messages.add(
+                         f"{data['actor_id']} saw {data['target_id']}'s team is {data['team']}."
+                     )
+             elif data_type == "GameEndResultsDataEntry":
+                 if "winner_team" in data:
+                     dynamic_moderator_messages.add(f"The game is over. The {data['winner_team']} team has won!")
+             elif data_type == "WerewolfNightEliminationElectedDataEntry":
+                 if "elected_target_player_id" in data:
+                     dynamic_moderator_messages.add(
+                         f"The werewolves have chosen to eliminate {data['elected_target_player_id']}."
+                     )
+             elif event_name == EventName.DAY_START:
+                 dynamic_moderator_messages.add(f"Day {day_count} begins!")
+             elif event_name == EventName.NIGHT_START:
+                 dynamic_moderator_messages.add(f"Night {day_count} begins!")
+             elif event_name == EventName.MODERATOR_ANNOUNCEMENT:
+                 if "discussion rule is" in description:
+                     dynamic_moderator_messages.add("Discussion begins!")
+                 elif "Voting phase begins" in description:
+                     dynamic_moderator_messages.add("Exile voting begins!")
+
+     logger.info(f"Found {len(unique_speaker_messages)} unique player messages.")
+     logger.info(f"Found {len(dynamic_moderator_messages)} dynamic moderator messages.")
+     return unique_speaker_messages, dynamic_moderator_messages
+
+
+ def generate_audio_files(
+     client,
+     tts_provider,
+     unique_speaker_messages,
+     dynamic_moderator_messages,
+     player_voice_map,
+     audio_config,
+     output_dir,
+ ):
+     """Generates and saves all required audio files, returning a map for the HTML."""
+     logger.info("Extracting dialogue and generating audio files...")
+     audio_map = {}
+     paths = audio_config["paths"]
+     audio_dir = os.path.join(output_dir, paths["audio_dir_name"])
+     moderator_voice = audio_config["voices"]["moderator"]
+     static_moderator_messages = audio_config["audio"]["static_moderator_messages"]
+
+     messages_to_generate = []
+     for key, message in static_moderator_messages.items():
+         messages_to_generate.append(("moderator", key, message, moderator_voice))
+     for message in dynamic_moderator_messages:
+         messages_to_generate.append(("moderator", message, message, moderator_voice))
+     for speaker_id, message in unique_speaker_messages:
+         voice = player_voice_map.get(speaker_id)
+         if voice:
+             messages_to_generate.append((speaker_id, message, message, voice))
+         else:
+             logger.warning(f" - Warning: No voice found for speaker: {speaker_id}")
+
+     for speaker, key, message, voice in messages_to_generate:
+         map_key = f"{speaker}:{key}"
+         filename = hashlib.md5(map_key.encode()).hexdigest() + ".wav"
+         audio_path_on_disk = os.path.join(audio_dir, filename)
+         audio_path_for_html = os.path.join(paths["audio_dir_name"], filename)
+
+         if not os.path.exists(audio_path_on_disk):
+             logger.info(f' - Generating audio for {speaker} ({voice}): "{message[:40]}..." ')
+             audio_content = None
+             if tts_provider == "vertex_ai":
+                 model_name = audio_config.get("vertex_ai_model", "gemini-2.5-flash-preview-tts")
+                 audio_content = get_tts_audio_vertex(client, message, voice_name=voice, model_name=model_name)
+             else:  # google_genai
+                 audio_content = get_tts_audio_genai(client, message, voice_name=voice)
+
+             if audio_content:
+                 wave_file(audio_path_on_disk, audio_content)
+                 audio_map[map_key] = audio_path_for_html
+         else:
+             audio_map[map_key] = audio_path_for_html
+
+     return audio_map
+
+
+ def generate_debug_audio_files(
+     output_dir, client, tts_provider, unique_speaker_messages, dynamic_moderator_messages, audio_config
+ ):
+     """Generates a single debug audio file and maps all events to it."""
+     logger.info("Generating single debug audio for UI testing...")
+     paths = audio_config["paths"]
+     debug_audio_dir = os.path.join(output_dir, paths["debug_audio_dir_name"])
+     os.makedirs(debug_audio_dir, exist_ok=True)
+     audio_map = {}
+
+     debug_message = "Testing start, testing end."
+     filename = "debug_audio.wav"
+     audio_path_on_disk = os.path.join(debug_audio_dir, filename)
+     audio_path_for_html = os.path.join(paths["debug_audio_dir_name"], filename)
+
+     if not os.path.exists(audio_path_on_disk):
+         logger.info(f' - Generating debug audio: "{debug_message}"')
+         audio_content = None
+         if tts_provider == "vertex_ai":
+             model_name = audio_config.get("vertex_ai_model", "gemini-2.5-flash-preview-tts")
+             debug_voice = "Charon"
+             audio_content = get_tts_audio_vertex(client, debug_message, voice_name=debug_voice, model_name=model_name)
+         else:
+             debug_voice = "achird"
+             audio_content = get_tts_audio_genai(client, debug_message, voice_name=debug_voice)
+
+         if audio_content:
+             wave_file(audio_path_on_disk, audio_content)
+         else:
+             logger.error(" - Failed to generate debug audio. The map will be empty.")
+             return {}
+     else:
+         logger.info(f" - Using existing debug audio file: {audio_path_on_disk}")
+
+     static_moderator_messages = audio_config["audio"]["static_moderator_messages"]
+
+     messages_to_map = []
+     for key in static_moderator_messages:
+         messages_to_map.append(("moderator", key))
+     for message in dynamic_moderator_messages:
+         messages_to_map.append(("moderator", message))
+     for speaker_id, message in unique_speaker_messages:
+         messages_to_map.append((speaker_id, message))
+
+     for speaker, key in messages_to_map:
+         map_key = f"{speaker}:{key}"
+         audio_map[map_key] = audio_path_for_html
+
+     logger.info(f" - Mapped all {len(audio_map)} audio events to '{audio_path_for_html}'")
+     return audio_map
+
+
+ def render_html(existing_html_path, audio_map, output_file):
+     """Reads an existing HTML replay, injects the audio map, and saves it."""
+     logger.info(f"Reading existing HTML from: {existing_html_path}")
+     with open(existing_html_path, "r", encoding="utf-8") as f:
+         html_content = f.read()
+
+     logger.info("Injecting the local audio map into the HTML...")
+     audio_map_json = json.dumps(audio_map)
+     injection_script = f"<script>window.AUDIO_MAP = {audio_map_json};</script>"
+     html_content = html_content.replace("</head>", f"{injection_script}</head>")
+
+     with open(output_file, "w", encoding="utf-8") as f:
+         f.write(html_content)
+     logger.info(f"Successfully generated audio-enabled HTML at: {output_file}")
+
+
+ def start_server(directory, port, filename):
+     """Starts a local HTTP server to serve the replay."""
+     logger.info(f"\nStarting local server to serve from the '{directory}' directory...")
+
+     class Handler(http.server.SimpleHTTPRequestHandler):
+         def __init__(self, *args, **kwargs):
+             super().__init__(*args, directory=directory, **kwargs)
+
+     with socketserver.TCPServer(("", port), Handler) as httpd:
+         print(f"\nServing replay at: http://localhost:{port}/{filename}")
+         print("Open this URL in your web browser.")
+         print(f"Or you can zip the '{directory}' directory and share it.")
+         print("Press Ctrl+C to stop the server.")
+         try:
+             httpd.serve_forever()
+         except KeyboardInterrupt:
+             print("\nServer stopped.")
+
+
+ def main():
+     """Main function to add audio to a Werewolf replay."""
+     parser = argparse.ArgumentParser(description="Add audio to a Werewolf game replay.")
+     parser.add_argument(
+         "-i", "--run_dir", type=str, required=True, help="Path to the directory of a game run generated by run.py."
+     )
+     parser.add_argument(
+         "-o",
+         "--output_dir",
+         type=str,
+         help="Output directory for the audio-enabled replay. Defaults to 'werewolf_replay_audio' inside the run directory.",
+     )
+     parser.add_argument(
+         "-c",
+         "--config_path",
+         type=str,
+         default=os.path.join(os.path.dirname(__file__), "configs/audio/standard.yaml"),
+         help="Path to the audio configuration YAML file.",
+     )
+     parser.add_argument(
+         "--debug-audio", action="store_true", help="Generate a single debug audio file for all events for UI testing."
+     )
+     parser.add_argument(
+         "--serve", action="store_true", help="Start a local HTTP server to view the replay after generation."
+     )
+     parser.add_argument(
+         "--tts-provider",
+         type=str,
+         default="vertex_ai",
+         choices=["vertex_ai", "google_genai"],
+         help="The TTS provider to use for audio synthesis.",
+     )
+     args = parser.parse_args()
+
+     if not args.output_dir:
+         args.output_dir = os.path.join(args.run_dir, "werewolf_replay_audio")
+
+     os.makedirs(args.output_dir, exist_ok=True)
+     setup_logger(output_dir=args.output_dir, base_name="add_audio")
+
+     logger.info(f"Loading audio config from: {args.config_path}")
+     audio_config = load_config(args.config_path)
+
+     replay_json_path = os.path.join(args.run_dir, "werewolf_game.json")
+     logger.info(f"Loading game replay from: {replay_json_path}")
+     if not os.path.exists(replay_json_path):
+         logger.error(f"Replay file not found: {replay_json_path}")
+         logger.error("Please ensure you provide a valid run directory created by run.py.")
+         return
+     with open(replay_json_path, "r") as f:
+         replay_data = json.load(f)
+
+     game_config = replay_data["configuration"]
+     player_voices = audio_config["voices"]["players"]
+     player_voice_map = {
+         agent_config["id"]: player_voices.get(agent_config["id"]) for agent_config in game_config["agents"]
+     }
+
+     load_dotenv()
+     client = None
+     if args.tts_provider == "vertex_ai":
+         if not os.getenv("GOOGLE_CLOUD_PROJECT"):
+             logger.error("Error: GOOGLE_CLOUD_PROJECT environment variable not found. It is required for Vertex AI.")
+             return
+         try:
+             client = texttospeech.TextToSpeechClient()
+         except Exception as e:
+             logger.error(f"Failed to initialize Vertex AI client: {e}")
+             logger.error("Please ensure you have authenticated with 'gcloud auth application-default login'")
+             return
+     else:  # google_genai
+         if not os.getenv("GEMINI_API_KEY"):
+             logger.error(
+                 "Error: GEMINI_API_KEY environment variable not found. Audio generation with google.genai requires it."
+             )
+             return
+         client = genai.Client()
+
+     unique_speaker_messages, dynamic_moderator_messages = extract_game_data_from_json(replay_data)
+
+     paths = audio_config["paths"]
+     audio_dir = os.path.join(args.output_dir, paths["audio_dir_name"])
+     os.makedirs(audio_dir, exist_ok=True)
+
+     if args.debug_audio:
+         audio_map = generate_debug_audio_files(
+             args.output_dir,
+             client,
+             args.tts_provider,
+             unique_speaker_messages,
+             dynamic_moderator_messages,
+             audio_config,
+         )
+     else:
+         audio_map = generate_audio_files(
+             client,
+             args.tts_provider,
+             unique_speaker_messages,
+             dynamic_moderator_messages,
+             player_voice_map,
+             audio_config,
+             args.output_dir,
+         )
+
+     original_html_path = os.path.join(args.run_dir, "werewolf_game.html")
+     output_html_file = os.path.join(args.output_dir, paths["output_html_filename"])
+     render_html(original_html_path, audio_map, output_html_file)
+
+     if args.serve:
+         start_server(args.output_dir, audio_config["server"]["port"], paths["output_html_filename"])
+
+
+ if __name__ == "__main__":
+     main()
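For reference, the audio map that render_html injects as window.AUDIO_MAP is keyed by "<speaker>:<message-or-config-key>", and each generated clip is written under the configured audio/ directory with an md5-derived filename, exactly as in generate_audio_files above. A small illustration of the scheme (the sample key is one of the static messages defined in the standard.yaml below):

```python
# Key/filename scheme used by generate_audio_files; values here are illustrative.
import hashlib

map_key = "moderator:night_begins"  # a static message key from configs/audio/standard.yaml
filename = hashlib.md5(map_key.encode()).hexdigest() + ".wav"
audio_map_entry = {map_key: f"audio/{filename}"}  # what ends up in window.AUDIO_MAP
print(audio_map_entry)
```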
kaggle_environments/envs/werewolf/scripts/configs/audio/standard.yaml
@@ -0,0 +1,24 @@
+ # Settings for the dump_audio.py script
+ server:
+   port: 7999
+ paths:
+   audio_dir_name: "audio"
+   debug_audio_dir_name: "debug_audio"
+   output_html_filename: "replay.html"
+ voices:
+   moderator: "enceladus"
+   players:
+     Kai: 'Kore'
+     Jordan: 'Charon'
+     Charlie: 'Leda'
+     Taylor: 'Despina'
+     Alex: 'Erinome'
+     Jamie: 'Gacrux'
+     Quinn: 'Achird'
+     Casey: 'Puck'
+ audio:
+   static_moderator_messages:
+     night_begins: "(rate=\"fast\", volume=\"soft\", voice=\"mysterious\")[As darkness descends, the village falls silent.](rate=\"medium\", pitch=\"-2st\")[Everyone, close your eyes.]"
+     day_begins: "(rate=\"fast\", volume=\"loud\")[Wake up, villagers!] (rate=\"medium\", voice=\"neutral\")[The sun rises on a new day.] (break=\"50ms\") (rate=\"medium\", voice=\"somber\")[Let's see who survived the night.]"
+     discussion_begins: "(voice=\"authoritative\")[The town meeting now begins.] (voice=\"neutral\")[You have a few minutes to discuss and find the werewolves among you.] (voice=\"authoritative\")[Begin.]"
+     voting_begins: "(rate=\"slow\", voice=\"serious\")[The time for talk is over.] (break=\"50ms\") (rate=\"medium\", volume=\"loud\", voice=\"dramatic\")[Now, you must cast your votes!]"