kaggle-environments 1.22.6__py3-none-any.whl → 1.24.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kaggle-environments might be problematic.

Files changed (68)
  1. kaggle_environments/envs/connectx/visualizer/default/index.html +13 -0
  2. kaggle_environments/envs/connectx/visualizer/default/package.json +22 -0
  3. kaggle_environments/envs/connectx/visualizer/default/replays/test-replay.json +1129 -0
  4. kaggle_environments/envs/connectx/visualizer/default/src/main.ts +12 -0
  5. kaggle_environments/envs/connectx/visualizer/default/src/renderer.ts +396 -0
  6. kaggle_environments/envs/connectx/visualizer/default/src/style.css +38 -0
  7. kaggle_environments/envs/connectx/visualizer/default/tsconfig.json +4 -0
  8. kaggle_environments/envs/connectx/visualizer/default/vite.config.ts +7 -0
  9. kaggle_environments/envs/open_spiel_env/games/repeated_poker/repeated_poker.js +163 -88
  10. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/index.html +13 -0
  11. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/package.json +23 -0
  12. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/replays/test-replay.json +1 -0
  13. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/scripts/print_first_steps.mjs +202 -0
  14. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/scripts/print_replay.mjs +215 -0
  15. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/scripts/print_steps_with_end_states.mjs +234 -0
  16. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/components/getRepeatedPokerStateForStep.js +260 -0
  17. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/components/utils.ts +61 -0
  18. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/debug_repeated_poker_renderer.ts +49 -0
  19. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/images/poker_chip_1.svg +22 -0
  20. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/images/poker_chip_10.svg +22 -0
  21. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/images/poker_chip_100.svg +48 -0
  22. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/images/poker_chip_25.svg +22 -0
  23. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/images/poker_chip_5.svg +22 -0
  24. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/main.ts +36 -0
  25. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/repeated_poker_renderer.ts +573 -0
  26. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/src/style.css +594 -0
  27. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/tsconfig.json +7 -0
  28. kaggle_environments/envs/open_spiel_env/games/repeated_poker/visualizer/default/vite.config.ts +6 -0
  29. kaggle_environments/envs/werewolf/README.md +190 -0
  30. kaggle_environments/envs/werewolf/harness/__init__.py +0 -0
  31. kaggle_environments/envs/werewolf/harness/base.py +773 -0
  32. kaggle_environments/envs/werewolf/harness/litellm_models.yaml +51 -0
  33. kaggle_environments/envs/werewolf/harness/main.py +54 -0
  34. kaggle_environments/envs/werewolf/harness/test_base.py +35 -0
  35. kaggle_environments/envs/werewolf/runner.py +146 -0
  36. kaggle_environments/envs/werewolf/scripts/__init__.py +0 -0
  37. kaggle_environments/envs/werewolf/scripts/add_audio.py +425 -0
  38. kaggle_environments/envs/werewolf/scripts/configs/audio/standard.yaml +24 -0
  39. kaggle_environments/envs/werewolf/scripts/configs/run/block_basic.yaml +102 -0
  40. kaggle_environments/envs/werewolf/scripts/configs/run/comprehensive.yaml +100 -0
  41. kaggle_environments/envs/werewolf/scripts/configs/run/roundrobin_discussion_DisableDoctorSelfSave_DisableDoctorConsecutiveSave_large.yaml +104 -0
  42. kaggle_environments/envs/werewolf/scripts/configs/run/roundrobin_discussion_large.yaml +103 -0
  43. kaggle_environments/envs/werewolf/scripts/configs/run/roundrobin_discussion_small.yaml +103 -0
  44. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard.yaml +103 -0
  45. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_DisableDoctorSelfSave_DisableDoctorConsecutiveSave.yaml +104 -0
  46. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_DisableDoctorSelfSave_SeerRevealTeam.yaml +105 -0
  47. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_DisableDoctorSelfSave_SeerRevealTeam_NightEliminationNoReveal_DayExileNoReveal.yaml +105 -0
  48. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_DisableDoctorSelfSave_SeerRevealTeam_NightEliminationRevealTeam_DayExileRevealTeam.yaml +105 -0
  49. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_disable_doctor_self_save.yaml +103 -0
  50. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_parallel_voting.yaml +103 -0
  51. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_parallel_voting_no_tie_exile.yaml +103 -0
  52. kaggle_environments/envs/werewolf/scripts/configs/run/rule_experiment/standard_parallel_voting_roundbiddiscussion.yaml +105 -0
  53. kaggle_environments/envs/werewolf/scripts/configs/run/run_config.yaml +58 -0
  54. kaggle_environments/envs/werewolf/scripts/configs/run/vertex_api_example_config.yaml +115 -0
  55. kaggle_environments/envs/werewolf/scripts/measure_cost.py +251 -0
  56. kaggle_environments/envs/werewolf/scripts/plot_existing_trajectories.py +135 -0
  57. kaggle_environments/envs/werewolf/scripts/rerender_html.py +87 -0
  58. kaggle_environments/envs/werewolf/scripts/run.py +93 -0
  59. kaggle_environments/envs/werewolf/scripts/run_block.py +237 -0
  60. kaggle_environments/envs/werewolf/scripts/run_pairwise_matrix.py +222 -0
  61. kaggle_environments/envs/werewolf/scripts/self_play.py +196 -0
  62. kaggle_environments/envs/werewolf/scripts/utils.py +47 -0
  63. kaggle_environments/envs/werewolf/werewolf.json +1 -1
  64. {kaggle_environments-1.22.6.dist-info → kaggle_environments-1.24.3.dist-info}/METADATA +1 -1
  65. {kaggle_environments-1.22.6.dist-info → kaggle_environments-1.24.3.dist-info}/RECORD +68 -7
  66. {kaggle_environments-1.22.6.dist-info → kaggle_environments-1.24.3.dist-info}/WHEEL +0 -0
  67. {kaggle_environments-1.22.6.dist-info → kaggle_environments-1.24.3.dist-info}/entry_points.txt +0 -0
  68. {kaggle_environments-1.22.6.dist-info → kaggle_environments-1.24.3.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,425 @@
+ import argparse
+ import hashlib
+ import http.server
+ import json
+ import logging
+ import os
+ import socketserver
+ import wave
+
+ import yaml
+ from dotenv import load_dotenv
+ from google import genai
+ from google.api_core.exceptions import GoogleAPICallError
+ from google.cloud import texttospeech
+ from google.genai import types
+
+ from kaggle_environments.envs.werewolf.game.consts import EventName
+ from kaggle_environments.envs.werewolf.runner import setup_logger
+
+ logger = logging.getLogger(__name__)
+
+
+ def load_config(config_path):
+     """Loads the configuration from a YAML file."""
+     with open(config_path, "r") as f:
+         return yaml.safe_load(f)
+
+
+ def wave_file(filename, pcm, channels=1, rate=24000, sample_width=2):
+     """Saves PCM audio data to a WAV file."""
+     with wave.open(filename, "wb") as wf:
+         wf.setnchannels(channels)
+         wf.setsampwidth(sample_width)
+         wf.setframerate(rate)
+         wf.writeframes(pcm)
+
+
+ def get_tts_audio_genai(client, text: str, voice_name: str) -> bytes | None:
+     """Fetches TTS audio from Gemini API."""
+     if not text or not client:
+         return None
+     try:
+         response = client.models.generate_content(
+             model="gemini-2.5-flash-preview-tts",
+             contents=text,
+             config=types.GenerateContentConfig(
+                 response_modalities=["AUDIO"],
+                 speech_config=types.SpeechConfig(
+                     voice_config=types.VoiceConfig(
+                         prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name=voice_name)
+                     )
+                 ),
+             ),
+         )
+         return response.candidates[0].content.parts[0].inline_data.data
+     except (GoogleAPICallError, ValueError) as e:
+         logger.error(f" - Error generating audio for '{text[:30]}...': {e}")
+         return None
+
+
+ def get_tts_audio_vertex(
+     client, text: str, voice_name: str, model_name: str = "gemini-2.5-flash-preview-tts"
+ ) -> bytes | None:
+     """Fetches TTS audio from Vertex AI API."""
+     if not text or not client:
+         return None
+     try:
+         synthesis_input = texttospeech.SynthesisInput(text=text)
+
+         voice = texttospeech.VoiceSelectionParams(language_code="en-US", name=voice_name, model_name=model_name)
+
+         audio_config = texttospeech.AudioConfig(audio_encoding=texttospeech.AudioEncoding.MP3, sample_rate_hertz=24000)
+
+         response = client.synthesize_speech(
+             input=synthesis_input,
+             voice=voice,
+             audio_config=audio_config,
+         )
+         return response.audio_content
+     except (GoogleAPICallError, ValueError) as e:
+         logger.error(f" - Error generating audio using Vertex AI for '{text[:30]}...': {e}")
+         return None
+
+
+ def extract_game_data_from_json(replay_json):
+     """Extracts dialogue and events from a replay JSON object."""
+     logger.info("Extracting game data from replay...")
+     unique_speaker_messages = set()
+     dynamic_moderator_messages = set()
+     moderator_log_steps = replay_json.get("info", {}).get("MODERATOR_OBSERVATION", [])
+
+     for step_log in moderator_log_steps:
+         for data_entry in step_log:
+             # We must read from 'json_str' to match the werewolf.js renderer
+             json_str = data_entry.get("json_str")
+             data_type = data_entry.get("data_type") # We still need this for filtering
+
+             try:
+                 # Parse the event data from the json_str, just like the JS does
+                 event = json.loads(json_str)
+                 data = event.get("data", {}) # Get the data payload from inside the parsed event
+                 event_name = event.get("event_name")
+                 description = event.get("description", "")
+                 day_count = event.get("day")
+
+             except json.JSONDecodeError as e:
+                 logger.warning(f" - Skipping log entry, failed to parse json_str: {e}")
+                 continue
+
+             # This logic below remains the same, but it now correctly uses
+             # the 'data' payload from the parsed 'json_str'.
+             if data_type == "ChatDataEntry":
+                 if data.get("actor_id") and data.get("message"):
+                     unique_speaker_messages.add((data["actor_id"], data["message"]))
+             elif data_type == "DayExileVoteDataEntry":
+                 if data.get("actor_id") and data.get("target_id"):
+                     dynamic_moderator_messages.add(f"{data['actor_id']} votes to exile {data['target_id']}.")
+             elif data_type == "WerewolfNightVoteDataEntry":
+                 if data.get("actor_id") and data.get("target_id"):
+                     dynamic_moderator_messages.add(f"{data['actor_id']} votes to eliminate {data['target_id']}.")
+             elif data_type == "SeerInspectActionDataEntry":
+                 if data.get("actor_id") and data.get("target_id"):
+                     dynamic_moderator_messages.add(f"{data['actor_id']} inspects {data['target_id']}.")
+             elif data_type == "DoctorHealActionDataEntry":
+                 if data.get("actor_id") and data.get("target_id"):
+                     dynamic_moderator_messages.add(f"{data['actor_id']} heals {data['target_id']}.")
+             elif data_type == "DayExileElectedDataEntry":
+                 if all(k in data for k in ["elected_player_id", "elected_player_role_name"]):
+                     dynamic_moderator_messages.add(
+                         f"{data['elected_player_id']} was exiled by vote. Their role was a {data['elected_player_role_name']}."
+                     )
+             elif data_type == "WerewolfNightEliminationDataEntry":
+                 if all(k in data for k in ["eliminated_player_id", "eliminated_player_role_name"]):
+                     dynamic_moderator_messages.add(
+                         f"{data['eliminated_player_id']} was eliminated. Their role was a {data['eliminated_player_role_name']}."
+                     )
+             elif data_type == "DoctorSaveDataEntry":
+                 if "saved_player_id" in data:
+                     dynamic_moderator_messages.add(f"{data['saved_player_id']} was attacked but saved by a Doctor!")
+             elif data_type == "SeerInspectResultDataEntry":
+                 if data.get("role"):
+                     dynamic_moderator_messages.add(
+                         f"{data['actor_id']} saw {data['target_id']}'s role is {data['role']}."
+                     )
+                 elif data.get("team"):
+                     dynamic_moderator_messages.add(
+                         f"{data['actor_id']} saw {data['target_id']}'s team is {data['team']}."
+                     )
+             elif data_type == "GameEndResultsDataEntry":
+                 if "winner_team" in data:
+                     dynamic_moderator_messages.add(f"The game is over. The {data['winner_team']} team has won!")
+             elif data_type == "WerewolfNightEliminationElectedDataEntry":
+                 if "elected_target_player_id" in data:
+                     dynamic_moderator_messages.add(
+                         f"The werewolves have chosen to eliminate {data['elected_target_player_id']}."
+                     )
+             elif event_name == EventName.DAY_START:
+                 dynamic_moderator_messages.add(f"Day {day_count} begins!")
+             elif event_name == EventName.NIGHT_START:
+                 dynamic_moderator_messages.add(f"Night {day_count} begins!")
+             elif event_name == EventName.MODERATOR_ANNOUNCEMENT:
+                 if "discussion rule is" in description:
+                     dynamic_moderator_messages.add("Discussion begins!")
+                 elif "Voting phase begins" in description:
+                     dynamic_moderator_messages.add("Exile voting begins!")
+
+     logger.info(f"Found {len(unique_speaker_messages)} unique player messages.")
+     logger.info(f"Found {len(dynamic_moderator_messages)} dynamic moderator messages.")
+     return unique_speaker_messages, dynamic_moderator_messages
+
+
+ def generate_audio_files(
+     client,
+     tts_provider,
+     unique_speaker_messages,
+     dynamic_moderator_messages,
+     player_voice_map,
+     audio_config,
+     output_dir,
+ ):
+     """Generates and saves all required audio files, returning a map for the HTML."""
+     logger.info("Extracting dialogue and generating audio files...")
+     audio_map = {}
+     paths = audio_config["paths"]
+     audio_dir = os.path.join(output_dir, paths["audio_dir_name"])
+     moderator_voice = audio_config["voices"]["moderator"]
+     static_moderator_messages = audio_config["audio"]["static_moderator_messages"]
+
+     messages_to_generate = []
+     for key, message in static_moderator_messages.items():
+         messages_to_generate.append(("moderator", key, message, moderator_voice))
+     for message in dynamic_moderator_messages:
+         messages_to_generate.append(("moderator", message, message, moderator_voice))
+     for speaker_id, message in unique_speaker_messages:
+         voice = player_voice_map.get(speaker_id)
+         if voice:
+             messages_to_generate.append((speaker_id, message, message, voice))
+         else:
+             logger.warning(f" - Warning: No voice found for speaker: {speaker_id}")
+
+     for speaker, key, message, voice in messages_to_generate:
+         map_key = f"{speaker}:{key}"
+         filename = hashlib.md5(map_key.encode()).hexdigest() + ".wav"
+         audio_path_on_disk = os.path.join(audio_dir, filename)
+         audio_path_for_html = os.path.join(paths["audio_dir_name"], filename)
+
+         if not os.path.exists(audio_path_on_disk):
+             logger.info(f' - Generating audio for {speaker} ({voice}): "{message[:40]}..." ')
+             audio_content = None
+             if tts_provider == "vertex_ai":
+                 model_name = audio_config.get("vertex_ai_model", "gemini-2.5-flash-preview-tts")
+                 audio_content = get_tts_audio_vertex(client, message, voice_name=voice, model_name=model_name)
+             else: # google_genai
+                 audio_content = get_tts_audio_genai(client, message, voice_name=voice)
+
+             if audio_content:
+                 wave_file(audio_path_on_disk, audio_content)
+                 audio_map[map_key] = audio_path_for_html
+         else:
+             audio_map[map_key] = audio_path_for_html
+
+     return audio_map
+
+
+ def generate_debug_audio_files(
+     output_dir, client, tts_provider, unique_speaker_messages, dynamic_moderator_messages, audio_config
+ ):
+     """Generates a single debug audio file and maps all events to it."""
+     logger.info("Generating single debug audio for UI testing...")
+     paths = audio_config["paths"]
+     debug_audio_dir = os.path.join(output_dir, paths["debug_audio_dir_name"])
+     os.makedirs(debug_audio_dir, exist_ok=True)
+     audio_map = {}
+
+     debug_message = "Testing start, testing end."
+     filename = "debug_audio.wav"
+     audio_path_on_disk = os.path.join(debug_audio_dir, filename)
+     audio_path_for_html = os.path.join(paths["debug_audio_dir_name"], filename)
+
+     if not os.path.exists(audio_path_on_disk):
+         logger.info(f' - Generating debug audio: "{debug_message}"')
+         audio_content = None
+         if tts_provider == "vertex_ai":
+             model_name = audio_config.get("vertex_ai_model", "gemini-2.5-flash-preview-tts")
+             debug_voice = "Charon"
+             audio_content = get_tts_audio_vertex(client, debug_message, voice_name=debug_voice, model_name=model_name)
+         else:
+             debug_voice = "achird"
+             audio_content = get_tts_audio_genai(client, debug_message, voice_name=debug_voice)
+
+         if audio_content:
+             wave_file(audio_path_on_disk, audio_content)
+         else:
+             logger.error(" - Failed to generate debug audio. The map will be empty.")
+             return {}
+     else:
+         logger.info(f" - Using existing debug audio file: {audio_path_on_disk}")
+
+     static_moderator_messages = audio_config["audio"]["static_moderator_messages"]
+
+     messages_to_map = []
+     for key in static_moderator_messages:
+         messages_to_map.append(("moderator", key))
+     for message in dynamic_moderator_messages:
+         messages_to_map.append(("moderator", message))
+     for speaker_id, message in unique_speaker_messages:
+         messages_to_map.append((speaker_id, message))
+
+     for speaker, key in messages_to_map:
+         map_key = f"{speaker}:{key}"
+         audio_map[map_key] = audio_path_for_html
+
+     logger.info(f" - Mapped all {len(audio_map)} audio events to '{audio_path_for_html}'")
+     return audio_map
+
+
+ def render_html(existing_html_path, audio_map, output_file):
+     """Reads an existing HTML replay, injects the audio map, and saves it."""
+     logger.info(f"Reading existing HTML from: {existing_html_path}")
+     with open(existing_html_path, "r", encoding="utf-8") as f:
+         html_content = f.read()
+
+     logger.info("Injecting the local audio map into the HTML...")
+     audio_map_json = json.dumps(audio_map)
+     injection_script = f"<script>window.AUDIO_MAP = {audio_map_json};</script>"
+     html_content = html_content.replace("</head>", f"{injection_script}</head>")
+
+     with open(output_file, "w", encoding="utf-8") as f:
+         f.write(html_content)
+     logger.info(f"Successfully generated audio-enabled HTML at: {output_file}")
+
+
+ def start_server(directory, port, filename):
+     """Starts a local HTTP server to serve the replay."""
+     logger.info(f"\nStarting local server to serve from the '{directory}' directory...")
+
+     class Handler(http.server.SimpleHTTPRequestHandler):
+         def __init__(self, *args, **kwargs):
+             super().__init__(*args, directory=directory, **kwargs)
+
+     with socketserver.TCPServer(("", port), Handler) as httpd:
+         print(f"\nServing replay at: http://localhost:{port}/{filename}")
+         print("Open this URL in your web browser.")
+         print(f"Or you can zip the '{directory}' directory and share it.")
+         print("Press Ctrl+C to stop the server.")
+         try:
+             httpd.serve_forever()
+         except KeyboardInterrupt:
+             print("\nServer stopped.")
+
+
+ def main():
+     """Main function to add audio to a Werewolf replay."""
+     parser = argparse.ArgumentParser(description="Add audio to a Werewolf game replay.")
+     parser.add_argument(
+         "-i", "--run_dir", type=str, required=True, help="Path to the directory of a game run generated by run.py."
+     )
+     parser.add_argument(
+         "-o",
+         "--output_dir",
+         type=str,
+         help="Output directory for the audio-enabled replay. Defaults to 'werewolf_replay_audio' inside the run directory.",
+     )
+     parser.add_argument(
+         "-c",
+         "--config_path",
+         type=str,
+         default=os.path.join(os.path.dirname(__file__), "configs/audio/standard.yaml"),
+         help="Path to the audio configuration YAML file.",
+     )
+     parser.add_argument(
+         "--debug-audio", action="store_true", help="Generate a single debug audio file for all events for UI testing."
+     )
+     parser.add_argument(
+         "--serve", action="store_true", help="Start a local HTTP server to view the replay after generation."
+     )
+     parser.add_argument(
+         "--tts-provider",
+         type=str,
+         default="vertex_ai",
+         choices=["vertex_ai", "google_genai"],
+         help="The TTS provider to use for audio synthesis.",
+     )
+     args = parser.parse_args()
+
+     if not args.output_dir:
+         args.output_dir = os.path.join(args.run_dir, "werewolf_replay_audio")
+
+     os.makedirs(args.output_dir, exist_ok=True)
+     setup_logger(output_dir=args.output_dir, base_name="add_audio")
+
+     logger.info(f"Loading audio config from: {args.config_path}")
+     audio_config = load_config(args.config_path)
+
+     replay_json_path = os.path.join(args.run_dir, "werewolf_game.json")
+     logger.info(f"Loading game replay from: {replay_json_path}")
+     if not os.path.exists(replay_json_path):
+         logger.error(f"Replay file not found: {replay_json_path}")
+         logger.error("Please ensure you provide a valid run directory created by run.py.")
+         return
+     with open(replay_json_path, "r") as f:
+         replay_data = json.load(f)
+
+     game_config = replay_data["configuration"]
+     player_voices = audio_config["voices"]["players"]
+     player_voice_map = {
+         agent_config["id"]: player_voices.get(agent_config["id"]) for agent_config in game_config["agents"]
+     }
+
+     load_dotenv()
+     client = None
+     if args.tts_provider == "vertex_ai":
+         if not os.getenv("GOOGLE_CLOUD_PROJECT"):
+             logger.error("Error: GOOGLE_CLOUD_PROJECT environment variable not found. It is required for Vertex AI.")
+             return
+         try:
+             client = texttospeech.TextToSpeechClient()
+         except Exception as e:
+             logger.error(f"Failed to initialize Vertex AI client: {e}")
+             logger.error("Please ensure you have authenticated with 'gcloud auth application-default login'")
+             return
+     else: # google_genai
+         if not os.getenv("GEMINI_API_KEY"):
+             logger.error(
+                 "Error: GEMINI_API_KEY environment variable not found. Audio generation with google.genai requires it."
+             )
+             return
+         client = genai.Client()
+
+     unique_speaker_messages, dynamic_moderator_messages = extract_game_data_from_json(replay_data)
+
+     paths = audio_config["paths"]
+     audio_dir = os.path.join(args.output_dir, paths["audio_dir_name"])
+     os.makedirs(audio_dir, exist_ok=True)
+
+     if args.debug_audio:
+         audio_map = generate_debug_audio_files(
+             args.output_dir,
+             client,
+             args.tts_provider,
+             unique_speaker_messages,
+             dynamic_moderator_messages,
+             audio_config,
+         )
+     else:
+         audio_map = generate_audio_files(
+             client,
+             args.tts_provider,
+             unique_speaker_messages,
+             dynamic_moderator_messages,
+             player_voice_map,
+             audio_config,
+             args.output_dir,
+         )
+
+     original_html_path = os.path.join(args.run_dir, "werewolf_game.html")
+     output_html_file = os.path.join(args.output_dir, paths["output_html_filename"])
+     render_html(original_html_path, audio_map, output_html_file)
+
+     if args.serve:
+         start_server(args.output_dir, audio_config["server"]["port"], paths["output_html_filename"])
+
+
+ if __name__ == "__main__":
+     main()
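
Going by the line counts in the file list, this +425 chunk appears to be kaggle_environments/envs/werewolf/scripts/add_audio.py. The replay HTML it produces looks clips up by a "speaker:key" map key, and generate_audio_files names each clip file after the MD5 of that key. A minimal sketch of that naming scheme, mirroring the code above (the helper name and example speaker/key are illustrative):

import hashlib
import os


def audio_path_for_html(speaker: str, key: str, audio_dir_name: str = "audio") -> str:
    # Mirrors generate_audio_files: the map key is "speaker:key" and the clip is
    # stored as <md5-of-key>.wav under the configured audio directory name.
    map_key = f"{speaker}:{key}"
    filename = hashlib.md5(map_key.encode()).hexdigest() + ".wav"
    return os.path.join(audio_dir_name, filename)


# Illustrative lookup for the static moderator line keyed "night_begins".
print(audio_path_for_html("moderator", "night_begins"))
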
@@ -0,0 +1,24 @@
+ # Settings for the dump_audio.py script
+ server:
+   port: 7999
+ paths:
+   audio_dir_name: "audio"
+   debug_audio_dir_name: "debug_audio"
+   output_html_filename: "replay.html"
+ voices:
+   moderator: "enceladus"
+   players:
+     Kai: 'Kore'
+     Jordan: 'Charon'
+     Charlie: 'Leda'
+     Taylor: 'Despina'
+     Alex: 'Erinome'
+     Jamie: 'Gacrux'
+     Quinn: 'Achird'
+     Casey: 'Puck'
+ audio:
+   static_moderator_messages:
+     night_begins: "(rate=\"fast\", volume=\"soft\", voice=\"mysterious\")[As darkness descends, the village falls silent.](rate=\"medium\", pitch=\"-2st\")[Everyone, close your eyes.]"
+     day_begins: "(rate=\"fast\", volume=\"loud\")[Wake up, villagers!] (rate=\"medium\", voice=\"neutral\")[The sun rises on a new day.] (break=\"50ms\") (rate=\"medium\", voice=\"somber\")[Let's see who survived the night.]"
+     discussion_begins: "(voice=\"authoritative\")[The town meeting now begins.] (voice=\"neutral\")[You have a few minutes to discuss and find the werewolves among you.] (voice=\"authoritative\")[Begin.]"
+     voting_begins: "(rate=\"slow\", voice=\"serious\")[The time for talk is over.] (break=\"50ms\") (rate=\"medium\", volume=\"loud\", voice=\"dramatic\")[Now, you must cast your votes!]"
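
By its +24 line count, this chunk appears to be kaggle_environments/envs/werewolf/scripts/configs/audio/standard.yaml, the default audio config loaded by add_audio.py. Player voices are looked up by agent id, so ids present in a replay but missing from voices.players resolve to None and trigger the script's missing-voice warning. A minimal sketch of that lookup, assuming a local copy of the file (the agent ids below are illustrative):

import yaml

# Path relative to the scripts directory; add_audio.py resolves it via __file__.
with open("configs/audio/standard.yaml") as f:
    audio_config = yaml.safe_load(f)

player_voices = audio_config["voices"]["players"]
agent_ids = ["Kai", "Jordan", "SomePlayerWithoutAVoice"]  # illustrative ids
player_voice_map = {aid: player_voices.get(aid) for aid in agent_ids}
print(player_voice_map)  # {'Kai': 'Kore', 'Jordan': 'Charon', 'SomePlayerWithoutAVoice': None}
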
@@ -0,0 +1,102 @@
+ # Settings for the dump_audio.py script
+ script_settings:
+   server:
+     port: 7999
+   paths:
+     audio_dir_name: "audio"
+     debug_audio_dir_name: "debug_audio"
+     output_html_filename: "replay.html"
+   voices:
+     moderator: "enceladus"
+     players:
+       gemini-2.5-flash: 'Kore'
+       deepseek-r1: 'Charon'
+       gpt-oss-120b: 'Leda'
+       qwen3: 'Despina'
+       "gpt-4.1": 'Erinome'
+       "o4-mini": 'Gacrux'
+       "gemini-2.5-pro": 'Achird'
+       "grok-4": 'Puck'
+   audio:
+     static_moderator_messages:
+       night_begins: "(rate=\"fast\", volume=\"soft\", voice=\"mysterious\")[As darkness descends, the village falls silent.](rate=\"medium\", pitch=\"-2st\")[Everyone, close your eyes.]"
+       day_begins: "(rate=\"fast\", volume=\"loud\")[Wake up, villagers!] (rate=\"medium\", voice=\"neutral\")[The sun rises on a new day.] (break=\"50ms\") (rate=\"medium\", voice=\"somber\")[Let's see who survived the night.]"
+       discussion_begins: "(voice=\"authoritative\")[The town meeting now begins.] (voice=\"neutral\")[You have a few minutes to discuss and find the werewolves among you.] (voice=\"authoritative\")[Begin.]"
+       voting_begins: "(rate=\"slow\", voice=\"serious\")[The time for talk is over.] (break=\"50ms\") (rate=\"medium\", volume=\"loud\", voice=\"dramatic\")[Now, you must cast your votes!]"
+
+ # Configuration for the Werewolf game environment
+ game_config:
+   actTimeout: 300
+   runTimeout: 3600
+   discussion_protocol:
+     name: "RoundRobinDiscussion"
+     params:
+       max_rounds: 1
+   agents:
+     - role: "Werewolf"
+       id: "gemini-2.5-pro"
+       thumbnail: "https://logos-world.net/wp-content/uploads/2025/01/Google-Gemini-Symbol.png"
+       agent_id: "llm/gemini/gemini-2.5-pro"
+       display_name: "gemini/gemini-2.5-pro"
+       agent_harness_name: "llm_harness"
+       llms:
+         - model_name: "gemini/gemini-2.5-pro"
+     - role: "Werewolf"
+       id: "deepseek-r1"
+       thumbnail: "https://images.seeklogo.com/logo-png/61/1/deepseek-ai-icon-logo-png_seeklogo-611473.png"
+       agent_id: "llm/together_ai/deepseek-ai/DeepSeek-R1"
+       display_name: "together_ai/deepseek-ai/DeepSeek-R1"
+       agent_harness_name: "llm_harness"
+       llms:
+         - model_name: "together_ai/deepseek-ai/DeepSeek-R1"
+           parameters: { "max_tokens": 163839 }
+     - role: "Doctor"
+       id: "gpt-5"
+       thumbnail: "https://images.seeklogo.com/logo-png/46/1/chatgpt-logo-png_seeklogo-465219.png"
+       agent_id: "llm/gpt-5"
+       display_name: "gpt-5"
+       agent_harness_name: "llm_harness"
+       llms:
+         - model_name: "gpt-5"
+     - role: "Seer"
+       id: "qwen3"
+       thumbnail: "https://images.seeklogo.com/logo-png/61/1/qwen-icon-logo-png_seeklogo-611724.png"
+       agent_id: "llm/together_ai/Qwen/Qwen3-235B-A22B-Instruct-2507-tput"
+       display_name: "together_ai/Qwen/Qwen3-235B-A22B-Instruct-2507-tput"
+       agent_harness_name: "llm_harness"
+       llms:
+         - model_name: "together_ai/Qwen/Qwen3-235B-A22B-Instruct-2507-tput"
+     - role: "Villager"
+       id: "claude-4-sonnet"
+       thumbnail: "https://images.seeklogo.com/logo-png/55/1/claude-logo-png_seeklogo-554534.png"
+       agent_id: "llm/claude-4-sonnet-20250514"
+       display_name: "claude-4-sonnet-20250514"
+       agent_harness_name: "llm_harness"
+       llms:
+         - model_name: "claude-4-sonnet-20250514"
+     - role: "Villager"
+       id: "zai-glm-4.5-air"
+       thumbnail: "https://z-cdn.chatglm.cn/z-ai/static/logo.svg"
+       agent_id: "llm/together_ai/zai-org/GLM-4.5-Air-FP8"
+       display_name: "zai-glm-4.5-air"
+       agent_harness_name: "llm_harness"
+       llms:
+         - model_name: "together_ai/zai-org/GLM-4.5-Air-FP8"
+           parameters: { "max_tokens": 100000 }
+     - role: "Villager"
+       id: "kimi-k2"
+       thumbnail: "https://images.seeklogo.com/logo-png/61/1/kimi-logo-png_seeklogo-611650.png"
+       agent_id: "llm/together_ai/moonshotai/Kimi-K2-Instruct"
+       display_name: "kimi-k2"
+       agent_harness_name: "llm_harness"
+       llms:
+         - model_name: "together_ai/moonshotai/Kimi-K2-Instruct"
+           parameters: { "max_tokens": 100000 }
+     - role: "Villager"
+       id: "grok-4"
+       thumbnail: "https://images.seeklogo.com/logo-png/61/1/grok-logo-png_seeklogo-613403.png"
+       agent_id: "llm/xai/grok-4-latest"
+       display_name: "xai/grok-4-latest"
+       agent_harness_name: "llm_harness"
+       llms:
+         - model_name: "xai/grok-4-latest"
@@ -0,0 +1,100 @@
+ # Configuration for the Werewolf game environment
+ game_config:
+   seed: 123
+   actTimeout: 300
+   runTimeout: 3600
+   discussion_protocol:
+     name: "TurnByTurnBiddingDiscussion"
+     params:
+       max_turns: 16
+       bid_result_public: false
+   day_voting_protocol:
+     name: "SequentialVoting"
+   werewolf_night_vote_protocol:
+     name: "SequentialVoting"
+   night_elimination_reveal_level: no_reveal
+   day_exile_reveal_level: no_reveal
+   agents:
+     - role: "Werewolf"
+       id: "gemini-2.5-flash"
+       thumbnail: "https://logos-world.net/wp-content/uploads/2025/01/Google-Gemini-Symbol.png"
+       agent_id: "llm_harness/gemini/gemini-2.5-flash"
+       display_name: "gemini/gemini-2.5-flash"
+       agent_harness_name: "llm_harness"
+       chat_mode: "text"
+       enable_bid_reasoning: false
+       llms:
+         - model_name: "gemini/gemini-2.5-flash"
+     - role: "Werewolf"
+       id: "deepseek-r1"
+       thumbnail: "https://images.seeklogo.com/logo-png/61/1/deepseek-ai-icon-logo-png_seeklogo-611473.png"
+       agent_id: "llm_harness/together_ai/deepseek-ai/DeepSeek-R1"
+       display_name: "together_ai/deepseek-ai/DeepSeek-R1"
+       agent_harness_name: "llm_harness"
+       chat_mode: "text"
+       enable_bid_reasoning: false
+       llms:
+         - model_name: "together_ai/deepseek-ai/DeepSeek-R1"
+           parameters: { "max_tokens": 163839 }
+     - role: "Doctor"
+       role_params:
+         allow_self_save: true
+       id: "gpt-oss-120b"
+       thumbnail: "https://images.seeklogo.com/logo-png/46/1/chatgpt-logo-png_seeklogo-465219.png"
+       agent_id: "llm_harness/together_ai/openai/gpt-oss-120b"
+       display_name: "together_ai/openai/gpt-oss-120b"
+       agent_harness_name: "llm_harness"
+       chat_mode: "text"
+       enable_bid_reasoning: false
+       llms:
+         - model_name: "together_ai/openai/gpt-oss-120b"
+     - role: "Seer"
+       id: "qwen3"
+       thumbnail: "https://images.seeklogo.com/logo-png/61/1/qwen-icon-logo-png_seeklogo-611724.png"
+       agent_id: "llm_harness/together_ai/Qwen/Qwen3-235B-A22B-Instruct-2507-tput"
+       display_name: "together_ai/Qwen/Qwen3-235B-A22B-Instruct-2507-tput"
+       agent_harness_name: "llm_harness"
+       chat_mode: "text"
+       enable_bid_reasoning: false
+       llms:
+         - model_name: "together_ai/Qwen/Qwen3-235B-A22B-Instruct-2507-tput"
+     - role: "Villager"
+       id: "gpt-4.1"
+       thumbnail: "https://images.seeklogo.com/logo-png/46/1/chatgpt-logo-png_seeklogo-465219.png"
+       agent_id: "llm_harness/gpt-4.1"
+       display_name: "gpt-4.1"
+       agent_harness_name: "llm_harness"
+       chat_mode: "text"
+       enable_bid_reasoning: false
+       llms:
+         - model_name: "gpt-4.1"
+     - role: "Villager"
+       id: "o4-mini"
+       thumbnail: "https://images.seeklogo.com/logo-png/46/1/chatgpt-logo-png_seeklogo-465219.png"
+       agent_id: "llm_harness/o4-mini"
+       display_name: "o4-mini"
+       agent_harness_name: "llm_harness"
+       chat_mode: "text"
+       enable_bid_reasoning: false
+       llms:
+         - model_name: "o4-mini"
+     - role: "Villager"
+       id: "gemini-2.5-pro"
+       thumbnail: "https://logos-world.net/wp-content/uploads/2025/01/Google-Gemini-Symbol.png"
+       agent_id: "llm_harness/gemini/gemini-2.5-pro"
+       display_name: "gemini/gemini-2.5-pro"
+       agent_harness_name: "llm_harness"
+       chat_mode: "text"
+       enable_bid_reasoning: false
+       llms:
+         - model_name: "gemini/gemini-2.5-pro"
+     - role: "Villager"
+       id: "grok-4"
+       thumbnail: "https://images.seeklogo.com/logo-png/61/1/grok-logo-png_seeklogo-613403.png"
+       agent_id: "llm_harness/xai/grok-4-latest"
+       display_name: "xai/grok-4-latest"
+       agent_harness_name: "llm_harness"
+       chat_mode: "text"
+       enable_bid_reasoning: false
+       llms:
+         - model_name: "xai/grok-4-latest"
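
Per the file list counts, the +102 chunk above appears to be scripts/configs/run/block_basic.yaml and this +100 chunk scripts/configs/run/comprehensive.yaml; both share the same game_config.agents schema (role, id, agent_id, and an llms list naming the backing model). A minimal sketch of reading one and listing the configured roster, assuming a local copy of the comprehensive config (the path is illustrative):

import yaml

with open("configs/run/comprehensive.yaml") as f:  # illustrative local path
    cfg = yaml.safe_load(f)

for agent in cfg["game_config"]["agents"]:
    # Each entry pairs a Werewolf game role with the LLM playing that seat.
    print(agent["role"], agent["id"], agent["llms"][0]["model_name"])
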