ai-plays-jackbox 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. ai_plays_jackbox/__init__.py +0 -0
  2. ai_plays_jackbox/bot/__init__.py +0 -0
  3. ai_plays_jackbox/bot/bot_base.py +219 -0
  4. ai_plays_jackbox/bot/bot_factory.py +31 -0
  5. ai_plays_jackbox/bot/bot_personality.py +111 -0
  6. ai_plays_jackbox/bot/jackbox5/__init__.py +0 -0
  7. ai_plays_jackbox/bot/jackbox5/bot_base.py +26 -0
  8. ai_plays_jackbox/bot/jackbox5/mad_verse_city.py +121 -0
  9. ai_plays_jackbox/bot/jackbox5/patently_stupid.py +168 -0
  10. ai_plays_jackbox/bot/jackbox6/__init__.py +0 -0
  11. ai_plays_jackbox/bot/jackbox6/bot_base.py +20 -0
  12. ai_plays_jackbox/bot/jackbox6/dictionarium.py +105 -0
  13. ai_plays_jackbox/bot/jackbox6/joke_boat.py +105 -0
  14. ai_plays_jackbox/bot/jackbox7/__init__.py +0 -0
  15. ai_plays_jackbox/bot/jackbox7/bot_base.py +20 -0
  16. ai_plays_jackbox/bot/jackbox7/quiplash3.py +108 -0
  17. ai_plays_jackbox/bot/jackbox8/__init__.py +0 -0
  18. ai_plays_jackbox/bot/jackbox8/bot_base.py +20 -0
  19. ai_plays_jackbox/bot/jackbox8/job_job.py +205 -0
  20. ai_plays_jackbox/bot/standalone/__init__.py +0 -0
  21. ai_plays_jackbox/bot/standalone/drawful2.py +159 -0
  22. ai_plays_jackbox/cli/__init__.py +0 -0
  23. ai_plays_jackbox/cli/main.py +117 -0
  24. ai_plays_jackbox/constants.py +4 -0
  25. ai_plays_jackbox/llm/__init__.py +1 -0
  26. ai_plays_jackbox/llm/chat_model.py +39 -0
  27. ai_plays_jackbox/llm/chat_model_factory.py +35 -0
  28. ai_plays_jackbox/llm/gemini_model.py +86 -0
  29. ai_plays_jackbox/llm/ollama_model.py +53 -0
  30. ai_plays_jackbox/llm/openai_model.py +86 -0
  31. ai_plays_jackbox/room/__init__.py +0 -0
  32. ai_plays_jackbox/room/room.py +87 -0
  33. ai_plays_jackbox/run.py +23 -0
  34. ai_plays_jackbox/scripts/lint.py +18 -0
  35. ai_plays_jackbox/ui/__init__.py +0 -0
  36. ai_plays_jackbox/ui/main.py +12 -0
  37. ai_plays_jackbox/ui/startup.py +271 -0
  38. ai_plays_jackbox-0.4.1.dist-info/METADATA +158 -0
  39. ai_plays_jackbox-0.4.1.dist-info/RECORD +42 -0
  40. ai_plays_jackbox-0.4.1.dist-info/WHEEL +4 -0
  41. ai_plays_jackbox-0.4.1.dist-info/entry_points.txt +4 -0
  42. ai_plays_jackbox-0.4.1.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,159 @@
1
+ import random
2
+
3
+ from loguru import logger
4
+
5
+ from ai_plays_jackbox.bot.bot_base import JackBoxBotBase
6
+
7
# Prompt template sent to the chat model when drawing in Drawful 2.
# Placeholders: {prompt} — the drawing prompt from the game;
# {personality} — this bot's persona string.
_DRAWING_PROMPT_TEMPLATE = """
You are playing Drawful 2.

Generate an image with the following prompt: {prompt}

When generating your response, follow these rules:
- Your personality is: {personality}
- Make sure to implement your personality somehow into the drawing, but keep the prompt in mind
- The image must be a simple sketch
- The image must have a white background and use black for the lines
- Avoid intricate details
"""
19
+
20
+
21
+ class Drawful2Bot(JackBoxBotBase):
22
+ _drawing_completed: bool = False
23
+
24
+ def __init__(self, *args, **kwargs):
25
+ super().__init__(*args, **kwargs)
26
+
27
+ @property
28
+ def _player_operation_key(self) -> str:
29
+ return f"player:{self._player_id}"
30
+
31
+ def _is_player_operation_key(self, operation_key: str) -> bool:
32
+ return operation_key == self._player_operation_key
33
+
34
+ @property
35
+ def _room_operation_key(self) -> str:
36
+ return "room"
37
+
38
+ def _is_room_operation_key(self, operation_key: str) -> bool:
39
+ return operation_key == self._room_operation_key
40
+
41
+ def _handle_welcome(self, data: dict):
42
+ pass
43
+
44
+ def _handle_player_operation(self, data: dict):
45
+ if not data:
46
+ return
47
+ room_state = data.get("state", None)
48
+ if not room_state:
49
+ return
50
+ prompt = data.get("prompt")
51
+ prompt_text = self._html_to_text(prompt.get("html", "")) if prompt is not None else ""
52
+
53
+ match room_state:
54
+ case "Draw":
55
+ colors = data.get("colors", ["#fb405a", "#7a2259"])
56
+ selected_color = colors[0]
57
+ canvas_height = int(data.get("size", {}).get("height", 320))
58
+ canvas_width = int(data.get("size", {}).get("width", 320))
59
+ lines = self._generate_drawing(prompt_text, canvas_height, canvas_width)
60
+ object_key = data.get("objectKey", "")
61
+ if object_key != "":
62
+ if not self._drawing_completed:
63
+ self._object_update(
64
+ object_key,
65
+ {
66
+ "lines": [{"color": selected_color, "thickness": 1, "points": l} for l in lines],
67
+ "submit": True,
68
+ },
69
+ )
70
+ # This prevents the bot from trying to draw multiple times
71
+ self._drawing_completed = True
72
+
73
+ case "EnterSingleText":
74
+ # We need to reset this once we're entering options
75
+ self._drawing_completed = False
76
+ # Listen, the bot can't see the drawing
77
+ # so they're just going to say something
78
+ text_key = data.get("textKey", "")
79
+ self._text_update(text_key, self._generate_random_response())
80
+
81
+ case "MakeSingleChoice":
82
+ # Bot still can't see the drawing
83
+ # so just pick something
84
+ if data.get("type", "single") == "repeating":
85
+ pass
86
+ choices = data.get("choices", [])
87
+ choices_as_ints = [i for i in range(0, len(choices))]
88
+ selected_choice = random.choice(choices_as_ints)
89
+ self._client_send({"action": "choose", "choice": selected_choice})
90
+
91
+ def _handle_room_operation(self, data: dict):
92
+ pass
93
+
94
+ def _generate_drawing(self, prompt: str, canvas_height: int, canvas_width: int) -> list[str]:
95
+ logger.info("Generating drawing...")
96
+ image_prompt = _DRAWING_PROMPT_TEMPLATE.format(prompt=prompt, personality=self._personality)
97
+ image_bytes = self._chat_model.generate_sketch(
98
+ image_prompt,
99
+ "",
100
+ temperature=self._chat_model._chat_model_temperature,
101
+ top_p=self._chat_model._chat_model_top_p,
102
+ )
103
+ return self._image_bytes_to_polylines(image_bytes, canvas_height, canvas_width)
104
+
105
+ def _generate_random_response(self) -> str:
106
+ possible_responses = [
107
+ "Abstract awkward silence",
108
+ "Abstract existential dread",
109
+ "Abstract late-stage capitalism",
110
+ "Abstract lost hope",
111
+ "Abstract the void",
112
+ "Baby Yoda, but weird",
113
+ "Barbie, but weird",
114
+ "Confused dentist",
115
+ "Confused gym teacher",
116
+ "Confused lawyer",
117
+ "Confused therapist",
118
+ "DJ in trouble",
119
+ "Definitely banana",
120
+ "Definitely blob",
121
+ "Definitely potato",
122
+ "Definitely spaghetti",
123
+ "Taylor Swift, but weird",
124
+ "Waluigi, but weird",
125
+ "banana with feelings",
126
+ "chicken riding a scooter",
127
+ "cloud with feelings",
128
+ "confused gym teacher",
129
+ "confused therapist",
130
+ "dentist in trouble",
131
+ "duck + existential dread",
132
+ "duck riding a scooter",
133
+ "excited janitor",
134
+ "ferret + awkward silence",
135
+ "giraffe + awkward silence",
136
+ "giraffe filing taxes",
137
+ "hamster + existential dread",
138
+ "hamster + lost hope",
139
+ "hamster riding a scooter",
140
+ "janitor in trouble",
141
+ "joyful octopus",
142
+ "lawyer in trouble",
143
+ "llama + awkward silence",
144
+ "llama + late-stage capitalism",
145
+ "lonely dentist",
146
+ "lonely hamster",
147
+ "lonely janitor",
148
+ "lonely pirate",
149
+ "mango with feelings",
150
+ "pirate in trouble",
151
+ "sad DJ",
152
+ "sad hamster",
153
+ "spaghetti with feelings",
154
+ "terrified duck",
155
+ "terrified ferret",
156
+ "terrified lawyer",
157
+ ]
158
+ chosen_response = random.choice(possible_responses)
159
+ return chosen_response
File without changes
@@ -0,0 +1,117 @@
1
+ import argparse
2
+
3
+ from ai_plays_jackbox.bot.bot_personality import JackBoxBotVariant
4
+ from ai_plays_jackbox.constants import (
5
+ DEFAULT_NUM_OF_BOTS,
6
+ DEFAULT_TEMPERATURE,
7
+ DEFAULT_TOP_P,
8
+ )
9
+ from ai_plays_jackbox.llm.chat_model_factory import CHAT_MODEL_PROVIDERS
10
+ from ai_plays_jackbox.run import run
11
+
12
+
13
+ def _validate_room_code(string_to_check: str) -> str:
14
+ if not string_to_check.isalpha() or len(string_to_check) != 4:
15
+ raise argparse.ArgumentTypeError("Must be 4 letters")
16
+ return string_to_check
17
+
18
+
19
+ def _validate_num_of_bots(string_to_check: str) -> int:
20
+ try:
21
+ number_value = int(string_to_check)
22
+ except ValueError:
23
+ raise argparse.ArgumentTypeError("Must number 1-10")
24
+ if number_value < 1 or number_value > 10:
25
+ raise argparse.ArgumentTypeError("Must number 1-10")
26
+ return number_value
27
+
28
+
29
+ def _validate_temperature(string_to_check: str) -> float:
30
+ try:
31
+ number_value = float(string_to_check)
32
+ except ValueError:
33
+ raise argparse.ArgumentTypeError("Must number 0.1-2.0")
34
+ if number_value <= 0 or number_value > 2.0:
35
+ raise argparse.ArgumentTypeError("Must number 0.1-2.0")
36
+ return number_value
37
+
38
+
39
+ def _validate_top_p(string_to_check: str) -> float:
40
+ try:
41
+ number_value = float(string_to_check)
42
+ except ValueError:
43
+ raise argparse.ArgumentTypeError("Must number 0.0-1.0")
44
+ if number_value < 0 or number_value > 1.0:
45
+ raise argparse.ArgumentTypeError("Must number 0.0-1.0")
46
+ return number_value
47
+
48
+
49
def main():
    """CLI entry point: parse the command line and launch the bots."""
    args = _build_arg_parser().parse_args()
    run(
        args.room_code.upper(),
        args.chat_model_provider,
        chat_model_name=args.chat_model_name,
        num_of_bots=args.num_of_bots,
        bots_in_play=args.bots_in_play,
        chat_model_temperature=args.temperature,
        chat_model_top_p=args.top_p,
    )


def _build_arg_parser() -> argparse.ArgumentParser:
    """Construct the argument parser for the `ai-plays-jackbox` CLI."""
    parser = argparse.ArgumentParser()
    # Required arguments.
    parser.add_argument(
        "--room-code",
        required=True,
        help="The JackBox room code",
        type=_validate_room_code,
        metavar="WXYZ",
    )
    parser.add_argument(
        "--chat-model-provider",
        required=True,
        help="Choose which chat model platform to use",
        choices=list(CHAT_MODEL_PROVIDERS.keys()),
        type=str,
    )
    # Optional arguments.
    parser.add_argument(
        "--chat-model-name",
        required=False,
        help="Choose which chat model to use (Will default to default for provider)",
        type=str,
    )
    parser.add_argument(
        "--num-of-bots",
        required=False,
        default=DEFAULT_NUM_OF_BOTS,
        help="How many bots to have play",
        type=_validate_num_of_bots,
        metavar=str(DEFAULT_NUM_OF_BOTS),
    )
    parser.add_argument(
        "--bots-in-play",
        required=False,
        nargs="*",
        help="Which bots are in play?",
        choices=[variant.name for variant in JackBoxBotVariant],
        type=str,
    )
    parser.add_argument(
        "--temperature",
        required=False,
        default=DEFAULT_TEMPERATURE,
        help="Temperature for Gen AI",
        type=_validate_temperature,
        metavar=str(DEFAULT_TEMPERATURE),
    )
    parser.add_argument(
        "--top-p",
        required=False,
        default=DEFAULT_TOP_P,
        help="Top P for Gen AI",
        type=_validate_top_p,
        metavar=str(DEFAULT_TOP_P),
    )
    return parser


if __name__ == "__main__":
    main()
@@ -0,0 +1,4 @@
1
# Hostname of the JackBox "ecast" service used for room lookup/connection.
ECAST_HOST = "ecast.jackboxgames.com"
# Default LLM sampling temperature.
DEFAULT_TEMPERATURE = 0.5
# Default LLM nucleus-sampling (top-p) value.
DEFAULT_TOP_P = 0.9
# Default number of bots to join a room.
DEFAULT_NUM_OF_BOTS = 4
@@ -0,0 +1 @@
1
+
@@ -0,0 +1,39 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import Optional
3
+
4
+
5
class ChatModel(ABC):
    """Abstract interface for a chat/LLM provider used by the bots.

    Concrete providers (OpenAI, Gemini, Ollama, ...) implement text and
    sketch generation on top of a shared model name and sampling defaults.
    """

    # Provider-specific model identifier.
    _model: str
    # Default sampling temperature, used when a call passes None.
    _chat_model_temperature: float
    # Default nucleus-sampling value, used when a call passes None.
    _chat_model_top_p: float

    def __init__(self, model: str, chat_model_temperature: float = 0.5, chat_model_top_p: float = 0.9):
        self._model = model
        self._chat_model_temperature = chat_model_temperature
        self._chat_model_top_p = chat_model_top_p

    @classmethod
    @abstractmethod
    def get_default_model(cls) -> str:
        """Return the provider's default model identifier.

        Fixed: the first parameter of this classmethod was misnamed `self`.
        """

    @abstractmethod
    def generate_text(
        self,
        prompt: str,
        instructions: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> str:
        """Generate a text completion for `prompt` under system `instructions`."""

    @abstractmethod
    def generate_sketch(
        self,
        prompt: str,
        instructions: str,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> bytes:
        """Generate an image for `prompt` and return the raw image bytes."""
@@ -0,0 +1,35 @@
1
+ from typing import Optional
2
+
3
+ from ai_plays_jackbox.llm.chat_model import ChatModel
4
+ from ai_plays_jackbox.llm.gemini_model import GeminiModel
5
+ from ai_plays_jackbox.llm.ollama_model import OllamaModel
6
+ from ai_plays_jackbox.llm.openai_model import OpenAIModel
7
+
8
# Registry mapping the CLI provider name to its ChatModel implementation.
CHAT_MODEL_PROVIDERS: dict[str, type[ChatModel]] = {
    "openai": OpenAIModel,
    "gemini": GeminiModel,
    "ollama": OllamaModel,
}
13
+
14
+
15
class ChatModelFactory:
    """Builds a concrete ChatModel from a provider name."""

    @staticmethod
    def get_chat_model(
        chat_model_provider: str,
        chat_model_name: Optional[str] = None,
        chat_model_temperature: float = 0.5,
        chat_model_top_p: float = 0.9,
    ) -> ChatModel:
        """Instantiate the ChatModel registered for `chat_model_provider`.

        Args:
            chat_model_provider: key in CHAT_MODEL_PROVIDERS (case-insensitive).
            chat_model_name: model identifier; defaults to the provider's own.
            chat_model_temperature: default sampling temperature.
            chat_model_top_p: default nucleus-sampling value.

        Raises:
            ValueError: if the provider name is unknown.
        """
        chat_model_provider = chat_model_provider.lower()
        # Single lookup instead of `in d.keys()` plus repeated indexing.
        provider_cls = CHAT_MODEL_PROVIDERS.get(chat_model_provider)
        if provider_cls is None:
            raise ValueError(f"Unknown chat model provider: {chat_model_provider}")

        if chat_model_name is None:
            chat_model_name = provider_cls.get_default_model()
        return provider_cls(
            chat_model_name,
            chat_model_temperature=chat_model_temperature,
            chat_model_top_p=chat_model_top_p,
        )
@@ -0,0 +1,86 @@
1
+ import os
2
+ from typing import Optional
3
+
4
+ from google import genai
5
+ from google.genai.types import GenerateContentConfig
6
+ from loguru import logger
7
+
8
+ from ai_plays_jackbox.llm.chat_model import ChatModel
9
+
10
+
11
class GeminiModel(ChatModel):
    """ChatModel implementation backed by Google Gemini (Developer API or Vertex AI)."""

    _gemini_vertex_ai_client: genai.Client

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): bool() on the raw env var means ANY non-empty value
        # (including "false" or "0") enables Vertex AI — confirm intended.
        self._gemini_vertex_ai_client = genai.Client(
            vertexai=bool(os.environ.get("GOOGLE_GENAI_USE_VERTEXAI")),
            api_key=os.environ.get("GOOGLE_GEMINI_DEVELOPER_API_KEY"),
            project=os.environ.get("GOOGLE_CLOUD_PROJECT"),
            location=os.environ.get("GOOGLE_CLOUD_LOCATION"),
        )

        # Check connection and if model exists, this will hard fail if connection can't be made
        # Or if the model is not found
        _ = self._gemini_vertex_ai_client.models.get(model=self._model)

    @classmethod
    def get_default_model(cls):
        """Default Gemini model identifier."""
        return "gemini-2.0-flash-001"

    def generate_text(
        self,
        prompt: str,
        instructions: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> str:
        """Generate a single-line text completion via Gemini."""
        if temperature is None:
            temperature = self._chat_model_temperature
        if top_p is None:
            top_p = self._chat_model_top_p

        chat_response = self._gemini_vertex_ai_client.models.generate_content(
            model=self._model,
            contents=prompt,
            config=GenerateContentConfig(
                system_instruction=[instructions],
                max_output_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
            ),
        )

        # Newlines are stripped so game answers stay single-line.
        text = str(chat_response.text).strip().replace("\n", "")
        logger.info(f"Generated text: {text}")
        return text

    def generate_sketch(
        self,
        prompt: str,
        instructions: str,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> bytes:
        """Generate an image via Gemini; returns b"" when no image comes back."""
        # Fall back to the instance defaults, mirroring generate_text();
        # previously None was passed straight through to the API.
        if temperature is None:
            temperature = self._chat_model_temperature
        if top_p is None:
            top_p = self._chat_model_top_p

        image_gen_response = self._gemini_vertex_ai_client.models.generate_content(
            model="gemini-2.0-flash-preview-image-generation",
            contents=prompt,
            config=GenerateContentConfig(
                system_instruction=[instructions],
                temperature=temperature,
                top_p=top_p,
                response_modalities=["IMAGE"],
            ),
        )

        if (
            image_gen_response.candidates
            and image_gen_response.candidates[0].content
            and image_gen_response.candidates[0].content.parts
        ):
            # Return the first inline image payload found.
            for part in image_gen_response.candidates[0].content.parts:
                if part.inline_data is not None and part.inline_data.data is not None:
                    return part.inline_data.data

        return b""
@@ -0,0 +1,53 @@
1
+ from typing import Optional
2
+
3
+ from loguru import logger
4
+ from ollama import Options, chat, show
5
+
6
+ from ai_plays_jackbox.llm.chat_model import ChatModel
7
+
8
+
9
class OllamaModel(ChatModel):
    """ChatModel implementation backed by a local Ollama server."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Check connection and if model exists, this will hard fail if connection can't be made
        # Or if the model is not found
        _ = show(self._model)

    @classmethod
    def get_default_model(cls):
        """Default Ollama model identifier."""
        return "gemma3:12b"

    def generate_text(
        self,
        prompt: str,
        instructions: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> str:
        """Generate a single-line text completion via Ollama."""
        if temperature is None:
            temperature = self._chat_model_temperature
        if top_p is None:
            top_p = self._chat_model_top_p

        instructions_formatted = {"role": "system", "content": instructions}
        chat_response = chat(
            model=self._model,
            messages=[instructions_formatted, {"role": "user", "content": prompt}],
            stream=False,
            options=Options(num_predict=max_tokens, temperature=temperature, top_p=top_p),
        )
        # Newlines become spaces so game answers stay single-line.
        text = str(chat_response.message.content).strip().replace("\n", " ")
        logger.info(f"Generated text: {text}")
        return text

    def generate_sketch(
        self,
        prompt: str,
        instructions: str,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> bytes:
        """Sketch generation is not supported by the Ollama backend."""
        # NotImplementedError (a subclass of Exception, so existing callers
        # still catch it) is the idiomatic signal for an unsupported feature.
        raise NotImplementedError("Ollama model not supported yet for sketches")
@@ -0,0 +1,86 @@
1
+ import base64
2
+ import os
3
+ from typing import Optional
4
+
5
+ from loguru import logger
6
+ from openai import OpenAI
7
+ from openai.types.chat import (
8
+ ChatCompletionDeveloperMessageParam,
9
+ ChatCompletionUserMessageParam,
10
+ )
11
+ from openai.types.responses import Response
12
+
13
+ from ai_plays_jackbox.llm.chat_model import ChatModel
14
+
15
+
16
class OpenAIModel(ChatModel):
    """ChatModel implementation backed by the OpenAI API."""

    _open_ai_client: OpenAI

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._open_ai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

        # Check connection and if model exists, this will hard fail if connection can't be made
        # Or if the model is not found
        _ = self._open_ai_client.models.retrieve(self._model)

    @classmethod
    def get_default_model(cls):
        """Default OpenAI model identifier."""
        return "gpt-4o-mini"

    def generate_text(
        self,
        prompt: str,
        instructions: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> str:
        """Generate a single-line text completion via Chat Completions."""
        if temperature is None:
            temperature = self._chat_model_temperature
        if top_p is None:
            top_p = self._chat_model_top_p

        chat_response = self._open_ai_client.chat.completions.create(
            model=self._model,
            messages=[
                ChatCompletionDeveloperMessageParam(content=instructions, role="developer"),
                ChatCompletionUserMessageParam(content=prompt, role="user"),
            ],
            stream=False,
            max_completion_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        # Newlines are stripped so game answers stay single-line.
        text = str(chat_response.choices[0].message.content).strip().replace("\n", "")
        logger.info(f"Generated text: {text}")
        return text

    def generate_sketch(
        self,
        prompt: str,
        instructions: str,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> bytes:
        """Generate an image via the Responses image_generation tool.

        Returns the decoded image bytes, or b"" when no image was produced.
        """
        # Fall back to the instance defaults, mirroring generate_text();
        # previously None was passed straight through to the API.
        if temperature is None:
            temperature = self._chat_model_temperature
        if top_p is None:
            top_p = self._chat_model_top_p

        image_gen_response: Response = self._open_ai_client.responses.create(
            model=self._model,
            instructions=instructions,
            input=prompt,
            temperature=temperature,
            top_p=top_p,
            tools=[
                {
                    "type": "image_generation",
                    "quality": "low",
                    "size": "1024x1024",
                }
            ],
        )
        # Extract the base64 payload of the first image_generation call
        # (the old "save to a file" comment was inaccurate — nothing is saved).
        image_data = [output.result for output in image_gen_response.output if output.type == "image_generation_call"]
        image_base64 = ""
        if image_data:
            image_base64 = str(image_data[0])

        return base64.b64decode(image_base64)
File without changes
@@ -0,0 +1,87 @@
1
+ import random
2
+ import threading
3
+ from time import sleep
4
+ from typing import Optional
5
+
6
+ import requests
7
+ from loguru import logger
8
+
9
+ from ai_plays_jackbox.bot.bot_base import JackBoxBotBase
10
+ from ai_plays_jackbox.bot.bot_factory import JackBoxBotFactory
11
+ from ai_plays_jackbox.bot.bot_personality import JackBoxBotVariant
12
+ from ai_plays_jackbox.constants import ECAST_HOST
13
+ from ai_plays_jackbox.llm.chat_model import ChatModel
14
+
15
+
16
class JackBoxRoom:
    """Connects a set of AI bots to a JackBox room and runs them until done."""

    _bots: list[JackBoxBotBase]

    def __init__(self):
        self._bots = []
        self._lock = threading.Lock()

    def play(
        self,
        room_code: str,
        chat_model: ChatModel,
        num_of_bots: int = 4,
        bots_in_play: Optional[list[str]] = None,
    ):
        """Join `room_code` with `num_of_bots` bots and block until all disconnect.

        Args:
            room_code: 4-letter JackBox room code.
            chat_model: shared ChatModel instance for all bots.
            num_of_bots: how many bots to create.
            bots_in_play: optional allow-list of JackBoxBotVariant names.
        """
        room_type = self._get_room_type(room_code)
        if not room_type:
            logger.error(f"Unable to find room {room_code}")
            return
        logger.info(f"We're playing {room_type}!")
        bot_factory = JackBoxBotFactory()
        all_variants = list(JackBoxBotVariant)
        if not bots_in_play:
            # Prefer unique personalities; fall back to sampling with
            # replacement when more bots are requested than variants exist
            # (random.sample would raise ValueError in that case).
            if num_of_bots <= len(all_variants):
                bots_to_make = random.sample(all_variants, num_of_bots)
            else:
                bots_to_make = random.choices(all_variants, k=num_of_bots)
        else:
            bots_in_play_variants = [variant for variant in all_variants if variant.name in bots_in_play]
            bots_to_make = random.choices(bots_in_play_variants, k=num_of_bots)

        for b in bots_to_make:
            bot = bot_factory.get_bot(
                room_type,
                chat_model,
                name=b.value.name,
                personality=b.value.personality,
            )
            self._bots.append(bot)
            with self._lock:
                bot.connect(room_code)
            # Stagger connections so the room isn't hit all at once.
            sleep(0.5)

        try:
            while True:
                sleep(1)
                if self.is_finished():
                    # Use the logger like the rest of this module (was print()).
                    logger.info("All bots disconnected, ending...")
                    break
        except KeyboardInterrupt:
            self.end()

    def is_finished(self) -> bool:
        """True when every bot has disconnected."""
        return all(b.is_disconnected() for b in self._bots)

    def end(self):
        """Disconnect every bot (serialized by the room lock)."""
        for b in self._bots:
            with self._lock:
                b.disconnect()

    def _get_room_type(self, room_code: str) -> str:
        """Look up the room's app tag via the ecast API.

        Returns "" when the room does not exist, the service is unreachable,
        or the response is malformed — play() treats "" as "room not found".
        """
        try:
            response = requests.get(
                f"https://{ECAST_HOST}/api/v2/rooms/{room_code}",
                headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0"},
            )
            response.raise_for_status()
            response_data = response.json()
        except requests.HTTPError:
            # 404 simply means "no such room"; anything else is worth logging.
            if response.status_code != 404:
                logger.error(f"Received {response.status_code} when trying to get room type.")
            return ""
        except (requests.RequestException, ValueError) as e:
            # Connection failures and non-JSON bodies previously propagated
            # uncaught; treat them as "room not found" instead of crashing.
            logger.error(f"Failed to look up room {room_code}: {e}")
            return ""
        # .get() guards against an unexpected payload shape (was a KeyError).
        return response_data.get("body", {}).get("appTag", "")