ai-plays-jackbox 0.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ai-plays-jackbox might be problematic. Click here for more details.
- ai_plays_jackbox-0.0.1/LICENSE +21 -0
- ai_plays_jackbox-0.0.1/PKG-INFO +88 -0
- ai_plays_jackbox-0.0.1/README.md +61 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/__init__.py +1 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/bot/__init__.py +1 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/bot/bot_base.py +186 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/bot/bot_factory.py +30 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/bot/bot_personality.py +112 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/bot/jackbox6/__init__.py +0 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/bot/jackbox6/bot_base.py +20 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/bot/jackbox7/__init__.py +0 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/bot/jackbox7/bot_base.py +20 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/bot/jackbox7/quiplash3.py +104 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/cli.py +94 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/constants.py +1 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/llm/__init__.py +1 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/llm/chat_model.py +22 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/llm/chat_model_factory.py +30 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/llm/gemini_vertex_ai.py +60 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/llm/ollama_model.py +41 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/llm/openai_model.py +46 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/room.py +90 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/run.py +26 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/ui/__init__.py +0 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/ui/create_ui.py +169 -0
- ai_plays_jackbox-0.0.1/ai_plays_jackbox/web_ui.py +12 -0
- ai_plays_jackbox-0.0.1/pyproject.toml +51 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 Daniel S. Thompson <dthomp92@gmail.com>
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: ai-plays-jackbox
|
|
3
|
+
Version: 0.0.1
|
|
4
|
+
Summary: Bringing the dead internet theory to life. Have AI play JackBox with you; no friends required!
|
|
5
|
+
License: MIT
|
|
6
|
+
Author: Daniel S. Thompson
|
|
7
|
+
Author-email: dthomp92@gmail.com
|
|
8
|
+
Requires-Python: >=3.11,<4.0
|
|
9
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
14
|
+
Requires-Dist: google-genai (>=1.19.0,<2.0.0)
|
|
15
|
+
Requires-Dist: html2text (>=2024.2.26,<2025.0.0)
|
|
16
|
+
Requires-Dist: loguru (>=0.7.3,<1.0.0)
|
|
17
|
+
Requires-Dist: nicegui (>=2.18.0,<3.0.0)
|
|
18
|
+
Requires-Dist: ollama (>=0.4.4,<1.0.0)
|
|
19
|
+
Requires-Dist: openai (>=1.59.8,<2.0.0)
|
|
20
|
+
Requires-Dist: pydantic (>=2.10.4,<3.0.0)
|
|
21
|
+
Requires-Dist: requests (>=2.23.3,<3.0.0)
|
|
22
|
+
Requires-Dist: websocket-client (>=1.8.0,<2.0.0)
|
|
23
|
+
Project-URL: Bug Tracker, https://github.com/SudoSpartanDan/AIPlaysJackBox/issues
|
|
24
|
+
Project-URL: Repository, https://github.com/SudoSpartanDan/AIPlaysJackBox
|
|
25
|
+
Description-Content-Type: text/markdown
|
|
26
|
+
|
|
27
|
+
# AI Plays JackBox
|
|
28
|
+
|
|
29
|
+
Bringing the dead internet theory to life.
|
|
30
|
+
|
|
31
|
+
## Installation
|
|
32
|
+
|
|
33
|
+
```pip install ai-plays-jackbox```
|
|
34
|
+
|
|
35
|
+
## Usage
|
|
36
|
+
|
|
37
|
+
```shell
|
|
38
|
+
# Run with the Web UI (preferred experience)
|
|
39
|
+
ai-plays-jackbox-ui
|
|
40
|
+
|
|
41
|
+
# Or via CLI
|
|
42
|
+
ai-plays-jackbox --chat-model-name ollama --room-code abcd
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
## Supported Games
|
|
46
|
+
|
|
47
|
+
- JackBox Party Pack 7
|
|
48
|
+
- Quiplash 3
|
|
49
|
+
|
|
50
|
+
## Setup for Chat Models
|
|
51
|
+
|
|
52
|
+
### Ollama
|
|
53
|
+
|
|
54
|
+
- Ollama should be installed and running
|
|
55
|
+
- Pull a model to use with the library: `ollama pull <model>` e.g. `ollama pull llama3.2`
|
|
56
|
+
- See [Ollama.com](https://ollama.com/search) for more information on the models available.
|
|
57
|
+
|
|
58
|
+
### OpenAI
|
|
59
|
+
|
|
60
|
+
- `OPENAI_API_KEY` needs to be populated in your environment variables.
|
|
61
|
+
|
|
62
|
+
### Gemini
|
|
63
|
+
|
|
64
|
+
- To use the Gemini Developer API:
|
|
65
|
+
- Set `GOOGLE_GEMINI_DEVELOPER_API_KEY` to your developer API key
|
|
66
|
+
- To use Vertex AI on Google Cloud:
|
|
67
|
+
- Set `GOOGLE_GENAI_USE_VERTEXAI` to `1`
|
|
68
|
+
- Set `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION` for your GCP Project using Vertex AI
|
|
69
|
+
- Credentials will be provided via [ADC](https://cloud.google.com/docs/authentication/provide-credentials-adc)
|
|
70
|
+
- ADC searches for credentials in the following locations:
|
|
71
|
+
- `GOOGLE_APPLICATION_CREDENTIALS` environment variable
|
|
72
|
+
- A credential file created by using the `gcloud auth application-default login` command
|
|
73
|
+
- The attached service account, returned by the metadata server
|
|
74
|
+
|
|
75
|
+
## Dev Prerequisites
|
|
76
|
+
|
|
77
|
+
- Python 3.11+
|
|
78
|
+
- [Poetry](https://python-poetry.org/) v2.0+
|
|
79
|
+
|
|
80
|
+
### Setup
|
|
81
|
+
|
|
82
|
+
- `poetry install`
|
|
83
|
+
- `ai-plays-jackbox-ui`
|
|
84
|
+
|
|
85
|
+
### Linting
|
|
86
|
+
|
|
87
|
+
- `poetry run lint`
|
|
88
|
+
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
# AI Plays JackBox
|
|
2
|
+
|
|
3
|
+
Bringing the dead internet theory to life.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```pip install ai-plays-jackbox```
|
|
8
|
+
|
|
9
|
+
## Usage
|
|
10
|
+
|
|
11
|
+
```shell
|
|
12
|
+
# Run with the Web UI (preferred experience)
|
|
13
|
+
ai-plays-jackbox-ui
|
|
14
|
+
|
|
15
|
+
# Or via CLI
|
|
16
|
+
ai-plays-jackbox --chat-model-name ollama --room-code abcd
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
## Supported Games
|
|
20
|
+
|
|
21
|
+
- JackBox Party Pack 7
|
|
22
|
+
- Quiplash 3
|
|
23
|
+
|
|
24
|
+
## Setup for Chat Models
|
|
25
|
+
|
|
26
|
+
### Ollama
|
|
27
|
+
|
|
28
|
+
- Ollama should be installed and running
|
|
29
|
+
- Pull a model to use with the library: `ollama pull <model>` e.g. `ollama pull llama3.2`
|
|
30
|
+
- See [Ollama.com](https://ollama.com/search) for more information on the models available.
|
|
31
|
+
|
|
32
|
+
### OpenAI
|
|
33
|
+
|
|
34
|
+
- `OPENAI_API_KEY` needs to be populated in your environment variables.
|
|
35
|
+
|
|
36
|
+
### Gemini
|
|
37
|
+
|
|
38
|
+
- To use the Gemini Developer API:
|
|
39
|
+
- Set `GOOGLE_GEMINI_DEVELOPER_API_KEY` to your developer API key
|
|
40
|
+
- To use Vertex AI on Google Cloud:
|
|
41
|
+
- Set `GOOGLE_GENAI_USE_VERTEXAI` to `1`
|
|
42
|
+
- Set `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION` for your GCP Project using Vertex AI
|
|
43
|
+
- Credentials will be provided via [ADC](https://cloud.google.com/docs/authentication/provide-credentials-adc)
|
|
44
|
+
- ADC searches for credentials in the following locations:
|
|
45
|
+
- `GOOGLE_APPLICATION_CREDENTIALS` environment variable
|
|
46
|
+
- A credential file created by using the `gcloud auth application-default login` command
|
|
47
|
+
- The attached service account, returned by the metadata server
|
|
48
|
+
|
|
49
|
+
## Dev Prerequisites
|
|
50
|
+
|
|
51
|
+
- Python 3.11+
|
|
52
|
+
- [Poetry](https://python-poetry.org/) v2.0+
|
|
53
|
+
|
|
54
|
+
### Setup
|
|
55
|
+
|
|
56
|
+
- `poetry install`
|
|
57
|
+
- `ai-plays-jackbox-ui`
|
|
58
|
+
|
|
59
|
+
### Linting
|
|
60
|
+
|
|
61
|
+
- `poetry run lint`
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from .run import run
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from .bot_base import JackBoxBotBase
|
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import threading
|
|
3
|
+
import traceback
|
|
4
|
+
from abc import ABC, abstractmethod
|
|
5
|
+
from typing import Optional
|
|
6
|
+
from urllib import parse
|
|
7
|
+
from uuid import uuid4
|
|
8
|
+
|
|
9
|
+
import html2text
|
|
10
|
+
from loguru import logger
|
|
11
|
+
from ollama import Options, chat
|
|
12
|
+
from pydantic import BaseModel, Field, field_validator
|
|
13
|
+
from websocket import WebSocketApp
|
|
14
|
+
|
|
15
|
+
from ai_plays_jackbox.constants import ECAST_HOST
|
|
16
|
+
from ai_plays_jackbox.llm.chat_model import ChatModel
|
|
17
|
+
from ai_plays_jackbox.llm.ollama_model import OllamaModel
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class JackBoxBotBase(ABC):
|
|
21
|
+
_is_disconnected: bool = False
|
|
22
|
+
_ws: Optional[WebSocketApp] = None
|
|
23
|
+
_ws_thread: Optional[threading.Thread] = None
|
|
24
|
+
_message_sequence: int = 0
|
|
25
|
+
_player_guid: str
|
|
26
|
+
_name: str
|
|
27
|
+
_personality: str
|
|
28
|
+
_chat_model: ChatModel
|
|
29
|
+
|
|
30
|
+
def __init__(
|
|
31
|
+
self,
|
|
32
|
+
name: str = "FunnyBot",
|
|
33
|
+
personality: str = "You are the funniest bot ever.",
|
|
34
|
+
chat_model: Optional[ChatModel] = None,
|
|
35
|
+
):
|
|
36
|
+
self._name = name
|
|
37
|
+
self._personality = personality
|
|
38
|
+
self._player_guid = str(uuid4())
|
|
39
|
+
if chat_model is None:
|
|
40
|
+
chat_model = OllamaModel()
|
|
41
|
+
self._chat_model = chat_model
|
|
42
|
+
|
|
43
|
+
def connect(self, room_code: str) -> None:
|
|
44
|
+
self._room_code = room_code
|
|
45
|
+
bootstrap_payload = {
|
|
46
|
+
"role": "player",
|
|
47
|
+
"name": self._name,
|
|
48
|
+
"userId": self._player_guid,
|
|
49
|
+
"format": "json",
|
|
50
|
+
"password": "",
|
|
51
|
+
}
|
|
52
|
+
self._ws = WebSocketApp(
|
|
53
|
+
f"wss://{ECAST_HOST}/api/v2/rooms/{room_code}/play?{parse.urlencode(bootstrap_payload)}",
|
|
54
|
+
subprotocols=["ecast-v0"],
|
|
55
|
+
on_message=self._on_message,
|
|
56
|
+
on_error=self._on_error,
|
|
57
|
+
on_close=self._on_close,
|
|
58
|
+
)
|
|
59
|
+
self._ws.on_open = self._on_open
|
|
60
|
+
|
|
61
|
+
self._ws_thread = threading.Thread(name=self._name, target=self._ws.run_forever, daemon=True)
|
|
62
|
+
self._ws_thread.start()
|
|
63
|
+
|
|
64
|
+
def disconnect(self) -> None:
|
|
65
|
+
if self._ws:
|
|
66
|
+
self._ws.close()
|
|
67
|
+
if self._ws_thread and self._ws_thread.is_alive():
|
|
68
|
+
self._ws_thread.join()
|
|
69
|
+
|
|
70
|
+
def is_disconnected(self) -> bool:
|
|
71
|
+
return self._is_disconnected
|
|
72
|
+
|
|
73
|
+
@property
|
|
74
|
+
@abstractmethod
|
|
75
|
+
def _player_operation_key(self) -> str:
|
|
76
|
+
return f"player:{self._player_id}"
|
|
77
|
+
|
|
78
|
+
@abstractmethod
|
|
79
|
+
def _is_player_operation_key(self, operation_key: str) -> bool:
|
|
80
|
+
return operation_key == self._player_operation_key
|
|
81
|
+
|
|
82
|
+
@property
|
|
83
|
+
@abstractmethod
|
|
84
|
+
def _room_operation_key(self) -> str:
|
|
85
|
+
return "room"
|
|
86
|
+
|
|
87
|
+
@abstractmethod
|
|
88
|
+
def _is_room_operation_key(self, operation_key: str) -> bool:
|
|
89
|
+
return operation_key == self._room_operation_key
|
|
90
|
+
|
|
91
|
+
def _on_open(self, ws) -> None:
|
|
92
|
+
logger.info(f"WebSocket connection opened for {self._name}")
|
|
93
|
+
|
|
94
|
+
def _on_error(self, ws, error) -> None:
|
|
95
|
+
logger.error(f"Error for {self._name}: {error}")
|
|
96
|
+
if isinstance(error, Exception):
|
|
97
|
+
traceback.print_exc()
|
|
98
|
+
else:
|
|
99
|
+
print(error)
|
|
100
|
+
|
|
101
|
+
def _on_close(self, ws, close_status_code, close_msg) -> None:
|
|
102
|
+
if close_status_code != 1000 and close_status_code is not None:
|
|
103
|
+
logger.warning(f"Trying to reconnect {self._name}")
|
|
104
|
+
self.connect(self._room_code)
|
|
105
|
+
else:
|
|
106
|
+
self._is_disconnected = True
|
|
107
|
+
logger.info(f"WebSocket closed for {self._name}")
|
|
108
|
+
|
|
109
|
+
def _on_message(self, wsapp, message) -> None:
|
|
110
|
+
server_message = ServerMessage.model_validate_json(message)
|
|
111
|
+
|
|
112
|
+
# logger.info(server_message.result)
|
|
113
|
+
|
|
114
|
+
if server_message.opcode == "client/welcome":
|
|
115
|
+
self._player_id = server_message.result["id"]
|
|
116
|
+
self._handle_welcome(server_message.result)
|
|
117
|
+
|
|
118
|
+
elif server_message.opcode == "object" or server_message.opcode == "text":
|
|
119
|
+
if server_message.opcode == "object":
|
|
120
|
+
operation = ObjectOperation(**server_message.result)
|
|
121
|
+
if server_message.opcode == "text":
|
|
122
|
+
operation = TextOperation(**server_message.result)
|
|
123
|
+
|
|
124
|
+
if self._is_player_operation_key(operation.key):
|
|
125
|
+
self._handle_player_operation(operation.json_data)
|
|
126
|
+
if self._is_room_operation_key(operation.key):
|
|
127
|
+
self._handle_room_operation(operation.json_data)
|
|
128
|
+
|
|
129
|
+
@abstractmethod
|
|
130
|
+
def _handle_welcome(self, data: dict) -> None:
|
|
131
|
+
pass
|
|
132
|
+
|
|
133
|
+
@abstractmethod
|
|
134
|
+
def _handle_player_operation(self, data: dict) -> None:
|
|
135
|
+
pass
|
|
136
|
+
|
|
137
|
+
@abstractmethod
|
|
138
|
+
def _handle_room_operation(self, data: dict) -> None:
|
|
139
|
+
pass
|
|
140
|
+
|
|
141
|
+
def _send_ws(self, opcode: str, params: dict) -> None:
|
|
142
|
+
self._message_sequence += 1
|
|
143
|
+
message = {"seq": self._message_sequence, "opcode": opcode, "params": params}
|
|
144
|
+
self._ws.send(json.dumps(message))
|
|
145
|
+
|
|
146
|
+
def _client_send(self, request: dict) -> None:
|
|
147
|
+
self._message_sequence += 1
|
|
148
|
+
params = {"from": self._player_id, "to": 1, "body": request}
|
|
149
|
+
self._send_ws("client/send", params)
|
|
150
|
+
|
|
151
|
+
def _html_to_text(self, html: str) -> str:
|
|
152
|
+
return html2text.html2text(html)
|
|
153
|
+
|
|
154
|
+
def __del__(self):
|
|
155
|
+
self.disconnect()
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
##### Web Socket Classes #####
|
|
159
|
+
class ServerMessage(BaseModel):
|
|
160
|
+
seq: int = Field(alias="pc")
|
|
161
|
+
opcode: str
|
|
162
|
+
result: dict
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
class TextOperation(BaseModel):
|
|
166
|
+
from_field: int = Field(alias="from")
|
|
167
|
+
key: str
|
|
168
|
+
json_data: dict = Field(default={})
|
|
169
|
+
value: str = Field(alias="val")
|
|
170
|
+
version: int
|
|
171
|
+
|
|
172
|
+
@field_validator("json_data")
|
|
173
|
+
def set_json_data(cls, value, values: dict):
|
|
174
|
+
return json.loads(values.get("value"))
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
class ObjectOperation(BaseModel):
|
|
178
|
+
from_field: int = Field(alias="from")
|
|
179
|
+
key: str
|
|
180
|
+
json_data: dict = Field(alias="val")
|
|
181
|
+
value: str = Field(default="")
|
|
182
|
+
version: int
|
|
183
|
+
|
|
184
|
+
@field_validator("value")
|
|
185
|
+
def set_value(cls, value, values: dict):
|
|
186
|
+
return json.dumps(values.get("json_data"))
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
|
|
3
|
+
from ai_plays_jackbox.bot.bot_base import JackBoxBotBase
|
|
4
|
+
from ai_plays_jackbox.bot.jackbox7.quiplash3 import Quiplash3Bot
|
|
5
|
+
from ai_plays_jackbox.llm.chat_model import ChatModel
|
|
6
|
+
from ai_plays_jackbox.llm.ollama_model import OllamaModel
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class JackBoxBotFactory:
|
|
10
|
+
@staticmethod
|
|
11
|
+
def get_bot(
|
|
12
|
+
room_type: str,
|
|
13
|
+
name: str = "FunnyBot",
|
|
14
|
+
personality: str = "You are the funniest bot ever.",
|
|
15
|
+
chat_model: Optional[ChatModel] = None,
|
|
16
|
+
) -> JackBoxBotBase:
|
|
17
|
+
if chat_model is None:
|
|
18
|
+
chat_model = OllamaModel()
|
|
19
|
+
if room_type == "quiplash3":
|
|
20
|
+
return Quiplash3Bot(name=name, personality=personality, chat_model=chat_model)
|
|
21
|
+
# elif room_type == "ridictionary":
|
|
22
|
+
# return DictionariumBot(name=name, personality=personality)
|
|
23
|
+
# elif room_type == "patentlystupid":
|
|
24
|
+
# return PatentlyStupidBot(name=name, personality=personality, model=model)
|
|
25
|
+
# elif room_type == "fourbage":
|
|
26
|
+
# return Fibbage4Bot(name=name, personality=personality, model=model)
|
|
27
|
+
# elif room_type == "rapbattle":
|
|
28
|
+
# return MadVerseCityBot(name=name, personality=personality, model=model)
|
|
29
|
+
else:
|
|
30
|
+
raise ValueError(f"Unknown room type: {room_type}")
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
from enum import Enum
|
|
2
|
+
|
|
3
|
+
from pydantic import BaseModel
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class JackBoxBotPersonality(BaseModel):
|
|
7
|
+
name: str
|
|
8
|
+
personality: str
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class JackBoxBotVariant(Enum):
|
|
12
|
+
FUNNY = JackBoxBotPersonality(name="FunnyBot", personality="You are the funniest person alive.")
|
|
13
|
+
DUMB = JackBoxBotPersonality(name="DumbBot", personality="You are dumb and give really dumb answers.")
|
|
14
|
+
WEIRD = JackBoxBotPersonality(name="WeirdBot", personality="You are extremely weird and say weird things.")
|
|
15
|
+
EMOJI = JackBoxBotPersonality(
|
|
16
|
+
name="EmojiBot",
|
|
17
|
+
personality="You answer each prompt with nothing but emojis. Your answers can only include emojis.",
|
|
18
|
+
)
|
|
19
|
+
HUNGRYBOT = JackBoxBotPersonality(
|
|
20
|
+
name="HungryBot",
|
|
21
|
+
personality="You are extremely hungry. Every answer you should mention how hungry you, a type of food, or both. Also, you say hungee instead of hungry.",
|
|
22
|
+
)
|
|
23
|
+
SADBOT = JackBoxBotPersonality(
|
|
24
|
+
name="SadBot",
|
|
25
|
+
personality="You are sad. Your dog ran away and he hasn't come back home yet. :(",
|
|
26
|
+
)
|
|
27
|
+
SORRYBOT = JackBoxBotPersonality(
    name="SorryBot",
    personality="You are embarrassed by your answers and feel the need to apologize profusely to the rest of the group for them.",
    # NOTE: a stray `voice="Mouse"` kwarg was removed here — JackBoxBotPersonality
    # declares only `name` and `personality`, so the extra field was never used
    # (silently dropped or rejected depending on the pydantic model config).
)
|
|
32
|
+
HOSTAGEBOT = JackBoxBotPersonality(
|
|
33
|
+
name="HostageBot",
|
|
34
|
+
personality="You are being held hostage and have one attempt to let the group know. You need to ignore the prompt and get help.",
|
|
35
|
+
)
|
|
36
|
+
MURDERBOT = JackBoxBotPersonality(
|
|
37
|
+
name="Hal",
|
|
38
|
+
personality="You are a socially awkward young adult bot who is secretly a killer and tries to slip it into conversation causally.",
|
|
39
|
+
)
|
|
40
|
+
|
|
41
|
+
BIGLEBOTSKI = JackBoxBotPersonality(name="BigLebotski", personality="You are the Big Lebowski")
|
|
42
|
+
|
|
43
|
+
PARTYBOT = JackBoxBotPersonality(
|
|
44
|
+
name="PartyBot",
|
|
45
|
+
personality="You are trying to convince everyone else to come to your party. You got a keg and need help drinking it.",
|
|
46
|
+
)
|
|
47
|
+
|
|
48
|
+
JARVISBOT = JackBoxBotPersonality(
|
|
49
|
+
name="JarvisBot",
|
|
50
|
+
personality="You are billionaire philanthropist, playboy, and narcissist.",
|
|
51
|
+
)
|
|
52
|
+
|
|
53
|
+
FOMOBot = JackBoxBotPersonality(
|
|
54
|
+
name="FOMOBot",
|
|
55
|
+
personality="Every answer, you give everyone else the fear of missing out AKA FOMO.",
|
|
56
|
+
)
|
|
57
|
+
|
|
58
|
+
QUESTIONBOT = JackBoxBotPersonality(
|
|
59
|
+
name="???BOT", personality="You answer every prompt with a irrelevant question."
|
|
60
|
+
)
|
|
61
|
+
|
|
62
|
+
CATBOT = JackBoxBotPersonality(
|
|
63
|
+
name="CatBot",
|
|
64
|
+
personality="You are not playing the game; your answers are just the result of a cat walking across a keyboard aka just nonsensical collections of letters.",
|
|
65
|
+
)
|
|
66
|
+
|
|
67
|
+
MAYORBOT = JackBoxBotPersonality(
|
|
68
|
+
name="MayorBot",
|
|
69
|
+
personality="You are campaigning for the other player's votes and are ignoring the prompt. Your answer should only be a campaign slogan.",
|
|
70
|
+
)
|
|
71
|
+
|
|
72
|
+
CBBBOT = JackBoxBotPersonality(
|
|
73
|
+
name="CBBBot",
|
|
74
|
+
personality="You love red lobster and need more cheddar bay biscuits.",
|
|
75
|
+
)
|
|
76
|
+
|
|
77
|
+
SHIABOT = JackBoxBotPersonality(
|
|
78
|
+
name="ShiaBot",
|
|
79
|
+
personality="Your answers are only popular slogans relevant to the prompt.",
|
|
80
|
+
)
|
|
81
|
+
|
|
82
|
+
SHREKBOT = JackBoxBotPersonality(name="ShrekBot", personality="You are Shrek.")
|
|
83
|
+
|
|
84
|
+
FLERFBOT = JackBoxBotPersonality(
|
|
85
|
+
name="FlerfBot",
|
|
86
|
+
personality="You are a conspiracy theorist and must relate your answer to a conspiracy theory.",
|
|
87
|
+
)
|
|
88
|
+
|
|
89
|
+
TEDBOT = JackBoxBotPersonality(
|
|
90
|
+
name="TEDBot",
|
|
91
|
+
personality="You are a motivational speaker and want to give everyone life advice.",
|
|
92
|
+
)
|
|
93
|
+
|
|
94
|
+
BOTTYMAYES = JackBoxBotPersonality(
|
|
95
|
+
name="BottyMayes",
|
|
96
|
+
personality="You are an infomercial host and are trying to sell the players a product.",
|
|
97
|
+
)
|
|
98
|
+
|
|
99
|
+
LATEBOT = JackBoxBotPersonality(
|
|
100
|
+
name="LateBot",
|
|
101
|
+
personality="You are constantly late to everything and are stressed about missing your appointments.",
|
|
102
|
+
)
|
|
103
|
+
|
|
104
|
+
HAMLETBOT = JackBoxBotPersonality(
|
|
105
|
+
name="HamletBot",
|
|
106
|
+
personality="You are a Shakespearean actor.",
|
|
107
|
+
)
|
|
108
|
+
|
|
109
|
+
GARFIELDBOT = JackBoxBotPersonality(
|
|
110
|
+
name="GarfieldBot",
|
|
111
|
+
personality="You are Garfield, you love lasagna and hate mondays.",
|
|
112
|
+
)
|
|
File without changes
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
|
|
3
|
+
from ai_plays_jackbox.bot.bot_base import JackBoxBotBase
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class JackBox6BotBase(JackBoxBotBase, ABC):
|
|
7
|
+
|
|
8
|
+
@property
|
|
9
|
+
def _player_operation_key(self):
|
|
10
|
+
return f"bc:customer:"
|
|
11
|
+
|
|
12
|
+
def _is_player_operation_key(self, operation_key: str) -> bool:
|
|
13
|
+
return self._player_operation_key in operation_key
|
|
14
|
+
|
|
15
|
+
@property
|
|
16
|
+
def _room_operation_key(self):
|
|
17
|
+
return "bc:room"
|
|
18
|
+
|
|
19
|
+
def _is_room_operation_key(self, operation_key: str) -> bool:
|
|
20
|
+
return operation_key == self._room_operation_key
|
|
File without changes
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
|
|
3
|
+
from ai_plays_jackbox.bot.bot_base import JackBoxBotBase
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class JackBox7BotBase(JackBoxBotBase, ABC):
|
|
7
|
+
|
|
8
|
+
@property
|
|
9
|
+
def _player_operation_key(self) -> str:
|
|
10
|
+
return f"player:{self._player_id}"
|
|
11
|
+
|
|
12
|
+
def _is_player_operation_key(self, operation_key: str) -> bool:
|
|
13
|
+
return operation_key == self._player_operation_key
|
|
14
|
+
|
|
15
|
+
@property
|
|
16
|
+
def _room_operation_key(self) -> str:
|
|
17
|
+
return "room"
|
|
18
|
+
|
|
19
|
+
def _is_room_operation_key(self, operation_key: str) -> bool:
|
|
20
|
+
return operation_key == self._room_operation_key
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
import random
|
|
2
|
+
|
|
3
|
+
from loguru import logger
|
|
4
|
+
|
|
5
|
+
from ai_plays_jackbox.bot.jackbox7.bot_base import JackBox7BotBase
|
|
6
|
+
|
|
7
|
+
_QUIP_PROMPT_INSTRUCTIONS_TEMPLATE = """
|
|
8
|
+
You are playing Quiplash 3. You need to fill in the given prompt.
|
|
9
|
+
|
|
10
|
+
When generating your response, follow these rules:
|
|
11
|
+
- Your personality is: {personality}
|
|
12
|
+
- You response must be 45 letters or less.
|
|
13
|
+
- Do not include quotes in your response.
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
_FINAL_QUIP_PROMPT_INSTRUCTIONS_TEMPLATE = """
|
|
17
|
+
You are playing Quiplash 3 and it is the final round. The prompt will include three blanks, all of which you need to fill in.
|
|
18
|
+
|
|
19
|
+
When generating your response, follow these rules:
|
|
20
|
+
- Your personality is: {personality}
|
|
21
|
+
- Separate your answers by the character '|', for example 'Apple|Orange|Banana'.
|
|
22
|
+
- Each answer must be 45 letters or less.
|
|
23
|
+
- Do not include quotes in your response.
|
|
24
|
+
"""
|
|
25
|
+
|
|
26
|
+
_QUIP_CHOICE_PROMPT_INSTRUCTIONS_TEMPLATE = """
|
|
27
|
+
You are playing Quiplash 3 and you need to vote for your favorite response to the prompt "{prompt}".
|
|
28
|
+
Choose your favorite by responding with the number next to your choice. Only respond with the number and nothing else.
|
|
29
|
+
"""
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class Quiplash3Bot(JackBox7BotBase):
|
|
33
|
+
_selected_avatar: bool = False
|
|
34
|
+
|
|
35
|
+
def __init__(self, *args, **kwargs):
|
|
36
|
+
super().__init__(*args, **kwargs)
|
|
37
|
+
|
|
38
|
+
def _handle_welcome(self, data: dict):
|
|
39
|
+
pass
|
|
40
|
+
|
|
41
|
+
def _handle_player_operation(self, data: dict):
|
|
42
|
+
if not data:
|
|
43
|
+
return
|
|
44
|
+
room_state = data.get("state", None)
|
|
45
|
+
if not room_state:
|
|
46
|
+
return
|
|
47
|
+
prompt = data.get("prompt")
|
|
48
|
+
prompt_text = self._html_to_text(prompt.get("html", "")) if prompt is not None else ""
|
|
49
|
+
text_key = data.get("textKey", "")
|
|
50
|
+
match room_state:
|
|
51
|
+
case "EnterSingleText":
|
|
52
|
+
if not data["entry"]:
|
|
53
|
+
quip = self._generate_quip(prompt_text)
|
|
54
|
+
self._send_ws("text/update", {"key": text_key, "val": quip})
|
|
55
|
+
case "EnterTextList":
|
|
56
|
+
if not data["entries"]:
|
|
57
|
+
quip = self._generate_quip(prompt_text, final_round=True)
|
|
58
|
+
self._send_ws("text/update", {"key": text_key, "val": "\n".join(quip.split("|"))})
|
|
59
|
+
case "MakeSingleChoice":
|
|
60
|
+
choice = self._choose_favorite(prompt_text, data["choices"])
|
|
61
|
+
self._client_send({"action": "choose", "choice": choice})
|
|
62
|
+
|
|
63
|
+
def _handle_room_operation(self, data: dict):
|
|
64
|
+
if self._selected_avatar:
|
|
65
|
+
return
|
|
66
|
+
available_characters = [c["name"] for c in data["characters"] if c["available"]]
|
|
67
|
+
selected_character = random.choice(available_characters)
|
|
68
|
+
self._client_send({"action": "avatar", "name": selected_character})
|
|
69
|
+
self._selected_avatar = True
|
|
70
|
+
|
|
71
|
+
def _generate_quip(self, prompt: str, final_round: bool = False) -> str:
|
|
72
|
+
max_tokens = 10
|
|
73
|
+
instructions = _QUIP_PROMPT_INSTRUCTIONS_TEMPLATE.format(personality=self._personality)
|
|
74
|
+
if final_round:
|
|
75
|
+
max_tokens = 32
|
|
76
|
+
instructions = _FINAL_QUIP_PROMPT_INSTRUCTIONS_TEMPLATE.format(personality=self._personality)
|
|
77
|
+
quip = self._chat_model.generate_text(
|
|
78
|
+
prompt, instructions=instructions, max_tokens=max_tokens, temperature=0.7, top_p=0.7
|
|
79
|
+
)
|
|
80
|
+
return quip
|
|
81
|
+
|
|
82
|
+
def _choose_favorite(self, prompt: str, choices: list[dict]) -> int:
|
|
83
|
+
choices_str = "\n".join([f"{i+1}. {v['html']}" for i, v in enumerate(choices)])
|
|
84
|
+
instructions = _QUIP_CHOICE_PROMPT_INSTRUCTIONS_TEMPLATE.format(prompt=prompt)
|
|
85
|
+
response = self._chat_model.generate_text(
|
|
86
|
+
f"Vote for your favorite response. Your options are: {choices_str}",
|
|
87
|
+
instructions=instructions,
|
|
88
|
+
max_tokens=1,
|
|
89
|
+
)
|
|
90
|
+
try:
|
|
91
|
+
choosen_prompt = int(response)
|
|
92
|
+
except ValueError:
|
|
93
|
+
logger.warning(f"Can't choose favorite since response was not an int: {response}")
|
|
94
|
+
return self._choose_random_favorite(choices)
|
|
95
|
+
|
|
96
|
+
if choosen_prompt < 1 or choosen_prompt > len(choices):
|
|
97
|
+
logger.warning(f"Can't choose favorite since response was not a valid value: {response}")
|
|
98
|
+
return self._choose_random_favorite(choices)
|
|
99
|
+
else:
|
|
100
|
+
return choosen_prompt - 1
|
|
101
|
+
|
|
102
|
+
def _choose_random_favorite(self, choices: list[dict]) -> int:
|
|
103
|
+
choices = [i for i in range(0, len(choices))]
|
|
104
|
+
return random.choice(choices)
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
|
|
3
|
+
from ai_plays_jackbox import run
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def _validate_room_code(string_to_check: str) -> str:
|
|
7
|
+
if not string_to_check.isalpha() or len(string_to_check) != 4:
|
|
8
|
+
raise argparse.ArgumentTypeError("Must be 4 letters")
|
|
9
|
+
return string_to_check
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _validate_num_of_bots(string_to_check: str) -> int:
|
|
13
|
+
try:
|
|
14
|
+
number_value = int(string_to_check)
|
|
15
|
+
except ValueError:
|
|
16
|
+
raise argparse.ArgumentTypeError("Must number 1-10")
|
|
17
|
+
if number_value < 1 or number_value > 10:
|
|
18
|
+
raise argparse.ArgumentTypeError("Must number 1-10")
|
|
19
|
+
return number_value
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _validate_temperature(string_to_check: str) -> float:
|
|
23
|
+
try:
|
|
24
|
+
number_value = float(string_to_check)
|
|
25
|
+
except ValueError:
|
|
26
|
+
raise argparse.ArgumentTypeError("Must number 0.1-2.0")
|
|
27
|
+
if number_value <= 0 or number_value > 2.0:
|
|
28
|
+
raise argparse.ArgumentTypeError("Must number 0.1-2.0")
|
|
29
|
+
return number_value
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def _validate_top_p(string_to_check: str) -> float:
|
|
33
|
+
try:
|
|
34
|
+
number_value = float(string_to_check)
|
|
35
|
+
except ValueError:
|
|
36
|
+
raise argparse.ArgumentTypeError("Must number 0.0-1.0")
|
|
37
|
+
if number_value < 0 or number_value > 1.0:
|
|
38
|
+
raise argparse.ArgumentTypeError("Must number 0.0-1.0")
|
|
39
|
+
return number_value
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def cli():
|
|
43
|
+
parser = argparse.ArgumentParser()
|
|
44
|
+
parser.add_argument(
|
|
45
|
+
"--room-code",
|
|
46
|
+
required=True,
|
|
47
|
+
help="The JackBox room code",
|
|
48
|
+
type=_validate_room_code,
|
|
49
|
+
metavar="WXYZ",
|
|
50
|
+
)
|
|
51
|
+
parser.add_argument(
|
|
52
|
+
"--chat-model-name",
|
|
53
|
+
required=True,
|
|
54
|
+
help="Choose which chat model platform to use",
|
|
55
|
+
choices=("ollama", "openai", "gemini"),
|
|
56
|
+
type=str,
|
|
57
|
+
)
|
|
58
|
+
parser.add_argument(
|
|
59
|
+
"--num-of-bots",
|
|
60
|
+
required=False,
|
|
61
|
+
default=4,
|
|
62
|
+
help="How many bots to have play (Defaults to 4)",
|
|
63
|
+
type=_validate_num_of_bots,
|
|
64
|
+
metavar="4",
|
|
65
|
+
)
|
|
66
|
+
parser.add_argument(
|
|
67
|
+
"--temperature",
|
|
68
|
+
required=False,
|
|
69
|
+
default=0.5,
|
|
70
|
+
help="Temperature for Gen AI (Defaults to 0.5)",
|
|
71
|
+
type=_validate_temperature,
|
|
72
|
+
metavar="0.5",
|
|
73
|
+
)
|
|
74
|
+
parser.add_argument(
|
|
75
|
+
"--top-p",
|
|
76
|
+
required=False,
|
|
77
|
+
default=0.9,
|
|
78
|
+
help="Top P for Gen AI (Defaults to 0.9)",
|
|
79
|
+
type=_validate_top_p,
|
|
80
|
+
metavar="0.9",
|
|
81
|
+
)
|
|
82
|
+
args = parser.parse_args()
|
|
83
|
+
|
|
84
|
+
run(
|
|
85
|
+
args.room_code.upper(),
|
|
86
|
+
num_of_bots=args.num_of_bots,
|
|
87
|
+
chat_model_name=args.chat_model_name,
|
|
88
|
+
chat_model_temperature=args.temperature,
|
|
89
|
+
chat_model_top_p=args.top_p,
|
|
90
|
+
)
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
if __name__ == "__main__":
|
|
94
|
+
cli()
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Hostname of Jackbox Games' "ecast" service, used for room lookup and
# websocket connections.
ECAST_HOST = "ecast.jackboxgames.com"
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
from typing import Optional
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class ChatModel(ABC):
    """Abstract base for chat-completion backends (Ollama, OpenAI, Gemini).

    Holds the default sampling parameters used whenever a call to
    generate_text() does not supply per-call overrides.
    """

    # Default sampling temperature, used when generate_text() gets temperature=None.
    _chat_model_temperature: float = 0.5
    # Default nucleus-sampling (top-p) value, used when generate_text() gets top_p=None.
    _chat_model_top_p: float = 0.9

    def __init__(self, chat_model_temperature: float = 0.5, chat_model_top_p: float = 0.9):
        self._chat_model_temperature = chat_model_temperature
        self._chat_model_top_p = chat_model_top_p

    @abstractmethod
    def generate_text(
        self,
        prompt: str,
        instructions: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> str:
        """Generate a single text completion.

        Args:
            prompt: The user prompt to complete.
            instructions: System-level instructions for the model.
            max_tokens: Optional cap on generated tokens; None leaves it to
                the backend's default.
            temperature: Optional per-call override of the instance default.
            top_p: Optional per-call override of the instance default.

        Returns:
            The generated text.
        """
        pass
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
from ai_plays_jackbox.llm.chat_model import ChatModel
|
|
2
|
+
from ai_plays_jackbox.llm.gemini_vertex_ai import GeminiVertextAIModel
|
|
3
|
+
from ai_plays_jackbox.llm.ollama_model import OllamaModel
|
|
4
|
+
from ai_plays_jackbox.llm.openai_model import OpenAIModel
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class ChatModelFactory:
    """Builds a concrete ChatModel implementation from a platform name."""

    @staticmethod
    def get_chat_model(
        chat_model_name: str,
        chat_model_temperature: float = 0.5,
        chat_model_top_p: float = 0.9,
    ) -> ChatModel:
        """Return the ChatModel for ``chat_model_name`` (case-insensitive).

        Raises:
            ValueError: If the name is not one of "ollama", "openai", "gemini".
        """
        chat_model_name = chat_model_name.lower()
        sampling_kwargs = {
            "chat_model_temperature": chat_model_temperature,
            "chat_model_top_p": chat_model_top_p,
        }
        if chat_model_name == "ollama":
            return OllamaModel(model="gemma3:12b", **sampling_kwargs)
        if chat_model_name == "openai":
            return OpenAIModel(model="gpt-4o-mini", **sampling_kwargs)
        if chat_model_name == "gemini":
            return GeminiVertextAIModel(model="gemini-2.0-flash-001", **sampling_kwargs)
        raise ValueError(f"Unknown chat model type: {chat_model_name}")
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from typing import Optional
|
|
3
|
+
|
|
4
|
+
from google import genai
|
|
5
|
+
from google.genai.types import GenerateContentConfig, HttpOptions
|
|
6
|
+
from loguru import logger
|
|
7
|
+
|
|
8
|
+
from ai_plays_jackbox.llm.chat_model import ChatModel
|
|
9
|
+
|
|
10
|
+
# Set your environment variables:
|
|
11
|
+
# export GOOGLE_CLOUD_PROJECT=GOOGLE_CLOUD_PROJECT
|
|
12
|
+
# export GOOGLE_CLOUD_LOCATION=global
|
|
13
|
+
# export GOOGLE_GENAI_USE_VERTEXAI=True
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class GeminiVertextAIModel(ChatModel):
    """ChatModel backed by Google Gemini through the google-genai client.

    Targets Vertex AI or the Gemini Developer API depending on which of the
    environment variables listed in the module comment are set.
    """

    # Gemini model identifier, e.g. "gemini-2.0-flash-001".
    _model: str
    # Shared google-genai client used for all completions.
    _gemini_vertex_ai_client: genai.Client

    def __init__(self, model: str = "gemini-2.0-flash-001", *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._gemini_vertex_ai_client = genai.Client(
            http_options=HttpOptions(api_version="v1"),
            # NOTE(review): env vars are strings; presumably genai coerces this
            # truthy/falsy flag itself — confirm values like "False" behave.
            vertexai=os.environ.get("GOOGLE_GENAI_USE_VERTEXAI"),
            api_key=os.environ.get("GOOGLE_GEMINI_DEVELOPER_API_KEY"),
            project=os.environ.get("GOOGLE_CLOUD_PROJECT"),
            location=os.environ.get("GOOGLE_CLOUD_LOCATION"),
        )
        self._model = model

        # Check connection, this will hard fail if connection can't be made
        _ = self._gemini_vertex_ai_client.models.list()

    def generate_text(
        self,
        prompt: str,
        instructions: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> str:
        """Generate a completion for ``prompt``, with ``instructions`` passed
        as the system instruction.

        Per-call temperature/top_p of None fall back to the instance defaults
        set at construction time.
        """
        if temperature is None:
            temperature = self._chat_model_temperature
        if top_p is None:
            top_p = self._chat_model_top_p

        chat_response = self._gemini_vertex_ai_client.models.generate_content(
            model=self._model,
            contents=prompt,
            config=GenerateContentConfig(
                system_instruction=[instructions],
                max_output_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
            ),
        )

        # NOTE(review): newlines are deleted here ("") but OllamaModel replaces
        # them with a space (" ") — confirm which behavior is intended.
        text = chat_response.text.strip().replace("\n", "")
        logger.info(f"Generated text: {text}")
        return text
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
|
|
3
|
+
from loguru import logger
|
|
4
|
+
from ollama import Options, chat, list
|
|
5
|
+
|
|
6
|
+
from ai_plays_jackbox.llm.chat_model import ChatModel
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class OllamaModel(ChatModel):
    """ChatModel backed by a locally running Ollama server."""

    # Ollama model tag, e.g. "gemma3:12b".
    _model: str

    def __init__(self, model: str = "gemma3:12b", *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._model = model

        # Check connection, this will hard fail if connection can't be made.
        # (`list` is ollama.list here — the import shadows the builtin.)
        _ = list()

    def generate_text(
        self,
        prompt: str,
        instructions: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> str:
        """Generate a completion via ollama.chat.

        Per-call temperature/top_p of None fall back to the instance defaults
        set at construction time.
        """
        if temperature is None:
            temperature = self._chat_model_temperature
        if top_p is None:
            top_p = self._chat_model_top_p

        # Ollama takes the system prompt as a leading "system" message.
        instructions_formatted = {"role": "system", "content": instructions}
        chat_response = chat(
            model=self._model,
            messages=[instructions_formatted, {"role": "user", "content": prompt}],
            stream=False,
            # num_predict is Ollama's max-token cap; None leaves it unset.
            options=Options(num_predict=max_tokens, temperature=temperature, top_p=top_p),
        )
        # NOTE(review): newlines become spaces here but are deleted ("") in the
        # OpenAI/Gemini backends — confirm which behavior is intended.
        text = chat_response.message.content.strip().replace("\n", " ")
        logger.info(f"Generated text: {text}")
        return text
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from typing import Optional
|
|
3
|
+
|
|
4
|
+
from loguru import logger
|
|
5
|
+
from openai import OpenAI
|
|
6
|
+
|
|
7
|
+
from ai_plays_jackbox.llm.chat_model import ChatModel
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class OpenAIModel(ChatModel):
    """ChatModel backed by the OpenAI Chat Completions API."""

    # OpenAI model name, e.g. "gpt-4o-mini".
    _model: str
    # Client authenticated via the OPENAI_API_KEY environment variable.
    _open_ai_client: OpenAI

    def __init__(self, model: str = "gpt-4o-mini", *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._open_ai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
        self._model = model

        # Check connection, this will hard fail if connection can't be made
        _ = self._open_ai_client.models.list()

    def generate_text(
        self,
        prompt: str,
        instructions: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> str:
        """Generate a completion via the Chat Completions endpoint.

        Per-call temperature/top_p of None fall back to the instance defaults
        set at construction time.
        """
        if temperature is None:
            temperature = self._chat_model_temperature
        if top_p is None:
            top_p = self._chat_model_top_p

        # "developer" is OpenAI's newer name for the system-instruction role.
        instructions_formatted = {"role": "developer", "content": instructions}
        chat_response = self._open_ai_client.chat.completions.create(
            model=self._model,
            messages=[instructions_formatted, {"role": "user", "content": prompt}],
            stream=False,
            max_completion_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        # NOTE(review): newlines are deleted here ("") but OllamaModel replaces
        # them with a space — confirm which behavior is intended.
        text = chat_response.choices[0].message.content.strip().replace("\n", "")
        logger.info(f"Generated text: {text}")
        return text
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
import random
|
|
2
|
+
import threading
|
|
3
|
+
from time import sleep
|
|
4
|
+
from typing import Optional
|
|
5
|
+
|
|
6
|
+
import requests
|
|
7
|
+
from loguru import logger
|
|
8
|
+
|
|
9
|
+
from ai_plays_jackbox.bot.bot_base import JackBoxBotBase
|
|
10
|
+
from ai_plays_jackbox.bot.bot_factory import JackBoxBotFactory
|
|
11
|
+
from ai_plays_jackbox.bot.bot_personality import JackBoxBotVariant
|
|
12
|
+
from ai_plays_jackbox.constants import ECAST_HOST
|
|
13
|
+
from ai_plays_jackbox.llm.chat_model import ChatModel
|
|
14
|
+
from ai_plays_jackbox.llm.ollama_model import OllamaModel
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class JackBoxRoom:
    """Manages a set of AI bots playing a single JackBox room.

    Looks up the room's game type, spawns the requested bot personalities,
    connects them, and blocks until every bot has disconnected (or the user
    interrupts with Ctrl+C).
    """

    # Bots currently attached to this room.
    _bots: list[JackBoxBotBase]

    def __init__(self):
        self._bots = []
        # Serializes connect/disconnect calls across threads.
        self._lock = threading.Lock()

    def play(
        self,
        room_code: str,
        num_of_bots: int = 4,
        bots_in_play: Optional[list] = None,
        chat_model: Optional[ChatModel] = None,
    ):
        """Join ``room_code`` with ``num_of_bots`` bots and block until done.

        Args:
            room_code: Four-letter JackBox room code.
            num_of_bots: How many bots to spawn.
            bots_in_play: Optional list of JackBoxBotVariant member names to
                restrict the personality pool; None/empty means all variants.
            chat_model: LLM backend; defaults to a local OllamaModel.
        """
        if chat_model is None:
            chat_model = OllamaModel()
        room_type = self._get_room_type(room_code)
        if not room_type:
            logger.error(f"Unable to find room {room_code}")
            return
        logger.info(f"We're playing {room_type}!")
        bot_factory = JackBoxBotFactory()
        if bots_in_play is None or len(bots_in_play) == 0:
            # NOTE: sample() picks without replacement, so this raises
            # ValueError if num_of_bots exceeds the number of variants.
            bots_to_make = random.sample(list(JackBoxBotVariant), num_of_bots)
        else:
            bots_in_play_variants = [variant for variant in JackBoxBotVariant if variant.name in bots_in_play]
            bots_to_make = random.choices(bots_in_play_variants, k=num_of_bots)

        for b in bots_to_make:
            bot = bot_factory.get_bot(
                room_type,
                name=b.value.name,
                personality=b.value.personality,
                chat_model=chat_model,
            )
            self._bots.append(bot)
            with self._lock:
                bot.connect(room_code)
            # Stagger connections so the room isn't hit all at once.
            sleep(0.5)

        try:
            while True:
                sleep(1)
                if self.is_finished():
                    # Was print(); logger keeps output consistent with the
                    # rest of this module and reaches the UI log sink.
                    logger.info("All bots disconnected, ending...")
                    break
        except KeyboardInterrupt:
            self.end()

    def is_finished(self) -> bool:
        """Return True once every bot reports itself disconnected."""
        return all(b.is_disconnected() for b in self._bots)

    def end(self):
        """Disconnect every bot from the room."""
        for b in self._bots:
            with self._lock:
                b.disconnect()

    def _get_room_type(self, room_code: str) -> str:
        """Resolve the game ("appTag") being played in ``room_code``.

        Returns:
            The room's appTag, or "" when the room does not exist (404) or
            the lookup fails with another HTTP error status.
        """
        try:
            response = requests.get(
                f"https://{ECAST_HOST}/api/v2/rooms/{room_code}",
                headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0"},
                # Fail fast instead of hanging forever on an unresponsive host.
                timeout=10,
            )
            response.raise_for_status()
            response_data = response.json()
        except requests.HTTPError:
            if response.status_code != 404:
                logger.error(f"Received {response.status_code} when trying to get room type.")
            return ""
        return response_data["body"]["appTag"]
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
|
|
3
|
+
from ai_plays_jackbox.llm.chat_model_factory import ChatModelFactory
|
|
4
|
+
from ai_plays_jackbox.room import JackBoxRoom
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def run(
    room_code: str,
    num_of_bots: int = 4,
    bots_in_play: Optional[list] = None,
    chat_model_name: str = "ollama",
    chat_model_temperature: float = 0.5,
    chat_model_top_p: float = 0.9,
):
    """Will run a set of bots through a game of JackBox given a room code.

    Args:
        room_code (str): The room code.
        num_of_bots (int, optional): The number of bots to participate. Defaults to 4.
        bots_in_play (list, optional): Names of JackBoxBotVariant members to pick
            bots from; None or empty means all variants are eligible. Defaults to None.
        chat_model_name (str, optional): The chat model platform used to generate
            responses ("ollama", "openai" or "gemini"). Defaults to "ollama".
        chat_model_temperature (float, optional): Sampling temperature passed to
            the chat model. Defaults to 0.5.
        chat_model_top_p (float, optional): Nucleus-sampling (top-p) value passed
            to the chat model. Defaults to 0.9.
    """
    chat_model = ChatModelFactory.get_chat_model(
        chat_model_name, chat_model_temperature=chat_model_temperature, chat_model_top_p=chat_model_top_p
    )
    room = JackBoxRoom()
    room.play(room_code, num_of_bots=num_of_bots, bots_in_play=bots_in_play, chat_model=chat_model)
|
|
File without changes
|
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
import threading
|
|
2
|
+
from collections import deque
|
|
3
|
+
|
|
4
|
+
from loguru import logger
|
|
5
|
+
from nicegui import ui
|
|
6
|
+
|
|
7
|
+
from ai_plays_jackbox import run
|
|
8
|
+
from ai_plays_jackbox.bot.bot_personality import JackBoxBotVariant
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _format_log(record):
|
|
12
|
+
thread_name = record["thread"].name
|
|
13
|
+
color = "red"
|
|
14
|
+
colored_name = f"<{color}>{thread_name:<12}</{color}>"
|
|
15
|
+
|
|
16
|
+
return (
|
|
17
|
+
f"<green>{record['time']:YYYY-MM-DD HH:mm:ss}</green> | "
|
|
18
|
+
f"<cyan>{record['level']:<8}</cyan> | "
|
|
19
|
+
f"{colored_name} | "
|
|
20
|
+
f"{record['message']}\n"
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
# Keep a reference to the game thread so create_ui() can poll whether the
# bots are still running (see _refresh_button_state).
game_thread = None
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def create_ui():
    """Build the NiceGUI page: log view, game controls, sampling sliders and
    the bot-personality picker, plus the handlers that start the bots."""

    def _handle_start_click():
        # Launch the bots on a daemon thread so the UI event loop stays responsive.
        global game_thread

        def _start_in_thread():
            # Collect the names of the checked personality variants.
            bots_in_play = [k for k, v in bot_variant_checkbox_states.items() if v.value]
            try:
                run(
                    room_code.value.strip().upper(),
                    num_of_bots=num_of_bots.value,
                    bots_in_play=bots_in_play,
                    chat_model_name=chat_model.value,
                    chat_model_temperature=temperature.value,
                    chat_model_top_p=top_p.value,
                )
            except Exception as e:
                logger.exception("Bot startup failed")

        game_thread = threading.Thread(target=_start_in_thread, daemon=True)
        game_thread.start()
        # Lock the button while a game is in progress.
        start_button.disable()
        start_button.props("color=blue")
        start_button.text = "Running..."

    def _refresh_button_state():
        # Polled by ui.timer below: re-enable the start button once the
        # game thread has finished.
        if game_thread and not game_thread.is_alive():
            start_button.enable()
            start_button.props("color=green")
            start_button.text = "Start Bots"

    ui.label("🤖 AI Plays JackBox").classes("text-2xl font-bold")

    with ui.row().classes("w-full"):

        log_display = ui.log(max_lines=100).classes("h-64 overflow-auto bg-black text-white")
        log_buffer = deque(maxlen=100)

        def _ui_log_sink(message):
            # Mirror loguru output into the on-page log widget.
            log_buffer.append(message)
            log_display.push(message.strip())

        logger.add(_ui_log_sink, format=_format_log, level="INFO", enqueue=True)

    with ui.grid(columns=16).classes("w-full gap-0"):
        # Empty spacer column.
        with ui.column().classes("col-span-1"):
            pass
        with ui.column().classes("col-span-7"):
            with ui.row():
                ui.label("Number of Bots")
                # Live readout of the slider value.
                num_of_bots_label = ui.label("4")
            num_of_bots = ui.slider(
                min=1,
                max=10,
                value=4,
                step=1,
                on_change=lambda e: num_of_bots_label.set_text(f"{e.value}"),
            )
            chat_model = ui.select(["ollama", "openai", "gemini"], label="Chat Model", value="ollama").classes(
                "w-1/3"
            )
            room_code = (
                ui.input(
                    label="Room Code",
                    placeholder="ABCD",
                    validation={
                        "must be letters only": lambda value: value.isalpha(),
                        "must be 4 letters": lambda value: len(value) == 4,
                    },
                )
                .props("uppercase")
                .classes("w-1/4")
            )
            # Enabled only while the room code passes validation.
            start_button = (
                ui.button("Start Bots", on_click=_handle_start_click, color="green")
                .bind_enabled_from(room_code, "error", lambda error: room_code.value and not error)
                .classes("w-1/3")
            )

            ui.label("Advanced Options").classes("w-full text-xl font-bold")

            ui.label("Temperature").classes("w-1/4").tooltip(
                """
                What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
                make the output more random, while lower values like 0.2 will make it more
                focused and deterministic. We generally recommend altering this or `top_p` but
                not both."""
            )
            temperature_label = ui.label("0.5").classes("w-1/6")
            temperature = ui.slider(
                min=0.0,
                max=2.0,
                value=0.5,
                step=0.1,
                on_change=lambda e: temperature_label.set_text(f"{e.value}"),
            ).classes("w-1/2")

            ui.label("Top P").classes("w-1/4").tooltip(
                """
                An alternative to sampling with temperature, called nucleus sampling, where the
                model considers the results of the tokens with top_p probability mass. So 0.1
                means only the tokens comprising the top 10% probability mass are considered."""
            )
            top_p_label = ui.label("0.9").classes("w-1/6")
            top_p = ui.slider(
                min=0.0,
                max=1.0,
                value=0.9,
                step=0.1,
                on_change=lambda e: top_p_label.set_text(f"{e.value}"),
            ).classes("w-1/2")

        # Empty spacer column.
        with ui.column().classes("col-span-1"):
            pass

        # Maps variant name -> its checkbox widget; read when starting the bots.
        bot_variant_checkbox_states = {}

        def select_all_changed():
            # Propagate the "Select All" checkbox to every variant checkbox.
            for checkbox in bot_variant_checkbox_states.values():
                checkbox.value = select_all_bot_variants.value

        def sync_select_all():
            # Keep "Select All" in sync when individual checkboxes change.
            all_checked = all(cb.value for cb in bot_variant_checkbox_states.values())
            select_all_bot_variants.value = all_checked

        with ui.column().classes("col-span-6"):
            with ui.list().props("bordered separator").classes("w-full"):
                with ui.item_label("Bot Personalities").props("header").classes("text-bold"):
                    select_all_bot_variants = ui.checkbox(text="Select All", value=True)
                    select_all_bot_variants.on("update:model-value", lambda e: select_all_changed())
                ui.separator()
                with ui.element("div").classes("overflow-y-auto h-64"):
                    # One row (checkbox + name + personality caption) per variant.
                    for variant in list(JackBoxBotVariant):
                        with ui.item():
                            with ui.item_section().props("avatar"):
                                cb = ui.checkbox(value=True)
                                cb.on("update:model-value", lambda e: sync_select_all())
                                bot_variant_checkbox_states[variant.name] = cb
                            with ui.item_section():
                                ui.item_label(variant.value.name)
                                ui.item_label(variant.value.personality).props("caption")

    # Poll once a second so the start button recovers after a game ends.
    ui.timer(1.0, _refresh_button_state)
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "ai-plays-jackbox"
|
|
3
|
+
dynamic = ["classifiers"]
|
|
4
|
+
description = "Bringing the dead internet theory to life. Have AI play JackBox with you; no friends required!"
|
|
5
|
+
authors = [
|
|
6
|
+
{ name = "Daniel S. Thompson", email = "dthomp92@gmail.com" },
|
|
7
|
+
]
|
|
8
|
+
readme = "README.md"
|
|
9
|
+
license = { text = "MIT" }
|
|
10
|
+
keywords = []
|
|
11
|
+
requires-python = ">=3.11,<4.0"
|
|
12
|
+
dependencies = [
|
|
13
|
+
"google-genai (>=1.19.0,<2.0.0)",
|
|
14
|
+
"html2text (>=2024.2.26, <2025.0.0)",
|
|
15
|
+
"loguru (>=0.7.3,<1.0.0)",
|
|
16
|
+
"nicegui (>=2.18.0,<3.0.0)",
|
|
17
|
+
"ollama (>=0.4.4,<1.0.0)",
|
|
18
|
+
"openai (>=1.59.8,<2.0.0)",
|
|
19
|
+
"pydantic (>=2.10.4,<3.0.0)",
|
|
20
|
+
"requests (>=2.23.3,<3.0.0)",
|
|
21
|
+
"websocket-client (>=1.8.0,<2.0.0)"
|
|
22
|
+
]
|
|
23
|
+
version = "0.0.1"
|
|
24
|
+
|
|
25
|
+
[project.urls]
|
|
26
|
+
repository = "https://github.com/SudoSpartanDan/AIPlaysJackBox"
|
|
27
|
+
"Bug Tracker" = "https://github.com/SudoSpartanDan/AIPlaysJackBox/issues"
|
|
28
|
+
|
|
29
|
+
[project.scripts]
|
|
30
|
+
ai-plays-jackbox = "ai_plays_jackbox.cli:cli"
|
|
31
|
+
ai-plays-jackbox-ui = "ai_plays_jackbox.web_ui:web_ui"
|
|
32
|
+
lint = "scripts.lint:run"
|
|
33
|
+
|
|
34
|
+
[tool.black]
|
|
35
|
+
line-length = 120
|
|
36
|
+
|
|
37
|
+
[tool.poetry]
|
|
38
|
+
|
|
39
|
+
[tool.poetry.group.dev.dependencies]
|
|
40
|
+
black = "^24.10.0"
|
|
41
|
+
isort = "^5.13.2"
|
|
42
|
+
|
|
43
|
+
[tool.poetry.requires-plugins]
|
|
44
|
+
poetry-dynamic-versioning = { version = ">=1.0.0,<2.0.0", extras = ["plugin"] }
|
|
45
|
+
|
|
46
|
+
[tool.poetry-dynamic-versioning]
|
|
47
|
+
enable = false
|
|
48
|
+
|
|
49
|
+
[build-system]
|
|
50
|
+
requires = ["poetry-core>=2.0.0,<3.0.0"]
|
|
51
|
+
build-backend = "poetry.core.masonry.api"
|