ai-plays-jackbox 0.1.0__tar.gz → 0.2.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ai-plays-jackbox might be problematic.
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/PKG-INFO +5 -3
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/README.md +3 -2
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/bot_factory.py +3 -0
- ai_plays_jackbox-0.2.1/ai_plays_jackbox/bot/jackbox6/dictionarium.py +105 -0
- ai_plays_jackbox-0.2.1/ai_plays_jackbox/bot/jackbox8/bot_base.py +20 -0
- ai_plays_jackbox-0.2.1/ai_plays_jackbox/bot/jackbox8/job_job.py +205 -0
- ai_plays_jackbox-0.2.1/ai_plays_jackbox/cli/__init__.py +0 -0
- ai_plays_jackbox-0.1.0/ai_plays_jackbox/cli.py → ai_plays_jackbox-0.2.1/ai_plays_jackbox/cli/main.py +2 -2
- ai_plays_jackbox-0.2.1/ai_plays_jackbox/room/__init__.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/run.py +1 -1
- ai_plays_jackbox-0.2.1/ai_plays_jackbox/scripts/lint.py +18 -0
- ai_plays_jackbox-0.2.1/ai_plays_jackbox/ui/__init__.py +0 -0
- ai_plays_jackbox-0.2.1/ai_plays_jackbox/ui/main.py +12 -0
- ai_plays_jackbox-0.2.1/ai_plays_jackbox/ui/startup.py +248 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/pyproject.toml +6 -4
- ai_plays_jackbox-0.1.0/ai_plays_jackbox/ui/create_ui.py +0 -197
- ai_plays_jackbox-0.1.0/ai_plays_jackbox/web_ui.py +0 -12
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/LICENSE +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/__init__.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/__init__.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/bot_base.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/bot_personality.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/jackbox5/__init__.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/jackbox5/bot_base.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/jackbox5/mad_verse_city.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/jackbox5/patently_stupid.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/jackbox6/__init__.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/jackbox6/bot_base.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/jackbox6/joke_boat.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/jackbox7/__init__.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/jackbox7/bot_base.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/jackbox7/quiplash3.py +0 -0
- {ai_plays_jackbox-0.1.0/ai_plays_jackbox/bot/standalone → ai_plays_jackbox-0.2.1/ai_plays_jackbox/bot/jackbox8}/__init__.py +0 -0
- {ai_plays_jackbox-0.1.0/ai_plays_jackbox/ui → ai_plays_jackbox-0.2.1/ai_plays_jackbox/bot/standalone}/__init__.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/standalone/drawful2.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/constants.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/llm/__init__.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/llm/chat_model.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/llm/chat_model_factory.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/llm/gemini_model.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/llm/ollama_model.py +0 -0
- {ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/llm/openai_model.py +0 -0
- {ai_plays_jackbox-0.1.0/ai_plays_jackbox → ai_plays_jackbox-0.2.1/ai_plays_jackbox/room}/room.py +0 -0
{ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: ai-plays-jackbox
-Version: 0.1.0
+Version: 0.2.1
 Summary: Bringing the dead internet theory to life. Have AI play JackBox with you; no friends required!
 License: MIT
 Author: Daniel S. Thompson

@@ -20,6 +20,7 @@ Requires-Dist: numpy (>=2.2.6,<3.0.0)
 Requires-Dist: ollama (>=0.4.4,<1.0.0)
 Requires-Dist: openai (>=1.59.8,<2.0.0)
 Requires-Dist: opencv-python (>=4.11.0.86,<5.0.0.0)
+Requires-Dist: psutil (>=7.0.0,<8.0.0)
 Requires-Dist: pydantic (>=2.10.4,<3.0.0)
 Requires-Dist: requests (>=2.23.3,<3.0.0)
 Requires-Dist: websocket-client (>=1.8.0,<2.0.0)

@@ -35,7 +36,7 @@ Description-Content-Type: text/markdown
 
 Bringing the dead internet theory to life. Have AI play JackBox with you; no friends required!
 
-![…](…)
+![…](…)
 
 ## Installation
 

@@ -81,6 +82,7 @@ options:
 | --------------------- | ---------------------- | ---------------- |
 | JackBox Party Pack 5 | Mad Verse City | [ ] |
 | JackBox Party Pack 5 | Patently Stupid | [x] |
+| JackBox Party Pack 6 | Dictionarium | [ ] |
 | JackBox Party Pack 6 | Joke Boat | [ ] |
 | JackBox Party Pack 7 | Quiplash 3 | [ ] |
 | Standalone | Drawful 2 | [x] |

@@ -149,6 +151,6 @@ Some of the games lean heavy into players interacting with each other. Could I p
 
 ### Linting
 
-- `poetry run python scripts/lint.py`
+- `poetry run python ai_plays_jackbox/scripts/lint.py`
 - `poetry run mypy ai_plays_jackbox`
 
{ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/README.md

@@ -6,7 +6,7 @@
 
 Bringing the dead internet theory to life. Have AI play JackBox with you; no friends required!
 
-![…](…)
+![…](…)
 
 ## Installation
 

@@ -52,6 +52,7 @@ options:
 | --------------------- | ---------------------- | ---------------- |
 | JackBox Party Pack 5 | Mad Verse City | [ ] |
 | JackBox Party Pack 5 | Patently Stupid | [x] |
+| JackBox Party Pack 6 | Dictionarium | [ ] |
 | JackBox Party Pack 6 | Joke Boat | [ ] |
 | JackBox Party Pack 7 | Quiplash 3 | [ ] |
 | Standalone | Drawful 2 | [x] |

@@ -120,5 +121,5 @@ Some of the games lean heavy into players interacting with each other. Could I p
 
 ### Linting
 
-- `poetry run python scripts/lint.py`
+- `poetry run python ai_plays_jackbox/scripts/lint.py`
 - `poetry run mypy ai_plays_jackbox`
{ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/ai_plays_jackbox/bot/bot_factory.py

@@ -1,6 +1,7 @@
 from ai_plays_jackbox.bot.bot_base import JackBoxBotBase
 from ai_plays_jackbox.bot.jackbox5.mad_verse_city import MadVerseCityBot
 from ai_plays_jackbox.bot.jackbox5.patently_stupid import PatentlyStupidBot
+from ai_plays_jackbox.bot.jackbox6.dictionarium import DictionariumBot
 from ai_plays_jackbox.bot.jackbox6.joke_boat import JokeBoatBot
 from ai_plays_jackbox.bot.jackbox7.quiplash3 import Quiplash3Bot
 from ai_plays_jackbox.bot.standalone.drawful2 import Drawful2Bot

@@ -12,6 +13,8 @@ BOT_TYPES: dict[str, type[JackBoxBotBase]] = {
     "drawful2international": Drawful2Bot,
     "rapbattle": MadVerseCityBot,
     "jokeboat": JokeBoatBot,
+    "ridictionary": DictionariumBot,
+    # "apply-yourself": JobJobBot,
 }
 
 
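`BOT_TYPES` is the registry that maps a Jackbox app tag (for example the new "ridictionary" entry) to a bot class. A minimal lookup sketch follows; the constructor arguments are hypothetical, since the signature of `JackBoxBotBase.__init__` is not part of this diff.

```python
# Hypothetical usage sketch: the real bot constructor arguments are not shown in this diff.
from ai_plays_jackbox.bot.bot_factory import BOT_TYPES


def make_bot(app_tag: str, **bot_kwargs):
    bot_cls = BOT_TYPES.get(app_tag)
    if bot_cls is None:
        raise ValueError(f"No bot registered for app tag: {app_tag}")
    return bot_cls(**bot_kwargs)  # e.g. make_bot("ridictionary", ...)
```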
ai_plays_jackbox-0.2.1/ai_plays_jackbox/bot/jackbox6/dictionarium.py (new file)

@@ -0,0 +1,105 @@
import random

from loguru import logger

from ai_plays_jackbox.bot.jackbox6.bot_base import JackBox6BotBase

_DEFINITION_PROMPT_TEMPLATE = """
You are playing Dictionarium.

{prompt}

When generating your response, follow these rules:
- Your personality is: {personality}
- You response must be {max_length} characters or less.
- Do not include quotes in your response.
"""

_SYNONYM_PROMPT_TEMPLATE = """
You are playing Dictionarium.

{prompt}

When generating your response, follow these rules:
- Your personality is: {personality}
- You response must be {max_length} characters or less.
- Do not include quotes in your response.
"""

_SENTENCE_PROMPT_TEMPLATE = """
You are playing Dictionarium. You need to use a made up word in a sentence.

{prompt}

When generating your response, follow these rules:
- Your personality is: {personality}
- You response must be {max_length} characters or less.
- Do not include quotes in your response.
"""


class DictionariumBot(JackBox6BotBase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _handle_welcome(self, data: dict):
        pass

    def _handle_player_operation(self, data: dict):
        if not data:
            return
        room_state = data.get("state", None)
        if not room_state:
            return

        prompt = data.get("prompt", {})
        prompt_html = prompt.get("html", "")
        clean_prompt = self._html_to_text(prompt_html)
        max_length = data.get("maxLength", 150)

        match room_state:
            case "EnterSingleText":
                entry = data.get("entry", None)
                entry_id = data.get("entryId", "")
                print(data)
                if not entry:
                    if entry_id == "Definition":
                        logger.info("Generating definition...")
                        template = _DEFINITION_PROMPT_TEMPLATE
                    elif entry_id == "Synonym":
                        logger.info("Generating synonym...")
                        template = _SYNONYM_PROMPT_TEMPLATE
                    elif entry_id == "Sentence":
                        logger.info("Generating sentence...")
                        template = _SENTENCE_PROMPT_TEMPLATE
                    else:
                        return

                    formatted_prompt = template.format(
                        personality=self._personality,
                        prompt=clean_prompt,
                        max_length=max_length,
                    )
                    submission = self._chat_model.generate_text(
                        formatted_prompt,
                        "",
                        temperature=self._chat_model._chat_model_temperature,
                        top_p=self._chat_model._chat_model_top_p,
                    )
                    submission = submission[: max_length - 1]
                    self._client_send({"action": "write", "entry": submission})

            case "MakeSingleChoice":
                choice_type = data.get("choiceType", "")
                if (
                    choice_type == "ChooseDefinition"
                    or choice_type == "ChooseSynonym"
                    or choice_type == "ChooseSentence"
                ):
                    choices: list[dict] = data.get("choices", [])
                    choice_indexes = [i for i in range(0, len(choices))]
                    selected_choice = random.choice(choice_indexes)
                    self._client_send({"action": "choose", "choice": selected_choice})

    def _handle_room_operation(self, data: dict):
        pass
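The three templates above are plain `str.format` strings filled with the cleaned game prompt, the bot's personality, and the length limit reported by the game. A self-contained illustration with made-up values (the prompt and personality below are hypothetical):

```python
# Illustration only: the prompt text and personality below are made up.
TEMPLATE = """
You are playing Dictionarium.

{prompt}

When generating your response, follow these rules:
- Your personality is: {personality}
- You response must be {max_length} characters or less.
- Do not include quotes in your response.
"""

print(
    TEMPLATE.format(
        prompt="Write a definition for the made-up word 'flerb'.",
        personality="an overly dramatic soap opera villain",
        max_length=150,
    )
)
```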
ai_plays_jackbox-0.2.1/ai_plays_jackbox/bot/jackbox8/bot_base.py (new file)

@@ -0,0 +1,20 @@
from abc import ABC

from ai_plays_jackbox.bot.bot_base import JackBoxBotBase


class JackBox8BotBase(JackBoxBotBase, ABC):

    @property
    def _player_operation_key(self) -> str:
        return f"player:{self._player_id}"

    def _is_player_operation_key(self, operation_key: str) -> bool:
        return operation_key == self._player_operation_key

    @property
    def _room_operation_key(self) -> str:
        return "room"

    def _is_room_operation_key(self, operation_key: str) -> bool:
        return operation_key == self._room_operation_key
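This base class only defines the Party Pack 8 operation-key scheme: player-scoped messages arrive under a `player:<id>` key and room-wide messages under `room`. A sketch of how a dispatcher might use these helpers follows; the dispatch function itself and the `_player_id` attribute belong to the shared `JackBoxBotBase` plumbing, which is not part of this diff, so the wiring shown here is an assumption.

```python
# Sketch under assumptions: the actual routing lives in JackBoxBotBase and is not shown here.
def dispatch_operation(bot, operation_key: str, data: dict) -> None:
    if bot._is_player_operation_key(operation_key):  # e.g. "player:12345"
        bot._handle_player_operation(data)
    elif bot._is_room_operation_key(operation_key):  # "room"
        bot._handle_room_operation(data)
```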
ai_plays_jackbox-0.2.1/ai_plays_jackbox/bot/jackbox8/job_job.py (new file)

@@ -0,0 +1,205 @@
import random
import string

from loguru import logger

from ai_plays_jackbox.bot.jackbox8.bot_base import JackBox8BotBase

_RESPONSE_PROMPT_TEMPLATE = """
You are playing Job Job. You need response to the given prompt.

When generating your response, follow these rules:
- Your personality is: {personality}
- Your response must be {max_length} letters or less.
- Your response must have a minimum of {min_words} words.
- Do not include quotes in your response.

{instruction}

Your prompt is:

{prompt}
"""

_COMPOSITION_PROMPT_TEMPLATE = """
You are playing Job Job. You must create a response to a interview question using only specific words given.

When generating your response, follow these rules:
- Your personality is: {personality}
- Your response must only use the allowed words or characters, nothing else
- If you decide to use a character, you must have it separated by a space from any words
- You can select a maximum of {max_words} words

Your interview question is:

{prompt}

Your allowed words or characters are:

{all_possible_words_str}
"""


class JobJobBot(JackBox8BotBase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _handle_welcome(self, data: dict):
        pass

    def _handle_player_operation(self, data: dict):
        if not data:
            return

        kind = data.get("kind", "")
        has_controls = data.get("hasControls", False)
        response_key = data.get("responseKey", "")
        done_key = data.get("doneKey", "")

        if has_controls:
            if "skip:" in response_key:
                self._object_update(response_key, {"action": "skip"})
                return

        match kind:
            case "writing":
                instruction = data.get("instruction", "")
                prompt = data.get("prompt", "")
                max_length = data.get("maxLength", 128)
                min_words = data.get("minWords", 5)
                text_key = data.get("textKey", "")
                response = self._generate_response(instruction, prompt, max_length, min_words)
                self._text_update(text_key, response)
                self._object_update(done_key, {"done": True})

            case "magnets":
                prompt = data.get("prompt", "")
                answer_key = data.get("answerKey", "")
                stash = data.get("stash", [[]])
                max_words = data.get("maxWords", 12)
                composition_list = self._generate_composition_list(prompt, stash, max_words)
                self._object_update(
                    answer_key,
                    {
                        "final": True,
                        "text": composition_list,
                    },
                )

            case "resumagents":
                prompt = data.get("prompt", "")
                answer_key = data.get("answerKey", "")
                stash = data.get("stash", [[]])
                max_words = data.get("maxWords", 12)
                max_words_per_answer = data.get("maxWordsPerAnswer", 8)
                num_answers = data.get("numAnswers", 8)
                resume_composition_list = self._generate_resume_composition_list(
                    prompt, stash, max_words, max_words_per_answer, num_answers
                )
                self._object_update(
                    answer_key,
                    {
                        "final": True,
                        "text": resume_composition_list,
                    },
                )

            case "voting":
                choices: list[dict] = data.get("choices", [])
                choice_indexes = [i for i in range(0, len(choices))]
                selected_choice = random.choice(choice_indexes)
                self._object_update(response_key, {"action": "choose", "choice": selected_choice})

    def _handle_room_operation(self, data: dict):
        pass

    def _generate_response(self, instruction: str, prompt: str, max_length: int, min_words: int) -> str:
        formatted_prompt = _RESPONSE_PROMPT_TEMPLATE.format(
            personality=self._personality,
            max_length=max_length,
            min_words=min_words,
            instruction=instruction,
            prompt=prompt,
        )
        response = self._chat_model.generate_text(
            formatted_prompt,
            "",
            temperature=self._chat_model._chat_model_temperature,
            top_p=self._chat_model._chat_model_top_p,
        )
        if len(response) > max_length:
            response = response[: max_length - 1]
        return response

    def _generate_composition_list(
        self,
        prompt: str,
        stash: list[list[str]],
        max_words: int,
    ) -> list[dict]:

        possible_word_choices = []

        for stash_entry in stash:
            for word in stash_entry:
                possible_word_choices.append(word)

        all_possible_words_str = "\n".join([word for word in possible_word_choices])
        formatted_prompt = _COMPOSITION_PROMPT_TEMPLATE.format(
            personality=self._personality,
            all_possible_words_str=all_possible_words_str,
            max_words=max_words,
            prompt=prompt,
        )
        response = self._chat_model.generate_text(
            formatted_prompt,
            "",
            temperature=self._chat_model._chat_model_temperature,
            top_p=self._chat_model._chat_model_top_p,
        )

        ## Listen, I know this is isn't the fastest way to search
        ## It's 12 words, bite me with your Big O notation
        composition_list = []
        response_list = response.split(" ")
        for response_word in response_list:
            found_word = False
            response_word = response_word.strip()
            if not all(char in string.punctuation for char in response_word):
                response_word = response_word.translate(str.maketrans("", "", string.punctuation)).lower()

            if not found_word:
                for stash_index, stash_entry in enumerate(stash):
                    for check_word_index, check_word in enumerate(stash_entry):
                        if response_word == check_word.lower():
                            composition_list.append(
                                {
                                    "index": stash_index,
                                    "word": check_word_index,
                                }
                            )
                            found_word = True
                            break
                    if found_word:
                        break

            if not found_word:
                logger.warning(f"Word not found in choices: {response_word}")

        if len(composition_list) > max_words:
            composition_list = composition_list[: max_words - 1]
        return composition_list

    def _generate_resume_composition_list(
        self,
        prompt: str,
        stash: list[list[str]],
        max_words: int,
        max_words_per_answers: int,
        num_of_answers: int,
    ) -> list[list[dict]]:
        # TODO Figure this out
        resume_composition_list = []
        for _ in range(0, num_of_answers):
            resume_composition_list.append([{"index": 0, "word": 0}])
        return resume_composition_list
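The heart of the magnets phase is `_generate_composition_list`, which has to translate whatever the chat model writes back into `{"index": ..., "word": ...}` references into the `stash` of word magnets, because the game only accepts indexes. A stripped-down, standalone version of that matching step (a sketch for illustration, not the shipped method):

```python
import string


def match_words_to_stash(response: str, stash: list[list[str]]) -> list[dict]:
    """Map each response word to its (stash row, word position); unmatched words are skipped."""
    matches = []
    for raw in response.split(" "):
        word = raw.strip()
        # Keep pure-punctuation tokens as-is; otherwise strip punctuation and lowercase.
        if not all(ch in string.punctuation for ch in word):
            word = word.translate(str.maketrans("", "", string.punctuation)).lower()
        for row, entries in enumerate(stash):
            hit = next((i for i, w in enumerate(entries) if w.lower() == word), None)
            if hit is not None:
                matches.append({"index": row, "word": hit})
                break
    return matches


# e.g. match_words_to_stash("I love SPREADSHEETS!", [["I", "love"], ["spreadsheets", "meetings"]])
# -> [{'index': 0, 'word': 0}, {'index': 0, 'word': 1}, {'index': 1, 'word': 0}]
```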
ai_plays_jackbox-0.1.0/ai_plays_jackbox/cli.py → ai_plays_jackbox-0.2.1/ai_plays_jackbox/cli/main.py
RENAMED

@@ -45,7 +45,7 @@ def _validate_top_p(string_to_check: str) -> float:
     return number_value
 
 
-def cli():
+def main():
     parser = argparse.ArgumentParser()
     parser.add_argument(
         "--room-code",

@@ -104,4 +104,4 @@ def cli():
 
 
 if __name__ == "__main__":
-    cli()
+    main()
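The hunk context shows the CLI validating numeric options such as top-p with helper functions like `_validate_top_p` before argparse stores them. The real bounds and error messages in `cli/main.py` are not included in this hunk, so the validator below is only an illustrative sketch (the 0 to 1 range mirrors the Top P slider in the UI):

```python
import argparse


# Illustrative sketch in the spirit of _validate_top_p; the shipped bounds and
# error text are not part of this hunk.
def _validate_top_p_sketch(string_to_check: str) -> float:
    number_value = float(string_to_check)
    if not 0.0 <= number_value <= 1.0:
        raise argparse.ArgumentTypeError("top_p must be between 0.0 and 1.0")
    return number_value


parser = argparse.ArgumentParser()
parser.add_argument("--top-p", type=_validate_top_p_sketch)
```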
ai_plays_jackbox-0.2.1/ai_plays_jackbox/scripts/lint.py (new file)

@@ -0,0 +1,18 @@
import subprocess


def run():
    commands = [
        ["autoflake", "--in-place", "--recursive", "--remove-all-unused-imports", "--verbose", "ai_plays_jackbox"],
        ["isort", "--profile", "black", "--project=ai_plays_jackbox", "ai_plays_jackbox"],
        ["black", "-l", "120", "ai_plays_jackbox"],
        ["mypy", "ai_plays_jackbox"],
    ]

    for cmd in commands:
        print(f"\n>>> Running: {' '.join(cmd)}")
        subprocess.run(["poetry", "run"] + cmd, check=True)


if __name__ == "__main__":
    run()
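`run()` shells out to each tool through `poetry run` and, because of `check=True`, stops at the first tool that exits non-zero. It can be run as a script, as the README change above documents, or imported:

```python
# Programmatic use of the lint helper; equivalent to
# `poetry run python ai_plays_jackbox/scripts/lint.py`.
from ai_plays_jackbox.scripts.lint import run

run()  # autoflake, isort, black, then mypy; raises subprocess.CalledProcessError on the first failure
```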
ai_plays_jackbox-0.2.1/ai_plays_jackbox/ui/startup.py (new file)

@@ -0,0 +1,248 @@
from multiprocessing import Process, Queue

import psutil
from loguru import logger
from nicegui import app, ui

from ai_plays_jackbox.bot.bot_personality import JackBoxBotVariant
from ai_plays_jackbox.constants import (
    DEFAULT_NUM_OF_BOTS,
    DEFAULT_TEMPERATURE,
    DEFAULT_TOP_P,
)
from ai_plays_jackbox.llm.chat_model_factory import CHAT_MODEL_PROVIDERS
from ai_plays_jackbox.run import run

LOG_QUEUE: Queue = Queue()
LOG_DISPLAY = None
SELECT_ALL_BOT_VARIANTS = None
BOT_VARIANT_CHECKBOX_STATES: dict = {}


def _format_log(record):
    thread_name = record["thread"].name
    color = "red"
    colored_name = f"<{color}>{thread_name:<12}</{color}>"

    return (
        f"<green>{record['time']:YYYY-MM-DD HH:mm:ss}</green> | "
        f"<cyan>{record['level']:<8}</cyan> | "
        f"{colored_name} | "
        f"{record['message']}\n"
    )


def _build_log_display():
    global LOG_DISPLAY
    with ui.row().classes("w-full"):
        LOG_DISPLAY = ui.log(max_lines=100).classes("h-64 overflow-auto bg-black text-white")
    ui.timer(interval=0.5, callback=_poll_log_queue)


def _poll_log_queue():
    global LOG_DISPLAY
    try:
        while not LOG_QUEUE.empty():
            log_msg = LOG_QUEUE.get_nowait()
            LOG_DISPLAY.push(log_msg)
    except Exception as e:
        LOG_DISPLAY.push(f"[ERROR] Failed to read log: {e}")


def _start(
    room_code: str,
    chat_model_provider: str,
    chat_model_name: str,
    num_of_bots: int,
    bots_in_play: list[str],
    temperature: float,
    top_p: float,
    log_queue: Queue,
):
    logger.add(lambda msg: log_queue.put(msg), format=_format_log)

    try:
        run(
            room_code.strip().upper(),
            chat_model_provider,
            chat_model_name=chat_model_name,
            num_of_bots=num_of_bots,
            bots_in_play=bots_in_play,
            chat_model_temperature=temperature,
            chat_model_top_p=top_p,
        )
    except Exception as e:
        logger.exception("Bot startup failed")


def _is_game_process_alive():
    game_pid = app.storage.general.get("game_pid", None)
    is_game_alive = game_pid is not None and psutil.pid_exists(game_pid) and psutil.Process(game_pid).is_running()
    if not is_game_alive:
        app.storage.general["game_pid"] = None
    return is_game_alive


def _handle_start_click(
    room_code: str,
    chat_model_provider: str,
    chat_model_name: str,
    num_of_bots: int,
    temperature: float,
    top_p: float,
):
    global BOT_VARIANT_CHECKBOX_STATES

    if not _is_game_process_alive():
        logger.info("Starting...")
        game_thread = Process(
            target=_start,
            args=(
                room_code,
                chat_model_provider,
                chat_model_name,
                num_of_bots,
                [k for k, v in BOT_VARIANT_CHECKBOX_STATES.items() if v.value],
                temperature,
                top_p,
                LOG_QUEUE,
            ),
            daemon=True,
        )
        game_thread.start()
        app.storage.general["game_pid"] = game_thread.pid


def _select_all_bot_variants_changed():
    for checkbox in BOT_VARIANT_CHECKBOX_STATES.values():
        checkbox.value = SELECT_ALL_BOT_VARIANTS.value


def _sync_select_all_bot_variants():
    all_checked = all(cb.value for cb in BOT_VARIANT_CHECKBOX_STATES.values())
    SELECT_ALL_BOT_VARIANTS.value = all_checked


def _setup_bot_variant_display():
    global SELECT_ALL_BOT_VARIANTS
    with ui.list().props("bordered separator").classes("w-full"):
        with ui.item_label("Bot Personalities").props("header").classes("text-bold"):
            SELECT_ALL_BOT_VARIANTS = ui.checkbox(text="Select All", value=True)
            SELECT_ALL_BOT_VARIANTS.on("update:model-value", lambda e: _select_all_bot_variants_changed())
        ui.separator()
        with ui.element("div").classes("overflow-y-auto h-64"):
            for variant in list(JackBoxBotVariant):
                with ui.item():
                    with ui.item_section().props("avatar"):
                        cb = ui.checkbox(value=True)
                        cb.on("update:model-value", lambda e: _sync_select_all_bot_variants())
                        BOT_VARIANT_CHECKBOX_STATES[variant.name] = cb
                    with ui.item_section():
                        ui.item_label(variant.value.name)
                        ui.item_label(variant.value.personality).props("caption")


def startup():
    ui.page_title("AI Plays JackBox")
    ui.label("🤖 AI Plays JackBox").classes("text-2xl font-bold")

    _build_log_display()

    with ui.grid(columns=16).classes("w-full gap-0"):
        with ui.column().classes("col-span-1"):
            pass
        with ui.column().classes("col-span-7"):
            with ui.row():
                ui.label("Number of Bots")
                num_of_bots_label = ui.label(str(DEFAULT_NUM_OF_BOTS))
            num_of_bots = ui.slider(
                min=1,
                max=10,
                value=DEFAULT_NUM_OF_BOTS,
                step=1,
                on_change=lambda e: num_of_bots_label.set_text(f"{e.value}"),
            )
            chat_model_provider = ui.select(
                list(CHAT_MODEL_PROVIDERS.keys()),
                label="Chat Model Provider",
                value=list(CHAT_MODEL_PROVIDERS.keys())[0],
                on_change=lambda e: chat_model_name.set_value(CHAT_MODEL_PROVIDERS[e.value].get_default_model()),
            ).classes("w-1/3")

            chat_model_name = ui.input(
                label="Chat Model Name",
                value=CHAT_MODEL_PROVIDERS[chat_model_provider.value].get_default_model(),
            ).classes("w-1/3")

            room_code = (
                ui.input(
                    label="Room Code",
                    placeholder="ABCD",
                    validation={
                        "must be letters only": lambda value: value.isalpha(),
                        "must be 4 letters": lambda value: len(value) == 4,
                    },
                )
                .props("uppercase")
                .classes("w-1/4")
            )
            start_button = (
                ui.button(
                    "Start Bots",
                    on_click=lambda _: _handle_start_click(
                        room_code.value,
                        chat_model_provider.value,
                        chat_model_name.value,
                        num_of_bots.value,
                        temperature.value,
                        top_p.value,
                    ),
                )
                .bind_enabled_from(room_code, "error", lambda error: room_code.value and not error)
                .classes("w-full")
            )
            ui.timer(
                interval=0.5,
                callback=lambda: start_button.props(
                    f"color={'blue' if _is_game_process_alive() else 'green'}"
                ).set_text("Running..." if _is_game_process_alive() else "Start Bots"),
            )

            ui.label("Advanced Options").classes("w-full text-xl font-bold")

            ui.label("Temperature").classes("w-1/4").tooltip(
                """
What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more
focused and deterministic. We generally recommend altering this or `top_p` but
not both."""
            )
            temperature_label = ui.label(str(DEFAULT_TEMPERATURE)).classes("w-1/6")
            temperature = ui.slider(
                min=0.0,
                max=2.0,
                value=DEFAULT_TEMPERATURE,
                step=0.1,
                on_change=lambda e: temperature_label.set_text(f"{e.value}"),
            ).classes("w-1/2")

            ui.label("Top P").classes("w-1/4").tooltip(
                """
An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered."""
            )
            top_p_label = ui.label(str(DEFAULT_TOP_P)).classes("w-1/6")
            top_p = ui.slider(
                min=0.0,
                max=1.0,
                value=DEFAULT_TOP_P,
                step=0.1,
                on_change=lambda e: top_p_label.set_text(f"{e.value}"),
            ).classes("w-1/2")

        with ui.column().classes("col-span-1"):
            pass

        with ui.column().classes("col-span-6"):
            _setup_bot_variant_display()
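The new UI launches the bots in a separate `multiprocessing.Process` and remembers only its PID in `app.storage.general`; liveness is then re-checked with psutil rather than by holding a thread object, which is what the removed `create_ui.py` further down did. A minimal standalone sketch of that PID-tracking pattern, independent of NiceGUI (the worker body here is a stand-in, not the real `run(...)` call):

```python
# Minimal sketch of the PID-tracking pattern behind _handle_start_click and
# _is_game_process_alive, without the NiceGUI storage layer.
import time
from multiprocessing import Process

import psutil


def _worker() -> None:
    time.sleep(2)  # stand-in for run(...)


def is_alive(pid: int | None) -> bool:
    return pid is not None and psutil.pid_exists(pid) and psutil.Process(pid).is_running()


if __name__ == "__main__":
    proc = Process(target=_worker, daemon=True)
    proc.start()
    game_pid = proc.pid        # only the PID is stored, as in app.storage.general["game_pid"]
    print(is_alive(game_pid))  # True while the worker is running
    proc.join()
    print(is_alive(game_pid))  # False once it has exited
```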
{ai_plays_jackbox-0.1.0 → ai_plays_jackbox-0.2.1}/pyproject.toml

@@ -21,7 +21,8 @@ dependencies = [
     "websocket-client (>=1.8.0,<2.0.0)",
     "numpy (>=2.2.6,<3.0.0)",
     "opencv-python (>=4.11.0.86,<5.0.0.0)",
-    "demoji (>=1.1.0,<2.0.0)"
+    "demoji (>=1.1.0,<2.0.0)",
+    "psutil (>=7.0.0,<8.0.0)"
 ]
 
 [project.urls]

@@ -29,8 +30,8 @@ repository = "https://github.com/SudoSpartanDan/AIPlaysJackBox"
 "Bug Tracker" = "https://github.com/SudoSpartanDan/AIPlaysJackBox/issues"
 
 [project.scripts]
-ai-plays-jackbox = "ai_plays_jackbox.cli:…
-ai-plays-jackbox-ui = "ai_plays_jackbox.…
+ai-plays-jackbox = "ai_plays_jackbox.cli.main:main"
+ai-plays-jackbox-ui = "ai_plays_jackbox.ui.main:main"
 
 [tool.black]
 line-length = 120

@@ -43,7 +44,7 @@ module = ["demoji.*"]
 follow_untyped_imports = true
 
 [tool.poetry]
-version = "0.1.0"
+version = "0.2.1"
 
 [tool.poetry.group.dev.dependencies]
 autoflake = "^2.3.1"

@@ -51,6 +52,7 @@ black = "^25.1.0"
 isort = "^6.0.1"
 mypy = "^1.16.0"
 types-requests = "^2.32.0.20250602"
+types-psutil = "^7.0.0.20250601"
 
 [tool.poetry-dynamic-versioning]
 enable = true
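The console scripts now point at the new module layout. An illustration of what the two 0.2.1 entry points resolve to (import targets only; the command-line flags live in the respective modules):

```python
# What the 0.2.1 [project.scripts] entries resolve to.
from ai_plays_jackbox.cli.main import main as cli_main  # ai-plays-jackbox
from ai_plays_jackbox.ui.main import main as ui_main    # ai-plays-jackbox-ui
```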
ai_plays_jackbox-0.1.0/ai_plays_jackbox/ui/create_ui.py (removed in 0.2.1; superseded by ai_plays_jackbox/ui/startup.py)

@@ -1,197 +0,0 @@
import threading
from collections import deque

from loguru import logger
from nicegui import ui

from ai_plays_jackbox.bot.bot_personality import JackBoxBotVariant
from ai_plays_jackbox.constants import (
    DEFAULT_NUM_OF_BOTS,
    DEFAULT_TEMPERATURE,
    DEFAULT_TOP_P,
)
from ai_plays_jackbox.llm.chat_model_factory import CHAT_MODEL_PROVIDERS
from ai_plays_jackbox.run import run


def _format_log(record):
    thread_name = record["thread"].name
    color = "red"
    colored_name = f"<{color}>{thread_name:<12}</{color}>"

    return (
        f"<green>{record['time']:YYYY-MM-DD HH:mm:ss}</green> | "
        f"<cyan>{record['level']:<8}</cyan> | "
        f"{colored_name} | "
        f"{record['message']}\n"
    )


# Keep a reference to the game thread
game_thread = None


def create_ui():
    ui.page_title("AI Plays JackBox")

    def _handle_start_click():
        global game_thread

        if game_thread and game_thread.is_alive():
            return

        def _start_in_thread():
            bots_in_play = [k for k, v in bot_variant_checkbox_states.items() if v.value]
            try:
                run(
                    room_code.value.strip().upper(),
                    chat_model_provider.value,
                    chat_model_name=chat_model_name.value,
                    num_of_bots=num_of_bots.value,
                    bots_in_play=bots_in_play,
                    chat_model_temperature=temperature.value,
                    chat_model_top_p=top_p.value,
                )
            except Exception as e:
                logger.exception("Bot startup failed")

        game_thread = threading.Thread(target=_start_in_thread, daemon=True)
        game_thread.start()
        start_button.disable()
        start_button.props("color=blue")
        start_button.text = "Running..."
        start_button.update()

    def _refresh_button_state():
        if not game_thread or not game_thread.is_alive():
            if start_button.props["color"] != "green":
                start_button.enable()
                start_button.props("color=green")
                start_button.text = "Start Bots"
        else:
            if start_button.props["color"] != "blue":
                start_button.disable()
                start_button.props("color=blue")
                start_button.text = "Running..."

    ui.label("🤖 AI Plays JackBox").classes("text-2xl font-bold")

    with ui.row().classes("w-full"):

        log_display = ui.log(max_lines=100).classes("h-64 overflow-auto bg-black text-white")
        log_buffer = deque(maxlen=100)

        def _ui_log_sink(message):
            log_buffer.append(message)
            log_display.push(message.strip())

        logger.add(_ui_log_sink, format=_format_log, level="INFO", enqueue=True)

    with ui.grid(columns=16).classes("w-full gap-0"):
        with ui.column().classes("col-span-1"):
            pass
        with ui.column().classes("col-span-7"):
            with ui.row():
                ui.label("Number of Bots")
                num_of_bots_label = ui.label(str(DEFAULT_NUM_OF_BOTS))
            num_of_bots = ui.slider(
                min=1,
                max=10,
                value=DEFAULT_NUM_OF_BOTS,
                step=1,
                on_change=lambda e: num_of_bots_label.set_text(f"{e.value}"),
            )
            chat_model_provider = ui.select(
                list(CHAT_MODEL_PROVIDERS.keys()),
                label="Chat Model Provider",
                value=list(CHAT_MODEL_PROVIDERS.keys())[0],
                on_change=lambda e: chat_model_name.set_value(CHAT_MODEL_PROVIDERS[e.value].get_default_model()),
            ).classes("w-1/3")

            chat_model_name = ui.input(
                label="Chat Model Name",
                value=CHAT_MODEL_PROVIDERS[chat_model_provider.value].get_default_model(),
            ).classes("w-1/3")

            room_code = (
                ui.input(
                    label="Room Code",
                    placeholder="ABCD",
                    validation={
                        "must be letters only": lambda value: value.isalpha(),
                        "must be 4 letters": lambda value: len(value) == 4,
                    },
                )
                .props("uppercase")
                .classes("w-1/4")
            )
            start_button = (
                ui.button("Start Bots", on_click=_handle_start_click, color="green")
                .bind_enabled_from(room_code, "error", lambda error: room_code.value and not error)
                .classes("w-full")
            )

            ui.label("Advanced Options").classes("w-full text-xl font-bold")

            ui.label("Temperature").classes("w-1/4").tooltip(
                """
What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more
focused and deterministic. We generally recommend altering this or `top_p` but
not both."""
            )
            temperature_label = ui.label(str(DEFAULT_TEMPERATURE)).classes("w-1/6")
            temperature = ui.slider(
                min=0.0,
                max=2.0,
                value=DEFAULT_TEMPERATURE,
                step=0.1,
                on_change=lambda e: temperature_label.set_text(f"{e.value}"),
            ).classes("w-1/2")

            ui.label("Top P").classes("w-1/4").tooltip(
                """
An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered."""
            )
            top_p_label = ui.label(str(DEFAULT_TOP_P)).classes("w-1/6")
            top_p = ui.slider(
                min=0.0,
                max=1.0,
                value=DEFAULT_TOP_P,
                step=0.1,
                on_change=lambda e: top_p_label.set_text(f"{e.value}"),
            ).classes("w-1/2")

        with ui.column().classes("col-span-1"):
            pass

        bot_variant_checkbox_states = {}

        def select_all_changed():
            for checkbox in bot_variant_checkbox_states.values():
                checkbox.value = select_all_bot_variants.value

        def sync_select_all():
            all_checked = all(cb.value for cb in bot_variant_checkbox_states.values())
            select_all_bot_variants.value = all_checked

        with ui.column().classes("col-span-6"):
            with ui.list().props("bordered separator").classes("w-full"):
                with ui.item_label("Bot Personalities").props("header").classes("text-bold"):
                    select_all_bot_variants = ui.checkbox(text="Select All", value=True)
                    select_all_bot_variants.on("update:model-value", lambda e: select_all_changed())
                ui.separator()
                with ui.element("div").classes("overflow-y-auto h-64"):
                    for variant in list(JackBoxBotVariant):
                        with ui.item():
                            with ui.item_section().props("avatar"):
                                cb = ui.checkbox(value=True)
                                cb.on("update:model-value", lambda e: sync_select_all())
                                bot_variant_checkbox_states[variant.name] = cb
                            with ui.item_section():
                                ui.item_label(variant.value.name)
                                ui.item_label(variant.value.personality).props("caption")

    ui.timer(1.0, _refresh_button_state)