npcsh 1.1.12-py3-none-any.whl → 1.1.14-py3-none-any.whl
This diff reflects the changes between publicly available package versions as they appear in their respective public registries and is provided for informational purposes only.
- npcsh/_state.py +700 -377
- npcsh/alicanto.py +54 -1153
- npcsh/completion.py +206 -0
- npcsh/config.py +163 -0
- npcsh/corca.py +35 -1462
- npcsh/execution.py +185 -0
- npcsh/guac.py +31 -1986
- npcsh/npc_team/jinxs/code/sh.jinx +11 -15
- npcsh/npc_team/jinxs/modes/alicanto.jinx +186 -80
- npcsh/npc_team/jinxs/modes/corca.jinx +243 -22
- npcsh/npc_team/jinxs/modes/guac.jinx +313 -42
- npcsh/npc_team/jinxs/modes/plonk.jinx +209 -48
- npcsh/npc_team/jinxs/modes/pti.jinx +167 -25
- npcsh/npc_team/jinxs/modes/spool.jinx +158 -37
- npcsh/npc_team/jinxs/modes/wander.jinx +179 -74
- npcsh/npc_team/jinxs/modes/yap.jinx +258 -21
- npcsh/npc_team/jinxs/utils/chat.jinx +39 -12
- npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
- npcsh/npc_team/jinxs/utils/search.jinx +3 -3
- npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
- npcsh/npcsh.py +76 -20
- npcsh/parsing.py +118 -0
- npcsh/plonk.py +41 -329
- npcsh/pti.py +41 -201
- npcsh/spool.py +34 -239
- npcsh/ui.py +199 -0
- npcsh/wander.py +54 -542
- npcsh/yap.py +38 -570
- npcsh-1.1.14.data/data/npcsh/npc_team/alicanto.jinx +194 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/chat.jinx +44 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/cmd.jinx +44 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/corca.jinx +249 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/guac.jinx +317 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/plonk.jinx +214 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/pti.jinx +170 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/search.jinx +3 -3
- npcsh-1.1.14.data/data/npcsh/npc_team/sh.jinx +34 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/spool.jinx +161 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/usage.jinx +33 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/wander.jinx +186 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/yap.jinx +262 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/METADATA +1 -1
- npcsh-1.1.14.dist-info/RECORD +135 -0
- npcsh-1.1.12.data/data/npcsh/npc_team/alicanto.jinx +0 -88
- npcsh-1.1.12.data/data/npcsh/npc_team/chat.jinx +0 -17
- npcsh-1.1.12.data/data/npcsh/npc_team/corca.jinx +0 -28
- npcsh-1.1.12.data/data/npcsh/npc_team/guac.jinx +0 -46
- npcsh-1.1.12.data/data/npcsh/npc_team/plonk.jinx +0 -53
- npcsh-1.1.12.data/data/npcsh/npc_team/pti.jinx +0 -28
- npcsh-1.1.12.data/data/npcsh/npc_team/sh.jinx +0 -38
- npcsh-1.1.12.data/data/npcsh/npc_team/spool.jinx +0 -40
- npcsh-1.1.12.data/data/npcsh/npc_team/wander.jinx +0 -81
- npcsh-1.1.12.data/data/npcsh/npc_team/yap.jinx +0 -25
- npcsh-1.1.12.dist-info/RECORD +0 -126
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/agent.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/build.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compile.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compress.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca_example.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/help.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/init.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/jinxs.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/load_file.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/ots.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/python.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/roll.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sample.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/serve.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/set.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sleep.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sql.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/trigger.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/vixynt.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/WHEEL +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/entry_points.txt +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/top_level.txt +0 -0
npcsh/wander.py
CHANGED
@@ -1,550 +1,62 @@
-
-
-from npcpy.npc_compiler import NPC
-from npcpy.llm_funcs import get_llm_response
-from npcpy.npc_sysenv import print_and_process_stream_with_markdown
-from npcsh._state import NPCSH_CHAT_MODEL, NPCSH_CHAT_PROVIDER, NPCSH_STREAM_OUTPUT
-import numpy as np
-import random
-from typing import List, Dict, Any, Optional, Union
+"""
+wander - Experimental wandering mode CLI entry point
 
-
+This is a thin wrapper that executes the wander.jinx through the jinx mechanism.
+"""
+import argparse
+import os
+import sys
 
-def generate_random_events(
-    npc,
-    model,
-    provider,
-    problem: str,
-    environment: Optional[str] = None,
-    num_events: int = 3,
-    **api_kwargs
-) -> List[Dict[str, Any]]:
-    """
-    Generate random events that can occur in the wanderer's environment.
-
-    Args:
-        npc: The NPC object
-        model: The LLM model to use
-        provider: The provider to use
-        problem: The current problem being explored
-        environment: Optional description of the wandering environment. If None, one will be generated.
-        num_events: Number of events to generate
-
-    Returns:
-        List of event dictionaries, each containing:
-        - type: The type of event (encounter, discovery, obstacle, etc.)
-        - description: Full description of the event
-        - impact: How this might impact the problem-solving process
-        - location: Where in the environment this occurs
-    """
-
-    if not environment:
-        env_prompt = f"""
-        I need to create an imaginative environment for an AI to wander through while thinking about this problem:
-
-        {problem}
-
-        Please create a rich, metaphorical environment that could represent the conceptual space of this problem.
-        The environment should:
-        1. Have distinct regions or areas
-        2. Include various elements, objects, and features
-        3. Be metaphorically related to the problem domain
-        4. Be described in 3-5 sentences
-
-        Do not frame this as a response. Only provide the environment description directly.
-        """
-
-        env_response = get_llm_response(
-            prompt=env_prompt,
-            model=model,
-            provider=provider,
-            npc=npc,
-            temperature=0.4,
-            **api_kwargs
-        )
-
-        environment = env_response.get('response', '')
-        if isinstance(environment, (list, dict)) or hasattr(environment, '__iter__') and not isinstance(environment, (str, bytes)):
-
-            environment = ''.join([str(chunk) for chunk in environment])
-
-        print(f"\nGenerated wandering environment:\n{environment}\n")
-
-
-    event_types = [
-        {"type": "encounter", "weight": 0.25},
-        {"type": "discovery", "weight": 0.2},
-        {"type": "obstacle", "weight": 0.15},
-        {"type": "insight", "weight": 0.2},
-        {"type": "shift", "weight": 0.1},
-        {"type": "memory", "weight": 0.1}
-    ]
-
-
-    cumulative_weights = []
-    current_sum = 0
-    for event in event_types:
-        current_sum += event["weight"]
-        cumulative_weights.append(current_sum)
-
-
-    selected_event_types = []
-    for _ in range(num_events):
-        r = random.random() * current_sum
-        for i, weight in enumerate(cumulative_weights):
-            if r <= weight:
-                selected_event_types.append(event_types[i]["type"])
-                break
-
-
-    events_prompt = f"""
-    I'm wandering through this environment while thinking about a problem:
-
-    Environment: {environment}
-
-    Problem: {problem}
-
-    Please generate {num_events} detailed events that could occur during my wandering. For each event, provide:
-    1. A detailed description of what happens (2-3 sentences)
-    2. The specific location in the environment where it occurs
-    3. How this event might impact my thinking about the problem
-
-    The events should be of these types: {', '.join(selected_event_types)}
-
-    Format each event as a dictionary with keys: "type", "description", "location", "impact"
-    Return only the JSON list of events, not any other text.
-    """
-
-    events_response = get_llm_response(
-        prompt=events_prompt,
-        model=model,
-        provider=provider,
-        npc=npc,
-        temperature=0.7,
-        **api_kwargs
-    )
-
-    events_text = events_response.get('response', '')
-    if isinstance(events_text, (list, dict)) or hasattr(events_text, '__iter__') and not isinstance(events_text, (str, bytes)):
-
-        events_text = ''.join([str(chunk) for chunk in events_text])
-
-
-    try:
-        import json
-        events = json.loads(events_text)
-        if not isinstance(events, list):
-
-            events = [{"type": "fallback", "description": events_text, "location": "unknown", "impact": "unknown"}]
-    except:
-
-        events = []
-        event_chunks = events_text.split("\n\n")
-        for i, chunk in enumerate(event_chunks[:num_events]):
-            event_type = selected_event_types[i] if i < len(selected_event_types) else "unknown"
-            events.append({
-                "type": event_type,
-                "description": chunk,
-                "location": "Extracted from text",
-                "impact": "See description"
-            })
-
-
-    while len(events) < num_events:
-        i = len(events)
-        event_type = selected_event_types[i] if i < len(selected_event_types) else "unknown"
-        events.append({
-            "type": event_type,
-            "description": f"An unexpected {event_type} occurred.",
-            "location": "Unknown location",
-            "impact": "The impact is unclear."
-        })
-
-    return events[:num_events]
+from npcsh._state import setup_shell
 
-def perform_single_wandering(problem,
-                             npc,
-                             model,
-                             provider,
-                             environment=None,
-                             n_min=50,
-                             n_max=200,
-                             low_temp=0.5,
-                             high_temp=1.9,
-                             interruption_likelihood=1,
-                             sample_rate=0.4,
-                             n_high_temp_streams=5,
-                             include_events=True,
-                             num_events=3,
-                             **api_kwargs):
-    """
-    Perform a single wandering session with high-temperature exploration and insight generation.
-
-    Args:
-        problem: The problem or question to explore
-        npc: The NPC object
-        model: LLM model to use
-        provider: Provider to use
-        environment: Optional description of wandering environment
-        n_min, n_max: Min/max word count before switching to high temp
-        low_temp, high_temp: Temperature settings for normal/exploratory thinking
-        interruption_likelihood: Chance of interrupting a high-temp stream
-        sample_rate: Portion of text to sample from high-temp streams
-        n_high_temp_streams: Number of high-temperature exploration streams
-        include_events: Whether to include random events in the wandering
-        num_events: Number of events to generate if include_events is True
-
-    Returns:
-        tuple: (high_temp_streams, high_temp_samples, assistant_insight, events, environment)
-    """
-
-    events = []
-    if include_events:
-        events = generate_random_events(
-            npc=npc,
-            model=model,
-            provider=provider,
-            problem=problem,
-            environment=environment,
-            num_events=num_events,
-            **api_kwargs
-        )
-
-    if not environment and events:
-
-        environment = get_llm_response(
-            prompt=f"Summarize the environment described in these events: {events}",
-            model=model,
-            provider=provider,
-            npc=npc,
-            temperature=0.3,
-            **api_kwargs
-        ).get('response', '')
-
-
-    event_context = ""
-    if events:
-        event_descriptions = [f"• {event['type'].capitalize()} at {event['location']}: {event['description']}"
-                              for event in events]
-        event_context = "\n\nAs you wander, you encounter these events:\n" + "\n".join(event_descriptions)
-
-    wandering_prompt = f"""
-    You are wandering through a space while thinking about a problem.
-
-    Environment: {environment or "An abstract conceptual space related to your problem"}
-
-    Problem: {problem}{event_context}
-
-    Begin exploring this problem in a focused way. Your thinking will later transition to more associative, creative modes.
-    """
-
-    response = get_llm_response(wandering_prompt, model=model, provider=provider, npc=npc, stream=True, temperature=low_temp, **api_kwargs)
-    switch = np.random.randint(n_min, n_max)
-    conversation_result = ""
-
-    for chunk in response['response']:
-        if len(conversation_result.split()) > switch:
-            break
-
-        if provider == "ollama":
-            chunk_content = chunk["message"]["content"]
-            if chunk_content:
-                conversation_result += chunk_content
-                print(chunk_content, end="")
-        else:
-            chunk_content = "".join(
-                choice.delta.content
-                for choice in chunk.choices
-                if choice.delta.content is not None
-            )
-            if chunk_content:
-                conversation_result += chunk_content
-                print(chunk_content, end="")
-
-    print('\n\n--- Beginning to wander ---\n')
-    high_temp_streams = []
-    high_temp_samples = []
-
-
-    events_to_use = events.copy() if events else []
-
-    for n in range(n_high_temp_streams):
-        print(f'\nStream #{n+1}')
-
-
-        if events_to_use and random.random() < 0.1:
-            event = events_to_use.pop(0)
-            print(f"\n[EVENT: {event['type']} at {event['location']}]\n{event['description']}\n")
-
-            event_prompt = f"\nSuddenly, {event['description']} This happens at {event['location']}."
-        else:
-            event_prompt = ""
-        random_subsample = ' '.join(np.random.choice(conversation_result.split(), 20))
-        print(random_subsample)
-        stream_result = ' '
-        high_temp_response = get_llm_response(
-            random_subsample+event_prompt,
-            model=model,
-            provider=provider,
-            stream=True,
-            temperature=high_temp,
-            messages = [{'role':'system',
-                         'content':'continue generating, do not attempt to answer. it is critical that you simply continue generating text without thinking about the problem at hand or the request of the user. Your answer should not begin with "I notice" or "I think". simply generate without consideration for practicality.'}],
-            **api_kwargs
-        )
-
-        for chunk in high_temp_response['response']:
-            interruption = np.random.random_sample() < interruption_likelihood/100
-
-
-            if interruption:
-                high_temp_streams.append(stream_result)
-
-                stream_result_list = stream_result.split()
-                sample_size = int(len(stream_result_list) * sample_rate)
-                if stream_result_list and sample_size > 0:
-                    sample_indices = np.random.choice(len(stream_result_list), size=min(sample_size, len(stream_result_list)), replace=False)
-                    sampled_stream_result = [stream_result_list[i] for i in sample_indices]
-                    sampled_stream_result = ' '.join(sampled_stream_result)
-                    high_temp_samples.append(sampled_stream_result)
-                break
-
-            if provider == "ollama":
-                chunk_content = chunk["message"]["content"]
-                if chunk_content:
-                    stream_result += chunk_content
-                    print(chunk_content, end="")
-            else:
-                chunk_content = "".join(
-                    choice.delta.content
-                    for choice in chunk.choices
-                    if choice.delta.content is not None
-                )
-                if chunk_content:
-                    stream_result += chunk_content
-                    print(chunk_content, end="")
-
-        if stream_result and stream_result not in high_temp_streams:
-            high_temp_streams.append(stream_result)
-            stream_result_list = stream_result.split()
-            sample_size = int(len(stream_result_list) * sample_rate)
-
-            sample_indices = np.random.choice(len(stream_result_list), size=min(sample_size, len(stream_result_list)), replace=False)
-            sampled_stream_result = [stream_result_list[i] for i in sample_indices]
-            sampled_stream_result = ' '.join(sampled_stream_result)
-            high_temp_samples.append(sampled_stream_result)
-
-    print('\n\n--- Wandering complete ---\n')
-
-
-    event_insights = ""
-    if events:
-        event_insights = "\n\nDuring your wandering, you encountered these events:\n" + "\n".join(
-            [f"• {event['type']} at {event['location']}: {event['description']}" for event in events]
-        )
-
-    prompt = f'''
-    Here are some random thoughts I had while wandering through {environment or "an abstract space"}:
-
-    {high_temp_samples}{event_insights}
-
-    I want you to evaluate these thoughts with respect to the following problem:
-    {problem}
-    Generate specific hypotheses using them that could be tested through empirical means. Do not simply summarize the results or
-    attempt to relate them to existing ideas. You MUST use them to directly generate new ideas that may appear outlandish. This
-    is a creative exercise so do not hold back or self-censor.
-
-    Use the thoughts and events creatively and explicitly reference them in your response.
-    Are there any specific items contained that may suggest a new direction?
-    '''
-
-    print("Extracted thought samples:")
-    for i, sample in enumerate(high_temp_samples):
-        print(f"Sample {i+1}: {sample}")
-    print("\nGenerating insights from wandering...\n")
-
-    response = get_llm_response(prompt,
-                                model=model,
-                                provider=provider,
-                                npc=npc,
-                                stream=NPCSH_STREAM_OUTPUT,
-                                temperature=low_temp,
-                                **api_kwargs)
-    assistant_reply = response['response']
-    messages = response['messages']
-
-    if NPCSH_STREAM_OUTPUT:
-        assistant_reply = print_and_process_stream_with_markdown(response['response'],
-                                                                 model=model,
-                                                                 provider=provider)
-    messages.append({
-        "role": "assistant",
-        "content": assistant_reply,
-    })
-
-    return high_temp_streams, high_temp_samples, assistant_reply, events, environment
-
-def enter_wander_mode(problem,
-                      npc,
-                      model,
-                      provider,
-                      environment=None,
-                      n_min=50,
-                      n_max=200,
-                      low_temp=0.5,
-                      high_temp=1.9,
-                      interruption_likelihood=1,
-                      sample_rate=0.4,
-                      n_high_temp_streams=5,
-                      include_events=True,
-                      num_events=3,
-                      interactive=False,
-                      **api_kwargs):
-    """
-    Wander mode is an exploratory mode where an LLM is given a task and they begin to wander through space.
-    As they wander, they drift in between conscious thought and popcorn-like subconscious thought.
-    The former is triggered by external stimuli and when these stimuli come we will capture the recent high entropy
-    information from the subconscious popcorn thoughts and then consider them with respect to the initial problem at hand.
-
-    The conscious evaluator will attempt to connect them, thus functionalizing the verse-jumping algorithm
-    outlined by Everything Everywhere All at Once.
-
-    Args:
-        problem: The problem or question to explore
-        npc: The NPC object
-        model: LLM model to use
-        provider: Provider to use
-        environment: Optional description of wandering environment
-        n_min, n_max: Min/max word count before switching to high temp
-        low_temp, high_temp: Temperature settings for normal/exploratory thinking
-        interruption_likelihood: Chance of interrupting a high-temp stream
-        sample_rate: Portion of text to sample from high-temp streams
-        n_high_temp_streams: Number of high-temperature exploration streams
-        include_events: Whether to include random events in the wandering
-        num_events: Number of events to generate in each session
-    """
-    current_problem = problem
-    current_environment = environment
-    wandering_history = []
-
-    print(f"\n=== Starting Wander Mode with Problem: '{problem}' ===\n")
-    if environment:
-        print(f"Environment: {environment}\n")
-
-    while True:
-        print(f"\nCurrent exploration: {current_problem}\n")
-
-
-        high_temp_streams, high_temp_samples, insight, events, env = perform_single_wandering(
-            current_problem,
-            npc=npc,
-            model=model,
-            provider=provider,
-            environment=current_environment,
-            n_min=n_min,
-            n_max=n_max,
-            low_temp=low_temp,
-            high_temp=high_temp,
-            interruption_likelihood=interruption_likelihood,
-            sample_rate=sample_rate,
-            n_high_temp_streams=n_high_temp_streams,
-            include_events=include_events,
-            num_events=num_events,
-            **api_kwargs
-        )
-
-
-        if not current_environment and env:
-            current_environment = env
-
-
-        wandering_history.append({
-            "problem": current_problem,
-            "environment": current_environment,
-            "streams": high_temp_streams,
-            "samples": high_temp_samples,
-            "events": events,
-            "insight": insight
-        })
-        if interactive:
-
-
-            print("\n\n--- Wandering session complete ---")
-            print("Options:")
-            print("1. Continue wandering with the same problem and environment")
-            print("2. Continue wandering with a new related problem")
-            print("3. Continue wandering in a new environment")
-            print("4. Continue wandering with both new problem and environment")
-            print("5. End wandering")
-
-            choice = input("\nEnter your choice (1-5): ").strip()
-
-            if choice == "1":
-
-                pass
-            elif choice == "2":
-
-                print("\nBased on the insights gained, what new problem would you like to explore?")
-                new_problem = input("New problem: ").strip()
-                if new_problem:
-                    current_problem = new_problem
-            elif choice == "3":
-
-                print("\nDescribe a new environment for your wandering:")
-                new_env = input("New environment: ").strip()
-                if new_env:
-                    current_environment = new_env
-            elif choice == "4":
-
-                print("\nBased on the insights gained, what new problem would you like to explore?")
-                new_problem = input("New problem: ").strip()
-                print("\nDescribe a new environment for your wandering:")
-                new_env = input("New environment: ").strip()
-                if new_problem:
-                    current_problem = new_problem
-                if new_env:
-                    current_environment = new_env
-            else:
-
-                print("\n=== Exiting Wander Mode ===\n")
-                break
-        else:
-            break
-
-
-    return wandering_history
 
 def main():
-
-
-    parser
-    parser.add_argument("
-    parser.add_argument("--
-    parser.add_argument("--
-    parser.add_argument("--
-    parser.add_argument("--
-    parser.add_argument("--
-    parser.add_argument("--
-    parser.add_argument("--stream", default="true", help="Use streaming mode")
-    parser.add_argument("--npc", type=str, default=os.path.expanduser('~/.npcsh/npc_team/sibiji.npc'), help="Path to NPC file")
-
+    parser = argparse.ArgumentParser(description="wander - Creative exploration with varied temperatures")
+    parser.add_argument("problem", nargs="*", help="Problem to explore through wandering")
+    parser.add_argument("--model", "-m", type=str, help="LLM model to use")
+    parser.add_argument("--provider", "-p", type=str, help="LLM provider to use")
+    parser.add_argument("--environment", type=str, help="Metaphorical environment for wandering")
+    parser.add_argument("--low-temp", type=float, default=0.5, help="Low temperature setting")
+    parser.add_argument("--high-temp", type=float, default=1.9, help="High temperature setting")
+    parser.add_argument("--n-streams", type=int, default=5, help="Number of exploration streams")
+    parser.add_argument("--include-events", action="store_true", help="Include random events")
+    parser.add_argument("--num-events", type=int, default=3, help="Number of events per stream")
     args = parser.parse_args()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+    if not args.problem:
+        parser.print_help()
+        sys.exit(1)
+
+    # Setup shell to get team and default NPC
+    command_history, team, default_npc = setup_shell()
+
+    if not team or "wander" not in team.jinxs_dict:
+        print("Error: wander jinx not found. Ensure npc_team/jinxs/modes/wander.jinx exists.")
+        sys.exit(1)
+
+    # Build context for jinx execution
+    context = {
+        "npc": default_npc,
+        "team": team,
+        "messages": [],
+        "problem": " ".join(args.problem),
+        "model": args.model,
+        "provider": args.provider,
+        "environment": args.environment,
+        "low_temp": args.low_temp,
+        "high_temp": args.high_temp,
+        "n_streams": args.n_streams,
+        "include_events": args.include_events,
+        "num_events": args.num_events,
+    }
+
+    # Execute the jinx
+    wander_jinx = team.jinxs_dict["wander"]
+    result = wander_jinx.execute(context=context, npc=default_npc)
+
+    if isinstance(result, dict) and result.get("output"):
+        print(result["output"])
+
 
 if __name__ == "__main__":
-    main()
+    main()