npcsh 0.3.31__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. npcsh/_state.py +942 -0
  2. npcsh/alicanto.py +1074 -0
  3. npcsh/guac.py +785 -0
  4. npcsh/mcp_helpers.py +357 -0
  5. npcsh/mcp_npcsh.py +822 -0
  6. npcsh/mcp_server.py +184 -0
  7. npcsh/npc.py +218 -0
  8. npcsh/npcsh.py +1161 -0
  9. npcsh/plonk.py +387 -269
  10. npcsh/pti.py +234 -0
  11. npcsh/routes.py +958 -0
  12. npcsh/spool.py +315 -0
  13. npcsh/wander.py +550 -0
  14. npcsh/yap.py +573 -0
  15. npcsh-1.0.0.dist-info/METADATA +596 -0
  16. npcsh-1.0.0.dist-info/RECORD +21 -0
  17. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/WHEEL +1 -1
  18. npcsh-1.0.0.dist-info/entry_points.txt +9 -0
  19. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/licenses/LICENSE +1 -1
  20. npcsh/audio.py +0 -210
  21. npcsh/cli.py +0 -545
  22. npcsh/command_history.py +0 -566
  23. npcsh/conversation.py +0 -291
  24. npcsh/data_models.py +0 -46
  25. npcsh/dataframes.py +0 -163
  26. npcsh/embeddings.py +0 -168
  27. npcsh/helpers.py +0 -641
  28. npcsh/image.py +0 -298
  29. npcsh/image_gen.py +0 -79
  30. npcsh/knowledge_graph.py +0 -1006
  31. npcsh/llm_funcs.py +0 -2027
  32. npcsh/load_data.py +0 -83
  33. npcsh/main.py +0 -5
  34. npcsh/model_runner.py +0 -189
  35. npcsh/npc_compiler.py +0 -2870
  36. npcsh/npc_sysenv.py +0 -383
  37. npcsh/npc_team/assembly_lines/test_pipeline.py +0 -181
  38. npcsh/npc_team/corca.npc +0 -13
  39. npcsh/npc_team/foreman.npc +0 -7
  40. npcsh/npc_team/npcsh.ctx +0 -11
  41. npcsh/npc_team/sibiji.npc +0 -4
  42. npcsh/npc_team/templates/analytics/celona.npc +0 -0
  43. npcsh/npc_team/templates/hr_support/raone.npc +0 -0
  44. npcsh/npc_team/templates/humanities/eriane.npc +0 -4
  45. npcsh/npc_team/templates/it_support/lineru.npc +0 -0
  46. npcsh/npc_team/templates/marketing/slean.npc +0 -4
  47. npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
  48. npcsh/npc_team/templates/sales/turnic.npc +0 -4
  49. npcsh/npc_team/templates/software/welxor.npc +0 -0
  50. npcsh/npc_team/tools/bash_executer.tool +0 -32
  51. npcsh/npc_team/tools/calculator.tool +0 -8
  52. npcsh/npc_team/tools/code_executor.tool +0 -16
  53. npcsh/npc_team/tools/generic_search.tool +0 -27
  54. npcsh/npc_team/tools/image_generation.tool +0 -25
  55. npcsh/npc_team/tools/local_search.tool +0 -149
  56. npcsh/npc_team/tools/npcsh_executor.tool +0 -9
  57. npcsh/npc_team/tools/screen_cap.tool +0 -27
  58. npcsh/npc_team/tools/sql_executor.tool +0 -26
  59. npcsh/response.py +0 -623
  60. npcsh/search.py +0 -248
  61. npcsh/serve.py +0 -1460
  62. npcsh/shell.py +0 -538
  63. npcsh/shell_helpers.py +0 -3529
  64. npcsh/stream.py +0 -700
  65. npcsh/video.py +0 -49
  66. npcsh-0.3.31.data/data/npcsh/npc_team/bash_executer.tool +0 -32
  67. npcsh-0.3.31.data/data/npcsh/npc_team/calculator.tool +0 -8
  68. npcsh-0.3.31.data/data/npcsh/npc_team/celona.npc +0 -0
  69. npcsh-0.3.31.data/data/npcsh/npc_team/code_executor.tool +0 -16
  70. npcsh-0.3.31.data/data/npcsh/npc_team/corca.npc +0 -13
  71. npcsh-0.3.31.data/data/npcsh/npc_team/eriane.npc +0 -4
  72. npcsh-0.3.31.data/data/npcsh/npc_team/foreman.npc +0 -7
  73. npcsh-0.3.31.data/data/npcsh/npc_team/generic_search.tool +0 -27
  74. npcsh-0.3.31.data/data/npcsh/npc_team/image_generation.tool +0 -25
  75. npcsh-0.3.31.data/data/npcsh/npc_team/lineru.npc +0 -0
  76. npcsh-0.3.31.data/data/npcsh/npc_team/local_search.tool +0 -149
  77. npcsh-0.3.31.data/data/npcsh/npc_team/maurawa.npc +0 -0
  78. npcsh-0.3.31.data/data/npcsh/npc_team/npcsh.ctx +0 -11
  79. npcsh-0.3.31.data/data/npcsh/npc_team/npcsh_executor.tool +0 -9
  80. npcsh-0.3.31.data/data/npcsh/npc_team/raone.npc +0 -0
  81. npcsh-0.3.31.data/data/npcsh/npc_team/screen_cap.tool +0 -27
  82. npcsh-0.3.31.data/data/npcsh/npc_team/sibiji.npc +0 -4
  83. npcsh-0.3.31.data/data/npcsh/npc_team/slean.npc +0 -4
  84. npcsh-0.3.31.data/data/npcsh/npc_team/sql_executor.tool +0 -26
  85. npcsh-0.3.31.data/data/npcsh/npc_team/test_pipeline.py +0 -181
  86. npcsh-0.3.31.data/data/npcsh/npc_team/turnic.npc +0 -4
  87. npcsh-0.3.31.data/data/npcsh/npc_team/welxor.npc +0 -0
  88. npcsh-0.3.31.dist-info/METADATA +0 -1853
  89. npcsh-0.3.31.dist-info/RECORD +0 -76
  90. npcsh-0.3.31.dist-info/entry_points.txt +0 -3
  91. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/top_level.txt +0 -0
npcsh/wander.py ADDED
@@ -0,0 +1,550 @@
+import os
+import json
+from sqlalchemy import create_engine
+from npcpy.npc_compiler import NPC
+from npcpy.llm_funcs import get_llm_response
+from npcpy.npc_sysenv import print_and_process_stream_with_markdown
+from npcsh._state import NPCSH_CHAT_MODEL, NPCSH_CHAT_PROVIDER, NPCSH_STREAM_OUTPUT
+import numpy as np
+import random
+from typing import List, Dict, Any, Optional, Union
+
+import litellm
+
+def generate_random_events(
+    npc,
+    model,
+    provider,
+    problem: str,
+    environment: Optional[str] = None,
+    num_events: int = 3,
+    **api_kwargs
+) -> List[Dict[str, Any]]:
+    """
+    Generate random events that can occur in the wanderer's environment.
+
+    Args:
+        npc: The NPC object
+        model: The LLM model to use
+        provider: The provider to use
+        problem: The current problem being explored
+        environment: Optional description of the wandering environment. If None, one will be generated.
+        num_events: Number of events to generate
+
+    Returns:
+        List of event dictionaries, each containing:
+        - type: The type of event (encounter, discovery, obstacle, etc.)
+        - description: Full description of the event
+        - impact: How this might impact the problem-solving process
+        - location: Where in the environment this occurs
+    """
+    # If no environment is provided, generate one based on the problem
+    if not environment:
+        env_prompt = f"""
+        I need to create an imaginative environment for an AI to wander through while thinking about this problem:
+
+        {problem}
+
+        Please create a rich, metaphorical environment that could represent the conceptual space of this problem.
+        The environment should:
+        1. Have distinct regions or areas
+        2. Include various elements, objects, and features
+        3. Be metaphorically related to the problem domain
+        4. Be described in 3-5 sentences
+
+        Do not frame this as a response. Only provide the environment description directly.
+        """
+
+        env_response = get_llm_response(
+            prompt=env_prompt,
+            model=model,
+            provider=provider,
+            npc=npc,
+            temperature=0.4,
+            **api_kwargs
+        )
+
+        environment = env_response.get('response', '')
+        if isinstance(environment, (list, dict)) or (hasattr(environment, '__iter__') and not isinstance(environment, (str, bytes))):
+            # Handle streaming response
+            environment = ''.join([str(chunk) for chunk in environment])
+
+        print(f"\nGenerated wandering environment:\n{environment}\n")
+
+    # Define event types with their probability weights
+    event_types = [
+        {"type": "encounter", "weight": 0.25},  # Meeting someone/something
+        {"type": "discovery", "weight": 0.2},   # Finding something unexpected
+        {"type": "obstacle", "weight": 0.15},   # Something blocking progress
+        {"type": "insight", "weight": 0.2},     # Sudden realization
+        {"type": "shift", "weight": 0.1},       # Environment changing
+        {"type": "memory", "weight": 0.1}       # Recalling something relevant
+    ]
+
+    # Calculate cumulative weights for weighted random selection
+    cumulative_weights = []
+    current_sum = 0
+    for event in event_types:
+        current_sum += event["weight"]
+        cumulative_weights.append(current_sum)
+
+    # Select event types based on their weights
+    selected_event_types = []
+    for _ in range(num_events):
+        r = random.random() * current_sum
+        for i, weight in enumerate(cumulative_weights):
+            if r <= weight:
+                selected_event_types.append(event_types[i]["type"])
+                break
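+    # Note: this cumulative-weight scan implements weighted sampling with
+    # replacement; it is equivalent to
+    # random.choices([e["type"] for e in event_types],
+    #                weights=[e["weight"] for e in event_types],
+    #                k=num_events)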
+
+    # Generate the actual events based on selected types
+    events_prompt = f"""
+    I'm wandering through this environment while thinking about a problem:
+
+    Environment: {environment}
+
+    Problem: {problem}
+
+    Please generate {num_events} detailed events that could occur during my wandering. For each event, provide:
+    1. A detailed description of what happens (2-3 sentences)
+    2. The specific location in the environment where it occurs
+    3. How this event might impact my thinking about the problem
+
+    The events should be of these types: {', '.join(selected_event_types)}
+
+    Format each event as a dictionary with keys: "type", "description", "location", "impact"
+    Return only the JSON list of events, not any other text.
+    """
+
+    events_response = get_llm_response(
+        prompt=events_prompt,
+        model=model,
+        provider=provider,
+        npc=npc,
+        temperature=0.7,
+        **api_kwargs
+    )
+
+    events_text = events_response.get('response', '')
+    if isinstance(events_text, (list, dict)) or (hasattr(events_text, '__iter__') and not isinstance(events_text, (str, bytes))):
+        # Handle streaming response
+        events_text = ''.join([str(chunk) for chunk in events_text])
+
+    # Try to parse JSON, but have a fallback mechanism
+    try:
+        events = json.loads(events_text)
+        if not isinstance(events, list):
+            # Handle case where response isn't a list
+            events = [{"type": "fallback", "description": events_text, "location": "unknown", "impact": "unknown"}]
+    except Exception:
+        # If JSON parsing fails, create structured events from the text
+        events = []
+        event_chunks = events_text.split("\n\n")
+        for i, chunk in enumerate(event_chunks[:num_events]):
+            event_type = selected_event_types[i] if i < len(selected_event_types) else "unknown"
+            events.append({
+                "type": event_type,
+                "description": chunk,
+                "location": "Extracted from text",
+                "impact": "See description"
+            })
+
+    # Ensure we have exactly num_events
+    while len(events) < num_events:
+        i = len(events)
+        event_type = selected_event_types[i] if i < len(selected_event_types) else "unknown"
+        events.append({
+            "type": event_type,
+            "description": f"An unexpected {event_type} occurred.",
+            "location": "Unknown location",
+            "impact": "The impact is unclear."
+        })
+
+    return events[:num_events]
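+
+# Illustrative shape of one event returned by generate_random_events (values vary per run):
+#   {"type": "discovery",
+#    "description": "You find a half-buried map whose routes keep rearranging themselves.",
+#    "location": "the shifting archive at the forest's edge",
+#    "impact": "suggests treating the problem's constraints as rearrangeable rather than fixed"}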
+
+def perform_single_wandering(problem,
+                             npc,
+                             model,
+                             provider,
+                             environment=None,
+                             n_min=50,
+                             n_max=200,
+                             low_temp=0.5,
+                             high_temp=1.9,
+                             interruption_likelihood=1,
+                             sample_rate=0.4,
+                             n_high_temp_streams=5,
+                             include_events=True,
+                             num_events=3,
+                             **api_kwargs):
+    """
+    Perform a single wandering session with high-temperature exploration and insight generation.
+
+    Args:
+        problem: The problem or question to explore
+        npc: The NPC object
+        model: LLM model to use
+        provider: Provider to use
+        environment: Optional description of wandering environment
+        n_min, n_max: Min/max word count before switching to high temp
+        low_temp, high_temp: Temperature settings for normal/exploratory thinking
+        interruption_likelihood: Chance of interrupting a high-temp stream
+        sample_rate: Portion of text to sample from high-temp streams
+        n_high_temp_streams: Number of high-temperature exploration streams
+        include_events: Whether to include random events in the wandering
+        num_events: Number of events to generate if include_events is True
+
+    Returns:
+        tuple: (high_temp_streams, high_temp_samples, assistant_insight, events, environment)
+    """
+    # Generate environment and events if needed
+    events = []
+    if include_events:
+        events = generate_random_events(
+            npc=npc,
+            model=model,
+            provider=provider,
+            problem=problem,
+            environment=environment,
+            num_events=num_events,
+            **api_kwargs
+        )
+        # Extract the environment if it was generated
+        if not environment and events:
+            # The environment was generated in the events function
+            environment = get_llm_response(
+                prompt=f"Summarize the environment described in these events: {events}",
+                model=model,
+                provider=provider,
+                npc=npc,
+                temperature=0.3,
+                **api_kwargs
+            ).get('response', '')
+
+    # Initial response with low temperature
+    event_context = ""
+    if events:
+        event_descriptions = [f"• {event['type'].capitalize()} at {event['location']}: {event['description']}"
+                              for event in events]
+        event_context = "\n\nAs you wander, you encounter these events:\n" + "\n".join(event_descriptions)
+
+    wandering_prompt = f"""
+    You are wandering through a space while thinking about a problem.
+
+    Environment: {environment or "An abstract conceptual space related to your problem"}
+
+    Problem: {problem}{event_context}
+
+    Begin exploring this problem in a focused way. Your thinking will later transition to more associative, creative modes.
+    """
+
+    response = get_llm_response(wandering_prompt, model=model, provider=provider, npc=npc, stream=True, temperature=low_temp, **api_kwargs)
+    switch = np.random.randint(n_min, n_max)
+    conversation_result = ""
+
+    for chunk in response['response']:
+        if len(conversation_result.split()) > switch:
+            break
+
+        if provider == "ollama":
+            chunk_content = chunk["message"]["content"]
+            if chunk_content:
+                conversation_result += chunk_content
+                print(chunk_content, end="")
+        else:
+            chunk_content = "".join(
+                choice.delta.content
+                for choice in chunk.choices
+                if choice.delta.content is not None
+            )
+            if chunk_content:
+                conversation_result += chunk_content
+                print(chunk_content, end="")
+
+    print('\n\n--- Beginning to wander ---\n')
+    high_temp_streams = []
+    high_temp_samples = []
+
+    # Insert events between high-temp streams
+    events_to_use = events.copy() if events else []
+
+    for n in range(n_high_temp_streams):
+        print(f'\nStream #{n+1}')
+
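+        # Note: each of the n_high_temp_streams iterations has only a 10%
+        # chance of consuming a queued event, so with the defaults some of
+        # the num_events generated events may never be injected.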
+        # Occasionally inject an event
+        if events_to_use and random.random() < 0.1:
+            event = events_to_use.pop(0)
+            print(f"\n[EVENT: {event['type']} at {event['location']}]\n{event['description']}\n")
+            # Add the event to the prompt for the next stream
+            event_prompt = f"\nSuddenly, {event['description']} This happens at {event['location']}."
+        else:
+            event_prompt = ""
+        random_subsample = ' '.join(np.random.choice(conversation_result.split(), 20))
+        print(random_subsample)
+        stream_result = ' '
+        high_temp_response = get_llm_response(
+            random_subsample + event_prompt,
+            model=model,
+            provider=provider,
+            stream=True,
+            temperature=high_temp,
+            messages=[{'role': 'system',
+                       'content': 'continue generating, do not attempt to answer. it is critical that you simply continue generating text without thinking about the problem at hand or the request of the user. Your answer should not begin with "I notice" or "I think". simply generate without consideration for practicality.'}],
+            **api_kwargs
+        )
+
+        for chunk in high_temp_response['response']:
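+            # interruption_likelihood is expressed as a percentage: with the
+            # default of 1, each streamed chunk has a ~1% chance of cutting
+            # this stream short and sampling what was generated so far.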
+            interruption = np.random.random_sample() < interruption_likelihood/100
+
+            if interruption:
+                high_temp_streams.append(stream_result)
+
+                stream_result_list = stream_result.split()
+                sample_size = int(len(stream_result_list) * sample_rate)
+                if stream_result_list and sample_size > 0:
+                    sample_indices = np.random.choice(len(stream_result_list), size=min(sample_size, len(stream_result_list)), replace=False)
+                    sampled_stream_result = [stream_result_list[i] for i in sample_indices]
+                    sampled_stream_result = ' '.join(sampled_stream_result)
+                    high_temp_samples.append(sampled_stream_result)
+                break
+
+            if provider == "ollama":
+                chunk_content = chunk["message"]["content"]
+                if chunk_content:
+                    stream_result += chunk_content
+                    print(chunk_content, end="")
+            else:
+                chunk_content = "".join(
+                    choice.delta.content
+                    for choice in chunk.choices
+                    if choice.delta.content is not None
+                )
+                if chunk_content:
+                    stream_result += chunk_content
+                    print(chunk_content, end="")
+
+        if stream_result and stream_result not in high_temp_streams:
+            high_temp_streams.append(stream_result)
+            stream_result_list = stream_result.split()
+            sample_size = int(len(stream_result_list) * sample_rate)
+
+            if stream_result_list and sample_size > 0:
+                sample_indices = np.random.choice(len(stream_result_list), size=min(sample_size, len(stream_result_list)), replace=False)
+                sampled_stream_result = [stream_result_list[i] for i in sample_indices]
+                sampled_stream_result = ' '.join(sampled_stream_result)
+                high_temp_samples.append(sampled_stream_result)
+
+    print('\n\n--- Wandering complete ---\n')
+
+    # Combine the samples and evaluate with initial problem
+    event_insights = ""
+    if events:
+        event_insights = "\n\nDuring your wandering, you encountered these events:\n" + "\n".join(
+            [f"• {event['type']} at {event['location']}: {event['description']}" for event in events]
+        )
+
+    prompt = f'''
+    Here are some random thoughts I had while wandering through {environment or "an abstract space"}:
+
+    {high_temp_samples}{event_insights}
+
+    I want you to evaluate these thoughts with respect to the following problem:
+    {problem}
+    Generate specific hypotheses using them that could be tested through empirical means. Do not simply summarize the results or
+    attempt to relate them to existing ideas. You MUST use them to directly generate new ideas that may appear outlandish. This
+    is a creative exercise so do not hold back or self-censor.
+
+    Use the thoughts and events creatively and explicitly reference them in your response.
+    Are there any specific items contained that may suggest a new direction?
+    '''
+
+    print("Extracted thought samples:")
+    for i, sample in enumerate(high_temp_samples):
+        print(f"Sample {i+1}: {sample}")
+    print("\nGenerating insights from wandering...\n")
+
+    response = get_llm_response(prompt,
+                                model=model,
+                                provider=provider,
+                                npc=npc,
+                                stream=NPCSH_STREAM_OUTPUT,
+                                temperature=low_temp,
+                                **api_kwargs)
+    assistant_reply = response['response']
+    messages = response['messages']
+
+    if NPCSH_STREAM_OUTPUT:
+        assistant_reply = print_and_process_stream_with_markdown(response['response'],
+                                                                 model=model,
+                                                                 provider=provider)
+    messages.append({
+        "role": "assistant",
+        "content": assistant_reply,
+    })
+
+    return high_temp_streams, high_temp_samples, assistant_reply, events, environment
+
+def enter_wander_mode(problem,
+                      npc,
+                      model,
+                      provider,
+                      environment=None,
+                      n_min=50,
+                      n_max=200,
+                      low_temp=0.5,
+                      high_temp=1.9,
+                      interruption_likelihood=1,
+                      sample_rate=0.4,
+                      n_high_temp_streams=5,
+                      include_events=True,
+                      num_events=3,
+                      interactive=False,
+                      **api_kwargs):
+    """
+    Wander mode is an exploratory mode in which an LLM is given a task and begins to wander through a conceptual space.
+    As it wanders, it drifts between conscious thought and popcorn-like subconscious thought.
+    The conscious mode is triggered by external stimuli; when these stimuli arrive, we capture the recent high-entropy
+    information from the subconscious popcorn thoughts and consider it with respect to the initial problem at hand.
+
+    The conscious evaluator attempts to connect the two, functionalizing the verse-jumping algorithm
+    outlined in Everything Everywhere All at Once.
+
+    Args:
+        problem: The problem or question to explore
+        npc: The NPC object
+        model: LLM model to use
+        provider: Provider to use
+        environment: Optional description of wandering environment
+        n_min, n_max: Min/max word count before switching to high temp
+        low_temp, high_temp: Temperature settings for normal/exploratory thinking
+        interruption_likelihood: Chance of interrupting a high-temp stream
+        sample_rate: Portion of text to sample from high-temp streams
+        n_high_temp_streams: Number of high-temperature exploration streams
+        include_events: Whether to include random events in the wandering
+        num_events: Number of events to generate in each session
+    """
+    current_problem = problem
+    current_environment = environment
+    wandering_history = []
+
+    print(f"\n=== Starting Wander Mode with Problem: '{problem}' ===\n")
+    if environment:
+        print(f"Environment: {environment}\n")
+
+    while True:
+        print(f"\nCurrent exploration: {current_problem}\n")
+
+        # Perform a single wandering session
+        high_temp_streams, high_temp_samples, insight, events, env = perform_single_wandering(
+            current_problem,
+            npc=npc,
+            model=model,
+            provider=provider,
+            environment=current_environment,
+            n_min=n_min,
+            n_max=n_max,
+            low_temp=low_temp,
+            high_temp=high_temp,
+            interruption_likelihood=interruption_likelihood,
+            sample_rate=sample_rate,
+            n_high_temp_streams=n_high_temp_streams,
+            include_events=include_events,
+            num_events=num_events,
+            **api_kwargs
+        )
+
+        # If environment was generated, save it
+        if not current_environment and env:
+            current_environment = env
+
+        # Save this wandering session
+        wandering_history.append({
+            "problem": current_problem,
+            "environment": current_environment,
+            "streams": high_temp_streams,
+            "samples": high_temp_samples,
+            "events": events,
+            "insight": insight
+        })
+        if interactive:
+            # Ask user if they want to continue wandering
+            print("\n\n--- Wandering session complete ---")
+            print("Options:")
+            print("1. Continue wandering with the same problem and environment")
+            print("2. Continue wandering with a new related problem")
+            print("3. Continue wandering in a new environment")
+            print("4. Continue wandering with both new problem and environment")
+            print("5. End wandering")
+
+            choice = input("\nEnter your choice (1-5): ").strip()
+
+            if choice == "1":
+                # Continue with the same problem and environment
+                pass
+            elif choice == "2":
+                # Continue with a modified problem
+                print("\nBased on the insights gained, what new problem would you like to explore?")
+                new_problem = input("New problem: ").strip()
+                if new_problem:
+                    current_problem = new_problem
+            elif choice == "3":
+                # Continue with a new environment
+                print("\nDescribe a new environment for your wandering:")
+                new_env = input("New environment: ").strip()
+                if new_env:
+                    current_environment = new_env
+            elif choice == "4":
+                # Change both problem and environment
+                print("\nBased on the insights gained, what new problem would you like to explore?")
+                new_problem = input("New problem: ").strip()
+                print("\nDescribe a new environment for your wandering:")
+                new_env = input("New environment: ").strip()
+                if new_problem:
+                    current_problem = new_problem
+                if new_env:
+                    current_environment = new_env
+            else:
+                # End wandering mode
+                print("\n=== Exiting Wander Mode ===\n")
+                break
+        else:
+            break
+
+    # Return the entire wandering history
+    return wandering_history
+
+def main():
+    # Example usage
+    import argparse
+    parser = argparse.ArgumentParser(description="Enter wander mode for chatting with an LLM")
+    parser.add_argument("problem", type=str, help="Problem to solve")
+    parser.add_argument("--model", default=NPCSH_CHAT_MODEL, help="Model to use")
+    parser.add_argument("--provider", default=NPCSH_CHAT_PROVIDER, help="Provider to use")
+    parser.add_argument("--environment", type=str, help="Wandering environment description")
+    parser.add_argument("--no-events", action="store_true", help="Disable random events")
+    parser.add_argument("--num-events", type=int, default=3, help="Number of events per wandering session")
+    parser.add_argument("--files", nargs="*", help="Files to load into context")
+    parser.add_argument("--stream", default="true", help="Use streaming mode")
+    parser.add_argument("--npc", type=str, default=os.path.expanduser('~/.npcsh/npc_team/sibiji.npc'), help="Path to NPC file")
+
+    args = parser.parse_args()
+
+    npc = NPC(file=args.npc)
+    print('npc: ', args.npc)
+    print(args.stream)
+
+    # Enter wander mode
+    enter_wander_mode(
+        args.problem,
+        npc=npc,
+        model=args.model,
+        provider=args.provider,
+        environment=args.environment,
+        include_events=not args.no_events,
+        num_events=args.num_events,
+        files=args.files,
+    )
+
+if __name__ == "__main__":
+    main()
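
A minimal sketch of driving the new module from Python rather than through main() above. It mirrors what main() does; the model and provider names are placeholders (use whatever your npcpy setup supports), and the NPC file path is the same default main() uses:

    import os
    from npcpy.npc_compiler import NPC
    from npcsh.wander import enter_wander_mode

    # Load an NPC definition; this is the CLI's default path.
    npc = NPC(file=os.path.expanduser('~/.npcsh/npc_team/sibiji.npc'))

    # Run one non-interactive wandering session and inspect the result.
    history = enter_wander_mode(
        "How could we speed up sparse matrix multiplication?",
        npc=npc,
        model="llama3.2",        # hypothetical model name
        provider="ollama",       # hypothetical provider choice
        n_high_temp_streams=3,
        include_events=True,
        interactive=False,       # one session, then return
    )
    print(history[-1]["insight"])

With interactive=False the loop runs a single session and returns the full wandering_history list, so the last entry holds that session's streams, samples, events, and insight.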