npcpy 1.0.26__py3-none-any.whl → 1.2.32__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148)
  1. npcpy/__init__.py +0 -7
  2. npcpy/data/audio.py +16 -99
  3. npcpy/data/image.py +43 -42
  4. npcpy/data/load.py +83 -124
  5. npcpy/data/text.py +28 -28
  6. npcpy/data/video.py +8 -32
  7. npcpy/data/web.py +51 -23
  8. npcpy/ft/diff.py +110 -0
  9. npcpy/ft/ge.py +115 -0
  10. npcpy/ft/memory_trainer.py +171 -0
  11. npcpy/ft/model_ensembler.py +357 -0
  12. npcpy/ft/rl.py +360 -0
  13. npcpy/ft/sft.py +248 -0
  14. npcpy/ft/usft.py +128 -0
  15. npcpy/gen/audio_gen.py +24 -0
  16. npcpy/gen/embeddings.py +13 -13
  17. npcpy/gen/image_gen.py +262 -117
  18. npcpy/gen/response.py +615 -415
  19. npcpy/gen/video_gen.py +53 -7
  20. npcpy/llm_funcs.py +1869 -437
  21. npcpy/main.py +1 -1
  22. npcpy/memory/command_history.py +844 -510
  23. npcpy/memory/kg_vis.py +833 -0
  24. npcpy/memory/knowledge_graph.py +892 -1845
  25. npcpy/memory/memory_processor.py +81 -0
  26. npcpy/memory/search.py +188 -90
  27. npcpy/mix/debate.py +192 -3
  28. npcpy/npc_compiler.py +1672 -801
  29. npcpy/npc_sysenv.py +593 -1266
  30. npcpy/serve.py +3120 -0
  31. npcpy/sql/ai_function_tools.py +257 -0
  32. npcpy/sql/database_ai_adapters.py +186 -0
  33. npcpy/sql/database_ai_functions.py +163 -0
  34. npcpy/sql/model_runner.py +19 -19
  35. npcpy/sql/npcsql.py +706 -507
  36. npcpy/sql/sql_model_compiler.py +156 -0
  37. npcpy/tools.py +183 -0
  38. npcpy/work/plan.py +13 -279
  39. npcpy/work/trigger.py +3 -3
  40. npcpy-1.2.32.dist-info/METADATA +803 -0
  41. npcpy-1.2.32.dist-info/RECORD +54 -0
  42. npcpy/data/dataframes.py +0 -171
  43. npcpy/memory/deep_research.py +0 -125
  44. npcpy/memory/sleep.py +0 -557
  45. npcpy/modes/_state.py +0 -78
  46. npcpy/modes/alicanto.py +0 -1075
  47. npcpy/modes/guac.py +0 -785
  48. npcpy/modes/mcp_npcsh.py +0 -822
  49. npcpy/modes/npc.py +0 -213
  50. npcpy/modes/npcsh.py +0 -1158
  51. npcpy/modes/plonk.py +0 -409
  52. npcpy/modes/pti.py +0 -234
  53. npcpy/modes/serve.py +0 -1637
  54. npcpy/modes/spool.py +0 -312
  55. npcpy/modes/wander.py +0 -549
  56. npcpy/modes/yap.py +0 -572
  57. npcpy/npc_team/alicanto.npc +0 -2
  58. npcpy/npc_team/alicanto.png +0 -0
  59. npcpy/npc_team/assembly_lines/test_pipeline.py +0 -181
  60. npcpy/npc_team/corca.npc +0 -13
  61. npcpy/npc_team/foreman.npc +0 -7
  62. npcpy/npc_team/frederic.npc +0 -6
  63. npcpy/npc_team/frederic4.png +0 -0
  64. npcpy/npc_team/guac.png +0 -0
  65. npcpy/npc_team/jinxs/automator.jinx +0 -18
  66. npcpy/npc_team/jinxs/bash_executer.jinx +0 -31
  67. npcpy/npc_team/jinxs/calculator.jinx +0 -11
  68. npcpy/npc_team/jinxs/edit_file.jinx +0 -96
  69. npcpy/npc_team/jinxs/file_chat.jinx +0 -14
  70. npcpy/npc_team/jinxs/gui_controller.jinx +0 -28
  71. npcpy/npc_team/jinxs/image_generation.jinx +0 -29
  72. npcpy/npc_team/jinxs/internet_search.jinx +0 -30
  73. npcpy/npc_team/jinxs/local_search.jinx +0 -152
  74. npcpy/npc_team/jinxs/npcsh_executor.jinx +0 -31
  75. npcpy/npc_team/jinxs/python_executor.jinx +0 -8
  76. npcpy/npc_team/jinxs/screen_cap.jinx +0 -25
  77. npcpy/npc_team/jinxs/sql_executor.jinx +0 -33
  78. npcpy/npc_team/kadiefa.npc +0 -3
  79. npcpy/npc_team/kadiefa.png +0 -0
  80. npcpy/npc_team/npcsh.ctx +0 -9
  81. npcpy/npc_team/npcsh_sibiji.png +0 -0
  82. npcpy/npc_team/plonk.npc +0 -2
  83. npcpy/npc_team/plonk.png +0 -0
  84. npcpy/npc_team/plonkjr.npc +0 -2
  85. npcpy/npc_team/plonkjr.png +0 -0
  86. npcpy/npc_team/sibiji.npc +0 -5
  87. npcpy/npc_team/sibiji.png +0 -0
  88. npcpy/npc_team/spool.png +0 -0
  89. npcpy/npc_team/templates/analytics/celona.npc +0 -0
  90. npcpy/npc_team/templates/hr_support/raone.npc +0 -0
  91. npcpy/npc_team/templates/humanities/eriane.npc +0 -4
  92. npcpy/npc_team/templates/it_support/lineru.npc +0 -0
  93. npcpy/npc_team/templates/marketing/slean.npc +0 -4
  94. npcpy/npc_team/templates/philosophy/maurawa.npc +0 -0
  95. npcpy/npc_team/templates/sales/turnic.npc +0 -4
  96. npcpy/npc_team/templates/software/welxor.npc +0 -0
  97. npcpy/npc_team/yap.png +0 -0
  98. npcpy/routes.py +0 -958
  99. npcpy/work/mcp_helpers.py +0 -357
  100. npcpy/work/mcp_server.py +0 -194
  101. npcpy-1.0.26.data/data/npcpy/npc_team/alicanto.npc +0 -2
  102. npcpy-1.0.26.data/data/npcpy/npc_team/alicanto.png +0 -0
  103. npcpy-1.0.26.data/data/npcpy/npc_team/automator.jinx +0 -18
  104. npcpy-1.0.26.data/data/npcpy/npc_team/bash_executer.jinx +0 -31
  105. npcpy-1.0.26.data/data/npcpy/npc_team/calculator.jinx +0 -11
  106. npcpy-1.0.26.data/data/npcpy/npc_team/celona.npc +0 -0
  107. npcpy-1.0.26.data/data/npcpy/npc_team/corca.npc +0 -13
  108. npcpy-1.0.26.data/data/npcpy/npc_team/edit_file.jinx +0 -96
  109. npcpy-1.0.26.data/data/npcpy/npc_team/eriane.npc +0 -4
  110. npcpy-1.0.26.data/data/npcpy/npc_team/file_chat.jinx +0 -14
  111. npcpy-1.0.26.data/data/npcpy/npc_team/foreman.npc +0 -7
  112. npcpy-1.0.26.data/data/npcpy/npc_team/frederic.npc +0 -6
  113. npcpy-1.0.26.data/data/npcpy/npc_team/frederic4.png +0 -0
  114. npcpy-1.0.26.data/data/npcpy/npc_team/guac.png +0 -0
  115. npcpy-1.0.26.data/data/npcpy/npc_team/gui_controller.jinx +0 -28
  116. npcpy-1.0.26.data/data/npcpy/npc_team/image_generation.jinx +0 -29
  117. npcpy-1.0.26.data/data/npcpy/npc_team/internet_search.jinx +0 -30
  118. npcpy-1.0.26.data/data/npcpy/npc_team/kadiefa.npc +0 -3
  119. npcpy-1.0.26.data/data/npcpy/npc_team/kadiefa.png +0 -0
  120. npcpy-1.0.26.data/data/npcpy/npc_team/lineru.npc +0 -0
  121. npcpy-1.0.26.data/data/npcpy/npc_team/local_search.jinx +0 -152
  122. npcpy-1.0.26.data/data/npcpy/npc_team/maurawa.npc +0 -0
  123. npcpy-1.0.26.data/data/npcpy/npc_team/npcsh.ctx +0 -9
  124. npcpy-1.0.26.data/data/npcpy/npc_team/npcsh_executor.jinx +0 -31
  125. npcpy-1.0.26.data/data/npcpy/npc_team/npcsh_sibiji.png +0 -0
  126. npcpy-1.0.26.data/data/npcpy/npc_team/plonk.npc +0 -2
  127. npcpy-1.0.26.data/data/npcpy/npc_team/plonk.png +0 -0
  128. npcpy-1.0.26.data/data/npcpy/npc_team/plonkjr.npc +0 -2
  129. npcpy-1.0.26.data/data/npcpy/npc_team/plonkjr.png +0 -0
  130. npcpy-1.0.26.data/data/npcpy/npc_team/python_executor.jinx +0 -8
  131. npcpy-1.0.26.data/data/npcpy/npc_team/raone.npc +0 -0
  132. npcpy-1.0.26.data/data/npcpy/npc_team/screen_cap.jinx +0 -25
  133. npcpy-1.0.26.data/data/npcpy/npc_team/sibiji.npc +0 -5
  134. npcpy-1.0.26.data/data/npcpy/npc_team/sibiji.png +0 -0
  135. npcpy-1.0.26.data/data/npcpy/npc_team/slean.npc +0 -4
  136. npcpy-1.0.26.data/data/npcpy/npc_team/spool.png +0 -0
  137. npcpy-1.0.26.data/data/npcpy/npc_team/sql_executor.jinx +0 -33
  138. npcpy-1.0.26.data/data/npcpy/npc_team/test_pipeline.py +0 -181
  139. npcpy-1.0.26.data/data/npcpy/npc_team/turnic.npc +0 -4
  140. npcpy-1.0.26.data/data/npcpy/npc_team/welxor.npc +0 -0
  141. npcpy-1.0.26.data/data/npcpy/npc_team/yap.png +0 -0
  142. npcpy-1.0.26.dist-info/METADATA +0 -827
  143. npcpy-1.0.26.dist-info/RECORD +0 -139
  144. npcpy-1.0.26.dist-info/entry_points.txt +0 -11
  145. /npcpy/{modes → ft}/__init__.py +0 -0
  146. {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/WHEEL +0 -0
  147. {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/licenses/LICENSE +0 -0
  148. {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/top_level.txt +0 -0
npcpy/modes/wander.py DELETED
@@ -1,549 +0,0 @@
1
- import os
2
- from sqlalchemy import create_engine
3
- from npcpy.npc_compiler import NPC
4
- from npcpy.llm_funcs import get_llm_response
5
- from npcpy.npc_sysenv import NPCSH_CHAT_MODEL, NPCSH_CHAT_PROVIDER, NPCSH_STREAM_OUTPUT, print_and_process_stream_with_markdown
6
- import numpy as np
7
- import random
8
- from typing import List, Dict, Any, Optional, Union
9
-
10
- import litellm
11
-
12
def generate_random_events(
    npc,
    model,
    provider,
    problem: str,
    environment: Optional[str] = None,
    num_events: int = 3,
    **api_kwargs
) -> List[Dict[str, Any]]:
    """
    Generate random events that can occur in the wanderer's environment.

    Args:
        npc: The NPC object used for LLM calls.
        model: The LLM model to use.
        provider: The provider to use.
        problem: The current problem being explored.
        environment: Optional description of the wandering environment.
            If None, one is generated from the problem first.
        num_events: Number of events to generate.
        **api_kwargs: Extra keyword arguments forwarded to get_llm_response.

    Returns:
        List of event dictionaries, each containing:
        - type: The type of event (encounter, discovery, obstacle, etc.)
        - description: Full description of the event
        - impact: How this might impact the problem-solving process
        - location: Where in the environment this occurs
    """
    # Hoisted out of the parsing try-block: previously `import json` lived
    # inside the try, so an ImportError was silently eaten by a bare except.
    import json

    # If no environment is provided, generate one based on the problem.
    if not environment:
        env_prompt = f"""
I need to create an imaginative environment for an AI to wander through while thinking about this problem:

{problem}

Please create a rich, metaphorical environment that could represent the conceptual space of this problem.
The environment should:
1. Have distinct regions or areas
2. Include various elements, objects, and features
3. Be metaphorically related to the problem domain
4. Be described in 3-5 sentences

Do not frame this as a response. Only provide the environment description directly.
"""

        env_response = get_llm_response(
            prompt=env_prompt,
            model=model,
            provider=provider,
            npc=npc,
            temperature=0.4,
            **api_kwargs
        )

        environment = env_response.get('response', '')
        # A streaming response arrives as a non-string iterable of chunks;
        # flatten it into a single string.
        if isinstance(environment, (list, dict)) or (
            hasattr(environment, '__iter__') and not isinstance(environment, (str, bytes))
        ):
            environment = ''.join(str(chunk) for chunk in environment)

        print(f"\nGenerated wandering environment:\n{environment}\n")

    # Event types with their probability weights.
    event_types = [
        {"type": "encounter", "weight": 0.25},  # Meeting someone/something
        {"type": "discovery", "weight": 0.2},   # Finding something unexpected
        {"type": "obstacle", "weight": 0.15},   # Something blocking progress
        {"type": "insight", "weight": 0.2},     # Sudden realization
        {"type": "shift", "weight": 0.1},       # Environment changing
        {"type": "memory", "weight": 0.1},      # Recalling something relevant
    ]

    # random.choices performs the weighted draw directly, replacing the
    # hand-rolled cumulative-weight loop (same distribution, with replacement).
    selected_event_types = [
        picked["type"]
        for picked in random.choices(
            event_types,
            weights=[event["weight"] for event in event_types],
            k=num_events,
        )
    ]

    # Generate the actual events based on the selected types.
    events_prompt = f"""
I'm wandering through this environment while thinking about a problem:

Environment: {environment}

Problem: {problem}

Please generate {num_events} detailed events that could occur during my wandering. For each event, provide:
1. A detailed description of what happens (2-3 sentences)
2. The specific location in the environment where it occurs
3. How this event might impact my thinking about the problem

The events should be of these types: {', '.join(selected_event_types)}

Format each event as a dictionary with keys: "type", "description", "location", "impact"
Return only the JSON list of events, not any other text.
"""

    events_response = get_llm_response(
        prompt=events_prompt,
        model=model,
        provider=provider,
        npc=npc,
        temperature=0.7,
        **api_kwargs
    )

    events_text = events_response.get('response', '')
    # Flatten a streaming (chunk-iterator) response into one string.
    if isinstance(events_text, (list, dict)) or (
        hasattr(events_text, '__iter__') and not isinstance(events_text, (str, bytes))
    ):
        events_text = ''.join(str(chunk) for chunk in events_text)

    # Try to parse JSON; fall back to heuristic text chunking on failure.
    try:
        events = json.loads(events_text)
        if not isinstance(events, list):
            # Response wasn't a list: wrap the raw text as a single event.
            events = [{"type": "fallback", "description": events_text, "location": "unknown", "impact": "unknown"}]
    except (ValueError, TypeError):
        # Narrowed from a bare `except:`; ValueError covers json.JSONDecodeError
        # and TypeError covers a non-string input. Build structured events
        # from the raw text instead.
        events = []
        event_chunks = events_text.split("\n\n")
        for i, chunk in enumerate(event_chunks[:num_events]):
            event_type = selected_event_types[i] if i < len(selected_event_types) else "unknown"
            events.append({
                "type": event_type,
                "description": chunk,
                "location": "Extracted from text",
                "impact": "See description"
            })

    # Pad with placeholder events so callers always get exactly num_events.
    while len(events) < num_events:
        i = len(events)
        event_type = selected_event_types[i] if i < len(selected_event_types) else "unknown"
        events.append({
            "type": event_type,
            "description": f"An unexpected {event_type} occurred.",
            "location": "Unknown location",
            "impact": "The impact is unclear."
        })

    return events[:num_events]
163
-
164
def perform_single_wandering(problem,
                             npc,
                             model,
                             provider,
                             environment=None,
                             n_min=50,
                             n_max=200,
                             low_temp=0.5,
                             high_temp=1.9,
                             interruption_likelihood=1,
                             sample_rate=0.4,
                             n_high_temp_streams=5,
                             include_events=True,
                             num_events=3,
                             **api_kwargs):
    """
    Perform a single wandering session with high-temperature exploration and insight generation.

    Args:
        problem: The problem or question to explore
        npc: The NPC object
        model: LLM model to use
        provider: Provider to use
        environment: Optional description of wandering environment
        n_min, n_max: Min/max word count before switching to high temp
        low_temp, high_temp: Temperature settings for normal/exploratory thinking
        interruption_likelihood: Chance of interrupting a high-temp stream,
            expressed in percent (divided by 100 per streamed chunk below)
        sample_rate: Portion of text to sample from high-temp streams
        n_high_temp_streams: Number of high-temperature exploration streams
        include_events: Whether to include random events in the wandering
        num_events: Number of events to generate if include_events is True

    Returns:
        tuple: (high_temp_streams, high_temp_samples, assistant_insight, events, environment)
    """
    # Generate environment and events if needed.
    events = []
    if include_events:
        events = generate_random_events(
            npc=npc,
            model=model,
            provider=provider,
            problem=problem,
            environment=environment,
            num_events=num_events,
            **api_kwargs
        )
        # Extract the environment if it was generated inside the events call;
        # we recover it by asking the model to summarize the events.
        if not environment and events:
            # The environment was generated in the events function
            environment = get_llm_response(
                prompt=f"Summarize the environment described in these events: {events}",
                model=model,
                provider=provider,
                npc=npc,
                temperature=0.3,
                **api_kwargs
            ).get('response', '')

    # Initial focused response with low temperature.
    event_context = ""
    if events:
        event_descriptions = [f"• {event['type'].capitalize()} at {event['location']}: {event['description']}"
                              for event in events]
        event_context = "\n\nAs you wander, you encounter these events:\n" + "\n".join(event_descriptions)

    wandering_prompt = f"""
You are wandering through a space while thinking about a problem.

Environment: {environment or "An abstract conceptual space related to your problem"}

Problem: {problem}{event_context}

Begin exploring this problem in a focused way. Your thinking will later transition to more associative, creative modes.
"""

    response = get_llm_response(wandering_prompt, model=model, provider=provider, npc=npc, stream=True, temperature=low_temp, **api_kwargs)
    # Randomized word-count threshold at which the focused stream is cut off.
    switch = np.random.randint(n_min, n_max)
    conversation_result = ""

    for chunk in response['response']:
        if len(conversation_result.split()) > switch:
            break

        # Ollama chunks carry content under ["message"]["content"]; other
        # providers use OpenAI-style choice deltas.
        if provider == "ollama":
            chunk_content = chunk["message"]["content"]
            if chunk_content:
                conversation_result += chunk_content
                print(chunk_content, end="")
        else:
            chunk_content = "".join(
                choice.delta.content
                for choice in chunk.choices
                if choice.delta.content is not None
            )
            if chunk_content:
                conversation_result += chunk_content
                print(chunk_content, end="")

    print('\n\n--- Beginning to wander ---\n')
    high_temp_streams = []
    high_temp_samples = []

    # Copy so popping events for injection doesn't mutate the returned list.
    events_to_use = events.copy() if events else []

    for n in range(n_high_temp_streams):
        print(f'\nStream #{n+1}')

        # Occasionally (10% per stream) inject a pending event into the prompt.
        if events_to_use and random.random() < 0.1:
            event = events_to_use.pop(0)
            print(f"\n[EVENT: {event['type']} at {event['location']}]\n{event['description']}\n")
            # Add the event to the prompt for the next stream
            event_prompt = f"\nSuddenly, {event['description']} This happens at {event['location']}."
        else:
            event_prompt = ""
        # Seed the high-temp stream with 20 words drawn (with replacement)
        # from the focused stream.
        # NOTE(review): np.random.choice raises ValueError if
        # conversation_result is empty (e.g. the stream yielded nothing) —
        # consider guarding before sampling.
        random_subsample = ' '.join(np.random.choice(conversation_result.split(), 20))
        print(random_subsample)
        stream_result = ' '
        high_temp_response = get_llm_response(
            random_subsample+event_prompt,
            model=model,
            provider=provider,
            stream=True,
            temperature=high_temp,
            messages = [{'role':'system',
                         'content':'continue generating, do not attempt to answer. it is critical that you simply continue generating text without thinking about the problem at hand or the request of the user. Your answer should not begin with "I notice" or "I think". simply generate without consideration for practicality.'}],
            **api_kwargs
        )

        for chunk in high_temp_response['response']:
            # Per-chunk interruption roll: likelihood is a percentage.
            interruption = np.random.random_sample() < interruption_likelihood/100

            if interruption:
                high_temp_streams.append(stream_result)

                # Sample a random subset of the interrupted stream's words.
                stream_result_list = stream_result.split()
                sample_size = int(len(stream_result_list) * sample_rate)
                if stream_result_list and sample_size > 0:
                    sample_indices = np.random.choice(len(stream_result_list), size=min(sample_size, len(stream_result_list)), replace=False)
                    sampled_stream_result = [stream_result_list[i] for i in sample_indices]
                    sampled_stream_result = ' '.join(sampled_stream_result)
                    high_temp_samples.append(sampled_stream_result)
                break

            if provider == "ollama":
                chunk_content = chunk["message"]["content"]
                if chunk_content:
                    stream_result += chunk_content
                    print(chunk_content, end="")
            else:
                chunk_content = "".join(
                    choice.delta.content
                    for choice in chunk.choices
                    if choice.delta.content is not None
                )
                if chunk_content:
                    stream_result += chunk_content
                    print(chunk_content, end="")

        # If the stream finished without interruption, record and sample it too.
        # (The `not in` check avoids double-appending an interrupted stream.)
        if stream_result and stream_result not in high_temp_streams:
            high_temp_streams.append(stream_result)
            stream_result_list = stream_result.split()
            sample_size = int(len(stream_result_list) * sample_rate)

            # NOTE(review): unlike the interruption branch above, this path
            # has no empty-list/zero-size guard; np.random.choice raises if
            # stream_result_list is empty — confirm whether that can occur.
            sample_indices = np.random.choice(len(stream_result_list), size=min(sample_size, len(stream_result_list)), replace=False)
            sampled_stream_result = [stream_result_list[i] for i in sample_indices]
            sampled_stream_result = ' '.join(sampled_stream_result)
            high_temp_samples.append(sampled_stream_result)

    print('\n\n--- Wandering complete ---\n')

    # Combine the samples and evaluate them against the initial problem.
    event_insights = ""
    if events:
        event_insights = "\n\nDuring your wandering, you encountered these events:\n" + "\n".join(
            [f"• {event['type']} at {event['location']}: {event['description']}" for event in events]
        )

    prompt = f'''
Here are some random thoughts I had while wandering through {environment or "an abstract space"}:

{high_temp_samples}{event_insights}

I want you to evaluate these thoughts with respect to the following problem:
{problem}
Generate specific hypotheses using them that could be tested through empirical means. Do not simply summarize the results or
attempt to relate them to existing ideas. You MUST use them to directly generate new ideas that may appear outlandish. This
is a creative exercise so do not hold back or self-censor.

Use the thoughts and events creatively and explicitly reference them in your response.
Are there any specific items contained that may suggest a new direction?
'''

    print("Extracted thought samples:")
    for i, sample in enumerate(high_temp_samples):
        print(f"Sample {i+1}: {sample}")
    print("\nGenerating insights from wandering...\n")

    response = get_llm_response(prompt,
                                model=model,
                                provider=provider,
                                npc=npc,
                                stream=NPCSH_STREAM_OUTPUT,
                                temperature=low_temp,
                                **api_kwargs)
    assistant_reply = response['response']
    messages = response['messages']

    # When streaming, render the stream to the terminal and capture the
    # accumulated text as the final reply.
    if NPCSH_STREAM_OUTPUT:
        assistant_reply = print_and_process_stream_with_markdown(response['response'],
                                                                 model=model,
                                                                 provider=provider)
        messages.append({
            "role": "assistant",
            "content": assistant_reply,
        })

    return high_temp_streams, high_temp_samples, assistant_reply, events, environment
385
-
386
def enter_wander_mode(problem,
                      npc,
                      model,
                      provider,
                      environment=None,
                      n_min=50,
                      n_max=200,
                      low_temp=0.5,
                      high_temp=1.9,
                      interruption_likelihood=1,
                      sample_rate=0.4,
                      n_high_temp_streams=5,
                      include_events=True,
                      num_events=3,
                      interactive=False,
                      **api_kwargs):
    """
    Wander mode is an exploratory mode where an LLM is given a task and they begin to wander through space.
    As they wander, they drift in between conscious thought and popcorn-like subconscious thought.
    The former is triggered by external stimuli and when these stimuli come we will capture the recent high entropy
    information from the subconscious popcorn thoughts and then consider them with respect to the initial problem at hand.

    The conscious evaluator will attempt to connect them, thus functionalizing the verse-jumping algorithm
    outlined by Everything Everywhere All at Once.

    Args:
        problem: The problem or question to explore
        npc: The NPC object
        model: LLM model to use
        provider: Provider to use
        environment: Optional description of wandering environment
        n_min, n_max: Min/max word count before switching to high temp
        low_temp, high_temp: Temperature settings for normal/exploratory thinking
        interruption_likelihood: Chance of interrupting a high-temp stream
        sample_rate: Portion of text to sample from high-temp streams
        n_high_temp_streams: Number of high-temperature exploration streams
        include_events: Whether to include random events in the wandering
        num_events: Number of events to generate in each session
        interactive: If True, prompt the user between sessions to continue,
            change problem/environment, or stop; if False, run one session.

    Returns:
        List of session dicts with keys: problem, environment, streams,
        samples, events, insight.
    """
    current_problem = problem
    current_environment = environment
    wandering_history = []

    print(f"\n=== Starting Wander Mode with Problem: '{problem}' ===\n")
    if environment:
        print(f"Environment: {environment}\n")

    while True:
        print(f"\nCurrent exploration: {current_problem}\n")

        # Perform a single wandering session
        high_temp_streams, high_temp_samples, insight, events, env = perform_single_wandering(
            current_problem,
            npc=npc,
            model=model,
            provider=provider,
            environment=current_environment,
            n_min=n_min,
            n_max=n_max,
            low_temp=low_temp,
            high_temp=high_temp,
            interruption_likelihood=interruption_likelihood,
            sample_rate=sample_rate,
            n_high_temp_streams=n_high_temp_streams,
            include_events=include_events,
            num_events=num_events,
            **api_kwargs
        )

        # If an environment was generated during the session, keep it for
        # subsequent sessions.
        if not current_environment and env:
            current_environment = env

        # Save this wandering session
        wandering_history.append({
            "problem": current_problem,
            "environment": current_environment,
            "streams": high_temp_streams,
            "samples": high_temp_samples,
            "events": events,
            "insight": insight
        })
        if interactive:

            # Ask user if they want to continue wandering
            print("\n\n--- Wandering session complete ---")
            print("Options:")
            print("1. Continue wandering with the same problem and environment")
            print("2. Continue wandering with a new related problem")
            print("3. Continue wandering in a new environment")
            print("4. Continue wandering with both new problem and environment")
            print("5. End wandering")

            choice = input("\nEnter your choice (1-5): ").strip()

            if choice == "1":
                # Continue with the same problem and environment
                pass
            elif choice == "2":
                # Continue with a modified problem
                print("\nBased on the insights gained, what new problem would you like to explore?")
                new_problem = input("New problem: ").strip()
                if new_problem:
                    current_problem = new_problem
            elif choice == "3":
                # Continue with a new environment
                print("\nDescribe a new environment for your wandering:")
                new_env = input("New environment: ").strip()
                if new_env:
                    current_environment = new_env
            elif choice == "4":
                # Change both problem and environment
                print("\nBased on the insights gained, what new problem would you like to explore?")
                new_problem = input("New problem: ").strip()
                print("\nDescribe a new environment for your wandering:")
                new_env = input("New environment: ").strip()
                if new_problem:
                    current_problem = new_problem
                if new_env:
                    current_environment = new_env
            else:
                # Any other input (including "5") ends wandering mode.
                print("\n=== Exiting Wander Mode ===\n")
                break
        else:
            # Non-interactive mode: exactly one session.
            break

    # Return the entire wandering history
    return wandering_history
515
-
516
def main():
    """CLI entry point: parse arguments, load the NPC, and enter wander mode."""
    # Example usage
    import argparse
    parser = argparse.ArgumentParser(description="Enter wander mode for chatting with an LLM")
    parser.add_argument("problem", type=str, help="Problem to solve")
    parser.add_argument("--model", default=NPCSH_CHAT_MODEL, help="Model to use")
    parser.add_argument("--provider", default=NPCSH_CHAT_PROVIDER, help="Provider to use")
    parser.add_argument("--environment", type=str, help="Wandering environment description")
    parser.add_argument("--no-events", action="store_true", help="Disable random events")
    parser.add_argument("--num-events", type=int, default=3, help="Number of events per wandering session")
    parser.add_argument("--files", nargs="*", help="Files to load into context")
    # NOTE(review): --stream is a plain string ("true") that is only printed
    # below and never passed to enter_wander_mode — confirm whether it was
    # meant to control streaming output.
    parser.add_argument("--stream", default="true", help="Use streaming mode")
    parser.add_argument("--npc", type=str, default=os.path.expanduser('~/.npcsh/npc_team/sibiji.npc'), help="Path to NPC file")

    args = parser.parse_args()

    npc = NPC(file=args.npc)
    print('npc: ', args.npc)
    print(args.stream)

    # Enter wander mode; files is forwarded via **api_kwargs.
    enter_wander_mode(
        args.problem,
        npc=npc,
        model=args.model,
        provider=args.provider,
        environment=args.environment,
        include_events=not args.no_events,
        num_events=args.num_events,
        files=args.files,
    )

if __name__ == "__main__":
    main()