npcpy 1.0.26__py3-none-any.whl → 1.2.32__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148)
  1. npcpy/__init__.py +0 -7
  2. npcpy/data/audio.py +16 -99
  3. npcpy/data/image.py +43 -42
  4. npcpy/data/load.py +83 -124
  5. npcpy/data/text.py +28 -28
  6. npcpy/data/video.py +8 -32
  7. npcpy/data/web.py +51 -23
  8. npcpy/ft/diff.py +110 -0
  9. npcpy/ft/ge.py +115 -0
  10. npcpy/ft/memory_trainer.py +171 -0
  11. npcpy/ft/model_ensembler.py +357 -0
  12. npcpy/ft/rl.py +360 -0
  13. npcpy/ft/sft.py +248 -0
  14. npcpy/ft/usft.py +128 -0
  15. npcpy/gen/audio_gen.py +24 -0
  16. npcpy/gen/embeddings.py +13 -13
  17. npcpy/gen/image_gen.py +262 -117
  18. npcpy/gen/response.py +615 -415
  19. npcpy/gen/video_gen.py +53 -7
  20. npcpy/llm_funcs.py +1869 -437
  21. npcpy/main.py +1 -1
  22. npcpy/memory/command_history.py +844 -510
  23. npcpy/memory/kg_vis.py +833 -0
  24. npcpy/memory/knowledge_graph.py +892 -1845
  25. npcpy/memory/memory_processor.py +81 -0
  26. npcpy/memory/search.py +188 -90
  27. npcpy/mix/debate.py +192 -3
  28. npcpy/npc_compiler.py +1672 -801
  29. npcpy/npc_sysenv.py +593 -1266
  30. npcpy/serve.py +3120 -0
  31. npcpy/sql/ai_function_tools.py +257 -0
  32. npcpy/sql/database_ai_adapters.py +186 -0
  33. npcpy/sql/database_ai_functions.py +163 -0
  34. npcpy/sql/model_runner.py +19 -19
  35. npcpy/sql/npcsql.py +706 -507
  36. npcpy/sql/sql_model_compiler.py +156 -0
  37. npcpy/tools.py +183 -0
  38. npcpy/work/plan.py +13 -279
  39. npcpy/work/trigger.py +3 -3
  40. npcpy-1.2.32.dist-info/METADATA +803 -0
  41. npcpy-1.2.32.dist-info/RECORD +54 -0
  42. npcpy/data/dataframes.py +0 -171
  43. npcpy/memory/deep_research.py +0 -125
  44. npcpy/memory/sleep.py +0 -557
  45. npcpy/modes/_state.py +0 -78
  46. npcpy/modes/alicanto.py +0 -1075
  47. npcpy/modes/guac.py +0 -785
  48. npcpy/modes/mcp_npcsh.py +0 -822
  49. npcpy/modes/npc.py +0 -213
  50. npcpy/modes/npcsh.py +0 -1158
  51. npcpy/modes/plonk.py +0 -409
  52. npcpy/modes/pti.py +0 -234
  53. npcpy/modes/serve.py +0 -1637
  54. npcpy/modes/spool.py +0 -312
  55. npcpy/modes/wander.py +0 -549
  56. npcpy/modes/yap.py +0 -572
  57. npcpy/npc_team/alicanto.npc +0 -2
  58. npcpy/npc_team/alicanto.png +0 -0
  59. npcpy/npc_team/assembly_lines/test_pipeline.py +0 -181
  60. npcpy/npc_team/corca.npc +0 -13
  61. npcpy/npc_team/foreman.npc +0 -7
  62. npcpy/npc_team/frederic.npc +0 -6
  63. npcpy/npc_team/frederic4.png +0 -0
  64. npcpy/npc_team/guac.png +0 -0
  65. npcpy/npc_team/jinxs/automator.jinx +0 -18
  66. npcpy/npc_team/jinxs/bash_executer.jinx +0 -31
  67. npcpy/npc_team/jinxs/calculator.jinx +0 -11
  68. npcpy/npc_team/jinxs/edit_file.jinx +0 -96
  69. npcpy/npc_team/jinxs/file_chat.jinx +0 -14
  70. npcpy/npc_team/jinxs/gui_controller.jinx +0 -28
  71. npcpy/npc_team/jinxs/image_generation.jinx +0 -29
  72. npcpy/npc_team/jinxs/internet_search.jinx +0 -30
  73. npcpy/npc_team/jinxs/local_search.jinx +0 -152
  74. npcpy/npc_team/jinxs/npcsh_executor.jinx +0 -31
  75. npcpy/npc_team/jinxs/python_executor.jinx +0 -8
  76. npcpy/npc_team/jinxs/screen_cap.jinx +0 -25
  77. npcpy/npc_team/jinxs/sql_executor.jinx +0 -33
  78. npcpy/npc_team/kadiefa.npc +0 -3
  79. npcpy/npc_team/kadiefa.png +0 -0
  80. npcpy/npc_team/npcsh.ctx +0 -9
  81. npcpy/npc_team/npcsh_sibiji.png +0 -0
  82. npcpy/npc_team/plonk.npc +0 -2
  83. npcpy/npc_team/plonk.png +0 -0
  84. npcpy/npc_team/plonkjr.npc +0 -2
  85. npcpy/npc_team/plonkjr.png +0 -0
  86. npcpy/npc_team/sibiji.npc +0 -5
  87. npcpy/npc_team/sibiji.png +0 -0
  88. npcpy/npc_team/spool.png +0 -0
  89. npcpy/npc_team/templates/analytics/celona.npc +0 -0
  90. npcpy/npc_team/templates/hr_support/raone.npc +0 -0
  91. npcpy/npc_team/templates/humanities/eriane.npc +0 -4
  92. npcpy/npc_team/templates/it_support/lineru.npc +0 -0
  93. npcpy/npc_team/templates/marketing/slean.npc +0 -4
  94. npcpy/npc_team/templates/philosophy/maurawa.npc +0 -0
  95. npcpy/npc_team/templates/sales/turnic.npc +0 -4
  96. npcpy/npc_team/templates/software/welxor.npc +0 -0
  97. npcpy/npc_team/yap.png +0 -0
  98. npcpy/routes.py +0 -958
  99. npcpy/work/mcp_helpers.py +0 -357
  100. npcpy/work/mcp_server.py +0 -194
  101. npcpy-1.0.26.data/data/npcpy/npc_team/alicanto.npc +0 -2
  102. npcpy-1.0.26.data/data/npcpy/npc_team/alicanto.png +0 -0
  103. npcpy-1.0.26.data/data/npcpy/npc_team/automator.jinx +0 -18
  104. npcpy-1.0.26.data/data/npcpy/npc_team/bash_executer.jinx +0 -31
  105. npcpy-1.0.26.data/data/npcpy/npc_team/calculator.jinx +0 -11
  106. npcpy-1.0.26.data/data/npcpy/npc_team/celona.npc +0 -0
  107. npcpy-1.0.26.data/data/npcpy/npc_team/corca.npc +0 -13
  108. npcpy-1.0.26.data/data/npcpy/npc_team/edit_file.jinx +0 -96
  109. npcpy-1.0.26.data/data/npcpy/npc_team/eriane.npc +0 -4
  110. npcpy-1.0.26.data/data/npcpy/npc_team/file_chat.jinx +0 -14
  111. npcpy-1.0.26.data/data/npcpy/npc_team/foreman.npc +0 -7
  112. npcpy-1.0.26.data/data/npcpy/npc_team/frederic.npc +0 -6
  113. npcpy-1.0.26.data/data/npcpy/npc_team/frederic4.png +0 -0
  114. npcpy-1.0.26.data/data/npcpy/npc_team/guac.png +0 -0
  115. npcpy-1.0.26.data/data/npcpy/npc_team/gui_controller.jinx +0 -28
  116. npcpy-1.0.26.data/data/npcpy/npc_team/image_generation.jinx +0 -29
  117. npcpy-1.0.26.data/data/npcpy/npc_team/internet_search.jinx +0 -30
  118. npcpy-1.0.26.data/data/npcpy/npc_team/kadiefa.npc +0 -3
  119. npcpy-1.0.26.data/data/npcpy/npc_team/kadiefa.png +0 -0
  120. npcpy-1.0.26.data/data/npcpy/npc_team/lineru.npc +0 -0
  121. npcpy-1.0.26.data/data/npcpy/npc_team/local_search.jinx +0 -152
  122. npcpy-1.0.26.data/data/npcpy/npc_team/maurawa.npc +0 -0
  123. npcpy-1.0.26.data/data/npcpy/npc_team/npcsh.ctx +0 -9
  124. npcpy-1.0.26.data/data/npcpy/npc_team/npcsh_executor.jinx +0 -31
  125. npcpy-1.0.26.data/data/npcpy/npc_team/npcsh_sibiji.png +0 -0
  126. npcpy-1.0.26.data/data/npcpy/npc_team/plonk.npc +0 -2
  127. npcpy-1.0.26.data/data/npcpy/npc_team/plonk.png +0 -0
  128. npcpy-1.0.26.data/data/npcpy/npc_team/plonkjr.npc +0 -2
  129. npcpy-1.0.26.data/data/npcpy/npc_team/plonkjr.png +0 -0
  130. npcpy-1.0.26.data/data/npcpy/npc_team/python_executor.jinx +0 -8
  131. npcpy-1.0.26.data/data/npcpy/npc_team/raone.npc +0 -0
  132. npcpy-1.0.26.data/data/npcpy/npc_team/screen_cap.jinx +0 -25
  133. npcpy-1.0.26.data/data/npcpy/npc_team/sibiji.npc +0 -5
  134. npcpy-1.0.26.data/data/npcpy/npc_team/sibiji.png +0 -0
  135. npcpy-1.0.26.data/data/npcpy/npc_team/slean.npc +0 -4
  136. npcpy-1.0.26.data/data/npcpy/npc_team/spool.png +0 -0
  137. npcpy-1.0.26.data/data/npcpy/npc_team/sql_executor.jinx +0 -33
  138. npcpy-1.0.26.data/data/npcpy/npc_team/test_pipeline.py +0 -181
  139. npcpy-1.0.26.data/data/npcpy/npc_team/turnic.npc +0 -4
  140. npcpy-1.0.26.data/data/npcpy/npc_team/welxor.npc +0 -0
  141. npcpy-1.0.26.data/data/npcpy/npc_team/yap.png +0 -0
  142. npcpy-1.0.26.dist-info/METADATA +0 -827
  143. npcpy-1.0.26.dist-info/RECORD +0 -139
  144. npcpy-1.0.26.dist-info/entry_points.txt +0 -11
  145. /npcpy/{modes → ft}/__init__.py +0 -0
  146. {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/WHEEL +0 -0
  147. {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/licenses/LICENSE +0 -0
  148. {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/top_level.txt +0 -0
npcpy/llm_funcs.py CHANGED
@@ -1,29 +1,21 @@
-import subprocess
-import requests
-import os
+from jinja2 import Environment, FileSystemLoader, Undefined
 import json
 import PIL
-
-import sqlite3
-from datetime import datetime
-from typing import List, Dict, Any, Optional, Union, Generator
-
-
-from jinja2 import Environment, FileSystemLoader, Template, Undefined
-
-import pandas as pd
-import numpy as np
-from sqlalchemy import create_engine
-
+import random
+import subprocess
+from typing import List, Dict, Any, Optional, Union
 from npcpy.npc_sysenv import (
+    print_and_process_stream_with_markdown,
     render_markdown,
     lookup_provider,
     request_user_input,
     get_system_message
 )
 from npcpy.gen.response import get_litellm_response
-from npcpy.gen.image_gen import generate_image, edit_image
-from npcpy.gen.video_gen import generate_video_diffusers
+from npcpy.gen.image_gen import generate_image
+from npcpy.gen.video_gen import generate_video_diffusers, generate_video_veo3
+
+from datetime import datetime

 def gen_image(
     prompt: str,
@@ -32,7 +24,10 @@ def gen_image(
     npc: Any = None,
     height: int = 1024,
     width: int = 1024,
+    n_images: int=1,
     input_images: List[Union[str, bytes, PIL.Image.Image]] = None,
+    save = False,
+    filename = '',
 ):
     """This function generates an image using the specified provider and model.
     Args:
@@ -57,17 +52,24 @@ def gen_image(
         if npc.api_url is not None:
             api_url = npc.api_url

-    image = generate_image(
+    images = generate_image(
         prompt=prompt,
         model=model,
         provider=provider,
         height=height,
         width=width,
         attachments=input_images,
+        n_images=n_images,

     )
-    return image
-
+    if save:
+        if len(filename) == 0 :
+            todays_date = datetime.now().strftime("%Y-%m-%d")
+            filename = 'vixynt_gen'
+        for i, image in enumerate(images):
+
+            image.save(filename+'_'+str(i)+'.png')
+    return images


 def gen_video(
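Illustrative sketch (not part of the diff): based on the updated signature above, the new n_images, save, and filename parameters might be exercised as follows; the model and provider values are placeholders for whatever image backend is configured.

from npcpy.llm_funcs import gen_image

# hypothetical model/provider; any backend accepted by generate_image should work
images = gen_image(
    "a watercolor lighthouse at dusk",
    model="gpt-image-1",        # placeholder model name
    provider="openai",          # placeholder provider
    n_images=2,                 # new: request several candidates at once
    save=True,                  # new: write results to disk
    filename="lighthouse",      # saved as lighthouse_0.png, lighthouse_1.png
)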
@@ -81,34 +83,63 @@ def gen_video(
     num_frames=25,
     height=256,
     width=256,
+    negative_prompt="",
     messages: list = None,
 ):
     """
     Function Description:
-    This function generates a video using the Stable Diffusion API.
+    This function generates a video using either Diffusers or Veo 3 via Gemini API.
     Args:
         prompt (str): The prompt for generating the video.
-        model_id (str): The Hugging Face model ID to use for Stable Diffusion.
+    Keyword Args:
+        model (str): The model to use for generating the video.
+        provider (str): The provider to use for generating the video (gemini for Veo 3).
         device (str): The device to run the model on ('cpu' or 'cuda').
+        negative_prompt (str): What to avoid in the video (Veo 3 only).
     Returns:
-        PIL.Image: The generated image.
+        dict: Response with output path and messages.
     """
-    output_path = generate_video_diffusers(
-        prompt,
-        model,
-        npc=npc,
-        device=device,
-        output_path=output_path,
-        num_inference_steps=num_inference_steps,
-        num_frames=num_frames,
-        height=height,
-        width=width,
-    )
-    if provider == "diffusers":
-        return {"output": "output path at " + output_path, "messages": messages}
-
-
-
+
+    if provider == "gemini":
+
+        try:
+            output_path = generate_video_veo3(
+                prompt=prompt,
+                model=model,
+                negative_prompt=negative_prompt,
+                output_path=output_path,
+            )
+            return {
+                "output": f"High-fidelity video with synchronized audio generated at {output_path}",
+                "messages": messages
+            }
+        except Exception as e:
+            print(f"Veo 3 generation failed: {e}")
+            print("Falling back to diffusers...")
+            provider = "diffusers"
+
+    if provider == "diffusers" or provider is None:
+
+        output_path = generate_video_diffusers(
+            prompt,
+            model,
+            npc=npc,
+            device=device,
+            output_path=output_path,
+            num_inference_steps=num_inference_steps,
+            num_frames=num_frames,
+            height=height,
+            width=width,
+        )
+        return {
+            "output": f"Video generated at {output_path}",
+            "messages": messages
+        }
+
+    return {
+        "output": f"Unsupported provider: {provider}",
+        "messages": messages
+    }


 def get_llm_response(
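Likewise a hedged sketch of the new gen_video branch: with provider="gemini" the call goes through generate_video_veo3 and falls back to diffusers on failure. The model id is a placeholder and a configured Gemini API key is assumed.

from npcpy.llm_funcs import gen_video

result = gen_video(
    "a timelapse of clouds rolling over mountains",
    model="veo-3.0-generate-preview",   # placeholder model id
    provider="gemini",
    negative_prompt="text, watermarks",
    output_path="clouds.mp4",
)
print(result["output"])   # e.g. "High-fidelity video with synchronized audio generated at clouds.mp4"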
@@ -124,6 +155,7 @@ def get_llm_response(
     context=None,
     stream: bool = False,
     attachments: List[str] = None,
+    include_usage: bool = False,
     **kwargs,
 ):
     """This function generates a response using the specified provider and model.
@@ -140,7 +172,6 @@ def get_llm_response(
     Returns:
         Any: The response generated by the specified provider and model.
     """
-    # Determine provider and model from NPC if needed
     if model is not None and provider is not None:
         pass
     elif provider is None and model is not None:
@@ -152,6 +183,14 @@ def get_llm_response(
             model = npc.model
         if npc.api_url is not None:
             api_url = npc.api_url
+    elif team is not None:
+        if team.model is not None:
+            model = team.model
+        if team.provider is not None:
+            provider = team.provider
+        if team.api_url is not None:
+            api_url = team.api_url
+
     else:
         provider = "ollama"
         if images is not None or attachments is not None:
@@ -159,24 +198,33 @@ def get_llm_response(
         else:
             model = "llama3.2"

-    # Set up system message
-    system_message = get_system_message(npc) if npc else "You are a helpful assistant."
-
-    # Set up messages
+    if npc is not None:
+
+        system_message = get_system_message(npc, team)
+    else:
+        system_message = "You are a helpful assistant."
+
+
+    if context is not None:
+        context_str = f'User Provided Context: {context}'
+    else:
+        context_str = ''
+
     if messages is None or len(messages) == 0:
         messages = [{"role": "system", "content": system_message}]
         if prompt:
-            messages.append({"role": "user", "content": prompt})
+            messages.append({"role": "user", "content": prompt+context_str})
     elif prompt and messages[-1]["role"] == "user":
-        # If the last message is from user, append to it
+
         if isinstance(messages[-1]["content"], str):
-            messages[-1]["content"] += "\n" + prompt
+            messages[-1]["content"] += "\n" + prompt+context_str
     elif prompt:
-        # Add a new user message
-        messages.append({"role": "user", "content": prompt})
-
+        messages.append({"role": "user",
+                         "content": prompt + context_str})
+    #import pdb
+    #pdb.set_trace()
     response = get_litellm_response(
-        prompt=prompt,
+        prompt + context_str,
         messages=messages,
         model=model,
         provider=provider,
@@ -185,6 +233,7 @@ def get_llm_response(
         images=images,
         attachments=attachments,
         stream=stream,
+        include_usage=include_usage,
         **kwargs,
     )
     return response
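A minimal sketch of calling the reworked get_llm_response directly, assuming a local ollama install; note how context is appended to the prompt as 'User Provided Context: ...' and how include_usage is forwarded to get_litellm_response.

from npcpy.llm_funcs import get_llm_response

resp = get_llm_response(
    "Summarize the difference between TCP and UDP in two sentences.",
    model="llama3.2",       # matches the library's fallback text model
    provider="ollama",
    context="Audience: first-year networking students",
    include_usage=True,     # new flag passed through to get_litellm_response
)
print(resp["response"])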
@@ -221,7 +270,7 @@ def execute_llm_command(
     attempt = 0
     subcommands = []

-    # Create context from retrieved documents
+
     context = ""
     while attempt < max_attempts:
         prompt = f"""
@@ -264,7 +313,7 @@ def execute_llm_command(
         """

         messages.append({"role": "user", "content": prompt})
-        # print(messages, stream)
+
         response = get_llm_response(
             prompt,
             model=model,
@@ -330,20 +379,20 @@ def execute_llm_command(
         "messages": messages,
         "output": "Max attempts reached. Unable to execute the command successfully.",
     }
-
 def handle_jinx_call(
     command: str,
     jinx_name: str,
     model: str = None,
     provider: str = None,
-    api_url: str = None,
-    api_key: str = None,
     messages: List[Dict[str, str]] = None,
     npc: Any = None,
+    team: Any = None,
     stream=False,
     n_attempts=3,
     attempt=0,
     context=None,
+    extra_globals=None, # ADD THIS
+    **kwargs
 ) -> Union[str, Dict[str, Any]]:
     """This function handles a jinx call.
     Args:
@@ -359,35 +408,91 @@ def handle_jinx_call(
         the jinx call.


-    if npc is None:
+    if npc is None and team is None:
         return f"No jinxs are available. "
     else:
-        if jinx_name not in npc.jinxs_dict:
-            print("not available")
-            print(f"jinx '{jinx_name}' not found in NPC's jinxs_dict.")
-            return f"jinx '{jinx_name}' not found."
+
+
+
+        if jinx_name not in npc.jinxs_dict and jinx_name not in team.jinxs_dict:
+            print(f"Jinx {jinx_name} not available")
+            if attempt < n_attempts:
+                print(f"attempt {attempt+1} to generate jinx name failed, trying again")
+                return check_llm_command(
+                    f'''
+                    In the previous attempt, the jinx name was: {jinx_name}.
+
+                    That jinx was not available, only select those that are available.
+
+                    If there are no available jinxs choose an alternative action. Do not invoke the jinx action.
+
+
+                    Here was the original command: BEGIN ORIGINAL COMMAND
+                    '''+ command +' END ORIGINAL COMMAND',
+                    model=model,
+                    provider=provider,
+                    messages=messages,
+                    npc=npc,
+                    team=team,
+                    stream=stream,
+                    context=context
+                )
+            return {
+                "output": f"Incorrect jinx name supplied and n_attempts reached.",
+                "messages": messages,
+            }
+
+
+
+
         elif jinx_name in npc.jinxs_dict:
             jinx = npc.jinxs_dict[jinx_name]
+        elif jinx_name in team.jinxs_dict:
+            jinx = team.jinxs_dict[jinx_name]
+
         render_markdown(f"jinx found: {jinx.jinx_name}")
         jinja_env = Environment(loader=FileSystemLoader("."), undefined=Undefined)
+        example_format = {}
+        for inp in jinx.inputs:
+            if isinstance(inp, str):
+                example_format[inp] = f"<value for {inp}>"
+            elif isinstance(inp, dict):
+                key = list(inp.keys())[0]
+                example_format[key] = f"<value for {key}>"
+
+        json_format_str = json.dumps(example_format, indent=4)
+

         prompt = f"""
         The user wants to use the jinx '{jinx_name}' with the following request:
-        '{command}'
-        Here is the jinx file:
+        '{command}'"""
+
+
+        prompt += f'Here were the previous 5 messages in the conversation: {messages[-5:]}'
+
+
+        prompt+=f"""Here is the jinx file:
         ```
         {jinx.to_dict()}
         ```

         Please determine the required inputs for the jinx as a JSON object.

-
         They must be exactly as they are named in the jinx.
         For example, if the jinx has three inputs, you should respond with a list of three values that will pass for those args.

+        If the jinx requires a file path, you must include an absolute path to the file including an extension.
+        If the jinx requires code to be generated, you must generate it exactly according to the instructions.
+        Your inputs must satisfy the jinx's requirements.
+
+
+
+
         Return only the JSON object without any markdown formatting.

-        """
+        The format of the JSON object is:
+
+        """+"{"+json_format_str+"}"

         if npc and hasattr(npc, "shared_context"):
             if npc.shared_context.get("dataframes"):
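To make the example_format construction above concrete, here is a small standalone sketch with a hypothetical jinx whose inputs mix plain strings and keyword dicts; the loop mirrors the added code verbatim.

import json

inputs = ["file_path", {"language": "python"}]   # hypothetical jinx inputs

example_format = {}
for inp in inputs:
    if isinstance(inp, str):
        example_format[inp] = f"<value for {inp}>"
    elif isinstance(inp, dict):
        key = list(inp.keys())[0]
        example_format[key] = f"<value for {key}>"

print(json.dumps(example_format, indent=4))
# {
#     "file_path": "<value for file_path>",
#     "language": "<value for language>"
# }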
@@ -395,17 +500,17 @@ def handle_jinx_call(
                 for df_name in npc.shared_context["dataframes"].keys():
                     context_info += f"- {df_name}\n"
                 prompt += f"""Here is contextual info that may affect your choice: {context_info}
-    """
+                 """
         response = get_llm_response(
             prompt,
             format="json",
             model=model,
             provider=provider,
-            api_url=api_url,
-            api_key=api_key,
+            messages=messages[-10:],
             npc=npc,
             context=context
         )
+
         try:
             response_text = response.get("response", "{}")
             if isinstance(response_text, str):
@@ -413,25 +518,25 @@ def handle_jinx_call(
                     response_text.replace("```json", "").replace("```", "").strip()
                 )

-            # Parse the cleaned response
+
             if isinstance(response_text, dict):
                 input_values = response_text
             else:
                 input_values = json.loads(response_text)
-            # print(f"Extracted inputs: {input_values}")
+
         except json.JSONDecodeError as e:
             print(f"Error decoding input values: {e}. Raw response: {response}")
             return f"Error extracting inputs for jinx '{jinx_name}'"
-        # Input validation (example):
+
         required_inputs = jinx.inputs
         missing_inputs = []
         for inp in required_inputs:
             if not isinstance(inp, dict):
-                # dicts contain the keywords so its fine if theyre missing from the inputs.
+
                 if inp not in input_values or input_values[inp] == "":
                     missing_inputs.append(inp)
         if len(missing_inputs) > 0:
-            # print(f"Missing required inputs for jinx '{jinx_name}': {missing_inputs}")
+
             if attempt < n_attempts:
                 print(f"attempt {attempt+1} to generate inputs failed, trying again")
                 print("missing inputs", missing_inputs)
@@ -444,8 +549,7 @@ def handle_jinx_call(
                     provider=provider,
                     messages=messages,
                     npc=npc,
-                    api_url=api_url,
-                    api_key=api_key,
+                    team=team,
                     stream=stream,
                     attempt=attempt + 1,
                     n_attempts=n_attempts,
@@ -456,10 +560,6 @@ def handle_jinx_call(
                 "messages": messages,
             }

-        # try:
-        print(npc.name+'>',end='')
-
-        print("Executing jinx with input values:", end='')

         render_markdown( "\n".join(['\n - ' + str(key) + ': ' +str(val) for key, val in input_values.items()]))

@@ -468,15 +568,14 @@ def handle_jinx_call(
                 input_values,
                 jinja_env,
                 npc=npc,
-
                 messages=messages,
+                extra_globals=extra_globals # ADD THIS
+
             )
-            if 'llm_response' in jinx_output and 'messages' in jinx_output:
-                if len(jinx_output['llm_response'])>0:
-                    messages = jinx_output['messages']
         except Exception as e:
             print(f"An error occurred while executing the jinx: {e}")
             print(f"trying again, attempt {attempt+1}")
+            print('command', command)
             if attempt < n_attempts:
                 jinx_output = handle_jinx_call(
                     command,
@@ -485,59 +584,33 @@ def handle_jinx_call(
                     provider=provider,
                     messages=messages,
                     npc=npc,
-                    api_url=api_url,
-                    api_key=api_key,
+                    team=team,
                     stream=stream,
                     attempt=attempt + 1,
                     n_attempts=n_attempts,
                     context=f""" \n \n \n "jinx failed: {e} \n \n \n here was the previous attempt: {input_values}""",
                 )
-            else:
-                user_input = input(
-                    "the jinx execution has failed after three tries, can you add more context to help or would you like to run again?"
-                )
-                return handle_jinx_call(
-                    command + " " + user_input,
-                    jinx_name,
-                    model=model,
-                    provider=provider,
-                    messages=messages,
-                    npc=npc,
-                    api_url=api_url,
-                    api_key=api_key,
-                    stream=stream,
-                    attempt=attempt + 1,
-                    n_attempts=n_attempts,
-                    context=context,
-                )
-        # process the jinx call
-        #print(messages)
-        if not stream and messages[-1]['role'] != 'assistant':
-            # if the jinx has already added a message to the output from a final prompt we dont want to double that
-
-            render_markdown(f""" ## jinx OUTPUT FROM CALLING {jinx_name} \n \n output:{jinx_output['output']}""" )
-
-
+        if not stream and len(messages) > 0 :
+            render_markdown(f""" ## jinx OUTPUT FROM CALLING {jinx_name} \n \n output:{jinx_output['output']}""" )
             response = get_llm_response(f"""
             The user had the following request: {command}.
-            Here were the jinx outputs from calling {jinx_name}: {jinx_output}
+            Here were the jinx outputs from calling {jinx_name}: {jinx_output.get('output', '')}

             Given the jinx outputs and the user request, please format a simple answer that
             provides the answer without requiring the user to carry out any further steps.
             """,
             model=model,
             provider=provider,
-            api_url=api_url,
-            api_key=api_key,
             npc=npc,
-            messages=messages,
+            messages=messages[-10:],
             context=context,
             stream=stream,
             )
             messages = response['messages']
             response = response.get("response", {})
             return {'messages':messages, 'output':response}
-    return {'messages': messages, 'output': jinx_output}
+
+    return {'messages': messages, 'output': jinx_output['output']}


 def handle_request_input(
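One behavior of the reworked handle_jinx_call that can be checked without any model configured: the new guard returns early when neither an NPC nor a team is supplied. A minimal sketch:

from npcpy.llm_funcs import handle_jinx_call

result = handle_jinx_call(
    "search the web for today's weather",
    "internet_search",        # jinx name; with a real NPC/team it must exist in jinxs_dict
    model="llama3.2",         # placeholder, unused on this path
    provider="ollama",        # placeholder, unused on this path
    npc=None,
    team=None,
)
print(result)   # -> "No jinxs are available. "

With a real NPC or team, the jinx is looked up in npc.jinxs_dict or team.jinxs_dict and its inputs are filled in by the LLM as shown in the hunks above.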
@@ -581,403 +654,1762 @@ def handle_request_input(
581
654
  return user_input
582
655
 
583
656
 
657
+
658
+
659
+
660
+ def jinx_handler(command, extracted_data, **kwargs):
661
+ return handle_jinx_call(
662
+ command,
663
+ extracted_data.get('jinx_name'),
664
+ model=kwargs.get('model'),
665
+ provider=kwargs.get('provider'),
666
+ api_url=kwargs.get('api_url'),
667
+ api_key=kwargs.get('api_key'),
668
+ messages=kwargs.get('messages'),
669
+ npc=kwargs.get('npc'),
670
+ team=kwargs.get('team'),
671
+ stream=kwargs.get('stream'),
672
+ context=kwargs.get('context'),
673
+ extra_globals=kwargs.get('extra_globals') # ADD THIS
674
+ )
675
+
676
+ def answer_handler(command, extracted_data, **kwargs):
677
+
678
+ response = get_llm_response(
679
+ f"""
680
+
681
+ Here is the user question: {command}
682
+
683
+
684
+ Do not needlessly reference the user's files or provided context.
685
+
686
+ Simply provide the answer to the user's question. Avoid
687
+ appearing zany or unnecessarily forthcoming about the fact that you have received such information. You know it
688
+ and the user knows it. there is no need to constantly mention the facts that are aware to both.
689
+
690
+ Your previous commnets on this topic: {extracted_data.get('explanation', '')}
691
+
692
+ """,
693
+ model=kwargs.get('model'),
694
+ provider=kwargs.get('provider'),
695
+ api_url=kwargs.get('api_url'),
696
+ api_key=kwargs.get('api_key'),
697
+ messages=kwargs.get('messages',)[-10:],
698
+ npc=kwargs.get('npc'),
699
+ team=kwargs.get('team'),
700
+ stream=kwargs.get('stream', False),
701
+ images=kwargs.get('images'),
702
+ context=kwargs.get('context')
703
+ )
704
+
705
+ return response
706
+
584
707
  def check_llm_command(
585
708
  command: str,
586
- model: str = None,
587
- provider: str = None,
709
+ model: str = None,
710
+ provider: str = None,
588
711
  api_url: str = None,
589
712
  api_key: str = None,
590
713
  npc: Any = None,
591
714
  team: Any = None,
592
715
  messages: List[Dict[str, str]] = None,
593
- jinxs: List[Dict[str, str]] = None,
594
- tools = None,
595
- tool_map: Dict[str, str] = None,
596
716
  images: list = None,
597
717
  stream=False,
598
- context=None,
599
- human_in_the_loop=False,
600
- shell = False,
718
+ context=None,
719
+ actions: Dict[str, Dict] = None,
720
+ extra_globals=None,
601
721
  ):
602
- """This function checks an LLM command.
603
- Args:
604
- command (str): The command to check.
605
- Keyword Args:
606
- model (str): The model to use for checking the command.
607
- provider (str): The provider to use for checking the command.
608
- npc (Any): The NPC object.
609
- messages (List[Dict[str, str]]): The message history.
610
- stream (bool): Whether to stream the response.
611
- Returns:
612
- Any: The result of checking the LLM command or a generator if stream=True.
613
- """
722
+ """This function checks an LLM command and returns sequences of steps with parallel actions."""
614
723
  if messages is None:
615
724
  messages = []
725
+
726
+ if actions is None:
727
+ actions = DEFAULT_ACTION_SPACE.copy()
728
+ exec = execute_multi_step_plan(
729
+ command=command,
730
+ model=model,
731
+ provider=provider,
732
+ api_url=api_url,
733
+ api_key=api_key,
734
+ npc=npc,
735
+ team=team,
736
+ messages=messages,
737
+ images=images,
738
+ stream=stream,
739
+ context=context,
740
+ actions=actions,
741
+ extra_globals=extra_globals,
742
+
743
+ )
744
+ return exec
745
+
746
+
747
+
748
+
749
+ def jinx_context_filler(npc, team):
750
+ """
751
+ Generate context information about available jinxs for NPCs and teams.
616
752
 
617
- prompt = f"""
618
- A user submitted this query: {command}
753
+ Args:
754
+ npc: The NPC object
755
+ team: The team object
756
+
757
+ Returns:
758
+ str: Formatted string containing jinx information and usage guidelines
619
759
  """
760
+
761
+ npc_jinxs = "\nNPC Jinxs:\n" + (
762
+ "\n".join(
763
+ f"- {name}: {jinx.description}"
764
+ for name, jinx in getattr(npc, "jinxs_dict", {}).items()
765
+ )
766
+ if getattr(npc, "jinxs_dict", None)
767
+ else ''
768
+ )
620
769
 
621
- if tools:
622
- result = get_llm_response(
623
- prompt,
624
- model=model,
625
- provider=provider,
626
- api_url=api_url,
627
- api_key=api_key,
628
- npc=npc,
629
- messages=[],
630
- tools=tools,
631
- tool_map=tool_map,
632
- context=None,
633
- stream=stream,
770
+
771
+ team_jinxs = "\n\nTeam Jinxs:\n" + (
772
+ "\n".join(
773
+ f"- {name}: {jinx.description}"
774
+ for name, jinx in getattr(team, "jinxs_dict", {}).items()
634
775
  )
635
- return {
636
- 'messages': result.get('messages', messages),
637
- 'output': result.get('response', '')
638
- }
639
-
640
- prompt += f"""
641
- Determine the nature of the user's request:
776
+ if team and getattr(team, "jinxs_dict", None)
777
+ else ''
778
+ )
779
+
780
+
781
+ usage_guidelines = """
782
+ Use jinxs when appropriate. For example:
642
783
 
643
- 1. Should a jinx be invoked to fulfill the request? A jinx is a jinja-template execution script.
784
+ - If you are asked about something up-to-date or dynamic (e.g., latest exchange rates)
785
+ - If the user asks you to read or edit a file
786
+ - If the user asks for code that should be executed
787
+ - If the user requests to open, search, download or scrape, which involve actual system or web actions
788
+ - If they request a screenshot, audio, or image manipulation
789
+ - Situations requiring file parsing (e.g., CSV or JSON loading)
790
+ - Scripted workflows or pipelines, e.g., generate a chart, fetch data, summarize from source, etc.
644
791
 
645
- 2. Is it a general question that requires an informative answer or a highly specific question that
646
- requires information on the web?
792
+ You MUST use a jinx if the request directly refers to a tool the AI cannot handle directly (e.g., 'run', 'open', 'search', etc).
647
793
 
648
- 3. Would this question be best answered by an alternative NPC?
794
+ You must NOT use a jinx if:
795
+ - The user asks you to write them a story (unless they specify saving it to a file)
796
+ - To answer simple questions
797
+ - To determine general information that does not require up-to-date details
798
+ - To answer questions that can be answered with existing knowledge
649
799
 
650
- 4. Is it a complex request that actually requires more than one
651
- jinx to be called, perhaps in a sequence?
652
- Sequences should only be used for more than one consecutive jinx call. Do not invoke sequences for single jinx calls.
653
- """
654
- if human_in_the_loop and shell:
800
+ To invoke a jinx, return the action 'invoke_jinx' along with the jinx specific name.
801
+ An example for a jinx-specific return would be:
802
+ {
803
+ "action": "invoke_jinx",
804
+ "jinx_name": "file_reader",
805
+ "explanation": "Read the contents of <full_filename_path_from_user_request> and <detailed explanation of how to accomplish the problem outlined in the request>."
806
+ }
655
807
 
656
- prompt+= f"""
657
- 5. is there a high amount of ambiguity in the user's request? If so, ask the user for more information.
658
-
659
- in your response, consider as well the following guidelines for whether to request input:
660
-
661
- Here are some examples of ambiguous and non-ambiguous requests:
808
+ Do not use the jinx names as the action keys. You must use the action 'invoke_jinx' to invoke a jinx!
809
+ Do not invent jinx names. Use only those provided.
662
810
 
663
- For exmaple,
664
- "tell me a joke about my favorite city" is ambiguous because the user
665
- did not specify the city. In this case, ask the user for more information.
811
+ Here are the currently available jinxs:"""
666
812
 
667
- "tell me a joke about the weather" is not ambiguous because the user
668
- specified the topic of the joke. In this case, you can answer the question
669
- without asking for more information.
670
813
 
671
- "take a screenshot of my screen" is not ambiguous because the user
672
- specified the action they want to take. In this case, you can carry out the action without asking for more information.
814
+
673
815
 
674
- ambiguous: "whats happening tonight in my city" is ambiguous because the user
675
- did not specify the city. In this case, ask the user for more information.
676
- not ambiguous: "whats happening tonight in new york" is not ambiguous because the user
677
- specified the city. In this case, you can answer the question without asking for more information.
678
-
679
- Please limit requests for input to only the most ambiguous requests to ensure the optimal user experience.
816
+ if not npc_jinxs and not team_jinxs:
817
+ return "No jinxs are available."
818
+ else:
819
+ output = usage_guidelines
820
+ if npc_jinxs:
821
+ output += npc_jinxs
822
+ if team_jinxs:
823
+ output += team_jinxs
824
+ return output
680
825
 
681
- """
682
-
826
+
827
+
828
+ DEFAULT_ACTION_SPACE = {
829
+ "invoke_jinx": {
830
+ "description": "Invoke a jinx (jinja-template execution script)",
831
+ "handler": jinx_handler,
832
+ "context": lambda npc=None, team=None, **_: jinx_context_filler(npc, team),
833
+ "output_keys": {
834
+ "jinx_name": {
835
+ "description": "The name of the jinx to invoke. must be from the provided list verbatim",
836
+ "type": "string"
837
+ }
838
+ }
839
+ },
840
+ "answer": {
841
+ "description": "Provide a direct informative answer",
842
+ "handler": answer_handler,
843
+ "context": """For general questions, use existing knowledge. For most queries a single action to answer a question will be sufficient.
844
+ e.g.
845
+
846
+ {
847
+ "actions": [
848
+ {
849
+ "action": "answer",
850
+ "explanation": "Provide a direct answer to the user's question based on existing knowledge."
851
+
852
+
853
+ }
854
+ ]
855
+ }
856
+
857
+ This should be preferred for more than half of requests. Do not overcomplicate the process.
858
+ Starting dialogue is usually more useful than using tools willynilly. Think carefully about
859
+ the user's intent and use this action as an opportunity to clear up potential ambiguities before
860
+ proceeding to more complex actions.
861
+ For example, if a user requests to write a story,
862
+ it is better to respond with 'answer' and to write them a story rather than to invoke some tool.
863
+ Indeed, it might be even better to respond and to request clarification about what other elements they would liek to specify with the story.
864
+ Natural language is highly ambiguous and it is important to establish common ground and priorities before proceeding to more complex actions.
865
+
866
+ """,
867
+ "output_keys": {}
868
+ }
869
+ }
870
+ def plan_multi_step_actions(
871
+ command: str,
872
+ actions: Dict[str, Dict],
873
+ npc: Any = None,
874
+ team: Any = None,
875
+ model: str = None,
876
+ provider: str = None,
877
+ api_url: str = None,
878
+ api_key: str = None,
879
+ context: str = None,
880
+ messages: List[Dict[str, str]] = None,
881
+
683
882
 
684
- if npc is not None:
685
- if npc.shared_context:
686
- prompt += f"""
687
- Relevant shared context for the npc:
688
- {npc.shared_context}
689
- """
883
+ ):
884
+ """
885
+ Analyzes the user's command and creates a complete, sequential plan of actions
886
+ by dynamically building a prompt from the provided action space.
887
+ """
888
+
889
+
890
+ prompt = f"""
891
+ Analyze the user's request: "{command}"
690
892
 
691
- if npc.jinxs_dict is None :
692
- prompt += "No NPC jinxs available. Do not invoke jinxs."
693
- else:
694
- prompt += "Available jinxs: \n"
695
- jinxs_set = {}
696
- if npc.jinxs_dict is not None:
697
- for jinx_name, jinx in npc.jinxs_dict.items():
698
- if jinx_name not in jinxs_set:
699
- jinxs_set[jinx_name] = jinx.description
700
- for jinx_name, jinx_description in jinxs_set.items():
701
- prompt += f"""
702
- {jinx_name} : {jinx_description} \n
703
-
704
- """
705
- #import pdb
706
- #pdb.set_trace()
707
- if jinxs is not None:
708
- for jinx in jinxs:
709
- prompt += f"""
710
- Here is a jinx that may be relevant to the user's request:
711
- {jinx}
712
- """
713
-
714
- if team is None:
715
- prompt += "No NPCs available for alternative answers."
716
- else:
717
- # team.npcs is a dict , need to check if it is empty
718
- #if team.npcs is None or len(team.npcs) == 0:
719
- # prompt += "No NPCs available for alternative answers."
720
- #else:
721
- # prompt += f"""
722
- # Available NPCs for alternative answers:#
723
- #
724
- # {team.npcs}
725
- # """
726
- if team.context:
727
- prompt += f"""
728
- Relevant shared context for the team:
729
- {team.context}
730
- """
731
- action_space = ["invoke_jinx",
732
- "answer_question",
733
- ]
893
+ Your task is to create a complete, sequential JSON plan to fulfill the entire request.
894
+ Use the following context about available actions and tools to construct the plan.
895
+
896
+ """
897
+ if messages == None:
898
+ messages = list()
899
+ for action_name, action_info in actions.items():
900
+ ctx = action_info.get("context")
901
+ if callable(ctx):
902
+ try:
903
+
904
+ ctx = ctx(npc=npc, team=team)
905
+ except Exception as e:
906
+ print( actions)
907
+ print(f"[WARN] Failed to render context for action '{action_name}': {e}")
908
+ ctx = None
909
+
910
+ if ctx:
911
+ prompt += f"\n--- Context for action '{action_name}' ---\n{ctx}\n"
912
+ if len(messages) >0:
913
+ prompt += f'Here were the previous 5 messages in the conversation: {messages[-5:]}'
734
914
 
735
- if human_in_the_loop:
736
- action_space.append("request_input")
737
915
  prompt += f"""
738
- In considering how to answer this, consider:
916
+ --- Instructions ---
917
+ Based on the user's request and the context provided above, create a plan.
739
918
 
740
- - Whether a jinx should be used.
919
+ The plan must be a JSON object with a single key, "actions". Each action must include:
920
+ - "action": The name of the action to take.
921
+ - "explanation": A clear description of the goal for this specific step.
922
+
923
+ An Example Plan might look like this depending on the available actions:
924
+ """ + """
925
+ {
926
+ "actions": [
927
+ {
928
+ "action": "<action_name_1>",
929
+ "<action_specific_key_1..>": "<action_specific_value_1>",
930
+ <...> : ...,
931
+ "explanation": "Identify the current CEO of Microsoft."
932
+ },
933
+ {
934
+ "action": "<action_name_2>",
935
+ "<action_specific_key_1..>": "<action_specific_value_1>",
936
+ "explanation": "Find the <action-specific> information identified in the previous step."
937
+ }
938
+ ]
939
+ }
741
940
 
742
- Excluding time-sensitive phenomena or ones that require external data inputs /information,
743
- most general questions can be answered without any extra jinxs.
941
+ The plans should mostly be 1-2 actions and usually never more than 3 actions at a time.
942
+ Interactivity is important, unless a user specifies a usage of a specific action, it is generally best to
943
+ assume just to respond in the simplest way possible rather than trying to assume certain actions have been requested.
744
944
 
745
-
945
+ """+f"""
946
+ Now, create the plan for the user's query: "{command}"
947
+ Respond ONLY with the plan.
948
+ """
746
949
 
747
- Only use jinxs when it is obvious that the answer needs to be as up-to-date as possible. For example,
748
- a question about where mount everest is does not necessarily need to be answered by a jinx call or an agent pass.
950
+ action_response = get_llm_response(
951
+ prompt,
952
+ model=model,
953
+ provider=provider,
954
+ api_url=api_url,
955
+ api_key=api_key,
956
+ npc=npc,
957
+ team=team,
958
+ format="json",
959
+ messages=[],
960
+ context=context,
961
+ )
962
+ response_content = action_response.get("response", {})
963
+
964
+
965
+ return response_content.get("actions", [])
966
+
967
+ def execute_multi_step_plan(
968
+ command: str,
969
+ model: str = None,
970
+ provider: str = None,
971
+ api_url: str = None,
972
+ api_key: str = None,
973
+ npc: Any = None,
974
+ team: Any = None,
975
+ messages: List[Dict[str, str]] = None,
976
+ images: list = None,
977
+ stream=False,
978
+ context=None,
979
+
980
+ actions: Dict[str, Dict] = None,
981
+ **kwargs,
982
+ ):
983
+ """
984
+ Creates a comprehensive plan and executes it sequentially, passing context
985
+ between steps for adaptive behavior.
986
+ """
987
+
988
+
989
+ planned_actions = plan_multi_step_actions(
990
+ command=command,
991
+ actions=actions,
992
+ npc=npc,
993
+ model=model,
994
+ provider=provider,
995
+ api_url=api_url,
996
+ api_key=api_key,
997
+ context=context,
998
+ messages=messages,
999
+ team=team,
1000
+
1001
+
1002
+ )
1003
+
1004
+ if not planned_actions:
1005
+ print("Could not generate a multi-step plan. Answering directly.")
1006
+ result = answer_handler(command=command,
1007
+ extracted_data={"explanation": "Answering the user's query directly."},
1008
+ model=model,
1009
+ provider=provider,
1010
+ api_url=api_url,
1011
+ api_key=api_key,
1012
+ messages=messages,
1013
+ npc=npc,
1014
+ stream=stream,
1015
+ team = team,
1016
+ images=images,
1017
+ context=context
1018
+ )
1019
+ return {"messages": result.get('messages',
1020
+ messages),
1021
+ "output": result.get('response')}
1022
+
1023
+
1024
+ step_outputs = []
1025
+ current_messages = messages.copy()
1026
+ render_markdown(f"### Plan for Command: {command[:100]}")
1027
+ for action in planned_actions:
1028
+ step_info = json.dumps({'action': action.get('action', ''),
1029
+ 'explanation': str(action.get('explanation',''))[0:10]+'...'})
1030
+ render_markdown(f'- {step_info}')
749
1031
 
750
- Similarly, if a user asks to explain the plot of the aeneid, this can be answered without a jinx call or agent pass.
751
1032
 
752
- If a user were to ask for the current weather in tokyo or the current price of bitcoin or who the mayor of a city is,
753
- then a jinx call may be appropriate.
1033
+
1034
+ for i, action_data in enumerate(planned_actions):
1035
+ render_markdown(f"--- Executing Step {i + 1} of {len(planned_actions)} ---")
1036
+ action_name = action_data["action"]
1037
+
1038
+
1039
+ try:
1040
+ handler = actions[action_name]["handler"]
754
1041
 
755
- jinxs are valuable but their use should be limited and purposeful to
756
- ensure the best user experience.
757
1042
 
758
- If a user asks you to search or to take a screenshot or to open a program or to write a program most likely it is
759
- appropriate to use a jinx.
760
- Respond with a JSON object containing:
761
- - "action": one of {action_space}
762
- - "jinx_name": : if action is "invoke_jinx": the name of the jinx to use.
763
- else if action is "", a list of jinx names to use.
764
- - "explanation": a brief explanation of why you chose this action.
1043
+
1044
+ step_context = f"Context from previous steps: {json.dumps(step_outputs)}" if step_outputs else ""
1045
+ render_markdown(
1046
+ f"- Executing Action: {action_name} \n- Explanation: {action_data.get('explanation')}\n "
1047
+ )
1048
+
1049
+ result = handler(
1050
+ command=command,
1051
+ extracted_data=action_data,
1052
+ model=model,
1053
+ provider=provider,
1054
+ api_url=api_url,
1055
+ api_key=api_key,
1056
+ messages=current_messages,
1057
+ npc=npc,
1058
+ team=team,
1059
+ stream=stream,
1060
+ context=context+step_context,
1061
+ images=images,
1062
+ extra_globals=kwargs.get('extra_globals') # ADD THIS
1063
+ )
1064
+ except KeyError as e:
1065
+
1066
+ return execute_multi_step_plan(
1067
+ command=command + 'This error occurred: '+str(e)+'\n Do not make the same mistake again. If you are intending to use a jinx, you must `invoke_jinx`. If you just need to answer, choose `answer`.',
1068
+ model= model,
1069
+ provider = provider,
1070
+ api_url = api_url,
1071
+ api_key = api_key,
1072
+ npc = npc,
1073
+ team = team,
1074
+ messages = messages,
1075
+ images = images,
1076
+ stream=stream,
1077
+ context=context,
1078
+ actions=actions,
1079
+
1080
+ **kwargs,
1081
+ )
765
1082
 
1083
+ action_output = result.get('output') or result.get('response')
1084
+
1085
+ if stream and len(planned_actions) > 1:
1086
+
1087
+ action_output = print_and_process_stream_with_markdown(action_output, model, provider)
1088
+ elif len(planned_actions) == 1:
1089
+
1090
+
1091
+ return {"messages": result.get('messages',
1092
+ current_messages),
1093
+ "output": action_output}
1094
+ step_outputs.append(action_output)
1095
+ current_messages = result.get('messages',
1096
+ current_messages)
1097
+
1098
+
1099
+
1100
+ final_output = compile_sequence_results(
1101
+ original_command=command,
1102
+ outputs=step_outputs,
1103
+ model=model,
1104
+ provider=provider,
1105
+ npc=npc,
1106
+ stream=stream,
1107
+ context=context,
1108
+ **kwargs
1109
+ )
766
1110
 
1111
+ return {"messages": current_messages,
1112
+ "output": final_output}
1113
+
1114
+ def compile_sequence_results(original_command: str,
1115
+ outputs: List[str],
1116
+ model: str = None,
1117
+ provider: str = None,
1118
+ npc: Any = None,
1119
+ team: Any = None,
1120
+ context: str = None,
1121
+ stream: bool = False,
1122
+ **kwargs) -> str:
1123
+ """
1124
+ Synthesizes a list of outputs from sequential steps into a single,
1125
+ coherent final response, framed as an answer to the original query.
1126
+ """
1127
+ if not outputs:
1128
+ return "The process completed, but produced no output."
1129
+ synthesis_prompt = f"""
1130
+ A user asked the following question:
1131
+ "{original_command}"
767
1132
 
768
- Return only the JSON object. Do not include any additional text.
1133
+ To answer this, the following information was gathered in sequential steps:
1134
+ {json.dumps(outputs, indent=2)}
769
1135
 
770
- The format of the JSON object is:
771
- {{
772
- "action": "invoke_jinx" | "answer_question" | "request_input",
773
- "jinx_name": "<jinx_name(s)_if_applicable>",
774
- "explanation": "<your_explanation>",
1136
+ Based *directly on the user's original question* and the information gathered, please
1137
+ provide a single, final, and coherent response. Answer the user's question directly.
1138
+ Do not mention the steps taken.
775
1139
 
776
- }}
1140
+ Final Synthesized Response that addresses the user in a polite and informative manner:
1141
+ """
1142
+
1143
+ response = get_llm_response(
1144
+ synthesis_prompt,
1145
+ model=model,
1146
+ provider=provider,
1147
+ npc=npc,
1148
+ team=team,
1149
+ messages=[],
1150
+ stream=stream,
1151
+ context=context,
1152
+ **kwargs
1153
+ )
1154
+ synthesized = response.get("response", "")
1155
+ if synthesized:
1156
+ return synthesized
1157
+ return '\n'.join(outputs)
777
1158
 
778
- If you execute a sequence, ensure that you have a specified NPC for each jinx use.
779
- question answering is not a jinx use.
780
- "invoke_jinx" should never be used in the list of jinxs when executing a sequence.
781
1159
 
782
1160
 
783
- Remember, do not include ANY ADDITIONAL MARKDOWN FORMATTING.
784
- There should be no leading ```json.
1161
+ def should_continue_with_more_actions(
1162
+ original_command: str,
1163
+ completed_actions: List[Dict[str, Any]],
1164
+ current_messages: List[Dict[str, str]],
1165
+ model: str = None,
1166
+ provider: str = None,
1167
+ npc: Any = None,
1168
+ team: Any = None,
1169
+ context: str = None,
1170
+ **kwargs: Any
785
1171
 
786
- """
787
- if context:
788
- prompt += f"""
789
- Additional relevant context from user:
1172
+ ) -> Dict:
1173
+ """Decide if more action sequences are needed."""
1174
+
1175
+ results_summary = ""
1176
+ for idx, action_result in enumerate(completed_actions):
1177
+ action_name = action_result.get("action", "Unknown Action")
1178
+ output = action_result.get('output', 'No Output')
1179
+ output_preview = output[:100] + "..." if isinstance(output, str) and len(output) > 100 else output
1180
+ results_summary += f"{idx + 1}. {action_name}: {output_preview}\n"
1181
+
1182
+ prompt = f"""
1183
+ Original user request: "{original_command}"
790
1184
 
791
- {context}
1185
+ This request asks for multiple things. Analyze if ALL parts have been addressed.
1186
+ Look for keywords like "and then", "use that to", "after that" which indicate multiple tasks.
792
1187
 
793
- """
1188
+ Completed actions so far:
1189
+ {results_summary}
794
1190
 
1191
+ For the request "{original_command}", identify:
1192
+ 1. What parts have been completed
1193
+ 2. What parts still need to be done
795
1194
 
1195
+ JSON response:
1196
+ {{
1197
+ "needs_more_actions": true/false,
1198
+ "reasoning": "explain what's been done and what's still needed",
1199
+ "next_focus": "if more actions needed, what specific task should be done next"
1200
+ }}
1201
+ """
796
1202
 
797
- action_response = get_llm_response(
1203
+ response = get_llm_response(
798
1204
  prompt,
799
1205
  model=model,
800
1206
  provider=provider,
801
- api_url=api_url,
802
- api_key=api_key,
803
1207
  npc=npc,
1208
+ team=team,
804
1209
  format="json",
805
1210
  messages=[],
806
- context=None,
1211
+
1212
+ context=context,
1213
+ **kwargs
807
1214
  )
808
1215
 
809
- if "Error" in action_response:
810
- print(f"LLM Error: {action_response['error']}")
811
- return action_response["error"]
1216
+ response_dict = response.get("response", {})
1217
+ if not isinstance(response_dict, dict):
1218
+ return {"needs_more_actions": False, "reasoning": "Error", "next_focus": ""}
1219
+
1220
+ return response_dict
812
1221
 
813
- response_content = action_response.get("response", {})
814
1222
 
815
- if isinstance(response_content, str):
816
- try:
817
- response_content_parsed = json.loads(response_content)
818
- except json.JSONDecodeError as e:
819
- print(
820
- f"Invalid JSON received from LLM: {e}. Response was: {response_content}"
821
- )
822
- return f"Error: Invalid JSON from LLM: {response_content}"
823
- else:
824
- response_content_parsed = response_content
825
-
826
- #print(prompt)
827
- action = response_content_parsed.get("action")
828
- explanation = response_content_parsed.get("explanation")
829
- jinx_name = response_content_parsed.get('jinx_name', '')
830
- jinx_name = '\n Jinx: ' + str(jinx_name) if jinx_name else ''
831
-
832
- render_markdown(f"- Action chosen: {action + jinx_name}\n")
833
- render_markdown(f"- Explanation given: {explanation}\n")
834
-
835
-
836
- # Execute the chosen action
837
- # Execute the chosen action
838
- if action == "invoke_jinx":
839
- if npc and npc.jinxs_dict and jinx_name in npc.jinxs_dict:
840
- if stream and not shell:
841
- # Create wrapper generator for streaming case
842
- def decision_wrapped_gen():
843
- # First yield decision
844
- yield {'role': 'decision', 'content': f"- Action chosen: {action + jinx_name}\n- Explanation given: {explanation}\n"}
845
- # Then execute jinx and yield from its result
846
- result = handle_jinx_call(
847
- command,
848
- jinx_name,
849
- model=model,
850
- provider=provider,
851
- api_url=api_url,
852
- api_key=api_key,
853
- messages=messages,
854
- npc=npc,
855
- stream=stream
856
- )
857
- yield from result['response']
858
- return {'messages': messages, 'output': decision_wrapped_gen()}
859
- elif stream and shell:
860
- result = handle_jinx_call(
861
- command,
862
- jinx_name,
863
- model=model,
864
- provider=provider,
865
- api_url=api_url,
866
- api_key=api_key,
867
- messages=messages,
868
- npc = npc ,
869
- stream =stream
870
- )
871
- return {'messages': result.get('messages', messages), 'output': result.get('output', '')}
872
- else:
873
- # Non-streaming case
874
- result = handle_jinx_call(
875
- command, jinx_name,
876
- model=model, provider=provider,
877
- api_url=api_url, api_key=api_key,
878
- messages=messages, npc=npc,
879
- stream=stream
880
- )
881
- return {'messages': result.get('messages', messages), 'output': result.get('output', '')}
882
- else:
883
- return {"messages": messages, "output": f"jinx '{jinx_name}' not found"}
884
-
885
- elif action == "answer_question":
886
- if stream and not shell:
887
- def decision_wrapped_gen():
888
- yield {'role': 'decision',
889
- 'content': f"- Action chosen: {action + jinx_name}\n- Explanation given: {explanation}\n"}
890
- result = get_llm_response(
891
- command,
892
- model=model, provider=provider,
893
- api_url=api_url, api_key=api_key,
894
- messages=messages, npc=npc,
895
- stream=stream,
896
- images=images,
897
- )
898
- yield from result['response']
899
- return {'messages': messages, 'output': decision_wrapped_gen()}
900
- elif stream and shell:
901
- result = get_llm_response(
902
- command,
903
- model=model,
904
- provider=provider,
905
- api_url=api_url,
906
- api_key=api_key,
907
- messages=messages,
908
- npc=npc,
909
- stream=stream,
910
- images=images,
911
- )
912
1223
 
913
1224
 
914
- return {'messages': result.get('messages', messages), 'output': result.get('response', '')}
915
1225
 
916
1226
 
917
- else:
918
- result = get_llm_response(
919
- command,
920
- model=model, provider=provider,
921
- api_url=api_url, api_key=api_key,
922
- messages=messages, npc=npc,
923
- stream=stream, images=images,
924
- )
925
- return {'messages': result.get('messages', messages), 'output': result.get('response', '')}
926
1227
 
927
1228
 
928
- elif action == "request_input":
929
- explanation = response_content_parsed.get("explanation")
930
- request_input = handle_request_input(
931
- f"Explanation from check_llm_command: {explanation} \n for the user input command: {command}",
1229
+ def identify_groups(
1230
+ facts: List[str],
1231
+ model,
1232
+ provider,
1233
+ npc = None,
1234
+ context: str = None,
1235
+ **kwargs
1236
+ ) -> List[str]:
1237
+ """Identify natural groups from a list of facts"""
1238
+
1239
+
1240
+ prompt = """What are the main groups these facts could be organized into?
1241
+ Express these groups in plain, natural language.
1242
+
1243
+ For example, given:
1244
+ - User enjoys programming in Python
1245
+ - User works on machine learning projects
1246
+ - User likes to play piano
1247
+ - User practices meditation daily
1248
+
1249
+ You might identify groups like:
1250
+ - Programming
1251
+ - Machine Learning
1252
+ - Musical Interests
1253
+ - Daily Practices
1254
+
1255
+ Return a JSON object with the following structure:
1256
+ `{
1257
+ "groups": ["list of group names"]
1258
+ }`
1259
+
1260
+
1261
+ Return only the JSON object. Do not include any additional markdown formatting or
1262
+ leading json characters.
1263
+ """
1264
+
1265
+ response = get_llm_response(
1266
+ prompt + f"\n\nFacts: {json.dumps(facts)}",
1267
+ model=model,
1268
+ provider=provider,
1269
+ format="json",
1270
+ npc=npc,
1271
+ context=context,
1272
+
1273
+ **kwargs
1274
+ )
1275
+ return response["response"]["groups"]
1276
+
1277
+ def get_related_concepts_multi(node_name: str,
1278
+ node_type: str,
1279
+ all_concept_names,
1280
+ model: str = None,
1281
+ provider: str = None,
1282
+ npc=None,
1283
+ context : str = None,
1284
+ **kwargs):
1285
+ """Links any node (fact or concept) to ALL relevant concepts in the entire ontology."""
1286
+ prompt = f"""
1287
+ Which of the following concepts from the entire ontology relate to the given {node_type}?
1288
+ Select all that apply, from the most specific to the most abstract.
1289
+
1290
+ {node_type.capitalize()}: "{node_name}"
1291
+
1292
+ Available Concepts:
1293
+ {json.dumps(all_concept_names, indent=2)}
1294
+
1295
+ Respond with JSON: {{"related_concepts": ["Concept A", "Concept B", ...]}}
1296
+ """
1297
+ response = get_llm_response(prompt,
1298
+ model=model,
1299
+ provider=provider,
1300
+ format="json",
1301
+ npc=npc,
1302
+ context=context,
1303
+ **kwargs)
1304
+ return response["response"].get("related_concepts", [])
1305
+
1306
+
1307
+ def assign_groups_to_fact(
1308
+ fact: str,
1309
+ groups: List[str],
1310
+ model = None,
1311
+ provider = None,
1312
+ npc = None,
1313
+ context: str = None,
1314
+ **kwargs
1315
+ ) -> Dict[str, List[str]]:
1316
+ """Assign facts to the identified groups"""
1317
+ prompt = f"""Given this fact, assign it to any relevant groups.
1318
+
1319
+ A fact can belong to multiple groups if it fits.
1320
+
1321
+ Here is the fact: {fact}
1322
+
1323
+ Here are the groups: {groups}
1324
+
1325
+ Return a JSON object with the following structure:
1326
+ {{
1327
+ "groups": ["list of group names"]
1328
+ }}
1329
+
1330
+ Do not include any additional markdown formatting or leading json characters.
1331
+
1332
+
1333
+ """
1334
+
1335
+ response = get_llm_response(
1336
+ prompt,
1337
+ model=model,
1338
+ provider=provider,
1339
+ format="json",
1340
+ npc=npc,
1341
+ context=context,
1342
+ **kwargs
1343
+ )
1344
+ return response["response"]
1345
+
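A minimal usage sketch chaining the two helpers above, identify_groups and assign_groups_to_fact; the npcpy.llm_funcs import path and the llama3.2/ollama settings are illustrative assumptions, not taken from this diff.

# Usage sketch (assumed import path and model settings).
from npcpy.llm_funcs import identify_groups, assign_groups_to_fact

facts = [
    "User enjoys programming in Python",
    "User works on machine learning projects",
    "User likes to play piano",
]

# First pass: propose group names for the whole fact list.
groups = identify_groups(facts, model="llama3.2", provider="ollama")

# Second pass: map each fact onto any of the proposed groups.
assignments = {
    fact: assign_groups_to_fact(fact, groups, model="llama3.2", provider="ollama")["groups"]
    for fact in facts
}
print(assignments)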
1346
+ def generate_group_candidates(
1347
+ items: List[str],
1348
+ item_type: str,
1349
+ model: str = None,
1350
+ provider: str =None,
1351
+ npc = None,
1352
+ context: str = None,
1353
+ n_passes: int = 3,
1354
+ subset_size: int = 10,
1355
+ **kwargs
1356
+ ) -> List[str]:
1357
+ """Generate candidate groups for items (facts or groups) based on core semantic meaning."""
1358
+ all_candidates = []
1359
+
1360
+ for pass_num in range(n_passes):
1361
+ if len(items) > subset_size:
1362
+ item_subset = random.sample(items, min(subset_size, len(items)))
1363
+ else:
1364
+ item_subset = items
1365
+
1366
+
1367
+ prompt = f"""From the following {item_type}, identify specific and relevant conceptual groups.
1368
+ Think about the core subject or entity being discussed.
1369
+
1370
+ GUIDELINES FOR GROUP NAMES:
1371
+ 1. **Prioritize Specificity:** Names should be precise and directly reflect the content.
1372
+ 2. **Favor Nouns and Noun Phrases:** Use descriptive nouns or noun phrases.
1373
+ 3. **AVOID:**
1374
+ * Gerunds (words ending in -ing when used as nouns, like "Understanding", "Analyzing", "Processing"). If a gerund is unavoidable, try to make it a specific action (e.g., "User Authentication Module" is better than "Authenticating Users").
1375
+ * Adverbs or descriptive adjectives that don't form a core part of the subject's identity (e.g., "Quickly calculating", "Effectively managing").
1376
+ * Overly generic terms (e.g., "Concepts", "Processes", "Dynamics", "Mechanics", "Analysis", "Understanding", "Interactions", "Relationships", "Properties", "Structures", "Systems", "Frameworks", "Predictions", "Outcomes", "Effects", "Considerations", "Methods", "Techniques", "Data", "Theoretical", "Physical", "Spatial", "Temporal").
1377
+ 4. **Direct Naming:** If an item is a specific entity or action, it can be a group name itself (e.g., "Earth", "Lamb Shank Braising", "World War I").
1378
+
1379
+ EXAMPLE:
1380
+ Input {item_type.capitalize()}: ["Self-intersection shocks drive accretion disk formation.", "Gravity stretches star into stream.", "Energy dissipation in shocks influences capture fraction."]
1381
+ Desired Output Groups: ["Accretion Disk Formation (Self-Intersection Shocks)", "Stellar Tidal Stretching", "Energy Dissipation from Shocks"]
1382
+
1383
+ ---
1384
+
1385
+ Now, analyze the following {item_type}:
1386
+ {item_type.capitalize()}: {json.dumps(item_subset)}
1387
+
1388
+ Return a JSON object:
1389
+ {{
1390
+ "groups": ["list of specific, precise, and relevant group names"]
1391
+ }}
1392
+ """
1393
+
1394
+
1395
+ response = get_llm_response(
1396
+ prompt,
932
1397
  model=model,
933
1398
  provider=provider,
1399
+ format="json",
1400
+ npc=npc,
1401
+ context=context,
1402
+ **kwargs
934
1403
  )
935
1404
 
936
- messages.extend([
1405
+ candidates = response["response"].get("groups", [])
1406
+ all_candidates.extend(candidates)
1407
+
1408
+ return list(set(all_candidates))
1409
+
1410
+
1411
+ def remove_idempotent_groups(
1412
+ group_candidates: List[str],
1413
+ model: str = None,
1414
+ provider: str =None,
1415
+ npc = None,
1416
+ context : str = None,
1417
+ **kwargs: Any
1418
+ ) -> List[str]:
1419
+ """Remove groups that are essentially identical in meaning, favoring specificity and direct naming, and avoiding generic structures."""
1420
+
1421
+ prompt = f"""Compare these group names. Identify and list ONLY the groups that are conceptually distinct and specific.
1422
+
1423
+ GUIDELINES FOR SELECTING DISTINCT GROUPS:
1424
+ 1. **Prioritize Specificity and Direct Naming:** Favor precise nouns or noun phrases that directly name the subject.
1425
+ 2. **Prefer Concrete Entities/Actions:** If a name refers to a specific entity or action (e.g., "Earth", "Sun", "Water", "France", "User Authentication Module", "Lamb Shank Braising", "World War I"), keep it if it's distinct.
1426
+ 3. **Rephrase Gerunds:** If a name uses a gerund (e.g., "Understanding TDEs"), rephrase it to a noun or noun phrase (e.g., "Tidal Disruption Events").
1427
+ 4. **AVOID OVERLY GENERIC TERMS:** Do NOT use very broad or abstract terms that don't add specific meaning. Examples to avoid: "Concepts", "Processes", "Dynamics", "Mechanics", "Analysis", "Understanding", "Interactions", "Relationships", "Properties", "Structures", "Systems", "Frameworks", "Predictions", "Outcomes", "Effects", "Considerations", "Methods", "Techniques", "Data", "Theoretical", "Physical", "Spatial", "Temporal". If a group name seems overly generic or abstract, it should likely be removed or refined.
1428
+ 5. **Similarity Check:** If two groups are very similar, keep the one that is more descriptive or specific to the domain.
1429
+
1430
+ EXAMPLE 1:
1431
+ Groups: ["Accretion Disk Formation", "Accretion Disk Dynamics", "Formation of Accretion Disks"]
1432
+ Distinct Groups: ["Accretion Disk Formation", "Accretion Disk Dynamics"]
1433
+
1434
+ EXAMPLE 2:
1435
+ Groups: ["Causes of Events", "Event Mechanisms", "Event Drivers"]
1436
+ Distinct Groups: ["Event Causation", "Event Mechanisms"]
1437
+
1438
+ EXAMPLE 3:
1439
+ Groups: ["Astrophysics Basics", "Fundamental Physics", "General Science Concepts"]
1440
+ Distinct Groups: ["Fundamental Physics"]
1441
+
1442
+ EXAMPLE 4:
1443
+ Groups: ["Earth", "The Planet Earth", "Sun", "Our Star"]
1444
+ Distinct Groups: ["Earth", "Sun"]
1445
+
1446
+ EXAMPLE 5:
1447
+ Groups: ["User Authentication Module", "Authentication System", "Login Process"]
1448
+ Distinct Groups: ["User Authentication Module", "Login Process"]
1449
+
1450
+ ---
1451
+
1452
+ Now, analyze the following groups:
1453
+ Groups: {json.dumps(group_candidates)}
1454
+
1455
+ Return JSON:
1456
+ {{
1457
+ "distinct_groups": ["list of specific, precise, and distinct group names to keep"]
1458
+ }}
1459
+ """
1460
+
1461
+ response = get_llm_response(
1462
+ prompt,
1463
+ model=model,
1464
+ provider=provider,
1465
+ format="json",
1466
+ npc=npc,
1467
+ context=context,
1468
+ **kwargs
1469
+ )
1470
+
1471
+ return response["response"]["distinct_groups"]
1472
+
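A hedged sketch of how generate_group_candidates and remove_idempotent_groups compose: several randomized passes produce overlapping candidate names, which are then collapsed to a distinct set. The import path and model settings are assumptions for illustration.

# Usage sketch (assumed import path and model settings).
from npcpy.llm_funcs import generate_group_candidates, remove_idempotent_groups

facts = [
    "Self-intersection shocks drive accretion disk formation.",
    "Gravity stretches the star into a stream.",
    "Energy dissipation in shocks influences the capture fraction.",
]

# Multiple passes over random subsets yield redundant candidates...
candidates = generate_group_candidates(
    facts, "facts", model="llama3.2", provider="ollama", n_passes=3, subset_size=10
)
# ...which are then reduced to conceptually distinct group names.
distinct = remove_idempotent_groups(candidates, model="llama3.2", provider="ollama")
print(distinct)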
1473
+ def breathe(
1474
+ messages: List[Dict[str, str]],
1475
+ model: str = None,
1476
+ provider: str = None,
1477
+ npc = None,
1478
+ context: str = None,
1479
+ **kwargs: Any
1480
+ ) -> Dict[str, Any]:
1481
+ """Condense the conversation context into a small set of key extractions."""
1482
+ if not messages:
1483
+ return {"output": {}, "messages": []}
1484
+
1485
+ if 'stream' in kwargs:
1486
+ kwargs['stream'] = False
1487
+ conversation_text = "\n".join([f"{m['role']}: {m['content']}" for m in messages])
1488
+
1489
+
1490
+ prompt = f'''
1491
+ Read the following conversation:
1492
+
1493
+ {conversation_text}
1494
+
1495
+ ''' +'''
1496
+
1497
+ Now identify the following items:
1498
+
1499
+ 1. The high level objective
1500
+ 2. The most recent task
1501
+ 3. The accomplishments thus far
1502
+ 4. The failures thus far
1503
+
1504
+
1505
+ Return a JSON like so:
1506
+
1507
+ {
1508
+ "high_level_objective": "the overall goal so far for the user",
1509
+ "most_recent_task": "The currently ongoing task",
1510
+ "accomplishments": ["accomplishment1", "accomplishment2"],
1511
+ "failures": ["falures1", "failures2"],
1512
+ }
1513
+
1514
+ '''
1515
+
1516
+
1517
+ result = get_llm_response(prompt,
1518
+ model=model,
1519
+ provider=provider,
1520
+ npc=npc,
1521
+ context=context,
1522
+ format='json',
1523
+ **kwargs)
1524
+
1525
+ res = result.get('response', {})
1526
+ if isinstance(res, str):
1527
+ raise Exception(f"breathe expected structured JSON output but received a string response: {res}")
1528
+ format_output = f"""Here is a summary of the previous session.
1529
+ The high level objective was: {res.get('high_level_objective')} \n The accomplishments were: {res.get('accomplishments')},
1530
+ the failures were: {res.get('failures')} and the most recent task was: {res.get('most_recent_task')} """
1531
+ return {'output': format_output,
1532
+ 'messages': [
1533
+ {
1534
+ 'content': format_output,
1535
+ 'role': 'assistant'}
1536
+ ]
1537
+ }
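A short sketch of calling breathe on a running conversation; the import path and model settings are assumptions made for illustration only.

# Usage sketch (assumed import path and model settings).
from npcpy.llm_funcs import breathe

messages = [
    {"role": "user", "content": "Help me refactor the ingestion script."},
    {"role": "assistant", "content": "Split it into extract and load steps; extract works now."},
    {"role": "user", "content": "The load step still fails on empty batches."},
]

# Condenses the conversation into objective, current task, accomplishments, and failures.
summary = breathe(messages, model="llama3.2", provider="ollama")
print(summary["output"])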
1538
+ def abstract(groups,
1539
+ model,
1540
+ provider,
1541
+ npc=None,
1542
+ context: str = None,
1543
+ **kwargs):
1544
+ """
1545
+ Create more abstract terms from groups.
1546
+ """
1547
+ sample_groups = random.sample(groups, min(len(groups), max(3, len(groups) // 2)))
1548
+
1549
+ groups_text_for_prompt = "\n".join([f'- "{g["name"]}"' for g in sample_groups])
1550
+
1551
+ prompt = f"""
1552
+ Create more abstract categories from this list of groups.
1553
+
1554
+ Groups:
1555
+ {groups_text_for_prompt}
1556
+
1557
+ You will create higher-level concepts that interrelate between the given groups.
1558
+
1559
+ Create abstract categories that encompass multiple related facts, but do not unnecessarily combine facts with conjunctions. For example, do not try to combine "characters", "settings", and "physical reactions" into a
1560
+ compound group like "Characters, Setting, and Physical Reactions". This kind of grouping is not productive and only obfuscates true abstractions.
1561
+ For example, a group that might encompass the three aforementioned names might be "Literary Themes" or "Video Editing Functions", depending on the context.
1562
+ Your aim is to abstract, not to just arbitrarily generate associations.
1563
+
1564
+ Group names should never be more than two words. They should not contain gerunds. They should never contain conjunctions like "AND" or "OR".
1565
+ Generate no more than 5 new concepts and no fewer than 2.
1566
+
1567
+ Respond with JSON:
1568
+ {{
1569
+ "groups": [
1570
+ {{
1571
+ "name": "abstract category name"
1572
+ }}
1573
+ ]
1574
+ }}
1575
+ """
1576
+ response = get_llm_response(prompt,
1577
+ model=model,
1578
+ provider=provider,
1579
+ format="json",
1580
+ npc=npc,
1581
+ context=context,
1582
+ **kwargs)
1583
+
1584
+ return response["response"].get("groups", [])
1585
+
1586
+
1587
+ def extract_facts(
1588
+ text: str,
1589
+ model: str,
1590
+ provider: str,
1591
+ npc = None,
1592
+ context: str = None
1593
+ ) -> List[str]:
1594
+ """Extract concise facts from text using LLM (as defined earlier)"""
1595
+
1596
+ prompt = """Extract concise facts from this text.
1597
+ A fact is a piece of information that makes a statement about the world.
1598
+ A fact is typically a sentence that is true or false.
1599
+ Facts may be simple or complex. They can also be conflicting with each other, usually
1600
+ because there is some hidden context that is not mentioned in the text.
1601
+ In any case, it is simply your job to extract a list of facts that could pertain to
1602
+ an individual's personality.
1603
+
1604
+ For example, if a message says:
1605
+ "since I am a doctor I am often trying to think up new ways to help people.
1606
+ Can you help me set up a new kind of software to help with that?"
1607
+ You might extract the following facts:
1608
+ - The individual is a doctor
1609
+ - They are helpful
1610
+
1611
+ Another example:
1612
+ "I am a software engineer who loves to play video games. I am also a huge fan of the
1613
+ Star Wars franchise and I am a member of the 501st Legion."
1614
+ You might extract the following facts:
1615
+ - The individual is a software engineer
1616
+ - The individual loves to play video games
1617
+ - The individual is a huge fan of the Star Wars franchise
1618
+ - The individual is a member of the 501st Legion
1619
+
1620
+ Another example:
1621
+ "The quantum tunneling effect allows particles to pass through barriers
1622
+ that classical physics says they shouldn't be able to cross. This has
1623
+ huge implications for semiconductor design."
1624
+ You might extract these facts:
1625
+ - Quantum tunneling enables particles to pass through barriers that are
1626
+ impassable according to classical physics
1627
+ - The behavior of quantum tunneling has significant implications for
1628
+ how semiconductors must be designed
1629
+
1630
+ Another example:
1631
+ "People used to think the Earth was flat. Now we know it's spherical,
1632
+ though technically it's an oblate spheroid due to its rotation."
1633
+ You might extract these facts:
1634
+ - People historically believed the Earth was flat
1635
+ - It is now known that the Earth is an oblate spheroid
1636
+ - The Earth's oblate spheroid shape is caused by its rotation
1637
+
1638
+ Another example:
1639
+ "My research on black holes suggests they emit radiation, but my professor
1640
+ says this conflicts with Einstein's work. After reading more papers, I
1641
+ learned this is actually Hawking radiation and doesn't conflict at all."
1642
+ You might extract the following facts:
1643
+ - Black holes emit radiation
1644
+ - The professor believes this radiation conflicts with Einstein's work
1645
+ - The radiation from black holes is called Hawking radiation
1646
+ - Hawking radiation does not conflict with Einstein's work
1647
+
1648
+ Another example:
1649
+ "During the pandemic, many developers switched to remote work. I found
1650
+ that I'm actually more productive at home, though my company initially
1651
+ thought productivity would drop. Now they're keeping remote work permanent."
1652
+ You might extract the following facts:
1653
+ - The pandemic caused many developers to switch to remote work
1654
+ - The individual discovered higher productivity when working from home
1655
+ - The company predicted productivity would decrease with remote work
1656
+ - The company decided to make remote work a permanent option
1657
+
1658
+ Thus, it is your mission to reliably extract lists of facts.
1659
+
1660
+ Return a JSON object with the following structure:
937
1661
  {
938
- "role": "assistant",
939
- "content": f"""It's clear that extra input is required.
940
- Could you please provide it? Here is the reason:
941
- {explanation},
942
- and the prompt: {command}"""
943
- },
1662
+ "fact_list": "a list containing the facts where each fact is a string",
1663
+ }
1664
+ """
1665
+ if context and len(context) > 0:
1666
+ prompt+=f""" Here is some relevant user context: {context}"""
1667
+
1668
+ prompt+="""
1669
+ Return only the JSON object.
1670
+ Do not include any additional markdown formatting.
1671
+ """
1672
+
1673
+ response = get_llm_response(
1674
+ prompt + f"HERE BEGINS THE TEXT TO INVESTIGATE:\n\nText: {text}",
1675
+ model=model,
1676
+ provider=provider,
1677
+ format="json",
1678
+ npc=npc,
1679
+ context=context,
1680
+ )
1681
+ response = response["response"]
1682
+ return response.get("fact_list", [])
1683
+
1684
+
1685
+ def get_facts(content_text,
1686
+ model= None,
1687
+ provider = None,
1688
+ npc=None,
1689
+ context : str=None,
1690
+ attempt_number=1,
1691
+ n_attempts=3,
1692
+
1693
+ **kwargs):
1694
+ """Extract facts from content text"""
1695
+
1696
+ prompt = f"""
1697
+ Extract facts from this text. A fact is a specific statement that can be sourced from the text.
1698
+
1699
+ Example: if text says "the moon is the earth's only currently known satellite", extract:
1700
+ - "The moon is a satellite of earth"
1701
+ - "The moon is the only current satellite of earth"
1702
+ - "There may have been other satellites of earth" (inferred from "only currently known")
1703
+
1704
+
1705
+ A fact is a piece of information that makes a statement about the world.
1706
+ A fact is typically a sentence that is true or false.
1707
+ Facts may be simple or complex. They can also be conflicting with each other, usually
1708
+ because there is some hidden context that is not mentioned in the text.
1709
+ In any case, it is simply your job to extract a list of facts that could pertain to
1710
+ an individual's personality.
1711
+
1712
+ For example, if a message says:
1713
+ "since I am a doctor I am often trying to think up new ways to help people.
1714
+ Can you help me set up a new kind of software to help with that?"
1715
+ You might extract the following facts:
1716
+ - The individual is a doctor
1717
+ - They are helpful
1718
+
1719
+ Another example:
1720
+ "I am a software engineer who loves to play video games. I am also a huge fan of the
1721
+ Star Wars franchise and I am a member of the 501st Legion."
1722
+ You might extract the following facts:
1723
+ - The individual is a software engineer
1724
+ - The individual loves to play video games
1725
+ - The individual is a huge fan of the Star Wars franchise
1726
+ - The individual is a member of the 501st Legion
1727
+
1728
+ Another example:
1729
+ "The quantum tunneling effect allows particles to pass through barriers
1730
+ that classical physics says they shouldn't be able to cross. This has
1731
+ huge implications for semiconductor design."
1732
+ You might extract these facts:
1733
+ - Quantum tunneling enables particles to pass through barriers that are
1734
+ impassable according to classical physics
1735
+ - The behavior of quantum tunneling has significant implications for
1736
+ how semiconductors must be designed
1737
+
1738
+ Another example:
1739
+ "People used to think the Earth was flat. Now we know it's spherical,
1740
+ though technically it's an oblate spheroid due to its rotation."
1741
+ You might extract these facts:
1742
+ - People historically believed the Earth was flat
1743
+ - It is now known that the Earth is an oblate spheroid
1744
+ - The Earth's oblate spheroid shape is caused by its rotation
1745
+
1746
+ Another example:
1747
+ "My research on black holes suggests they emit radiation, but my professor
1748
+ says this conflicts with Einstein's work. After reading more papers, I
1749
+ learned this is actually Hawking radiation and doesn't conflict at all."
1750
+ You might extract the following facts:
1751
+ - Black holes emit radiation
1752
+ - The professor believes this radiation conflicts with Einstein's work
1753
+ - The radiation from black holes is called Hawking radiation
1754
+ - Hawking radiation does not conflict with Einstein's work
1755
+
1756
+ Another example:
1757
+ "During the pandemic, many developers switched to remote work. I found
1758
+ that I'm actually more productive at home, though my company initially
1759
+ thought productivity would drop. Now they're keeping remote work permanent."
1760
+ You might extract the following facts:
1761
+ - The pandemic caused many developers to switch to remote work
1762
+ - The individual discovered higher productivity when working from home
1763
+ - The company predicted productivity would decrease with remote work
1764
+ - The company decided to make remote work a permanent option
1765
+
1766
+ Thus, it is your mission to reliably extract lists of facts.
1767
+
1768
+ Here is the text:
1769
+ Text: "{content_text}"
1770
+
1771
+ Facts should never be more than one or two sentences, and they should not be overly complex or literal. They must be explicitly
1772
+ derived or inferred from the source text. Do not simply repeat the source text verbatim when stating the fact.
1773
+
1774
+ No two facts should share substantially similar claims. They should be conceptually distinct and pertain to distinct ideas, avoiding lengthy, convoluted, or compound facts.
1775
+ Respond with JSON:
1776
+ {{
1777
+ "facts": [
1778
+ {{
1779
+ "statement": "fact statement that builds on input text to state a specific claim that can be falsified through reference to the source material",
1780
+ "source_text": "text snippets related to the source text",
1781
+ "type": "explicit or inferred"
1782
+ }}
1783
+ ]
1784
+ }}
1785
+ """
1786
+
1787
+ response = get_llm_response(prompt,
1788
+ model=model,
1789
+ provider=provider,
1790
+ npc=npc,
1791
+ format="json",
1792
+ context=context,
1793
+ **kwargs)
1794
+
1795
+ if len(response.get("response", {}).get("facts", [])) == 0 and attempt_number < n_attempts:
1796
+ print(f" Attempt {attempt_number} to extract facts yielded no results. Retrying...")
1797
+ return get_facts(content_text,
1798
+ model=model,
1799
+ provider=provider,
1800
+ npc=npc,
1801
+ context=context,
1802
+ attempt_number=attempt_number+1,
1803
+ n_attempts=n_attempts,
1804
+ **kwargs)
1805
+
1806
+ return response["response"].get("facts", [])
1807
+
1808
+
1809
+
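A minimal sketch of get_facts on a short passage; the npcpy.llm_funcs import path and the llama3.2/ollama values are hypothetical stand-ins, not confirmed by this diff.

# Usage sketch (assumed import path and model settings).
from npcpy.llm_funcs import get_facts

text = (
    "I am a software engineer who loves to play video games. "
    "I am also a huge fan of the Star Wars franchise."
)

# Each returned fact follows the prompt's schema: 'statement', 'source_text',
# and 'type' ('explicit' or 'inferred'); empty responses are retried up to n_attempts times.
facts = get_facts(text, model="llama3.2", provider="ollama")
for fact in facts:
    print(fact["type"], "-", fact["statement"])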
1810
+ def zoom_in(facts,
1811
+ model= None,
1812
+ provider=None,
1813
+ npc=None,
1814
+ context: str = None,
1815
+ attempt_number: int = 1,
1816
+ n_attempts=3,
1817
+ **kwargs):
1818
+ """Infer new implied facts from existing facts"""
1819
+ valid_facts = []
1820
+ for fact in facts:
1821
+ if isinstance(fact, dict) and 'statement' in fact:
1822
+ valid_facts.append(fact)
1823
+ if not valid_facts:
1824
+ return []
1825
+
1826
+ fact_lines = []
1827
+ for fact in valid_facts:
1828
+ fact_lines.append(f"- {fact['statement']}")
1829
+ facts_text = "\n".join(fact_lines)
1830
+
1831
+ prompt = f"""
1832
+ Look at these facts and infer new implied facts:
1833
+
1834
+ {facts_text}
1835
+
1836
+ What other facts can be reasonably inferred from these?
1837
+ """ +"""
1838
+ Respond with JSON:
1839
+ {
1840
+ "implied_facts": [
944
1841
  {
945
- "role": "user",
946
- "content": command + " \n \n \n extra context: " + request_input
1842
+ "statement": "new implied fact",
1843
+ "inferred_from": ["which facts this comes from"]
947
1844
  }
948
- ])
949
-
950
- if stream and not shell:
951
- def decision_wrapped_gen():
952
- yield {'role': 'decision', 'content': f"- Action chosen: {action + jinx_name}\n- Explanation given: {explanation}\n"}
953
- result = check_llm_command(
954
- command + " \n \n \n extra context: " + request_input,
955
- model=model, provider=provider,
956
- api_url=api_url, api_key=api_key,
957
- npc=npc, messages=messages,
958
- stream=stream,
959
- shell = shell
960
- )
961
- yield from result['output']
962
- return {'messages': messages, 'output': decision_wrapped_gen()}
963
- elif stream and shell:
964
- return check_llm_command(
965
- command + " \n \n \n extra context: " + request_input,
966
- model=model, provider=provider,
967
- api_url=api_url, api_key=api_key,
968
- npc=npc, messages=messages,
969
- stream=stream, shell = shell
970
- )
1845
+ ]
1846
+ }
1847
+ """
1848
+
1849
+ response = get_llm_response(prompt,
1850
+ model=model,
1851
+ provider=provider,
1852
+ format="json",
1853
+ context=context,
1854
+ npc=npc,
1855
+ **kwargs)
1856
+
1857
+ facts = response.get("response", {}).get("implied_facts", [])
1858
+ if len(facts) == 0 and attempt_number < n_attempts:
1859
+ return zoom_in(valid_facts,
1860
+ model=model,
1861
+ provider=provider,
1862
+ npc=npc,
1863
+ context=context,
1864
+ attempt_number=attempt_number+1,
1865
+ n_attempts=n_attempts,
1866
+ **kwargs)
1867
+ return facts
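A sketch of feeding get_facts output into zoom_in to surface implied facts; the import path, model settings, and the returned field names (which follow the prompt's requested schema) are assumptions for illustration.

# Usage sketch (assumed import path and model settings); zoom_in expects the
# dict-shaped facts produced by get_facts above.
from npcpy.llm_funcs import get_facts, zoom_in

facts = get_facts(
    "The moon is the earth's only currently known satellite.",
    model="llama3.2",
    provider="ollama",
)
implied = zoom_in(facts, model="llama3.2", provider="ollama")
for fact in implied:
    print(fact["statement"], "<-", fact["inferred_from"])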
1868
+ def generate_groups(facts,
1869
+ model=None,
1870
+ provider=None,
1871
+ npc=None,
1872
+ context: str =None,
1873
+ **kwargs):
1874
+ """Generate conceptual groups for facts"""
1875
+
1876
+ facts_text = "\n".join([f"- {fact['statement']}" for fact in facts])
1877
+
1878
+ prompt = f"""
1879
+ Generate conceptual groups for this set of facts:
1880
+
1881
+ {facts_text}
1882
+
1883
+ Create categories that encompass multiple related facts, but do not unnecessarily combine facts with conjunctions.
1884
+
1885
+ Your aim is to generalize commonly occurring ideas into groups, not to just arbitrarily generate associations.
1886
+ Focus on the key commonly occurring items and expressions.
1887
+
1888
+ Group names should never be more than two words. They should not contain gerunds. They should never contain conjunctions like "AND" or "OR".
1889
+ Respond with JSON:
1890
+ {{
1891
+ "groups": [
1892
+ {{
1893
+ "name": "group name"
1894
+ }}
1895
+ ]
1896
+ }}
1897
+ """
1898
+
1899
+ response = get_llm_response(prompt,
1900
+ model=model,
1901
+ provider=provider,
1902
+ format="json",
1903
+ context=context,
1904
+ npc=npc,
1905
+ **kwargs)
1906
+
1907
+ return response["response"].get("groups", [])
1908
+
1909
+ def remove_redundant_groups(groups,
1910
+ model=None,
1911
+ provider=None,
1912
+ npc=None,
1913
+ context: str = None,
1914
+ **kwargs):
1915
+ """Remove redundant groups"""
1916
+
1917
+ groups_text = "\n".join([f"- {g['name']}" for g in groups])
1918
+
1919
+ prompt = f"""
1920
+ Remove redundant groups from this list:
1921
+
1922
+ {groups_text}
1923
+
1924
+
1925
+
1926
+ Merge similar groups and keep only distinct concepts.
1927
+ Create abstract categories that encompass multiple related facts, but do not unnecessarily combine facts with conjunctions. For example, do not try to combine "characters", "settings", and "physical reactions" into a
1928
+ compound group like "Characters, Setting, and Physical Reactions". This kind of grouping is not productive and only obfuscates true abstractions.
1929
+ For example, a group that might encompass the three aforementioned names might be "Literary Themes" or "Video Editing Functions", depending on the context.
1930
+ Your aim is to abstract, not to just arbitrarily generate associations.
1931
+
1932
+ Group names should never be more than two words. They should not contain gerunds. They should never contain conjunctions like "AND" or "OR".
1933
+
1934
+
1935
+ Respond with JSON:
1936
+ {{
1937
+ "groups": [
1938
+ {{
1939
+ "name": "final group name"
1940
+ }}
1941
+ ]
1942
+ }}
1943
+ """
1944
+
1945
+ response = get_llm_response(prompt,
1946
+ model=model,
1947
+ provider=provider,
1948
+ format="json",
1949
+ npc=npc,
1950
+ context=context,
1951
+ **kwargs)
1952
+
1953
+ return response["response"].get("groups", [])
1954
+
1955
+
1956
+ def prune_fact_subset_llm(fact_subset,
1957
+ concept_name,
1958
+ model=None,
1959
+ provider=None,
1960
+ npc=None,
1961
+ context : str = None,
1962
+ **kwargs):
1963
+ """Identifies redundancies WITHIN a small, topically related subset of facts."""
1964
+ print(f" Step Sleep-A: Pruning fact subset for concept '{concept_name}'...")
1965
+
1966
+
1967
+ prompt = f"""
1968
+ The following facts are all related to the concept "{concept_name}".
1969
+ Review ONLY this subset and identify groups of facts that are semantically identical.
1970
+ Return only the facts that are semantically distinct and omit the rest.
1971
+
1972
+ Fact Subset: {json.dumps(fact_subset, indent=2)}
1973
+
1974
+ Return a JSON object with the refined facts:
1975
+ {{
1976
+ "refined_facts": [
1977
+ fact1,
1978
+ fact2,
1979
+ fact3,...
1980
+ ]
1981
+ }}
1982
+ """
1983
+ response = get_llm_response(prompt,
1984
+ model=model,
1985
+ provider=provider,
1986
+ npc=npc,
1987
+ format="json",
1988
+ context=context, **kwargs)
1989
+ return response['response'].get('refined_facts', [])
1990
+
1991
+ def consolidate_facts_llm(new_fact,
1992
+ existing_facts,
1993
+ model,
1994
+ provider,
1995
+ npc=None,
1996
+ context: str =None,
1997
+ **kwargs):
1998
+ """
1999
+ Uses an LLM to decide if a new fact is novel or redundant.
2000
+ """
2001
+ prompt = f"""
2002
+ Analyze the "New Fact" in the context of the "Existing Facts" list.
2003
+ Your task is to determine if the new fact provides genuinely new information or if it is essentially a repeat or minor rephrasing of information already present.
2004
+
2005
+ New Fact:
2006
+ "{new_fact['statement']}"
2007
+
2008
+ Existing Facts:
2009
+ {json.dumps([f['statement'] for f in existing_facts], indent=2)}
2010
+
2011
+ Possible decisions:
2012
+ - 'novel': The fact introduces new, distinct information not covered by the existing facts.
2013
+ - 'redundant': The fact repeats information already present in the existing facts.
2014
+
2015
+ Respond with a JSON object:
2016
+ {{
2017
+ "decision": "novel or redundant",
2018
+ "reason": "A brief explanation for your decision."
2019
+ }}
2020
+ """
2021
+ response = get_llm_response(prompt,
2022
+ model=model,
2023
+ provider=provider,
2024
+ format="json",
2025
+ npc=npc,
2026
+ context=context,
2027
+ **kwargs)
2028
+ return response['response']
2029
+
2030
+
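A small sketch of consolidate_facts_llm deciding whether an incoming fact adds anything new; the import path and model settings are illustrative assumptions.

# Usage sketch (assumed import path and model settings).
from npcpy.llm_funcs import consolidate_facts_llm

existing = [
    {"statement": "The individual is a software engineer"},
    {"statement": "The individual plays video games"},
]
new_fact = {"statement": "The individual writes code for a living"}

# Returns {"decision": "novel" | "redundant", "reason": "..."} so the caller
# can choose whether to insert the fact or archive it.
verdict = consolidate_facts_llm(new_fact, existing, model="llama3.2", provider="ollama")
print(verdict["decision"], "-", verdict["reason"])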
2031
+ def get_related_facts_llm(new_fact_statement,
2032
+ existing_fact_statements,
2033
+ model = None,
2034
+ provider = None,
2035
+ npc = None,
2036
+ attempt_number = 1,
2037
+ n_attempts = 3,
2038
+ context='',
2039
+ **kwargs):
2040
+ """Identifies which existing facts are causally or thematically related to a new fact."""
2041
+ prompt = f"""
2042
+ A new fact has been learned: "{new_fact_statement}"
2043
+
2044
+ Which of the following existing facts are directly related to it (causally, sequentially, or thematically)?
2045
+ Select only the most direct and meaningful connections.
2046
+
2047
+ Existing Facts:
2048
+ {json.dumps(existing_fact_statements, indent=2)}
2049
+
2050
+ Respond with JSON: {{"related_facts": ["statement of a related fact", ...]}}
2051
+ """
2052
+ response = get_llm_response(prompt,
2053
+ model=model,
2054
+ provider=provider,
2055
+ format="json",
2056
+ npc=npc,
2057
+ context=context,
2058
+ **kwargs)
2059
+ if len(response["response"].get("related_facts", [])) == 0 and attempt_number < n_attempts:
2060
+ print(f" Attempt {attempt_number} to find related facts yielded no results. Giving up.")
2061
+ return get_related_facts_llm(new_fact_statement,
2062
+ existing_fact_statements,
2063
+ model=model,
2064
+ provider=provider,
2065
+ npc=npc,
2066
+ attempt_number=attempt_number+1,
2067
+ n_attempts=n_attempts,
2068
+ context=context,
2069
+ **kwargs)
2070
+
2071
+ return response["response"].get("related_facts", [])
2072
+
2073
+ def find_best_link_concept_llm(candidate_concept_name,
2074
+ existing_concept_names,
2075
+ model = None,
2076
+ provider = None,
2077
+ npc = None,
2078
+ context: str = None,
2079
+ **kwargs ):
2080
+ """
2081
+ Finds the best existing concept to link a new candidate concept to.
2082
+ This prompt now uses neutral "association" language.
2083
+ """
2084
+ prompt = f"""
2085
+ Here is a new candidate concept: "{candidate_concept_name}"
2086
+
2087
+ Which of the following existing concepts is it most closely related to? The relationship could be as a sub-category, a similar idea, or a related domain.
2088
+
2089
+ Existing Concepts:
2090
+ {json.dumps(existing_concept_names, indent=2)}
2091
+
2092
+ Respond with the single best-fit concept to link to from the list, or respond with "none" if it is a genuinely new root idea.
2093
+ {{
2094
+ "best_link_concept": "The single best concept name OR none"
2095
+ }}
2096
+ """
2097
+ response = get_llm_response(prompt,
2098
+ model=model,
2099
+ provider=provider,
2100
+ format="json",
2101
+ npc=npc,
2102
+ context=context,
2103
+ **kwargs)
2104
+ return response['response'].get('best_link_concept')
2105
+
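A sketch of find_best_link_concept_llm attaching a candidate concept to an existing ontology; the import path, concept names, and model settings are hypothetical.

# Usage sketch (assumed import path and model settings).
from npcpy.llm_funcs import find_best_link_concept_llm

existing_concepts = ["Stellar Dynamics", "Accretion Disk Formation", "Cooking Techniques"]

# Returns the single best-fit concept name, or "none" for a genuinely new root idea.
link = find_best_link_concept_llm(
    "Tidal Disruption Events",
    existing_concepts,
    model="llama3.2",
    provider="ollama",
)
print(link)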
2106
+ def asymptotic_freedom(parent_concept,
2107
+ supporting_facts,
2108
+ model=None,
2109
+ provider=None,
2110
+ npc = None,
2111
+ context: str = None,
2112
+ **kwargs):
2113
+ """Given a concept and its facts, proposes an intermediate layer of sub-concepts."""
2114
+ print(f" Step Sleep-B: Attempting to deepen concept '{parent_concept['name']}'...")
2115
+ fact_statements = []
2116
+ for f in supporting_facts:
2117
+ fact_statements.append(f['statement'])
2118
+
2119
+ prompt = f"""
2120
+ The concept "{parent_concept['name']}" is supported by many diverse facts.
2121
+ Propose a layer of 2-4 more specific sub-concepts to better organize these facts.
2122
+ These new concepts will exist as nodes that link to "{parent_concept['name']}".
2123
+
2124
+ Supporting Facts: {json.dumps(fact_statements, indent=2)}
2125
+ Respond with JSON: {{
2126
+ "new_sub_concepts": ["sub_layer1", "sub_layer2"]
2127
+ }}
2128
+ """
2129
+ response = get_llm_response(prompt,
2130
+ model=model,
2131
+ provider=provider,
2132
+ format="json",
2133
+ context=context, npc=npc,
2134
+ **kwargs)
2135
+ return response['response'].get('new_sub_concepts', [])
2136
+
971
2137
 
2138
+
2139
+ def bootstrap(
2140
+ prompt: str,
2141
+ model: str = None,
2142
+ provider: str = None,
2143
+ npc: Any = None,
2144
+ team: Any = None,
2145
+ sample_params: Dict[str, Any] = None,
2146
+ sync_strategy: str = "consensus",
2147
+ context: str = None,
2148
+ n_samples: int = 3,
2149
+ **kwargs
2150
+ ) -> Dict[str, Any]:
2151
+ """Bootstrap by sampling multiple agents from team or varying parameters"""
2152
+
2153
+ if team and hasattr(team, 'npcs') and len(team.npcs) >= n_samples:
2154
+
2155
+ sampled_npcs = list(team.npcs.values())[:n_samples]
2156
+ results = []
2157
+
2158
+ for i, agent in enumerate(sampled_npcs):
2159
+ response = get_llm_response(
2160
+ f"Sample {i+1}: {prompt}\nContext: {context}",
2161
+ npc=agent,
2162
+ context=context,
2163
+ **kwargs
2164
+ )
2165
+ results.append({
2166
+ 'agent': agent.name,
2167
+ 'response': response.get("response", "")
2168
+ })
2169
+ else:
2170
+
2171
+ if sample_params is None:
2172
+ sample_params = {"temperature": [0.3, 0.7, 1.0]}
2173
+
2174
+ results = []
2175
+ for i in range(n_samples):
2176
+ temp = sample_params.get('temperature', [0.7])[i % len(sample_params.get('temperature', [0.7]))]
2177
+ response = get_llm_response(
2178
+ f"Sample {i+1}: {prompt}\nContext: {context}",
2179
+ model=model,
2180
+ provider=provider,
2181
+ npc=npc,
2182
+ temperature=temp,
2183
+ context=context,
2184
+ **kwargs
2185
+ )
2186
+ results.append({
2187
+ 'variation': f'temp_{temp}',
2188
+ 'response': response.get("response", "")
2189
+ })
2190
+
2191
+
2192
+ response_texts = [r['response'] for r in results]
2193
+ return synthesize(prompt, model=model, provider=provider, npc=npc or (team.get_forenpc() if team else None), context=context, responses=response_texts, sync_strategy=sync_strategy)
2194
+
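A sketch of bootstrap without a team, falling back to resampling one model at several temperatures before synthesizing; the import path and model settings are assumptions for illustration.

# Usage sketch (assumed import path and model settings); with no team supplied,
# bootstrap resamples the same model at the listed temperatures.
from npcpy.llm_funcs import bootstrap

result = bootstrap(
    "Suggest a name for a note-taking CLI tool",
    model="llama3.2",
    provider="ollama",
    sample_params={"temperature": [0.3, 0.7, 1.0]},
    n_samples=3,
    sync_strategy="consensus",
)
print(result.get("response"))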
2195
+ def harmonize(
2196
+ prompt: str,
2197
+ items: List[str],
2198
+ model: str = None,
2199
+ provider: str = None,
2200
+ npc: Any = None,
2201
+ team: Any = None,
2202
+ harmony_rules: List[str] = None,
2203
+ context: str = None,
2204
+ agent_roles: List[str] = None,
2205
+ **kwargs
2206
+ ) -> Dict[str, Any]:
2207
+ """Harmonize using multiple specialized agents"""
2208
+
2209
+ if team and hasattr(team, 'npcs'):
2210
+
2211
+ available_agents = list(team.npcs.values())
2212
+
2213
+ if agent_roles:
2214
+
2215
+ selected_agents = []
2216
+ for role in agent_roles:
2217
+ matching_agent = next((a for a in available_agents if role.lower() in a.name.lower() or role.lower() in a.primary_directive.lower()), None)
2218
+ if matching_agent:
2219
+ selected_agents.append(matching_agent)
2220
+ agents_to_use = selected_agents or available_agents[:len(items)]
972
2221
  else:
973
- return check_llm_command(
974
- command + " \n \n \n extra context: " + request_input,
975
- model=model, provider=provider,
976
- api_url=api_url, api_key=api_key,
977
- npc=npc, messages=messages,
978
- stream = stream,
979
- shell = shell
2222
+
2223
+ agents_to_use = available_agents[:min(len(items), len(available_agents))]
2224
+
2225
+ harmonized_results = []
2226
+ for i, (item, agent) in enumerate(zip(items, agents_to_use)):
2227
+ harmony_prompt = f"""Harmonize this element: {item}
2228
+ Task: {prompt}
2229
+ Rules: {', '.join(harmony_rules or ['maintain_consistency'])}
2230
+ Context: {context}
2231
+ Your role in harmony: {agent.primary_directive}"""
2232
+
2233
+ response = get_llm_response(
2234
+ harmony_prompt,
2235
+ npc=agent,
2236
+ context=context,
2237
+ **kwargs
980
2238
  )
2239
+ harmonized_results.append({
2240
+ 'agent': agent.name,
2241
+ 'item': item,
2242
+ 'harmonized': response.get("response", "")
2243
+ })
2244
+
2245
+
2246
+ coordinator = team.get_forenpc() if team else npc
2247
+ synthesis_prompt = f"""Synthesize these harmonized elements:
2248
+ {chr(10).join([f"{r['agent']}: {r['harmonized']}" for r in harmonized_results])}
2249
+ Create unified harmonious result."""
2250
+
2251
+ return get_llm_response(synthesis_prompt, npc=coordinator, context=context, **kwargs)
2252
+
2253
+ else:
2254
+
2255
+ items_text = chr(10).join([f"{i+1}. {item}" for i, item in enumerate(items)])
2256
+ harmony_prompt = f"""Harmonize these items: {items_text}
2257
+ Task: {prompt}
2258
+ Rules: {', '.join(harmony_rules or ['maintain_consistency'])}
2259
+ Context: {context}"""
2260
+
2261
+ return get_llm_response(harmony_prompt, model=model, provider=provider, npc=npc, context=context, **kwargs)
2262
+
2263
+ def orchestrate(
2264
+ prompt: str,
2265
+ items: List[str],
2266
+ model: str = None,
2267
+ provider: str = None,
2268
+ npc: Any = None,
2269
+ team: Any = None,
2270
+ workflow: str = "sequential_coordination",
2271
+ context: str = None,
2272
+ **kwargs
2273
+ ) -> Dict[str, Any]:
2274
+ """Orchestrate using team.orchestrate method"""
2275
+
2276
+ if team and hasattr(team, 'orchestrate'):
2277
+
2278
+ orchestration_request = f"""Orchestrate workflow: {workflow}
2279
+ Task: {prompt}
2280
+ Items: {chr(10).join([f'- {item}' for item in items])}
2281
+ Context: {context}"""
2282
+
2283
+ return team.orchestrate(orchestration_request)
2284
+
981
2285
  else:
982
- print("Error: Invalid action in LLM response")
983
- return {"messages": messages, "output": "Error: Invalid action in LLM response"}
2286
+
2287
+ items_text = chr(10).join([f"{i+1}. {item}" for i, item in enumerate(items)])
2288
+ orchestrate_prompt = f"""Orchestrate using {workflow}:
2289
+ Task: {prompt}
2290
+ Items: {items_text}
2291
+ Context: {context}"""
2292
+
2293
+ return get_llm_response(orchestrate_prompt, model=model, provider=provider, npc=npc, context=context, **kwargs)
2294
+
2295
+ def spread_and_sync(
2296
+ prompt: str,
2297
+ variations: List[str],
2298
+ model: str = None,
2299
+ provider: str = None,
2300
+ npc: Any = None,
2301
+ team: Any = None,
2302
+ sync_strategy: str = "consensus",
2303
+ context: str = None,
2304
+ **kwargs
2305
+ ) -> Dict[str, Any]:
2306
+ """Spread across agents/variations then sync with distribution analysis"""
2307
+
2308
+ if team and hasattr(team, 'npcs') and len(team.npcs) >= len(variations):
2309
+
2310
+ agents = list(team.npcs.values())[:len(variations)]
2311
+ results = []
2312
+
2313
+ for variation, agent in zip(variations, agents):
2314
+ variation_prompt = f"""Analyze from {variation} perspective:
2315
+ Task: {prompt}
2316
+ Context: {context}
2317
+ Apply your expertise with {variation} approach."""
2318
+
2319
+ response = get_llm_response(variation_prompt, npc=agent, context=context, **kwargs)
2320
+ results.append({
2321
+ 'agent': agent.name,
2322
+ 'variation': variation,
2323
+ 'response': response.get("response", "")
2324
+ })
2325
+ else:
2326
+
2327
+ results = []
2328
+ agent = npc or (team.get_forenpc() if team else None)
2329
+
2330
+ for variation in variations:
2331
+ variation_prompt = f"""Analyze from {variation} perspective:
2332
+ Task: {prompt}
2333
+ Context: {context}"""
2334
+
2335
+ response = get_llm_response(variation_prompt, model=model, provider=provider, npc=agent, context=context, **kwargs)
2336
+ results.append({
2337
+ 'variation': variation,
2338
+ 'response': response.get("response", "")
2339
+ })
2340
+
2341
+
2342
+ response_texts = [r['response'] for r in results]
2343
+ return synthesize(prompt, model=model, provider=provider, npc=npc or (team.get_forenpc() if team else None), context=context, responses=response_texts, sync_strategy=sync_strategy)
2344
+
2345
+ def criticize(
2346
+ prompt: str,
2347
+ model: str = None,
2348
+ provider: str = None,
2349
+ npc: Any = None,
2350
+ team: Any = None,
2351
+ context: str = None,
2352
+ **kwargs
2353
+ ) -> Dict[str, Any]:
2354
+ """Provide critical analysis and constructive criticism"""
2355
+ critique_prompt = f"""
2356
+ Provide a critical analysis and constructive criticism of the following:
2357
+ {prompt}
2358
+
2359
+ Focus on identifying weaknesses, potential improvements, and alternative approaches.
2360
+ Be specific and provide actionable feedback.
2361
+ """
2362
+
2363
+ return get_llm_response(
2364
+ critique_prompt,
2365
+ model=model,
2366
+ provider=provider,
2367
+ npc=npc,
2368
+ team=team,
2369
+ context=context,
2370
+ **kwargs
2371
+ )
2372
+ def synthesize(
2373
+ prompt: str,
2374
+ model: str = None,
2375
+ provider: str = None,
2376
+ npc: Any = None,
2377
+ team: Any = None,
2378
+ context: str = None,
2379
+ **kwargs
2380
+ ) -> Dict[str, Any]:
2381
+ """Synthesize information from multiple sources or perspectives"""
2382
+
2383
+ # Extract responses from kwargs if provided, otherwise use prompt as single response
2384
+ responses = kwargs.get('responses', [prompt])
2385
+ sync_strategy = kwargs.get('sync_strategy', 'consensus')
2386
+
2387
+ # If we have multiple responses, create a synthesis prompt
2388
+ if len(responses) > 1:
2389
+ synthesis_prompt = f"""Synthesize these multiple perspectives:
2390
+
2391
+ {chr(10).join([f'Response {i+1}: {r}' for i, r in enumerate(responses)])}
2392
+
2393
+ Synthesis strategy: {sync_strategy}
2394
+ Context: {context}
2395
+
2396
+ Create a coherent synthesis that incorporates key insights from all perspectives."""
2397
+ else:
2398
+ # For single response, just summarize/refine it
2399
+ synthesis_prompt = f"""Refine and synthesize this content:
2400
+
2401
+ {responses[0]}
2402
+
2403
+ Context: {context}
2404
+
2405
+ Create a clear, concise synthesis that captures the essence of the content."""
2406
+
2407
+ return get_llm_response(
2408
+ synthesis_prompt,
2409
+ model=model,
2410
+ provider=provider,
2411
+ npc=npc,
2412
+ team=team,
2413
+ context=context,
2414
+ **kwargs
2415
+ )
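A closing sketch tying the fan-out/fan-in helpers together: spread_and_sync runs the same task under several perspectives and then reduces the answers through synthesize. The import path, variation names, and model settings are illustrative assumptions, not taken from this diff.

# Usage sketch (assumed import path and model settings).
from npcpy.llm_funcs import spread_and_sync

result = spread_and_sync(
    "Review this schema migration plan",
    variations=["performance", "data-integrity", "rollback-safety"],
    model="llama3.2",
    provider="ollama",
    sync_strategy="consensus",
)
print(result.get("response"))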