npcpy 1.1.28-py3-none-any.whl → 1.2.32-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. npcpy/data/audio.py +16 -38
  2. npcpy/data/image.py +29 -29
  3. npcpy/data/load.py +4 -3
  4. npcpy/data/text.py +28 -28
  5. npcpy/data/video.py +6 -6
  6. npcpy/data/web.py +49 -21
  7. npcpy/ft/__init__.py +0 -0
  8. npcpy/ft/diff.py +110 -0
  9. npcpy/ft/ge.py +115 -0
  10. npcpy/ft/memory_trainer.py +171 -0
  11. npcpy/ft/model_ensembler.py +357 -0
  12. npcpy/ft/rl.py +360 -0
  13. npcpy/ft/sft.py +248 -0
  14. npcpy/ft/usft.py +128 -0
  15. npcpy/gen/audio_gen.py +24 -0
  16. npcpy/gen/embeddings.py +13 -13
  17. npcpy/gen/image_gen.py +37 -15
  18. npcpy/gen/response.py +287 -111
  19. npcpy/gen/video_gen.py +10 -9
  20. npcpy/llm_funcs.py +447 -79
  21. npcpy/memory/command_history.py +201 -48
  22. npcpy/memory/kg_vis.py +74 -74
  23. npcpy/memory/knowledge_graph.py +482 -115
  24. npcpy/memory/memory_processor.py +81 -0
  25. npcpy/memory/search.py +70 -70
  26. npcpy/mix/debate.py +192 -3
  27. npcpy/npc_compiler.py +1541 -879
  28. npcpy/npc_sysenv.py +250 -78
  29. npcpy/serve.py +1036 -321
  30. npcpy/sql/ai_function_tools.py +257 -0
  31. npcpy/sql/database_ai_adapters.py +186 -0
  32. npcpy/sql/database_ai_functions.py +163 -0
  33. npcpy/sql/model_runner.py +19 -19
  34. npcpy/sql/npcsql.py +706 -507
  35. npcpy/sql/sql_model_compiler.py +156 -0
  36. npcpy/tools.py +20 -20
  37. npcpy/work/plan.py +8 -8
  38. npcpy/work/trigger.py +3 -3
  39. {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/METADATA +169 -9
  40. npcpy-1.2.32.dist-info/RECORD +54 -0
  41. npcpy-1.1.28.dist-info/RECORD +0 -40
  42. {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/WHEEL +0 -0
  43. {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/licenses/LICENSE +0 -0
  44. {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/top_level.txt +0 -0
npcpy/llm_funcs.py CHANGED
@@ -15,6 +15,8 @@ from npcpy.gen.response import get_litellm_response
 from npcpy.gen.image_gen import generate_image
 from npcpy.gen.video_gen import generate_video_diffusers, generate_video_veo3
 
+from datetime import datetime
+
 def gen_image(
     prompt: str,
     model: str = None,
@@ -24,6 +26,8 @@ def gen_image(
     width: int = 1024,
     n_images: int=1,
     input_images: List[Union[str, bytes, PIL.Image.Image]] = None,
+    save = False,
+    filename = '',
 ):
     """This function generates an image using the specified provider and model.
     Args:
@@ -48,7 +52,7 @@ def gen_image(
         if npc.api_url is not None:
             api_url = npc.api_url
 
-    image = generate_image(
+    images = generate_image(
         prompt=prompt,
         model=model,
         provider=provider,
@@ -58,7 +62,14 @@ def gen_image(
         n_images=n_images,
 
     )
-    return image
+    if save:
+        if len(filename) == 0 :
+            todays_date = datetime.now().strftime("%Y-%m-%d")
+            filename = 'vixynt_gen'
+        for i, image in enumerate(images):
+
+            image.save(filename+'_'+str(i)+'.png')
+    return images
 
 
 def gen_video(
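With the new `save` and `filename` parameters, `gen_image` now returns a list of images and can optionally write them to disk. A minimal usage sketch; the model and provider names below are placeholders, not taken from this diff:

```python
from npcpy.llm_funcs import gen_image

# save=True writes each returned image to <filename>_<i>.png;
# an empty filename falls back to the 'vixynt_gen' prefix.
images = gen_image(
    "a watercolor fox in a pine forest",
    model="gpt-image-1",    # placeholder model name
    provider="openai",      # placeholder provider
    n_images=2,
    save=True,
    filename="fox_sketch",
)
```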
@@ -94,6 +105,7 @@ def gen_video(
         try:
             output_path = generate_video_veo3(
                 prompt=prompt,
+                model=model,
                 negative_prompt=negative_prompt,
                 output_path=output_path,
             )
@@ -107,7 +119,7 @@ def gen_video(
         provider = "diffusers"
 
     if provider == "diffusers" or provider is None:
-        # Use diffusers as default/fallback
+
         output_path = generate_video_diffusers(
             prompt,
             model,
@@ -143,6 +155,7 @@ def get_llm_response(
     context=None,
     stream: bool = False,
     attachments: List[str] = None,
+    include_usage: bool = False,
     **kwargs,
 ):
     """This function generates a response using the specified provider and model.
@@ -190,7 +203,7 @@ def get_llm_response(
         system_message = get_system_message(npc, team)
     else:
         system_message = "You are a helpful assistant."
-    #print(system_message)
+
 
     if context is not None:
         context_str = f'User Provided Context: {context}'
@@ -208,6 +221,8 @@ def get_llm_response(
     elif prompt:
         messages.append({"role": "user",
                          "content": prompt + context_str})
+    #import pdb
+    #pdb.set_trace()
     response = get_litellm_response(
         prompt + context_str,
         messages=messages,
@@ -218,6 +233,7 @@ def get_llm_response(
         images=images,
         attachments=attachments,
         stream=stream,
+        include_usage=include_usage,
         **kwargs,
     )
     return response
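The `include_usage` flag added above is simply forwarded from `get_llm_response` to `get_litellm_response`. A hedged sketch of a call that requests usage reporting; model and provider are placeholders, and the exact shape of any usage data in the returned dict is not shown in this diff:

```python
from npcpy.llm_funcs import get_llm_response

result = get_llm_response(
    "Summarize the 1.2.x changes to llm_funcs in two sentences.",
    model="llama3.2",      # placeholder model
    provider="ollama",     # placeholder provider
    include_usage=True,    # new flag, passed through to get_litellm_response
)
print(result["response"])
```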
@@ -254,7 +270,7 @@ def execute_llm_command(
     attempt = 0
     subcommands = []
 
-    # Create context from retrieved documents
+
     context = ""
     while attempt < max_attempts:
         prompt = f"""
@@ -297,7 +313,7 @@ def execute_llm_command(
         """
 
         messages.append({"role": "user", "content": prompt})
-        # print(messages, stream)
+
         response = get_llm_response(
             prompt,
             model=model,
@@ -363,7 +379,6 @@ def execute_llm_command(
         "messages": messages,
         "output": "Max attempts reached. Unable to execute the command successfully.",
     }
-
 
 def handle_jinx_call(
     command: str,
@@ -376,6 +391,7 @@ def handle_jinx_call(
     n_attempts=3,
     attempt=0,
     context=None,
+    extra_globals=None, # ADD THIS
     **kwargs
 ) -> Union[str, Dict[str, Any]]:
     """This function handles a jinx call.
@@ -396,8 +412,8 @@ def handle_jinx_call(
         return f"No jinxs are available. "
     else:
 
-        #print(npc, team)
-        #print(team.jinxs_dict, npc.jinxs_dict)
+
+
         if jinx_name not in npc.jinxs_dict and jinx_name not in team.jinxs_dict:
             print(f"Jinx {jinx_name} not available")
             if attempt < n_attempts:
@@ -502,25 +518,25 @@ def handle_jinx_call(
                     response_text.replace("```json", "").replace("```", "").strip()
                 )
 
-                # Parse the cleaned response
+
                 if isinstance(response_text, dict):
                     input_values = response_text
                 else:
                     input_values = json.loads(response_text)
-                # print(f"Extracted inputs: {input_values}")
+
             except json.JSONDecodeError as e:
                 print(f"Error decoding input values: {e}. Raw response: {response}")
                 return f"Error extracting inputs for jinx '{jinx_name}'"
-            # Input validation (example):
+
             required_inputs = jinx.inputs
             missing_inputs = []
             for inp in required_inputs:
                 if not isinstance(inp, dict):
-                    # dicts contain the keywords so its fine if theyre missing from the inputs.
+
                     if inp not in input_values or input_values[inp] == "":
                         missing_inputs.append(inp)
             if len(missing_inputs) > 0:
-                # print(f"Missing required inputs for jinx '{jinx_name}': {missing_inputs}")
+
                 if attempt < n_attempts:
                     print(f"attempt {attempt+1} to generate inputs failed, trying again")
                     print("missing inputs", missing_inputs)
@@ -553,6 +569,8 @@ def handle_jinx_call(
                     jinja_env,
                     npc=npc,
                     messages=messages,
+                    extra_globals=extra_globals # ADD THIS
+
                 )
             except Exception as e:
                 print(f"An error occurred while executing the jinx: {e}")
@@ -576,7 +594,7 @@ def handle_jinx_call(
             render_markdown(f""" ## jinx OUTPUT FROM CALLING {jinx_name} \n \n output:{jinx_output['output']}""" )
             response = get_llm_response(f"""
             The user had the following request: {command}.
-            Here were the jinx outputs from calling {jinx_name}: {jinx_output}
+            Here were the jinx outputs from calling {jinx_name}: {jinx_output.get('output', '')}
 
             Given the jinx outputs and the user request, please format a simple answer that
             provides the answer without requiring the user to carry out any further steps.
@@ -635,8 +653,8 @@ def handle_request_input(
     )
     return user_input
 
-### Following functions primarily support check_llm_command's procedure which was
-### broken up into smaller functions for clarity and modularity.
+
+
 
 
 def jinx_handler(command, extracted_data, **kwargs):
@@ -649,10 +667,10 @@ def jinx_handler(command, extracted_data, **kwargs):
         api_key=kwargs.get('api_key'),
         messages=kwargs.get('messages'),
         npc=kwargs.get('npc'),
-        team = kwargs.get('team'),
+        team=kwargs.get('team'),
         stream=kwargs.get('stream'),
-
-        context=kwargs.get('context')
+        context=kwargs.get('context'),
+        extra_globals=kwargs.get('extra_globals') # ADD THIS
     )
 
 def answer_handler(command, extracted_data, **kwargs):
@@ -699,6 +717,7 @@ def check_llm_command(
     stream=False,
     context=None,
     actions: Dict[str, Dict] = None,
+    extra_globals=None,
 ):
     """This function checks an LLM command and returns sequences of steps with parallel actions."""
     if messages is None:
@@ -719,11 +738,12 @@ def check_llm_command(
         stream=stream,
         context=context,
         actions=actions,
+        extra_globals=extra_globals,
 
     )
     return exec
 
-# Define `DEFAULT_ACTION_SPACE`
+
 
 
 def jinx_context_filler(npc, team):
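`extra_globals` now rides from `check_llm_command` through the multi-step plan execution and `jinx_handler` into `handle_jinx_call`, so jinx execution can see objects supplied by the caller. A rough sketch under that assumption; the DataFrame and the model/provider names are placeholders:

```python
import pandas as pd

from npcpy.llm_funcs import check_llm_command

df = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})

result = check_llm_command(
    "plot y against x for the dataframe I loaded",
    model="llama3.2",           # placeholder model/provider
    provider="ollama",
    extra_globals={"df": df},   # objects the executed jinx code can reference
)
```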
@@ -737,7 +757,7 @@ def jinx_context_filler(npc, team):
     Returns:
         str: Formatted string containing jinx information and usage guidelines
     """
-    # Generate NPC jinxs listing
+
     npc_jinxs = "\nNPC Jinxs:\n" + (
         "\n".join(
             f"- {name}: {jinx.description}"
@@ -747,7 +767,7 @@ def jinx_context_filler(npc, team):
         else ''
     )
 
-    # Generate team jinxs listing
+
     team_jinxs = "\n\nTeam Jinxs:\n" + (
         "\n".join(
             f"- {name}: {jinx.description}"
@@ -757,7 +777,7 @@ def jinx_context_filler(npc, team):
         else ''
     )
 
-    # Guidelines for jinx usage
+
     usage_guidelines = """
 Use jinxs when appropriate. For example:
 
@@ -817,7 +837,7 @@ DEFAULT_ACTION_SPACE = {
             }
         }
     },
-    "answer_question": {
+    "answer": {
         "description": "Provide a direct informative answer",
         "handler": answer_handler,
         "context": """For general questions, use existing knowledge. For most queries a single action to answer a question will be sufficient.
@@ -826,7 +846,7 @@ e.g.
 {
     "actions": [
         {
-            "action": "answer_question",
+            "action": "answer",
             "explanation": "Provide a direct answer to the user's question based on existing knowledge."
 
 
@@ -839,7 +859,7 @@ Starting dialogue is usually more useful than using tools willynilly. Think care
 the user's intent and use this action as an opportunity to clear up potential ambiguities before
 proceeding to more complex actions.
 For example, if a user requests to write a story,
-it is better to respond with 'answer_question' and to write them a story rather than to invoke some tool.
+it is better to respond with 'answer' and to write them a story rather than to invoke some tool.
 Indeed, it might be even better to respond and to request clarification about what other elements they would liek to specify with the story.
 Natural language is highly ambiguous and it is important to establish common ground and priorities before proceeding to more complex actions.
 
@@ -858,6 +878,7 @@ def plan_multi_step_actions(
     api_key: str = None,
     context: str = None,
     messages: List[Dict[str, str]] = None,
+
 
 ):
     """
@@ -873,12 +894,13 @@ Your task is to create a complete, sequential JSON plan to fulfill the entire re
 Use the following context about available actions and tools to construct the plan.
 
 """
-
+    if messages == None:
+        messages = list()
     for action_name, action_info in actions.items():
         ctx = action_info.get("context")
         if callable(ctx):
             try:
-                #print(ctx)
+
                 ctx = ctx(npc=npc, team=team)
             except Exception as e:
                 print( actions)
@@ -938,8 +960,8 @@ Respond ONLY with the plan.
         context=context,
     )
     response_content = action_response.get("response", {})
-    #print(action_response)
-    #print(type(action_response))
+
+
     return response_content.get("actions", [])
 
 def execute_multi_step_plan(
@@ -963,7 +985,7 @@ def execute_multi_step_plan(
     between steps for adaptive behavior.
     """
 
-    # 1. Get the complete plan upfront with the corrected call.
+
     planned_actions = plan_multi_step_actions(
         command=command,
         actions=actions,
@@ -976,6 +998,7 @@ def execute_multi_step_plan(
         messages=messages,
         team=team,
 
+
     )
 
     if not planned_actions:
@@ -991,7 +1014,8 @@ def execute_multi_step_plan(
             stream=stream,
             team = team,
             images=images,
-            context=context)
+            context=context
+            )
         return {"messages": result.get('messages',
                                        messages),
                 "output": result.get('response')}
@@ -999,7 +1023,7 @@ def execute_multi_step_plan(
 
     step_outputs = []
     current_messages = messages.copy()
-    render_markdown(f"### Plan for Command: {command[100:]}")
+    render_markdown(f"### Plan for Command: {command[:100]}")
     for action in planned_actions:
         step_info = json.dumps({'action': action.get('action', ''),
                                 'explanation': str(action.get('explanation',''))[0:10]+'...'})
@@ -1010,39 +1034,60 @@ def execute_multi_step_plan(
     for i, action_data in enumerate(planned_actions):
         render_markdown(f"--- Executing Step {i + 1} of {len(planned_actions)} ---")
         action_name = action_data["action"]
-        handler = actions[action_name]["handler"]
-
-        # re-implement the yielding
-        step_context = f"Context from previous steps: {json.dumps(step_outputs)}" if step_outputs else ""
-        render_markdown(
-            f"- Executing Action: {action_name} \n- Explanation: {action_data.get('explanation')}\n "
-        )
-
-        result = handler(
-            command=command,
-            extracted_data=action_data,
-            model=model,
-            provider=provider,
-            api_url=api_url,
-            api_key=api_key,
-            messages=current_messages,
-            npc=npc,
-            team=team,
-            stream=stream,
-
-            context=context+step_context,
-            images=images
-        )
+
+
+        try:
+            handler = actions[action_name]["handler"]
 
 
+
+            step_context = f"Context from previous steps: {json.dumps(step_outputs)}" if step_outputs else ""
+            render_markdown(
+                f"- Executing Action: {action_name} \n- Explanation: {action_data.get('explanation')}\n "
+            )
+
+            result = handler(
+                command=command,
+                extracted_data=action_data,
+                model=model,
+                provider=provider,
+                api_url=api_url,
+                api_key=api_key,
+                messages=current_messages,
+                npc=npc,
+                team=team,
+                stream=stream,
+                context=context+step_context,
+                images=images,
+                extra_globals=kwargs.get('extra_globals') # ADD THIS
+            )
+        except KeyError as e:
+
+            return execute_multi_step_plan(
+                command=command + 'This error occurred: '+str(e)+'\n Do not make the same mistake again. If you are intending to use a jinx, you must `invoke_jinx`. If you just need to answer, choose `answer`.',
+                model= model,
+                provider = provider,
+                api_url = api_url,
+                api_key = api_key,
+                npc = npc,
+                team = team,
+                messages = messages,
+                images = images,
+                stream=stream,
+                context=context,
+                actions=actions,
+
+                **kwargs,
+            )
+
         action_output = result.get('output') or result.get('response')
 
         if stream and len(planned_actions) > 1:
-            # If streaming, we need to process the output with markdown rendering
+
             action_output = print_and_process_stream_with_markdown(action_output, model, provider)
         elif len(planned_actions) == 1:
-            # If streaming and only one action, we can directly return the output
-            # can circumvent because compile sequence results just returns single output results.
+
+
             return {"messages": result.get('messages',
                                            current_messages),
                     "output": action_output}
@@ -1050,8 +1095,8 @@ def execute_multi_step_plan(
         current_messages = result.get('messages',
                                       current_messages)
 
-        # render_markdown('## Reviewing output...')
-        # need tot replace with a review step actually
+
+
     final_output = compile_sequence_results(
         original_command=command,
         outputs=step_outputs,
@@ -1109,7 +1154,7 @@ Final Synthesized Response that addresses the user in a polite and informative m
     synthesized = response.get("response", "")
     if synthesized:
         return synthesized
-    return '\n'.join(outputs) # Fallback to joining outputs if synthesis fails
+    return '\n'.join(outputs)
 
 
 
@@ -1176,8 +1221,8 @@ JSON response:
 
 
 
-### Functions for knowledge extraction using get_llm_response
-### primarily used in memory.knowledge_graph but are general enough
+
+
 
 
 
@@ -1318,7 +1363,7 @@ def generate_group_candidates(
     else:
         item_subset = items
 
-    # --- PROMPT MODIFICATION: Focus on semantic essence, avoid gerunds/adverbs, favor subjects ---
+
     prompt = f"""From the following {item_type}, identify specific and relevant conceptual groups.
 Think about the core subject or entity being discussed.
 
@@ -1345,7 +1390,7 @@ def generate_group_candidates(
     "groups": ["list of specific, precise, and relevant group names"]
 }}
 """
-    # --- END PROMPT MODIFICATION ---
+
 
     response = get_llm_response(
         prompt,
@@ -1547,7 +1592,7 @@ def extract_facts(
     context: str = None
 ) -> List[str]:
     """Extract concise facts from text using LLM (as defined earlier)"""
-    # Implementation from your previous code
+
     prompt = """Extract concise facts from this text.
 A fact is a piece of information that makes a statement about the world.
 A fact is typically a sentence that is true or false.
@@ -1642,6 +1687,9 @@ def get_facts(content_text,
               provider = None,
               npc=None,
               context : str=None,
+              attempt_number=1,
+              n_attempts=3,
+
               **kwargs):
     """Extract facts from content text"""
 
@@ -1744,6 +1792,17 @@ def get_facts(content_text,
                                context=context,
                                **kwargs)
 
+    if len(response.get("response", {}).get("facts", [])) == 0 and attempt_number < n_attempts:
+        print(f" Attempt {attempt_number} to extract facts yielded no results. Retrying...")
+        return get_facts(content_text,
+                         model=model,
+                         provider=provider,
+                         npc=npc,
+                         context=context,
+                         attempt_number=attempt_number+1,
+                         n_attempts=n_attempts,
+                         **kwargs)
+
     return response["response"].get("facts", [])
 
 
@@ -1753,6 +1812,8 @@ def zoom_in(facts,
             provider=None,
             npc=None,
             context: str = None,
+            attempt_number: int = 1,
+            n_attempts=3,
             **kwargs):
     """Infer new implied facts from existing facts"""
     valid_facts = []
@@ -1773,16 +1834,16 @@ def zoom_in(facts,
     {facts_text}
 
     What other facts can be reasonably inferred from these?
-
+    """ +"""
     Respond with JSON:
-    {{
+    {
         "implied_facts": [
-            {{
+            {
                 "statement": "new implied fact",
                 "inferred_from": ["which facts this comes from"]
-            }}
+            }
         ]
-    }}
+    }
     """
 
     response = get_llm_response(prompt,
@@ -1792,7 +1853,18 @@ def zoom_in(facts,
                                 context=context,
                                 npc=npc,
                                 **kwargs)
-    return response["response"].get("implied_facts", [])
+
+    facts = response.get("response", {}).get("implied_facts", [])
+    if len(facts) == 0:
+        return zoom_in(valid_facts,
+                       model=model,
+                       provider=provider,
+                       npc=npc,
+                       context=context,
+                       attempt_number=attempt_number+1,
+                       n_tries=n_tries,
+                       **kwargs)
+    return facts
 def generate_groups(facts,
                     model=None,
                     provider=None,
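`get_facts` now retries itself when fact extraction comes back empty, and `zoom_in` gains matching `attempt_number`/`n_attempts` parameters. A hedged sketch of the fact-extraction call; the model, provider, and sample text are placeholders:

```python
from npcpy.llm_funcs import get_facts

facts = get_facts(
    "npcpy 1.2 adds a fine-tuning subpackage and SQL-side AI adapters.",
    model="llama3.2",   # placeholder model/provider
    provider="ollama",
    n_attempts=3,       # retried up to three times if no facts come back
)
print(facts)
```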
@@ -1961,7 +2033,10 @@ def get_related_facts_llm(new_fact_statement,
                           model = None,
                           provider = None,
                           npc = None,
-                          context=''):
+                          attempt_number = 1,
+                          n_attempts = 3,
+                          context='',
+                          **kwargs):
     """Identifies which existing facts are causally or thematically related to a new fact."""
     prompt = f"""
     A new fact has been learned: "{new_fact_statement}"
@@ -1974,12 +2049,25 @@ def get_related_facts_llm(new_fact_statement,
 
     Respond with JSON: {{"related_facts": ["statement of a related fact", ...]}}
     """
-    response = get_llm_response(prompt,
+    response = get_llm_response(prompt,
                                 model=model,
                                 provider=provider,
-                                npc = npc,
                                 format="json",
-                                context=context)
+                                npc=npc,
+                                context=context,
+                                **kwargs)
+    if attempt_number > n_attempts:
+        print(f" Attempt {attempt_number} to find related facts yielded no results. Giving up.")
+        return get_related_facts_llm(new_fact_statement,
+                                     existing_fact_statements,
+                                     model=model,
+                                     provider=provider,
+                                     npc=npc,
+                                     attempt_number=attempt_number+1,
+                                     n_attempts=n_attempts,
+                                     context=context,
+                                     **kwargs)
+
     return response["response"].get("related_facts", [])
 
 def find_best_link_concept_llm(candidate_concept_name,
@@ -2044,4 +2132,284 @@ def asymptotic_freedom(parent_concept,
                                 format="json",
                                 context=context, npc=npc,
                                 **kwargs)
-    return response['response'].get('new_sub_concepts', [])
+    return response['response'].get('new_sub_concepts', [])
+
+
+
+def bootstrap(
+    prompt: str,
+    model: str = None,
+    provider: str = None,
+    npc: Any = None,
+    team: Any = None,
+    sample_params: Dict[str, Any] = None,
+    sync_strategy: str = "consensus",
+    context: str = None,
+    n_samples: int = 3,
+    **kwargs
+) -> Dict[str, Any]:
+    """Bootstrap by sampling multiple agents from team or varying parameters"""
+
+    if team and hasattr(team, 'npcs') and len(team.npcs) >= n_samples:
+
+        sampled_npcs = list(team.npcs.values())[:n_samples]
+        results = []
+
+        for i, agent in enumerate(sampled_npcs):
+            response = get_llm_response(
+                f"Sample {i+1}: {prompt}\nContext: {context}",
+                npc=agent,
+                context=context,
+                **kwargs
+            )
+            results.append({
+                'agent': agent.name,
+                'response': response.get("response", "")
+            })
+    else:
+
+        if sample_params is None:
+            sample_params = {"temperature": [0.3, 0.7, 1.0]}
+
+        results = []
+        for i in range(n_samples):
+            temp = sample_params.get('temperature', [0.7])[i % len(sample_params.get('temperature', [0.7]))]
+            response = get_llm_response(
+                f"Sample {i+1}: {prompt}\nContext: {context}",
+                model=model,
+                provider=provider,
+                npc=npc,
+                temperature=temp,
+                context=context,
+                **kwargs
+            )
+            results.append({
+                'variation': f'temp_{temp}',
+                'response': response.get("response", "")
+            })
+
+
+    response_texts = [r['response'] for r in results]
+    return synthesize(response_texts, sync_strategy, model, provider, npc or (team.forenpc if team else None), context)
+
+def harmonize(
+    prompt: str,
+    items: List[str],
+    model: str = None,
+    provider: str = None,
+    npc: Any = None,
+    team: Any = None,
+    harmony_rules: List[str] = None,
+    context: str = None,
+    agent_roles: List[str] = None,
+    **kwargs
+) -> Dict[str, Any]:
+    """Harmonize using multiple specialized agents"""
+
+    if team and hasattr(team, 'npcs'):
+
+        available_agents = list(team.npcs.values())
+
+        if agent_roles:
+
+            selected_agents = []
+            for role in agent_roles:
+                matching_agent = next((a for a in available_agents if role.lower() in a.name.lower() or role.lower() in a.primary_directive.lower()), None)
+                if matching_agent:
+                    selected_agents.append(matching_agent)
+            agents_to_use = selected_agents or available_agents[:len(items)]
+        else:
+
+            agents_to_use = available_agents[:min(len(items), len(available_agents))]
+
+        harmonized_results = []
+        for i, (item, agent) in enumerate(zip(items, agents_to_use)):
+            harmony_prompt = f"""Harmonize this element: {item}
+            Task: {prompt}
+            Rules: {', '.join(harmony_rules or ['maintain_consistency'])}
+            Context: {context}
+            Your role in harmony: {agent.primary_directive}"""
+
+            response = get_llm_response(
+                harmony_prompt,
+                npc=agent,
+                context=context,
+                **kwargs
+            )
+            harmonized_results.append({
+                'agent': agent.name,
+                'item': item,
+                'harmonized': response.get("response", "")
+            })
+
+
+        coordinator = team.get_forenpc() if team else npc
+        synthesis_prompt = f"""Synthesize these harmonized elements:
+        {chr(10).join([f"{r['agent']}: {r['harmonized']}" for r in harmonized_results])}
+        Create unified harmonious result."""
+
+        return get_llm_response(synthesis_prompt, npc=coordinator, context=context, **kwargs)
+
+    else:
+
+        items_text = chr(10).join([f"{i+1}. {item}" for i, item in enumerate(items)])
+        harmony_prompt = f"""Harmonize these items: {items_text}
+        Task: {prompt}
+        Rules: {', '.join(harmony_rules or ['maintain_consistency'])}
+        Context: {context}"""
+
+        return get_llm_response(harmony_prompt, model=model, provider=provider, npc=npc, context=context, **kwargs)
+
+def orchestrate(
+    prompt: str,
+    items: List[str],
+    model: str = None,
+    provider: str = None,
+    npc: Any = None,
+    team: Any = None,
+    workflow: str = "sequential_coordination",
+    context: str = None,
+    **kwargs
+) -> Dict[str, Any]:
+    """Orchestrate using team.orchestrate method"""
+
+    if team and hasattr(team, 'orchestrate'):
+
+        orchestration_request = f"""Orchestrate workflow: {workflow}
+        Task: {prompt}
+        Items: {chr(10).join([f'- {item}' for item in items])}
+        Context: {context}"""
+
+        return team.orchestrate(orchestration_request)
+
+    else:
+
+        items_text = chr(10).join([f"{i+1}. {item}" for i, item in enumerate(items)])
+        orchestrate_prompt = f"""Orchestrate using {workflow}:
+        Task: {prompt}
+        Items: {items_text}
+        Context: {context}"""
+
+        return get_llm_response(orchestrate_prompt, model=model, provider=provider, npc=npc, context=context, **kwargs)
+
+def spread_and_sync(
+    prompt: str,
+    variations: List[str],
+    model: str = None,
+    provider: str = None,
+    npc: Any = None,
+    team: Any = None,
+    sync_strategy: str = "consensus",
+    context: str = None,
+    **kwargs
+) -> Dict[str, Any]:
+    """Spread across agents/variations then sync with distribution analysis"""
+
+    if team and hasattr(team, 'npcs') and len(team.npcs) >= len(variations):
+
+        agents = list(team.npcs.values())[:len(variations)]
+        results = []
+
+        for variation, agent in zip(variations, agents):
+            variation_prompt = f"""Analyze from {variation} perspective:
+            Task: {prompt}
+            Context: {context}
+            Apply your expertise with {variation} approach."""
+
+            response = get_llm_response(variation_prompt, npc=agent, context=context, **kwargs)
+            results.append({
+                'agent': agent.name,
+                'variation': variation,
+                'response': response.get("response", "")
+            })
+    else:
+
+        results = []
+        agent = npc or (team.get_forenpc() if team else None)
+
+        for variation in variations:
+            variation_prompt = f"""Analyze from {variation} perspective:
+            Task: {prompt}
+            Context: {context}"""
+
+            response = get_llm_response(variation_prompt, model=model, provider=provider, npc=agent, context=context, **kwargs)
+            results.append({
+                'variation': variation,
+                'response': response.get("response", "")
+            })
+
+
+    response_texts = [r['response'] for r in results]
+    return synthesize(response_texts, sync_strategy, model, provider, npc or (team.get_forenpc() if team else None), context)
+
+def criticize(
+    prompt: str,
+    model: str = None,
+    provider: str = None,
+    npc: Any = None,
+    team: Any = None,
+    context: str = None,
+    **kwargs
+) -> Dict[str, Any]:
+    """Provide critical analysis and constructive criticism"""
+    critique_prompt = f"""
+    Provide a critical analysis and constructive criticism of the following:
+    {prompt}
+
+    Focus on identifying weaknesses, potential improvements, and alternative approaches.
+    Be specific and provide actionable feedback.
+    """
+
+    return get_llm_response(
+        critique_prompt,
+        model=model,
+        provider=provider,
+        npc=npc,
+        team=team,
+        context=context,
+        **kwargs
+    )
+def synthesize(
+    prompt: str,
+    model: str = None,
+    provider: str = None,
+    npc: Any = None,
+    team: Any = None,
+    context: str = None,
+    **kwargs
+) -> Dict[str, Any]:
+    """Synthesize information from multiple sources or perspectives"""
+
+    # Extract responses from kwargs if provided, otherwise use prompt as single response
+    responses = kwargs.get('responses', [prompt])
+    sync_strategy = kwargs.get('sync_strategy', 'consensus')
+
+    # If we have multiple responses, create a synthesis prompt
+    if len(responses) > 1:
+        synthesis_prompt = f"""Synthesize these multiple perspectives:
+
+        {chr(10).join([f'Response {i+1}: {r}' for i, r in enumerate(responses)])}
+
+        Synthesis strategy: {sync_strategy}
+        Context: {context}
+
+        Create a coherent synthesis that incorporates key insights from all perspectives."""
+    else:
+        # For single response, just summarize/refine it
+        synthesis_prompt = f"""Refine and synthesize this content:
+
+        {responses[0]}
+
+        Context: {context}
+
+        Create a clear, concise synthesis that captures the essence of the content."""
+
+    return get_llm_response(
+        synthesis_prompt,
+        model=model,
+        provider=provider,
+        npc=npc,
+        team=team,
+        context=context,
+        **kwargs
+    )
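The new module-level helpers (`bootstrap`, `harmonize`, `orchestrate`, `spread_and_sync`, `criticize`, `synthesize`) are coordination layers over `get_llm_response`, with single-NPC fallbacks when no team is passed. A minimal, hedged sketch of the two simplest ones, assuming a locally configured model; the model and provider names are placeholders:

```python
from npcpy.llm_funcs import criticize, orchestrate

draft = "Auto-run every shell command the model proposes."

# criticize() wraps its input in a critique prompt and returns the usual response dict.
review = criticize(draft, model="llama3.2", provider="ollama")   # placeholder model/provider
print(review["response"])

# With no team supplied, orchestrate() falls back to a single prompt that lists the items.
plan = orchestrate(
    "Turn this review into a remediation checklist.",
    items=[draft, review.get("response", "")],
    model="llama3.2",
    provider="ollama",
)
print(plan["response"])
```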