npcsh 1.1.4__py3-none-any.whl → 1.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. npcsh/_state.py +470 -367
  2. npcsh/npc_team/corca_example.png +0 -0
  3. npcsh/npc_team/jinxs/{python_executor.jinx → code/python.jinx} +1 -1
  4. npcsh/npc_team/jinxs/{bash_executer.jinx → code/sh.jinx} +1 -2
  5. npcsh/npc_team/jinxs/code/sql.jinx +16 -0
  6. npcsh/npc_team/jinxs/modes/alicanto.jinx +88 -0
  7. npcsh/npc_team/jinxs/modes/corca.jinx +28 -0
  8. npcsh/npc_team/jinxs/modes/guac.jinx +46 -0
  9. npcsh/npc_team/jinxs/modes/plonk.jinx +57 -0
  10. npcsh/npc_team/jinxs/modes/pti.jinx +28 -0
  11. npcsh/npc_team/jinxs/modes/spool.jinx +40 -0
  12. npcsh/npc_team/jinxs/modes/wander.jinx +81 -0
  13. npcsh/npc_team/jinxs/modes/yap.jinx +25 -0
  14. npcsh/npc_team/jinxs/utils/breathe.jinx +20 -0
  15. npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
  16. npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
  17. npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
  18. npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
  19. npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
  20. npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
  21. npcsh/npc_team/jinxs/{edit_file.jinx → utils/edit_file.jinx} +1 -1
  22. npcsh/npc_team/jinxs/utils/flush.jinx +39 -0
  23. npcsh/npc_team/jinxs/utils/npc-studio.jinx +77 -0
  24. npcsh/npc_team/jinxs/utils/ots.jinx +61 -0
  25. npcsh/npc_team/jinxs/utils/plan.jinx +33 -0
  26. npcsh/npc_team/jinxs/utils/roll.jinx +66 -0
  27. npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
  28. npcsh/npc_team/jinxs/utils/search.jinx +130 -0
  29. npcsh/npc_team/jinxs/utils/serve.jinx +29 -0
  30. npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
  31. npcsh/npc_team/jinxs/utils/trigger.jinx +36 -0
  32. npcsh/npc_team/jinxs/utils/vixynt.jinx +117 -0
  33. npcsh/npcsh.py +13 -11
  34. npcsh/routes.py +97 -1419
  35. npcsh-1.1.6.data/data/npcsh/npc_team/alicanto.jinx +88 -0
  36. npcsh-1.1.6.data/data/npcsh/npc_team/breathe.jinx +20 -0
  37. npcsh-1.1.6.data/data/npcsh/npc_team/build.jinx +65 -0
  38. npcsh-1.1.6.data/data/npcsh/npc_team/compile.jinx +50 -0
  39. npcsh-1.1.6.data/data/npcsh/npc_team/corca.jinx +28 -0
  40. npcsh-1.1.6.data/data/npcsh/npc_team/corca_example.png +0 -0
  41. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/edit_file.jinx +1 -1
  42. npcsh-1.1.6.data/data/npcsh/npc_team/flush.jinx +39 -0
  43. npcsh-1.1.6.data/data/npcsh/npc_team/guac.jinx +46 -0
  44. npcsh-1.1.6.data/data/npcsh/npc_team/help.jinx +52 -0
  45. npcsh-1.1.6.data/data/npcsh/npc_team/init.jinx +41 -0
  46. npcsh-1.1.6.data/data/npcsh/npc_team/jinxs.jinx +32 -0
  47. npcsh-1.1.6.data/data/npcsh/npc_team/npc-studio.jinx +77 -0
  48. npcsh-1.1.6.data/data/npcsh/npc_team/ots.jinx +61 -0
  49. npcsh-1.1.6.data/data/npcsh/npc_team/plan.jinx +33 -0
  50. npcsh-1.1.6.data/data/npcsh/npc_team/plonk.jinx +57 -0
  51. npcsh-1.1.6.data/data/npcsh/npc_team/pti.jinx +28 -0
  52. npcsh-1.1.4.data/data/npcsh/npc_team/python_executor.jinx → npcsh-1.1.6.data/data/npcsh/npc_team/python.jinx +1 -1
  53. npcsh-1.1.6.data/data/npcsh/npc_team/roll.jinx +66 -0
  54. npcsh-1.1.6.data/data/npcsh/npc_team/sample.jinx +56 -0
  55. npcsh-1.1.6.data/data/npcsh/npc_team/search.jinx +130 -0
  56. npcsh-1.1.6.data/data/npcsh/npc_team/serve.jinx +29 -0
  57. npcsh-1.1.6.data/data/npcsh/npc_team/set.jinx +40 -0
  58. npcsh-1.1.4.data/data/npcsh/npc_team/bash_executer.jinx → npcsh-1.1.6.data/data/npcsh/npc_team/sh.jinx +1 -2
  59. npcsh-1.1.6.data/data/npcsh/npc_team/sleep.jinx +116 -0
  60. npcsh-1.1.6.data/data/npcsh/npc_team/spool.jinx +40 -0
  61. npcsh-1.1.6.data/data/npcsh/npc_team/sql.jinx +16 -0
  62. npcsh-1.1.6.data/data/npcsh/npc_team/trigger.jinx +36 -0
  63. npcsh-1.1.6.data/data/npcsh/npc_team/vixynt.jinx +117 -0
  64. npcsh-1.1.6.data/data/npcsh/npc_team/wander.jinx +81 -0
  65. npcsh-1.1.6.data/data/npcsh/npc_team/yap.jinx +25 -0
  66. {npcsh-1.1.4.dist-info → npcsh-1.1.6.dist-info}/METADATA +1 -10
  67. npcsh-1.1.6.dist-info/RECORD +124 -0
  68. npcsh/npc_team/jinxs/image_generation.jinx +0 -29
  69. npcsh/npc_team/jinxs/internet_search.jinx +0 -31
  70. npcsh/npc_team/jinxs/kg_search.jinx +0 -43
  71. npcsh/npc_team/jinxs/memory_search.jinx +0 -36
  72. npcsh/npc_team/jinxs/screen_cap.jinx +0 -25
  73. npcsh-1.1.4.data/data/npcsh/npc_team/image_generation.jinx +0 -29
  74. npcsh-1.1.4.data/data/npcsh/npc_team/internet_search.jinx +0 -31
  75. npcsh-1.1.4.data/data/npcsh/npc_team/kg_search.jinx +0 -43
  76. npcsh-1.1.4.data/data/npcsh/npc_team/memory_search.jinx +0 -36
  77. npcsh-1.1.4.data/data/npcsh/npc_team/screen_cap.jinx +0 -25
  78. npcsh-1.1.4.dist-info/RECORD +0 -78
  79. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/alicanto.npc +0 -0
  80. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/alicanto.png +0 -0
  81. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/corca.npc +0 -0
  82. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/corca.png +0 -0
  83. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/foreman.npc +0 -0
  84. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/frederic.npc +0 -0
  85. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/frederic4.png +0 -0
  86. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/guac.png +0 -0
  87. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
  88. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/kadiefa.png +0 -0
  89. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  90. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  91. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/plonk.npc +0 -0
  92. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/plonk.png +0 -0
  93. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
  94. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/plonkjr.png +0 -0
  95. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  96. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/sibiji.png +0 -0
  97. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/spool.png +0 -0
  98. {npcsh-1.1.4.data → npcsh-1.1.6.data}/data/npcsh/npc_team/yap.png +0 -0
  99. {npcsh-1.1.4.dist-info → npcsh-1.1.6.dist-info}/WHEEL +0 -0
  100. {npcsh-1.1.4.dist-info → npcsh-1.1.6.dist-info}/entry_points.txt +0 -0
  101. {npcsh-1.1.4.dist-info → npcsh-1.1.6.dist-info}/licenses/LICENSE +0 -0
  102. {npcsh-1.1.4.dist-info → npcsh-1.1.6.dist-info}/top_level.txt +0 -0
Binary file
@@ -1,4 +1,4 @@
- jinx_name: python_executor
+ jinx_name: python
  description: Execute scripts with python. You must set the ultimate result as the "output"
  variable. It MUST be a string.
  Do not add unnecessary print statements.
@@ -1,8 +1,7 @@
- jinx_name: bash_executor
+ jinx_name: sh
  description: Execute bash queries. Should be used to grep for file contents, list directories, explore information to answer user questions more practically.
  inputs:
  - bash_command
- - user_request
  steps:
  - engine: python
  code: |
@@ -0,0 +1,16 @@
+ jinx_name: sql_executor
+ description: Execute queries on the ~/npcsh_history.db to pull data. The database
+ contains only information about conversations and other user-provided data. It does
+ not store any information about individual files. Avoid using percent signs unless absolutely necessary.
+ inputs:
+ - sql_query
+ steps:
+ - engine: python
+ code: |
+ import pandas as pd
+ query = "{{ sql_query }}"
+ try:
+ df = pd.read_sql_query(query, npc.db_conn)
+ except Exception as e:
+ df = pd.DataFrame({'Error': [str(e)]})
+ output = df.to_string()
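For orientation, a minimal standalone sketch of what this sql_executor step amounts to once the {{ sql_query }} template is rendered; the sqlite3 connection below is only a stand-in for the npc.db_conn handle the jinx runtime supplies, and the query is illustrative:

    import sqlite3
    import pandas as pd

    # Stand-in for npc.db_conn; the real jinx uses the connection provided by the runtime.
    conn = sqlite3.connect("npcsh_history.db")
    query = "SELECT name FROM sqlite_master WHERE type='table'"  # example query
    try:
        df = pd.read_sql_query(query, conn)
    except Exception as e:
        df = pd.DataFrame({'Error': [str(e)]})
    output = df.to_string()
    print(output)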
@@ -0,0 +1,88 @@
+ jinx_name: "alicanto"
+ description: "Conduct deep research with multiple perspectives, identifying gold insights and cliff warnings"
+ inputs:
+ - query: "" # Required research query.
+ - num_npcs: 5 # Number of NPCs to involve in research.
+ - depth: 3 # Depth of research.
+ - model: "" # LLM model to use. Defaults to NPCSH_CHAT_MODEL or NPC's model.
+ - provider: "" # LLM provider to use. Defaults to NPCSH_CHAT_PROVIDER or NPC's provider.
+ - max_steps: 20 # Maximum number of steps in Alicanto research.
+ - skip_research: True # Whether to skip the research phase.
+ - exploration: "" # Exploration factor (float).
+ - creativity: "" # Creativity factor (float).
+ - format: "" # Output format (report, summary, full).
+ steps:
+ - name: "conduct_alicanto_research"
+ engine: "python"
+ code: |
+ import traceback
+ import logging
+ from npcsh.alicanto import alicanto
+ # Assuming NPCSH_CHAT_MODEL and NPCSH_CHAT_PROVIDER are accessible
+
+ query = context.get('query')
+ num_npcs = int(context.get('num_npcs', 5)) # Ensure int type
+ depth = int(context.get('depth', 3)) # Ensure int type
+ llm_model = context.get('model')
+ llm_provider = context.get('provider')
+ max_steps = int(context.get('max_steps', 20)) # Ensure int type
+ skip_research = context.get('skip_research', True)
+ exploration_factor = context.get('exploration')
+ creativity_factor = context.get('creativity')
+ output_format = context.get('format')
+ output_messages = context.get('messages', [])
+ current_npc = context.get('npc')
+
+ if not query or not query.strip():
+ context['output'] = "Usage: /alicanto <research query> [--num-npcs N] [--depth N] [--exploration 0.3] [--creativity 0.5] [--format report|summary|full]"
+ context['messages'] = output_messages
+ exit()
+
+ # Fallback for model/provider if not explicitly set in Jinx inputs
+ if not llm_model and current_npc and current_npc.model:
+ llm_model = current_npc.model
+ if not llm_provider and current_npc and current_npc.provider:
+ llm_provider = current_npc.provider
+
+ # Final fallbacks (these would ideally come from npcsh._state config)
+ # Assuming NPCSH_CHAT_MODEL and NPCSH_CHAT_PROVIDER exist and are imported implicitly or set by environment
+ # Hardcoding defaults for demonstration if not available through NPC or _state
+ if not llm_model: llm_model = "gemini-1.5-pro"
+ if not llm_provider: llm_provider = "gemini"
+
+ try:
+ logging.info(f"Starting Alicanto research on: {query}")
+
+ alicanto_kwargs = {
+ 'query': query,
+ 'num_npcs': num_npcs,
+ 'depth': depth,
+ 'model': llm_model,
+ 'provider': llm_provider,
+ 'max_steps': max_steps,
+ 'skip_research': skip_research,
+ }
+
+ if exploration_factor: alicanto_kwargs['exploration_factor'] = float(exploration_factor)
+ if creativity_factor: alicanto_kwargs['creativity_factor'] = float(creativity_factor)
+ if output_format: alicanto_kwargs['output_format'] = output_format
+
+ result = alicanto(**alicanto_kwargs)
+
+ output_result = ""
+ if isinstance(result, dict):
+ if "integration" in result:
+ output_result = result["integration"]
+ else:
+ output_result = "Alicanto research completed. Full results available in returned data."
+ else:
+ output_result = str(result)
+
+ context['output'] = output_result
+ context['messages'] = output_messages
+ context['alicanto_result'] = result # Store full result in context
+ except Exception as e:
+ traceback.print_exc()
+ logging.error(f"Error during Alicanto research: {e}")
+ context['output'] = f"Error during Alicanto research: {e}"
+ context['messages'] = output_messages
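As a rough usage sketch (not part of the package itself), the step above reduces to a direct call like the following; the query string is illustrative and the gemini defaults are the hardcoded fallbacks from the jinx:

    from npcsh.alicanto import alicanto

    result = alicanto(
        query="example research question",   # illustrative query
        num_npcs=5,
        depth=3,
        model="gemini-1.5-pro",              # fallback default from the jinx above
        provider="gemini",
        max_steps=20,
        skip_research=True,
    )
    # The jinx prefers the 'integration' field when the result is a dict.
    output = result.get("integration", str(result)) if isinstance(result, dict) else str(result)
    print(output)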
@@ -0,0 +1,28 @@
+ jinx_name: "corca"
+ description: "Enter the Corca MCP-powered agentic shell. Usage: /corca [--mcp-server-path path]"
+ inputs:
+ - command: "/corca" # The full command string, e.g., "/corca --mcp-server-path /tmp/mcp"
+ steps:
+ - name: "enter_corca"
+ engine: "python"
+ code: |
+ # Assume npcsh._state and enter_corca_mode are accessible in the environment
+
+ from npcsh._state import initial_state, setup_shell
+ from npcsh.corca import enter_corca_mode
+
+
+ full_command_str = context.get('command')
+ output_messages = context.get('messages', [])
+
+ command_history, team, default_npc = setup_shell()
+
+ result = enter_corca_mode(
+ command=full_command_str,
+ command_history=command_history,
+ shell_state=initial_state,
+ **context # Pass all context as kwargs to enter_corca_mode as it expects
+ )
+
+ context['output'] = result.get('output', 'Entered Corca mode.')
+ context['messages'] = result.get('messages', output_messages)
@@ -0,0 +1,46 @@
+ jinx_name: "guac"
+ description: "Enter guac mode for plotting and data visualization."
+ inputs:
+ - config_dir: "" # Optional configuration directory.
+ - plots_dir: "" # Optional directory for plots.
+ - refresh_period: 100 # Refresh period for guac mode.
+ - lang: "" # Language setting for guac mode.
+ steps:
+ - name: "enter_guac"
+ engine: "python"
+ code: |
+ import os
+ from sqlalchemy import create_engine
+ from npcpy.npc_compiler import NPC, Team
+ from npcsh.guac import enter_guac_mode
+
+ config_dir = context.get('config_dir')
+ plots_dir = context.get('plots_dir')
+ refresh_period = context.get('refresh_period')
+ lang = context.get('lang')
+ output_messages = context.get('messages', [])
+
+ db_path = os.path.expanduser('~/npcsh_history.db')
+ db_conn = create_engine(f'sqlite:///{db_path}')
+
+ npc_file = os.path.expanduser('~/.npcsh/guac/npc_team/guac.npc')
+ npc_team_dir = os.path.expanduser('~/.npcsh/guac/npc_team/')
+
+ # Ensure directories exist for guac NPC/Team
+ os.makedirs(os.path.dirname(npc_file), exist_ok=True)
+
+ guac_npc = NPC(file=npc_file, db_conn=db_conn)
+ guac_team = Team(npc_team_dir, db_conn=db_conn)
+
+ enter_guac_mode(
+ npc=guac_npc,
+ team=guac_team,
+ config_dir=config_dir,
+ plots_dir=plots_dir,
+ npc_team_dir=npc_team_dir,
+ refresh_period=int(refresh_period), # Ensure int type
+ lang=lang
+ )
+
+ context['output'] = 'Exiting Guac Mode'
+ context['messages'] = output_messages
@@ -0,0 +1,57 @@
+ jinx_name: "plonk"
+ description: "Use vision model to interact with GUI. Usage: /plonk <task description>"
+ inputs:
+ - task_description: "" # Required task description for GUI interaction.
+ - vmodel: "" # Vision model to use. Defaults to NPCSH_VISION_MODEL or NPC's model.
+ - vprovider: "" # Vision model provider. Defaults to NPCSH_VISION_PROVIDER or NPC's provider.
+ steps:
+ - name: "execute_plonk"
+ engine: "python"
+ code: |
+ import traceback
+ from npcsh.plonk import execute_plonk_command, format_plonk_summary
+ # Assuming NPCSH_VISION_MODEL and NPCSH_VISION_PROVIDER are accessible
+
+ task_description = context.get('task_description')
+ vision_model = context.get('vmodel')
+ vision_provider = context.get('vprovider')
+ plonk_context = context.get('plonk_context') # Passed from original context
+ current_npc = context.get('npc')
+ output_messages = context.get('messages', [])
+
+ if not task_description or not task_description.strip():
+ context['output'] = "Usage: /plonk <task_description> [--vmodel model_name] [--vprovider provider_name]"
+ context['messages'] = output_messages
+ exit()
+
+ # Fallback for model/provider if not explicitly set in Jinx inputs
+ if not vision_model and current_npc and current_npc.model:
+ vision_model = current_npc.model
+ if not vision_provider and current_npc and current_npc.provider:
+ vision_provider = current_npc.provider
+
+ # Final fallbacks (these would ideally come from npcsh._state config)
+ if not vision_model: vision_model = "gemini-1.5-pro-vision" # Example default
+ if not vision_provider: vision_provider = "gemini" # Example default
+
+ try:
+ summary_data = execute_plonk_command(
+ request=task_description,
+ model=vision_model,
+ provider=vision_provider,
+ npc=current_npc,
+ plonk_context=plonk_context,
+ debug=True # Assuming debug is often desired for plonk
+ )
+
+ if summary_data and isinstance(summary_data, list):
+ output_report = format_plonk_summary(summary_data)
+ context['output'] = output_report
+ else:
+ context['output'] = "Plonk command did not complete within the maximum number of iterations."
+
+ except Exception as e:
+ traceback.print_exc()
+ context['output'] = f"Error executing plonk command: {e}"
+
+ context['messages'] = output_messages
@@ -0,0 +1,28 @@
+ jinx_name: "pti"
+ description: "Enter Pardon-The-Interruption mode for human-in-the-loop reasoning."
+ inputs:
+ - command_args: "" # The full command string or specific arguments for PTI mode.
+ steps:
+ - name: "enter_pti"
+ engine: "python"
+ code: |
+ import traceback
+ from npcsh.pti import enter_pti_mode
+
+ command_args = context.get('command_args', '') # The full command string from router
+ output_messages = context.get('messages', [])
+
+ try:
+ # enter_pti_mode likely expects the full command string for its own parsing
+ result = enter_pti_mode(command=command_args, **context)
+
+ if isinstance(result, dict):
+ context['output'] = result.get('output', 'Entered PTI mode.')
+ context['messages'] = result.get('messages', output_messages)
+ else:
+ context['output'] = str(result)
+ context['messages'] = output_messages
+ except Exception as e:
+ traceback.print_exc()
+ context['output'] = f"Error entering pti mode: {e}"
+ context['messages'] = output_messages
@@ -0,0 +1,40 @@
+ jinx_name: "spool"
+ description: "Enter interactive chat (spool) mode"
+ inputs: [] # Spool mode typically takes its parameters directly from the environment/kwargs
+ steps:
+ - name: "enter_spool"
+ engine: "python"
+ code: |
+ import traceback
+ from npcpy.npc_compiler import NPC, Team
+ from npcsh.spool import enter_spool_mode
+
+ output_messages = context.get('messages', [])
+ current_npc = context.get('npc')
+ current_team = context.get('team')
+
+ try:
+ # Handle potential string NPC name if passed from CLI
+ if isinstance(current_npc, str) and current_team:
+ npc_name = current_npc
+ if npc_name in current_team.npcs:
+ current_npc = current_team.npcs[npc_name]
+ else:
+ context['output'] = f"Error: NPC '{npc_name}' not found in team. Available NPCs: {', '.join(current_team.npcs.keys())}"
+ context['messages'] = output_messages
+ exit()
+ context['npc'] = current_npc # Ensure the NPC object is updated in context
+
+ result = enter_spool_mode(**context) # Pass all context as kwargs
+
+ if isinstance(result, dict):
+ context['output'] = result.get('output', 'Exited Spool Mode.')
+ context['messages'] = result.get('messages', output_messages)
+ else:
+ context['output'] = str(result)
+ context['messages'] = output_messages
+
+ except Exception as e:
+ traceback.print_exc()
+ context['output'] = f"Error entering spool mode: {e}"
+ context['messages'] = output_messages
@@ -0,0 +1,81 @@
+ jinx_name: "wander"
+ description: "Enter wander mode (experimental)"
+ inputs:
+ - problem: "" # The problem to wander about.
+ - environment: "" # Optional environment for wander mode.
+ - low_temp: 0.5 # Low temperature setting for LLM.
+ - high_temp: 1.9 # High temperature setting for LLM.
+ - interruption_likelihood: 1.0 # Likelihood of interruption.
+ - sample_rate: 0.4 # Sample rate.
+ - n_high_temp_streams: 5 # Number of high temperature streams.
+ - include_events: False # Whether to include events.
+ - num_events: 3 # Number of events to include.
+ steps:
+ - name: "enter_wander"
+ engine: "python"
+ code: |
+ import traceback
+ from npcsh.wander import enter_wander_mode
+
+ problem = context.get('problem')
+ environment = context.get('environment')
+ low_temp = float(context.get('low_temp', 0.5)) # Ensure float type
+ high_temp = float(context.get('high_temp', 1.9)) # Ensure float type
+ interruption_likelihood = float(context.get('interruption_likelihood', 1.0)) # Ensure float type
+ sample_rate = float(context.get('sample_rate', 0.4)) # Ensure float type
+ n_high_temp_streams = int(context.get('n_high_temp_streams', 5)) # Ensure int type
+ include_events = context.get('include_events', False) # Boolean type
+ num_events = int(context.get('num_events', 3)) # Ensure int type
+
+ current_npc = context.get('npc')
+ llm_model = context.get('model')
+ llm_provider = context.get('provider')
+ output_messages = context.get('messages', [])
+
+ if not problem or not problem.strip():
+ context['output'] = "Usage: /wander <problem> [key=value...]"
+ context['messages'] = output_messages
+ exit()
+
+ # Fallback for model/provider if not explicitly set in Jinx inputs
+ if not llm_model and current_npc and current_npc.model:
+ llm_model = current_npc.model
+ if not llm_provider and current_npc and current_npc.provider:
+ llm_provider = current_npc.provider
+
+ # Final fallbacks (these would ideally come from npcsh._state config)
+ if not llm_model: llm_model = "gemini-1.5-pro" # Example default
+ if not llm_provider: llm_provider = "gemini" # Example default
+
+ try:
+ mode_args = {
+ 'problem': problem,
+ 'npc': current_npc,
+ 'model': llm_model,
+ 'provider': llm_provider,
+ 'environment': environment,
+ 'low_temp': low_temp,
+ 'high_temp': high_temp,
+ 'interruption_likelihood': interruption_likelihood,
+ 'sample_rate': sample_rate,
+ 'n_high_temp_streams': n_high_temp_streams,
+ 'include_events': include_events,
+ 'num_events': num_events
+ }
+
+ result = enter_wander_mode(**mode_args)
+
+ output_result = ""
+ if isinstance(result, list) and result:
+ output_result = result[-1].get("insight", "Wander mode session complete.")
+ else:
+ output_result = str(result) if result else "Wander mode session complete."
+
+ output_messages.append({"role": "assistant", "content": output_result})
+ context['output'] = output_result
+ context['messages'] = output_messages
+
+ except Exception as e:
+ traceback.print_exc()
+ context['output'] = f"Error during wander mode: {e}"
+ context['messages'] = output_messages
@@ -0,0 +1,25 @@
+ jinx_name: "yap"
+ description: "Enter voice chat (yap) mode"
+ inputs: [] # Yap mode takes its parameters directly from the environment/kwargs
+ steps:
+ - name: "enter_yap"
+ engine: "python"
+ code: |
+ import traceback
+ from npcsh.yap import enter_yap_mode
+
+ output_messages = context.get('messages', [])
+
+ try:
+ result = enter_yap_mode(**context) # Pass all context as kwargs
+
+ if isinstance(result, dict):
+ context['output'] = result.get('output', 'Exited Yap Mode.')
+ context['messages'] = result.get('messages', output_messages)
+ else:
+ context['output'] = str(result)
+ context['messages'] = output_messages
+ except Exception as e:
+ traceback.print_exc()
+ context['output'] = f"Error entering yap mode: {e}"
+ context['messages'] = output_messages
@@ -0,0 +1,20 @@
+ jinx_name: "breathe"
+ description: "Condense context on a regular cadence"
+ inputs: [] # The breathe command takes all relevant context from the NPC's environment
+ steps:
+ - name: "condense_context"
+ engine: "python"
+ code: |
+ from npcpy.llm_funcs import breathe
+
+ output_messages = context.get('messages', [])
+
+ # Pass all current context as kwargs to breathe
+ result = breathe(**context)
+
+ if isinstance(result, dict):
+ context['output'] = result.get('output', 'Context condensed.')
+ context['messages'] = result.get('messages', output_messages)
+ else:
+ context['output'] = "Context condensation process initiated."
+ context['messages'] = output_messages
@@ -0,0 +1,65 @@
+ jinx_name: "build"
+ description: "Build deployment artifacts for NPC team"
+ inputs:
+ - target: "flask" # The type of deployment target (e.g., flask, docker, cli, static).
+ - output: "./build" # The output directory for built artifacts.
+ - team: "./npc_team" # The path to the NPC team directory.
+ - port: 5337 # The port for flask server builds.
+ - cors: "" # Comma-separated CORS origins for flask server builds.
+ steps:
+ - name: "execute_build"
+ engine: "python"
+ code: |
+ import os
+
+ # Assume these build functions are available in the execution environment
+ # from a larger project context, e.g., from npcpy.build_funcs
+ try:
+ from npcpy.build_funcs import (
+ build_flask_server,
+ build_docker_compose,
+ build_cli_executable,
+ build_static_site,
+ )
+ except ImportError:
+ # Provide mock functions for demonstration or error handling
+ def build_flask_server(config, **kwargs): return {"output": f"Mock build flask: {config}", "messages": []}
+ def build_docker_compose(config, **kwargs): return {"output": f"Mock build docker: {config}", "messages": []}
+ def build_cli_executable(config, **kwargs): return {"output": f"Mock build cli: {config}", "messages": []}
+ def build_static_site(config, **kwargs): return {"output": f"Mock build static: {config}", "messages": []}
+
+ target = context.get('target')
+ output_dir = context.get('output')
+ team_path = context.get('team')
+ port = context.get('port')
+ cors_origins_str = context.get('cors')
+
+ cors_origins = [origin.strip() for origin in cors_origins_str.split(',')] if cors_origins_str.strip() else None
+
+ build_config = {
+ 'team_path': os.path.abspath(os.path.expanduser(team_path)),
+ 'output_dir': os.path.abspath(os.path.expanduser(output_dir)),
+ 'target': target,
+ 'port': port,
+ 'cors_origins': cors_origins,
+ }
+
+ builders = {
+ 'flask': build_flask_server,
+ 'docker': build_docker_compose,
+ 'cli': build_cli_executable,
+ 'static': build_static_site,
+ }
+
+ output_messages = context.get('messages', [])
+ output_result = ""
+
+ if target not in builders:
+ output_result = f"Unknown target: {target}. Available: {list(builders.keys())}"
+ else:
+ result = builders[target](build_config, messages=output_messages)
+ output_result = result.get('output', 'Build command executed.')
+ output_messages = result.get('messages', output_messages) # Update messages from builder call
+
+ context['output'] = output_result
+ context['messages'] = output_messages
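A self-contained sketch of the dispatch pattern used above, with the mock builder from the jinx's own ImportError fallback standing in for npcpy.build_funcs; paths and target are illustrative values only:

    import os

    def build_flask_server(config, **kwargs):
        # Mirrors the jinx's mock fallback when npcpy.build_funcs is unavailable.
        return {"output": f"Mock build flask: {config}", "messages": []}

    builders = {'flask': build_flask_server}

    build_config = {
        'team_path': os.path.abspath(os.path.expanduser('./npc_team')),
        'output_dir': os.path.abspath(os.path.expanduser('./build')),
        'target': 'flask',
        'port': 5337,
        'cors_origins': None,
    }

    target = build_config['target']
    if target not in builders:
        print(f"Unknown target: {target}. Available: {list(builders.keys())}")
    else:
        result = builders[target](build_config, messages=[])
        print(result.get('output', 'Build command executed.'))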
@@ -0,0 +1,50 @@
+ jinx_name: "compile"
+ description: "Compile NPC profiles"
+ inputs:
+ - npc_file_path: "" # Optional path to a specific NPC file to compile.
+ - npc_team_dir: "./npc_team" # Directory containing NPC profiles to compile, if no specific file is given.
+ steps:
+ - name: "compile_npcs"
+ engine: "python"
+ code: |
+ import os
+ import traceback
+ from npcpy.npc_compiler import NPC, Team # Assuming Team might be needed for full directory compilation
+
+ npc_file_path_arg = context.get('npc_file_path')
+ npc_team_dir = context.get('npc_team_dir')
+ output_messages = context.get('messages', [])
+
+ output_result = ""
+ compiled_npc_object = None
+
+ try:
+ if npc_file_path_arg and npc_file_path_arg.strip():
+ npc_full_path = os.path.abspath(os.path.expanduser(npc_file_path_arg))
+ if os.path.exists(npc_full_path):
+ # Assuming NPC() constructor "compiles" it by loading its definition
+ compiled_npc_object = NPC(file=npc_full_path, db_conn=context.get('db_conn'))
+ output_result = f"Compiled NPC: {npc_full_path}"
+ else:
+ output_result = f"Error: NPC file not found: {npc_full_path}"
+ else:
+ # Compile all NPCs in the directory. This would typically involve iterating and loading.
+ # For simplicity in this Jinx, we just acknowledge the directory.
+ # A more robust implementation would loop through .npc files and compile them.
+ abs_npc_team_dir = os.path.abspath(os.path.expanduser(npc_team_dir))
+ if os.path.exists(abs_npc_team_dir):
+ output_result = f"Acknowledged compilation for all NPCs in directory: {abs_npc_team_dir}"
+ # Example of loading a Team and setting the compiled_npc_object to its forenpc if available
+ # team = Team(team_path=abs_npc_team_dir, db_conn=context.get('db_conn'))
+ # if team.forenpc:
+ # compiled_npc_object = team.forenpc
+ else:
+ output_result = f"Error: NPC team directory not found: {npc_team_dir}"
+ except Exception as e:
+ traceback.print_exc()
+ output_result = f"Error compiling: {e}"
+
+ context['output'] = output_result
+ context['messages'] = output_messages
+ if compiled_npc_object:
+ context['compiled_npc_object'] = compiled_npc_object # Store the compiled NPC object if any
@@ -0,0 +1,52 @@
+ jinx_name: help
+ description: Show help for commands, NPCs, or Jinxs
+ inputs:
+ - topic: null
+ steps:
+ - name: show_help
+ engine: python
+ code: |
+ import json
+ from npcsh._state import CANONICAL_ARGS, get_argument_help
+
+ topic = context.get('topic')
+
+ if not topic:
+ output_lines = ["# Available Commands\n\n"]
+
+ all_jinxs = {}
+ if hasattr(npc, 'team') and npc.team and hasattr(npc.team, 'jinxs_dict'):
+ all_jinxs.update(npc.team.jinxs_dict)
+ if hasattr(npc, 'jinxs_dict') and npc.jinxs_dict:
+ all_jinxs.update(npc.jinxs_dict)
+
+ for cmd in sorted(all_jinxs.keys()):
+ jinx_obj = all_jinxs[cmd]
+ desc = getattr(jinx_obj, 'description', 'No description')
+ output_lines.append(f"/{cmd} - {desc}\n\n")
+
+ arg_help_map = get_argument_help()
+ if arg_help_map:
+ output_lines.append("## Common Command-Line Flags\n\n")
+ output_lines.append("The shortest unambiguous prefix works.\n")
+
+ for arg in sorted(CANONICAL_ARGS):
+ aliases = arg_help_map.get(arg, [])
+ alias_str = f"(-{min(aliases, key=len)})" if aliases else ""
+ output_lines.append(f"--{arg:<20} {alias_str}\n")
+
+ output = "".join(output_lines)
+ else:
+ jinx_obj = None
+ if hasattr(npc, 'team') and npc.team and hasattr(npc.team, 'jinxs_dict'):
+ jinx_obj = npc.team.jinxs_dict.get(topic)
+ if not jinx_obj and hasattr(npc, 'jinxs_dict'):
+ jinx_obj = npc.jinxs_dict.get(topic)
+
+ if jinx_obj:
+ output = f"## Help for Jinx: `/{topic}`\n\n"
+ output += f"- **Description**: {jinx_obj.description}\n"
+ if hasattr(jinx_obj, 'inputs') and jinx_obj.inputs:
+ output += f"- **Inputs**: {json.dumps(jinx_obj.inputs, indent=2)}\n"
+ else:
+ output = f"No help topic found for `{topic}`."