npcsh 1.1.4__py3-none-any.whl → 1.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +47 -63
- npcsh/npc_team/corca_example.png +0 -0
- npcsh/npc_team/jinxs/{python_executor.jinx → code/python.jinx} +1 -1
- npcsh/npc_team/jinxs/{bash_executer.jinx → code/sh.jinx} +1 -1
- npcsh/npc_team/jinxs/code/sql.jinx +18 -0
- npcsh/npc_team/jinxs/modes/alicanto.jinx +88 -0
- npcsh/npc_team/jinxs/modes/corca.jinx +28 -0
- npcsh/npc_team/jinxs/modes/guac.jinx +46 -0
- npcsh/npc_team/jinxs/modes/plonk.jinx +57 -0
- npcsh/npc_team/jinxs/modes/pti.jinx +28 -0
- npcsh/npc_team/jinxs/modes/spool.jinx +40 -0
- npcsh/npc_team/jinxs/modes/wander.jinx +81 -0
- npcsh/npc_team/jinxs/modes/yap.jinx +25 -0
- npcsh/npc_team/jinxs/utils/breathe.jinx +20 -0
- npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
- npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
- npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
- npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
- npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
- npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
- npcsh/npc_team/jinxs/{edit_file.jinx → utils/edit_file.jinx} +1 -1
- npcsh/npc_team/jinxs/utils/flush.jinx +39 -0
- npcsh/npc_team/jinxs/utils/npc-studio.jinx +82 -0
- npcsh/npc_team/jinxs/utils/ots.jinx +92 -0
- npcsh/npc_team/jinxs/utils/plan.jinx +33 -0
- npcsh/npc_team/jinxs/utils/roll.jinx +66 -0
- npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
- npcsh/npc_team/jinxs/utils/search/brainblast.jinx +51 -0
- npcsh/npc_team/jinxs/utils/search/rag.jinx +70 -0
- npcsh/npc_team/jinxs/utils/search/search.jinx +192 -0
- npcsh/npc_team/jinxs/utils/serve.jinx +29 -0
- npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
- npcsh/npc_team/jinxs/utils/trigger.jinx +36 -0
- npcsh/npc_team/jinxs/utils/vixynt.jinx +129 -0
- npcsh/npcsh.py +13 -11
- npcsh/routes.py +80 -1420
- npcsh-1.1.5.data/data/npcsh/npc_team/alicanto.jinx +88 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/brainblast.jinx +51 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/breathe.jinx +20 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/build.jinx +65 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/compile.jinx +50 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/corca.jinx +28 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/corca_example.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/edit_file.jinx +1 -1
- npcsh-1.1.5.data/data/npcsh/npc_team/flush.jinx +39 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/guac.jinx +46 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/help.jinx +52 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/init.jinx +41 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/jinxs.jinx +32 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/npc-studio.jinx +82 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/ots.jinx +92 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/plan.jinx +33 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/plonk.jinx +57 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/pti.jinx +28 -0
- npcsh-1.1.4.data/data/npcsh/npc_team/python_executor.jinx → npcsh-1.1.5.data/data/npcsh/npc_team/python.jinx +1 -1
- npcsh-1.1.5.data/data/npcsh/npc_team/rag.jinx +70 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/roll.jinx +66 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/sample.jinx +56 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/search.jinx +192 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/serve.jinx +29 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/set.jinx +40 -0
- npcsh-1.1.4.data/data/npcsh/npc_team/bash_executer.jinx → npcsh-1.1.5.data/data/npcsh/npc_team/sh.jinx +1 -1
- npcsh-1.1.5.data/data/npcsh/npc_team/sleep.jinx +116 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/spool.jinx +40 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/sql.jinx +18 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/trigger.jinx +36 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/vixynt.jinx +129 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/wander.jinx +81 -0
- npcsh-1.1.5.data/data/npcsh/npc_team/yap.jinx +25 -0
- {npcsh-1.1.4.dist-info → npcsh-1.1.5.dist-info}/METADATA +1 -1
- npcsh-1.1.5.dist-info/RECORD +132 -0
- npcsh/npc_team/jinxs/image_generation.jinx +0 -29
- npcsh/npc_team/jinxs/internet_search.jinx +0 -31
- npcsh/npc_team/jinxs/screen_cap.jinx +0 -25
- npcsh-1.1.4.data/data/npcsh/npc_team/image_generation.jinx +0 -29
- npcsh-1.1.4.data/data/npcsh/npc_team/internet_search.jinx +0 -31
- npcsh-1.1.4.data/data/npcsh/npc_team/screen_cap.jinx +0 -25
- npcsh-1.1.4.dist-info/RECORD +0 -78
- /npcsh/npc_team/jinxs/{kg_search.jinx → utils/search/kg_search.jinx} +0 -0
- /npcsh/npc_team/jinxs/{memory_search.jinx → utils/search/memory_search.jinx} +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/kg_search.jinx +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/memory_search.jinx +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.4.dist-info → npcsh-1.1.5.dist-info}/WHEEL +0 -0
- {npcsh-1.1.4.dist-info → npcsh-1.1.5.dist-info}/entry_points.txt +0 -0
- {npcsh-1.1.4.dist-info → npcsh-1.1.5.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.1.4.dist-info → npcsh-1.1.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,88 @@
+jinx_name: "alicanto"
+description: "Conduct deep research with multiple perspectives, identifying gold insights and cliff warnings"
+inputs:
+  - query: "" # Required research query.
+  - num_npcs: 5 # Number of NPCs to involve in research.
+  - depth: 3 # Depth of research.
+  - model: "" # LLM model to use. Defaults to NPCSH_CHAT_MODEL or NPC's model.
+  - provider: "" # LLM provider to use. Defaults to NPCSH_CHAT_PROVIDER or NPC's provider.
+  - max_steps: 20 # Maximum number of steps in Alicanto research.
+  - skip_research: True # Whether to skip the research phase.
+  - exploration: "" # Exploration factor (float).
+  - creativity: "" # Creativity factor (float).
+  - format: "" # Output format (report, summary, full).
+steps:
+  - name: "conduct_alicanto_research"
+    engine: "python"
+    code: |
+      import traceback
+      import logging
+      from npcsh.alicanto import alicanto
+      # Assuming NPCSH_CHAT_MODEL and NPCSH_CHAT_PROVIDER are accessible
+
+      query = context.get('query')
+      num_npcs = int(context.get('num_npcs', 5)) # Ensure int type
+      depth = int(context.get('depth', 3)) # Ensure int type
+      llm_model = context.get('model')
+      llm_provider = context.get('provider')
+      max_steps = int(context.get('max_steps', 20)) # Ensure int type
+      skip_research = context.get('skip_research', True)
+      exploration_factor = context.get('exploration')
+      creativity_factor = context.get('creativity')
+      output_format = context.get('format')
+      output_messages = context.get('messages', [])
+      current_npc = context.get('npc')
+
+      if not query or not query.strip():
+          context['output'] = "Usage: /alicanto <research query> [--num-npcs N] [--depth N] [--exploration 0.3] [--creativity 0.5] [--format report|summary|full]"
+          context['messages'] = output_messages
+          exit()
+
+      # Fallback for model/provider if not explicitly set in Jinx inputs
+      if not llm_model and current_npc and current_npc.model:
+          llm_model = current_npc.model
+      if not llm_provider and current_npc and current_npc.provider:
+          llm_provider = current_npc.provider
+
+      # Final fallbacks (these would ideally come from npcsh._state config)
+      # Assuming NPCSH_CHAT_MODEL and NPCSH_CHAT_PROVIDER exist and are imported implicitly or set by environment
+      # Hardcoding defaults for demonstration if not available through NPC or _state
+      if not llm_model: llm_model = "gemini-1.5-pro"
+      if not llm_provider: llm_provider = "gemini"
+
+      try:
+          logging.info(f"Starting Alicanto research on: {query}")
+
+          alicanto_kwargs = {
+              'query': query,
+              'num_npcs': num_npcs,
+              'depth': depth,
+              'model': llm_model,
+              'provider': llm_provider,
+              'max_steps': max_steps,
+              'skip_research': skip_research,
+          }
+
+          if exploration_factor: alicanto_kwargs['exploration_factor'] = float(exploration_factor)
+          if creativity_factor: alicanto_kwargs['creativity_factor'] = float(creativity_factor)
+          if output_format: alicanto_kwargs['output_format'] = output_format
+
+          result = alicanto(**alicanto_kwargs)
+
+          output_result = ""
+          if isinstance(result, dict):
+              if "integration" in result:
+                  output_result = result["integration"]
+              else:
+                  output_result = "Alicanto research completed. Full results available in returned data."
+          else:
+              output_result = str(result)
+
+          context['output'] = output_result
+          context['messages'] = output_messages
+          context['alicanto_result'] = result # Store full result in context
+      except Exception as e:
+          traceback.print_exc()
+          logging.error(f"Error during Alicanto research: {e}")
+          context['output'] = f"Error during Alicanto research: {e}"
+          context['messages'] = output_messages
@@ -0,0 +1,51 @@
+jinx_name: "brainblast"
+description: "Execute an advanced chunked search on command history"
+inputs:
+  - search_query: "" # Required search terms.
+  - history_db_path: "~/npcsh_history.db" # Path to the command history database.
+steps:
+  - name: "execute_brainblast"
+    engine: "python"
+    code: |
+      import os
+      import traceback
+      from npcpy.memory.command_history import CommandHistory
+      from npcpy.memory.search import execute_brainblast_command
+
+      search_query = context.get('search_query')
+      history_db_path = context.get('history_db_path')
+      output_messages = context.get('messages', [])
+
+      if not search_query or not search_query.strip():
+          context['output'] = "Usage: /brainblast <search_terms>"
+          context['messages'] = output_messages
+          exit()
+
+      command_history = context.get('command_history')
+      if not command_history:
+          try:
+              command_history = CommandHistory(os.path.expanduser(history_db_path))
+              context['command_history'] = command_history # Add to context for potential reuse
+          except Exception as e:
+              context['output'] = f"Error connecting to command history: {e}"
+              context['messages'] = output_messages
+              exit()
+
+      try:
+          # The original handler passed **kwargs, so we pass current context
+          # The execute_brainblast_command might filter out 'messages' itself
+          result = execute_brainblast_command(
+              command=search_query,
+              command_history=command_history,
+              **{k:v for k,v in context.items() if k != 'messages'}) # Exclude messages if not expected
+
+          if isinstance(result, dict):
+              context['output'] = result.get('output', 'Brainblast search executed.')
+              context['messages'] = result.get('messages', output_messages)
+          else:
+              context['output'] = str(result)
+              context['messages'] = output_messages
+      except Exception as e:
+          traceback.print_exc()
+          context['output'] = f"Error executing brainblast command: {e}"
+          context['messages'] = output_messages
@@ -0,0 +1,20 @@
+jinx_name: "breathe"
+description: "Condense context on a regular cadence"
+inputs: [] # The breathe command takes all relevant context from the NPC's environment
+steps:
+  - name: "condense_context"
+    engine: "python"
+    code: |
+      from npcpy.llm_funcs import breathe
+
+      output_messages = context.get('messages', [])
+
+      # Pass all current context as kwargs to breathe
+      result = breathe(**context)
+
+      if isinstance(result, dict):
+          context['output'] = result.get('output', 'Context condensed.')
+          context['messages'] = result.get('messages', output_messages)
+      else:
+          context['output'] = "Context condensation process initiated."
+          context['messages'] = output_messages
@@ -0,0 +1,65 @@
+jinx_name: "build"
+description: "Build deployment artifacts for NPC team"
+inputs:
+  - target: "flask" # The type of deployment target (e.g., flask, docker, cli, static).
+  - output: "./build" # The output directory for built artifacts.
+  - team: "./npc_team" # The path to the NPC team directory.
+  - port: 5337 # The port for flask server builds.
+  - cors: "" # Comma-separated CORS origins for flask server builds.
+steps:
+  - name: "execute_build"
+    engine: "python"
+    code: |
+      import os
+
+      # Assume these build functions are available in the execution environment
+      # from a larger project context, e.g., from npcpy.build_funcs
+      try:
+          from npcpy.build_funcs import (
+              build_flask_server,
+              build_docker_compose,
+              build_cli_executable,
+              build_static_site,
+          )
+      except ImportError:
+          # Provide mock functions for demonstration or error handling
+          def build_flask_server(config, **kwargs): return {"output": f"Mock build flask: {config}", "messages": []}
+          def build_docker_compose(config, **kwargs): return {"output": f"Mock build docker: {config}", "messages": []}
+          def build_cli_executable(config, **kwargs): return {"output": f"Mock build cli: {config}", "messages": []}
+          def build_static_site(config, **kwargs): return {"output": f"Mock build static: {config}", "messages": []}
+
+      target = context.get('target')
+      output_dir = context.get('output')
+      team_path = context.get('team')
+      port = context.get('port')
+      cors_origins_str = context.get('cors')
+
+      cors_origins = [origin.strip() for origin in cors_origins_str.split(',')] if cors_origins_str.strip() else None
+
+      build_config = {
+          'team_path': os.path.abspath(os.path.expanduser(team_path)),
+          'output_dir': os.path.abspath(os.path.expanduser(output_dir)),
+          'target': target,
+          'port': port,
+          'cors_origins': cors_origins,
+      }
+
+      builders = {
+          'flask': build_flask_server,
+          'docker': build_docker_compose,
+          'cli': build_cli_executable,
+          'static': build_static_site,
+      }
+
+      output_messages = context.get('messages', [])
+      output_result = ""
+
+      if target not in builders:
+          output_result = f"Unknown target: {target}. Available: {list(builders.keys())}"
+      else:
+          result = builders[target](build_config, messages=output_messages)
+          output_result = result.get('output', 'Build command executed.')
+          output_messages = result.get('messages', output_messages) # Update messages from builder call
+
+      context['output'] = output_result
+      context['messages'] = output_messages
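
Note: the build.jinx step above selects a builder via a dispatch table keyed on the `target` input. The following is a minimal standalone sketch of that pattern; the builder stubs here are hypothetical placeholders, not the npcpy.build_funcs implementations.

    # Sketch of the dispatch-table pattern used in build.jinx (stub builders for illustration).
    def build_flask_server(config, **kwargs):
        return {"output": f"flask build -> {config['output_dir']}", "messages": kwargs.get("messages", [])}

    def build_static_site(config, **kwargs):
        return {"output": f"static build -> {config['output_dir']}", "messages": kwargs.get("messages", [])}

    builders = {"flask": build_flask_server, "static": build_static_site}

    build_config = {"team_path": "./npc_team", "output_dir": "./build",
                    "target": "flask", "port": 5337, "cors_origins": None}

    target = build_config["target"]
    if target not in builders:
        print(f"Unknown target: {target}. Available: {list(builders.keys())}")
    else:
        result = builders[target](build_config, messages=[])
        print(result["output"])

Unknown targets fall through to an error string rather than raising, which matches how the jinx reports problems back through context['output'].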
@@ -0,0 +1,50 @@
+jinx_name: "compile"
+description: "Compile NPC profiles"
+inputs:
+  - npc_file_path: "" # Optional path to a specific NPC file to compile.
+  - npc_team_dir: "./npc_team" # Directory containing NPC profiles to compile, if no specific file is given.
+steps:
+  - name: "compile_npcs"
+    engine: "python"
+    code: |
+      import os
+      import traceback
+      from npcpy.npc_compiler import NPC, Team # Assuming Team might be needed for full directory compilation
+
+      npc_file_path_arg = context.get('npc_file_path')
+      npc_team_dir = context.get('npc_team_dir')
+      output_messages = context.get('messages', [])
+
+      output_result = ""
+      compiled_npc_object = None
+
+      try:
+          if npc_file_path_arg and npc_file_path_arg.strip():
+              npc_full_path = os.path.abspath(os.path.expanduser(npc_file_path_arg))
+              if os.path.exists(npc_full_path):
+                  # Assuming NPC() constructor "compiles" it by loading its definition
+                  compiled_npc_object = NPC(file=npc_full_path, db_conn=context.get('db_conn'))
+                  output_result = f"Compiled NPC: {npc_full_path}"
+              else:
+                  output_result = f"Error: NPC file not found: {npc_full_path}"
+          else:
+              # Compile all NPCs in the directory. This would typically involve iterating and loading.
+              # For simplicity in this Jinx, we just acknowledge the directory.
+              # A more robust implementation would loop through .npc files and compile them.
+              abs_npc_team_dir = os.path.abspath(os.path.expanduser(npc_team_dir))
+              if os.path.exists(abs_npc_team_dir):
+                  output_result = f"Acknowledged compilation for all NPCs in directory: {abs_npc_team_dir}"
+                  # Example of loading a Team and setting the compiled_npc_object to its forenpc if available
+                  # team = Team(team_path=abs_npc_team_dir, db_conn=context.get('db_conn'))
+                  # if team.forenpc:
+                  #     compiled_npc_object = team.forenpc
+              else:
+                  output_result = f"Error: NPC team directory not found: {npc_team_dir}"
+      except Exception as e:
+          traceback.print_exc()
+          output_result = f"Error compiling: {e}"
+
+      context['output'] = output_result
+      context['messages'] = output_messages
+      if compiled_npc_object:
+          context['compiled_npc_object'] = compiled_npc_object # Store the compiled NPC object if any
@@ -0,0 +1,28 @@
+jinx_name: "corca"
+description: "Enter the Corca MCP-powered agentic shell. Usage: /corca [--mcp-server-path path]"
+inputs:
+  - command: "/corca" # The full command string, e.g., "/corca --mcp-server-path /tmp/mcp"
+steps:
+  - name: "enter_corca"
+    engine: "python"
+    code: |
+      # Assume npcsh._state and enter_corca_mode are accessible in the environment
+
+      from npcsh._state import initial_state, setup_shell
+      from npcsh.corca import enter_corca_mode
+
+
+      full_command_str = context.get('command')
+      output_messages = context.get('messages', [])
+
+      command_history, team, default_npc = setup_shell()
+
+      result = enter_corca_mode(
+          command=full_command_str,
+          command_history=command_history,
+          shell_state=initial_state,
+          **context # Pass all context as kwargs to enter_corca_mode as it expects
+      )
+
+      context['output'] = result.get('output', 'Entered Corca mode.')
+      context['messages'] = result.get('messages', output_messages)
Binary file
@@ -0,0 +1,39 @@
+jinx_name: "flush"
+description: "Flush the last N messages from the conversation history."
+inputs:
+  - n: 1 # The number of messages to flush (default to 1).
+steps:
+  - name: "flush_messages"
+    engine: "python"
+    code: |
+      n = int(context.get('n', 1))
+      output_messages = context.get('messages', [])
+
+      if n <= 0:
+          context['output'] = "Error: Number of messages must be positive."
+          context['messages'] = output_messages
+          exit()
+
+      new_messages = list(output_messages)
+      original_len = len(new_messages)
+      removed_count = 0
+
+      if new_messages and new_messages[0].get("role") == "system":
+          system_message = new_messages[0]
+          working_messages = new_messages[1:]
+          num_to_remove = min(n, len(working_messages))
+          if num_to_remove > 0:
+              final_messages = [system_message] + working_messages[:-num_to_remove]
+              removed_count = num_to_remove
+          else:
+              final_messages = [system_message]
+      else:
+          num_to_remove = min(n, original_len)
+          if num_to_remove > 0:
+              final_messages = new_messages[:-num_to_remove]
+              removed_count = num_to_remove
+          else:
+              final_messages = []
+
+      context['output'] = f"Flushed {removed_count} message(s). Context is now {len(final_messages)} messages."
+      context['messages'] = final_messages
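
Note: the flush.jinx step above preserves a leading system message and drops the last N non-system messages. A self-contained sketch of that trimming logic, assuming OpenAI-style message dicts with a "role" key:

    # Sketch of the flush logic: keep a leading system message, drop the last n others.
    def flush_messages(messages, n=1):
        if n <= 0:
            return list(messages)
        msgs = list(messages)
        if msgs and msgs[0].get("role") == "system":
            system, working = msgs[0], msgs[1:]
            num = min(n, len(working))
            return [system] + (working[:-num] if num > 0 else working)
        num = min(n, len(msgs))
        return msgs[:-num] if num > 0 else msgs

    history = [
        {"role": "system", "content": "You are sibiji."},
        {"role": "user", "content": "hello"},
        {"role": "assistant", "content": "hi there"},
    ]
    print(flush_messages(history, n=1))  # system + user message remain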
@@ -0,0 +1,46 @@
+jinx_name: "guac"
+description: "Enter guac mode for plotting and data visualization."
+inputs:
+  - config_dir: "" # Optional configuration directory.
+  - plots_dir: "" # Optional directory for plots.
+  - refresh_period: 100 # Refresh period for guac mode.
+  - lang: "" # Language setting for guac mode.
+steps:
+  - name: "enter_guac"
+    engine: "python"
+    code: |
+      import os
+      from sqlalchemy import create_engine
+      from npcpy.npc_compiler import NPC, Team
+      from npcsh.guac import enter_guac_mode
+
+      config_dir = context.get('config_dir')
+      plots_dir = context.get('plots_dir')
+      refresh_period = context.get('refresh_period')
+      lang = context.get('lang')
+      output_messages = context.get('messages', [])
+
+      db_path = os.path.expanduser('~/npcsh_history.db')
+      db_conn = create_engine(f'sqlite:///{db_path}')
+
+      npc_file = os.path.expanduser('~/.npcsh/guac/npc_team/guac.npc')
+      npc_team_dir = os.path.expanduser('~/.npcsh/guac/npc_team/')
+
+      # Ensure directories exist for guac NPC/Team
+      os.makedirs(os.path.dirname(npc_file), exist_ok=True)
+
+      guac_npc = NPC(file=npc_file, db_conn=db_conn)
+      guac_team = Team(npc_team_dir, db_conn=db_conn)
+
+      enter_guac_mode(
+          npc=guac_npc,
+          team=guac_team,
+          config_dir=config_dir,
+          plots_dir=plots_dir,
+          npc_team_dir=npc_team_dir,
+          refresh_period=int(refresh_period), # Ensure int type
+          lang=lang
+      )
+
+      context['output'] = 'Exiting Guac Mode'
+      context['messages'] = output_messages
@@ -0,0 +1,52 @@
+jinx_name: help
+description: Show help for commands, NPCs, or Jinxs
+inputs:
+  - topic: null
+steps:
+  - name: show_help
+    engine: python
+    code: |
+      import json
+      from npcsh._state import CANONICAL_ARGS, get_argument_help
+
+      topic = context.get('topic')
+
+      if not topic:
+          output_lines = ["# Available Commands\n\n"]
+
+          all_jinxs = {}
+          if hasattr(npc, 'team') and npc.team and hasattr(npc.team, 'jinxs_dict'):
+              all_jinxs.update(npc.team.jinxs_dict)
+          if hasattr(npc, 'jinxs_dict') and npc.jinxs_dict:
+              all_jinxs.update(npc.jinxs_dict)
+
+          for cmd in sorted(all_jinxs.keys()):
+              jinx_obj = all_jinxs[cmd]
+              desc = getattr(jinx_obj, 'description', 'No description')
+              output_lines.append(f"/{cmd} - {desc}\n\n")
+
+          arg_help_map = get_argument_help()
+          if arg_help_map:
+              output_lines.append("## Common Command-Line Flags\n\n")
+              output_lines.append("The shortest unambiguous prefix works.\n")
+
+              for arg in sorted(CANONICAL_ARGS):
+                  aliases = arg_help_map.get(arg, [])
+                  alias_str = f"(-{min(aliases, key=len)})" if aliases else ""
+                  output_lines.append(f"--{arg:<20} {alias_str}\n")
+
+          output = "".join(output_lines)
+      else:
+          jinx_obj = None
+          if hasattr(npc, 'team') and npc.team and hasattr(npc.team, 'jinxs_dict'):
+              jinx_obj = npc.team.jinxs_dict.get(topic)
+          if not jinx_obj and hasattr(npc, 'jinxs_dict'):
+              jinx_obj = npc.jinxs_dict.get(topic)
+
+          if jinx_obj:
+              output = f"## Help for Jinx: `/{topic}`\n\n"
+              output += f"- **Description**: {jinx_obj.description}\n"
+              if hasattr(jinx_obj, 'inputs') and jinx_obj.inputs:
+                  output += f"- **Inputs**: {json.dumps(jinx_obj.inputs, indent=2)}\n"
+          else:
+              output = f"No help topic found for `{topic}`."
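
Note: help.jinx states that "the shortest unambiguous prefix works" for the flags in CANONICAL_ARGS. The actual resolution lives in npcsh._state and is not part of this diff; the following is a generic sketch of that kind of prefix matching, under the assumption that it behaves like a plain unambiguous-prefix lookup, with an illustrative subset of flag names:

    # Sketch of shortest-unambiguous-prefix flag resolution (assumed behavior,
    # not the npcsh._state implementation; flag list is illustrative only).
    CANONICAL_ARGS = ["model", "provider", "max_steps", "messages"]

    def resolve_flag(prefix, canonical=CANONICAL_ARGS):
        matches = [arg for arg in canonical if arg.startswith(prefix)]
        if len(matches) == 1:
            return matches[0]
        raise ValueError(f"ambiguous or unknown flag prefix: --{prefix} -> {matches}")

    print(resolve_flag("pro"))   # 'provider'
    print(resolve_flag("max"))   # 'max_steps'
    # resolve_flag("m") would raise: it matches 'model', 'max_steps', and 'messages'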
@@ -0,0 +1,41 @@
+jinx_name: "init"
+description: "Initialize NPC project"
+inputs:
+  - directory: "." # The directory where the NPC project should be initialized.
+  - templates: "" # Optional templates to use for initialization.
+  - context: "" # Optional context for project initialization.
+  - model: "" # Optional LLM model to set as default for the project.
+  - provider: "" # Optional LLM provider to set as default for the project.
+steps:
+  - name: "initialize_project"
+    engine: "python"
+    code: |
+      import os
+      import traceback
+      from npcpy.npc_compiler import initialize_npc_project
+
+      directory = context.get('directory')
+      templates = context.get('templates')
+      context_param = context.get('context') # Renamed to avoid conflict with Jinx context
+      model = context.get('model')
+      provider = context.get('provider')
+      output_messages = context.get('messages', [])
+
+      output_result = ""
+      try:
+          initialize_npc_project(
+              directory=directory,
+              templates=templates,
+              context=context_param, # Use the renamed context parameter
+              model=model,
+              provider=provider
+          )
+          output_result = f"NPC project initialized in {os.path.abspath(directory)}."
+      except NameError:
+          output_result = "Init function (initialize_npc_project) not available."
+      except Exception as e:
+          traceback.print_exc()
+          output_result = f"Error initializing project: {e}"
+
+      context['output'] = output_result
+      context['messages'] = output_messages
@@ -0,0 +1,32 @@
+jinx_name: jinxs
+description: Show available jinxs for the current NPC/Team
+inputs: []
+steps:
+  - name: list_jinxs
+    engine: python
+    code: |
+      output_lines = ["Available Jinxs:\n"]
+      jinxs_listed = set()
+
+      if hasattr(npc, 'team') and npc.team:
+          team = npc.team
+
+          if hasattr(team, 'jinxs_dict') and team.jinxs_dict:
+              output_lines.append(f"\n--- Team Jinxs ---\n")
+              for name, jinx_obj in sorted(team.jinxs_dict.items()):
+                  desc = getattr(jinx_obj, 'description', 'No description available.')
+                  output_lines.append(f"- /{name}: {desc}\n")
+                  jinxs_listed.add(name)
+
+      if hasattr(npc, 'jinxs_dict') and npc.jinxs_dict:
+          output_lines.append(f"\n--- NPC Jinxs for {npc.name} ---\n")
+          for name, jinx_obj in sorted(npc.jinxs_dict.items()):
+              if name not in jinxs_listed:
+                  desc = getattr(jinx_obj, 'description', 'No description available.')
+                  output_lines.append(f"- /{name}: {desc}\n")
+                  jinxs_listed.add(name)
+
+      if not jinxs_listed:
+          output = "No jinxs available for the current context."
+      else:
+          output = "".join(output_lines)
@@ -0,0 +1,82 @@
+jinx_name: "npc-studio"
+description: "Start npc studio"
+inputs:
+  - user_command: "" # Any additional arguments to pass to the npc studio launch.
+steps:
+  - name: "launch_npc_studio"
+    engine: "python"
+    code: |
+      import os
+      import subprocess
+      import sys
+      from pathlib import Path
+      import traceback
+
+      NPC_STUDIO_DIR = Path.home() / ".npcsh" / "npc-studio"
+
+      def ensure_repo():
+          """Clone or update the npc-studio repo."""
+          if not NPC_STUDIO_DIR.exists():
+              os.makedirs(NPC_STUDIO_DIR.parent, exist_ok=True)
+              subprocess.check_call([
+                  "git", "clone",
+                  "https://github.com/npc-worldwide/npc-studio.git",
+                  str(NPC_STUDIO_DIR)
+              ])
+          else:
+              subprocess.check_call(
+                  ["git", "pull"],
+                  cwd=NPC_STUDIO_DIR
+              )
+
+      def install_dependencies():
+          """Install npm and pip dependencies."""
+          subprocess.check_call(["npm", "install"], cwd=NPC_STUDIO_DIR)
+
+          req_file = NPC_STUDIO_DIR / "requirements.txt"
+          if req_file.exists():
+              subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", str(req_file)])
+
+      def launch_npc_studio(path_to_open: str = None):
+          """
+          Launch the NPC Studio backend + frontend.
+          Returns PIDs for processes.
+          """
+          ensure_repo()
+          install_dependencies()
+
+          backend = subprocess.Popen(
+              [sys.executable, "npc_studio_serve.py"],
+              cwd=NPC_STUDIO_DIR,
+              shell=False
+          )
+
+          # npm run dev is typically for the frontend development server
+          dev_server = subprocess.Popen(
+              ["npm", "run", "dev"],
+              cwd=NPC_STUDIO_DIR,
+              shell=False
+          )
+
+          # npm start is typically for electron or other packaged frontend
+          frontend = subprocess.Popen(
+              ["npm", "start"],
+              cwd=NPC_STUDIO_DIR,
+              shell=False
+          )
+
+          return backend, dev_server, frontend
+
+      user_command = context.get('user_command')
+      output_messages = context.get('messages', [])
+      output_result = ""
+
+      try:
+          backend, electron, frontend = launch_npc_studio(user_command or None)
+          output_result = f"NPC Studio started!\nBackend PID={backend.pid}, Electron PID={electron.pid} Frontend PID={frontend.pid}"
+      except Exception as e:
+          traceback.print_exc()
+          output_result = f"Failed to start NPC Studio: {e}"
+
+      context['output'] = output_result
+      context['messages'] = output_messages
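
Note: launch_npc_studio in the hunk above returns the three subprocess.Popen handles (backend, dev server, frontend) and the jinx only reports their PIDs; it does not stop them. A hedged sketch of how a caller holding those handles might shut them down with the standard Popen API (this shutdown helper is not part of the package):

    # Sketch only: stopping the processes returned by launch_npc_studio.
    import subprocess

    def stop_processes(*procs, timeout=10):
        for proc in procs:
            if proc.poll() is None:      # still running
                proc.terminate()         # request a graceful stop
        for proc in procs:
            try:
                proc.wait(timeout=timeout)
            except subprocess.TimeoutExpired:
                proc.kill()              # force-stop if terminate() is ignored

    # Usage, assuming the handles from launch_npc_studio are in scope:
    # stop_processes(backend, dev_server, frontend)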