npcsh 0.1.2__py3-none-any.whl → 1.1.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +3508 -0
- npcsh/alicanto.py +65 -0
- npcsh/build.py +291 -0
- npcsh/completion.py +206 -0
- npcsh/config.py +163 -0
- npcsh/corca.py +50 -0
- npcsh/execution.py +185 -0
- npcsh/guac.py +46 -0
- npcsh/mcp_helpers.py +357 -0
- npcsh/mcp_server.py +299 -0
- npcsh/npc.py +323 -0
- npcsh/npc_team/alicanto.npc +2 -0
- npcsh/npc_team/alicanto.png +0 -0
- npcsh/npc_team/corca.npc +12 -0
- npcsh/npc_team/corca.png +0 -0
- npcsh/npc_team/corca_example.png +0 -0
- npcsh/npc_team/foreman.npc +7 -0
- npcsh/npc_team/frederic.npc +6 -0
- npcsh/npc_team/frederic4.png +0 -0
- npcsh/npc_team/guac.png +0 -0
- npcsh/npc_team/jinxs/code/python.jinx +11 -0
- npcsh/npc_team/jinxs/code/sh.jinx +34 -0
- npcsh/npc_team/jinxs/code/sql.jinx +16 -0
- npcsh/npc_team/jinxs/modes/alicanto.jinx +194 -0
- npcsh/npc_team/jinxs/modes/corca.jinx +249 -0
- npcsh/npc_team/jinxs/modes/guac.jinx +317 -0
- npcsh/npc_team/jinxs/modes/plonk.jinx +214 -0
- npcsh/npc_team/jinxs/modes/pti.jinx +170 -0
- npcsh/npc_team/jinxs/modes/spool.jinx +161 -0
- npcsh/npc_team/jinxs/modes/wander.jinx +186 -0
- npcsh/npc_team/jinxs/modes/yap.jinx +262 -0
- npcsh/npc_team/jinxs/npc_studio/npc-studio.jinx +77 -0
- npcsh/npc_team/jinxs/utils/agent.jinx +17 -0
- npcsh/npc_team/jinxs/utils/chat.jinx +44 -0
- npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
- npcsh/npc_team/jinxs/utils/compress.jinx +140 -0
- npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
- npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
- npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
- npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
- npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
- npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
- npcsh/npc_team/jinxs/utils/edit_file.jinx +94 -0
- npcsh/npc_team/jinxs/utils/load_file.jinx +35 -0
- npcsh/npc_team/jinxs/utils/ots.jinx +61 -0
- npcsh/npc_team/jinxs/utils/roll.jinx +68 -0
- npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
- npcsh/npc_team/jinxs/utils/search.jinx +130 -0
- npcsh/npc_team/jinxs/utils/serve.jinx +26 -0
- npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
- npcsh/npc_team/jinxs/utils/trigger.jinx +61 -0
- npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
- npcsh/npc_team/jinxs/utils/vixynt.jinx +144 -0
- npcsh/npc_team/kadiefa.npc +3 -0
- npcsh/npc_team/kadiefa.png +0 -0
- npcsh/npc_team/npcsh.ctx +18 -0
- npcsh/npc_team/npcsh_sibiji.png +0 -0
- npcsh/npc_team/plonk.npc +2 -0
- npcsh/npc_team/plonk.png +0 -0
- npcsh/npc_team/plonkjr.npc +2 -0
- npcsh/npc_team/plonkjr.png +0 -0
- npcsh/npc_team/sibiji.npc +3 -0
- npcsh/npc_team/sibiji.png +0 -0
- npcsh/npc_team/spool.png +0 -0
- npcsh/npc_team/yap.png +0 -0
- npcsh/npcsh.py +296 -112
- npcsh/parsing.py +118 -0
- npcsh/plonk.py +54 -0
- npcsh/pti.py +54 -0
- npcsh/routes.py +139 -0
- npcsh/spool.py +48 -0
- npcsh/ui.py +199 -0
- npcsh/wander.py +62 -0
- npcsh/yap.py +50 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/agent.jinx +17 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.jinx +194 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.npc +2 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/build.jinx +65 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/chat.jinx +44 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/cmd.jinx +44 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/compile.jinx +50 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/compress.jinx +140 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca.jinx +249 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca.npc +12 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/corca_example.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/edit_file.jinx +94 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/foreman.npc +7 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/frederic.npc +6 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/frederic4.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/guac.jinx +317 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/guac.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/help.jinx +52 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/init.jinx +41 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/jinxs.jinx +32 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.npc +3 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/load_file.jinx +35 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/npc-studio.jinx +77 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/npcsh.ctx +18 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/ots.jinx +61 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonk.jinx +214 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonk.npc +2 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonk.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.npc +2 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/pti.jinx +170 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/python.jinx +11 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/roll.jinx +68 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sample.jinx +56 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/search.jinx +130 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/serve.jinx +26 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/set.jinx +40 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sh.jinx +34 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.npc +3 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sleep.jinx +116 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/spool.jinx +161 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/spool.png +0 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/sql.jinx +16 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/trigger.jinx +61 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/usage.jinx +33 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/vixynt.jinx +144 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/wander.jinx +186 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/yap.jinx +262 -0
- npcsh-1.1.13.data/data/npcsh/npc_team/yap.png +0 -0
- npcsh-1.1.13.dist-info/METADATA +522 -0
- npcsh-1.1.13.dist-info/RECORD +135 -0
- {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/WHEEL +1 -1
- npcsh-1.1.13.dist-info/entry_points.txt +9 -0
- {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info/licenses}/LICENSE +1 -1
- npcsh/command_history.py +0 -81
- npcsh/helpers.py +0 -36
- npcsh/llm_funcs.py +0 -295
- npcsh/main.py +0 -5
- npcsh/modes.py +0 -343
- npcsh/npc_compiler.py +0 -124
- npcsh-0.1.2.dist-info/METADATA +0 -99
- npcsh-0.1.2.dist-info/RECORD +0 -14
- npcsh-0.1.2.dist-info/entry_points.txt +0 -2
- {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/top_level.txt +0 -0
|
jinx_name: "compress"
description: "Manages conversation and knowledge context. Defaults to compacting context. Use flags for other operations."
inputs:
  - flush: "" # The number of recent messages to flush.
  - sleep: False # If true, evolves the knowledge graph.
  - dream: False # Used with --sleep. Runs creative synthesis.
  - ops: "" # Used with --sleep. Comma-separated list of KG operations.
  - model: "" # Used with --sleep. LLM model for KG evolution.
  - provider: "" # Used with --sleep. LLM provider for KG evolution.
steps:
  - name: "manage_context_and_memory"
    engine: "python"
    code: |
      # Dispatcher with three mutually exclusive behaviors:
      #   --sleep  -> evolve the knowledge graph (optionally --dream)
      #   --flush  -> drop the last N conversation messages
      #   default  -> compact the conversation context via breathe()
      # NOTE(review): `context` and `exit()` are assumed to be provided by the
      # jinx execution engine (exit() aborts the step) — confirm against the runner.
      import os
      import traceback
      from npcpy.llm_funcs import breathe
      from npcpy.memory.command_history import CommandHistory, load_kg_from_db, save_kg_to_db
      from npcpy.memory.knowledge_graph import kg_sleep_process, kg_dream_process

      # --- Get all inputs from context ---
      flush_n_str = context.get('flush')
      is_sleeping = context.get('sleep')
      is_dreaming = context.get('dream')
      operations_str = context.get('ops')
      llm_model = context.get('model')
      llm_provider = context.get('provider')
      output_messages = context.get('messages', [])

      USAGE = """Usage:
      /compress (Compacts conversation context)
      /compress --flush <number> (Removes the last N messages)
      /compress --sleep [...] (Evolves the knowledge graph)
      --dream (With --sleep: enables creative synthesis)
      --ops "op1,op2" (With --sleep: specifies KG operations)
      --model <name> (With --sleep: specifies LLM model)
      --provider <name> (With --sleep: specifies LLM provider)"""

      # --- Argument Validation: Ensure mutual exclusivity ---
      is_flushing = flush_n_str is not None and flush_n_str.strip() != ''
      if is_sleeping and is_flushing:
          context['output'] = f"Error: --sleep and --flush are mutually exclusive.\n{USAGE}"
          context['messages'] = output_messages
          exit()

      # --- Dispatcher: Route to the correct functionality ---

      # 1. SLEEP: Evolve the Knowledge Graph
      if is_sleeping:
          current_npc = context.get('npc')
          current_team = context.get('team')

          # Parameter setup for KG process: explicit flags win, then the active
          # NPC's model/provider, then hard-coded Gemini fallbacks.
          operations_config = [op.strip() for op in operations_str.split(',')] if operations_str else None
          if not llm_model and current_npc: llm_model = current_npc.model
          if not llm_provider and current_npc: llm_provider = current_npc.provider
          if not llm_model: llm_model = "gemini-1.5-pro"
          if not llm_provider: llm_provider = "gemini"

          # KG entries are scoped by (team, npc, cwd); "__none__" is the
          # sentinel for a missing team/NPC.
          team_name = current_team.name if current_team else "__none__"
          npc_name = current_npc.name if current_npc else "__none__"
          current_path = os.getcwd()
          scope_str = f"Team: '{team_name}', NPC: '{npc_name}', Path: '{current_path}'"

          command_history = None
          try:
              db_path = os.getenv("NPCSH_DB_PATH", os.path.expanduser("~/npcsh_history.db"))
              command_history = CommandHistory(db_path)
              engine = command_history.engine
              current_kg = load_kg_from_db(engine, team_name, npc_name, current_path)

              if not current_kg or not current_kg.get('facts'):
                  # exit() raises SystemExit, which the `except Exception` below
                  # does NOT catch; the finally block still closes the DB handle.
                  context['output'] = f"Knowledge graph for the current scope is empty. Nothing to process.\n- Scope: {scope_str}"
                  exit()

              original_facts = len(current_kg.get('facts', []))
              original_concepts = len(current_kg.get('concepts', []))

              evolved_kg, _ = kg_sleep_process(existing_kg=current_kg, model=llm_model, provider=llm_provider, npc=current_npc, operations_config=operations_config)
              process_type = "Sleep"

              if is_dreaming:
                  # Dream runs on top of the already-slept KG.
                  evolved_kg, _ = kg_dream_process(existing_kg=evolved_kg, model=llm_model, provider=llm_provider, npc=current_npc)
                  process_type += " & Dream"

              save_kg_to_db(engine, evolved_kg, team_name, npc_name, current_path)

              new_facts = len(evolved_kg.get('facts', []))
              new_concepts = len(evolved_kg.get('concepts', []))

              context['output'] = (f"{process_type} process complete.\n"
                                   f"- Facts: {original_facts} -> {new_facts} ({new_facts - original_facts:+})\n"
                                   f"- Concepts: {original_concepts} -> {new_concepts} ({new_concepts - original_concepts:+})")
          except Exception as e:
              traceback.print_exc()
              context['output'] = f"Error during KG evolution: {e}"
          finally:
              if command_history: command_history.close()
              context['messages'] = output_messages

      # 2. FLUSH: Remove messages from context
      elif is_flushing:
          try:
              n = int(flush_n_str)
              if n <= 0:
                  context['output'] = "Error: Number of messages to flush must be positive."
                  exit()
          except ValueError:
              context['output'] = f"Error: Invalid number '{flush_n_str}'. {USAGE}"
              exit()

          messages_list = list(output_messages)
          original_len = len(messages_list)
          final_messages = []

          # A leading system message is preserved; only non-system messages count
          # toward the N removed. NOTE: num_to_remove == 0 would make [:-0] == [:0]
          # (an empty slice), but n > 0 is enforced above so that can only happen
          # when the sliced list is already empty.
          if messages_list and messages_list[0].get("role") == "system":
              system_message = messages_list.pop(0)
              num_to_remove = min(n, len(messages_list))
              final_messages = [system_message] + messages_list[:-num_to_remove]
          else:
              num_to_remove = min(n, original_len)
              final_messages = messages_list[:-num_to_remove]

          removed_count = original_len - len(final_messages)
          context['output'] = f"Flushed {removed_count} message(s). Context is now {len(final_messages)} messages."
          context['messages'] = final_messages

      # 3. DEFAULT: Compact conversation context
      else:
          try:
              # breathe() receives the whole jinx context as keyword arguments.
              result = breathe(**context)
              if isinstance(result, dict):
                  context['output'] = result.get('output', 'Context compressed.')
                  context['messages'] = result.get('messages', output_messages)
              else:
                  context['output'] = "Context compression process initiated."
                  context['messages'] = output_messages
          except Exception as e:
              traceback.print_exc()
              context['output'] = f"Error during context compression: {e}"
              context['messages'] = output_messages
jinx_name: "build"
description: "Build deployment artifacts for NPC team"
inputs:
  - target: "flask" # The type of deployment target (e.g., flask, docker, cli, static).
  - output: "./build" # The output directory for built artifacts.
  - team: "./npc_team" # The path to the NPC team directory.
  - port: 5337 # The port for flask server builds.
  - cors: "" # Comma-separated CORS origins for flask server builds.
steps:
  - name: "execute_build"
    engine: "python"
    code: |
      # Dispatches a build to the matching npcpy builder based on `target`.
      # Falls back to no-op mock builders when npcpy.build_funcs is absent so
      # the jinx degrades gracefully instead of crashing on import.
      import os

      # Assume these build functions are available in the execution environment
      # from a larger project context, e.g., from npcpy.build_funcs
      try:
          from npcpy.build_funcs import (
              build_flask_server,
              build_docker_compose,
              build_cli_executable,
              build_static_site,
          )
      except ImportError:
          # Provide mock functions for demonstration or error handling
          def build_flask_server(config, **kwargs): return {"output": f"Mock build flask: {config}", "messages": []}
          def build_docker_compose(config, **kwargs): return {"output": f"Mock build docker: {config}", "messages": []}
          def build_cli_executable(config, **kwargs): return {"output": f"Mock build cli: {config}", "messages": []}
          def build_static_site(config, **kwargs): return {"output": f"Mock build static: {config}", "messages": []}

      # FIX: context.get() returns None for missing keys; the original code then
      # crashed on None.strip() / os.path.expanduser(None). Fall back to the
      # declared input defaults instead (same `or` idiom used by ots.jinx).
      target = context.get('target') or "flask"
      output_dir = context.get('output') or "./build"
      team_path = context.get('team') or "./npc_team"
      port = context.get('port') or 5337
      cors_origins_str = context.get('cors') or ""

      # Empty string -> None so builders can distinguish "no CORS configured".
      cors_origins = [origin.strip() for origin in cors_origins_str.split(',')] if cors_origins_str.strip() else None

      build_config = {
          'team_path': os.path.abspath(os.path.expanduser(team_path)),
          'output_dir': os.path.abspath(os.path.expanduser(output_dir)),
          'target': target,
          'port': port,
          'cors_origins': cors_origins,
      }

      # Dict dispatch keeps target handling flat and makes the valid set
      # self-documenting in the error message below.
      builders = {
          'flask': build_flask_server,
          'docker': build_docker_compose,
          'cli': build_cli_executable,
          'static': build_static_site,
      }

      output_messages = context.get('messages', [])
      output_result = ""

      if target not in builders:
          output_result = f"Unknown target: {target}. Available: {list(builders.keys())}"
      else:
          result = builders[target](build_config, messages=output_messages)
          output_result = result.get('output', 'Build command executed.')
          output_messages = result.get('messages', output_messages) # Update messages from builder call

      context['output'] = output_result
      context['messages'] = output_messages
jinx_name: "compile"
description: "Compile NPC profiles"
inputs:
  - npc_file_path: "" # Optional path to a specific NPC file to compile.
  - npc_team_dir: "./npc_team" # Directory containing NPC profiles to compile, if no specific file is given.
steps:
  - name: "compile_npcs"
    engine: "python"
    code: |
      # Compiles a single NPC file when `npc_file_path` is given; otherwise only
      # acknowledges the team directory (directory-wide compilation is stubbed).
      import os
      import traceback
      from npcpy.npc_compiler import NPC, Team # Assuming Team might be needed for full directory compilation

      npc_file_path_arg = context.get('npc_file_path')
      npc_team_dir = context.get('npc_team_dir')
      output_messages = context.get('messages', [])

      output_result = ""
      compiled_npc_object = None

      try:
          if npc_file_path_arg and npc_file_path_arg.strip():
              npc_full_path = os.path.abspath(os.path.expanduser(npc_file_path_arg))
              if os.path.exists(npc_full_path):
                  # Assuming NPC() constructor "compiles" it by loading its definition
                  # NOTE(review): db_conn may be absent from context (None) — confirm
                  # NPC() tolerates that.
                  compiled_npc_object = NPC(file=npc_full_path, db_conn=context.get('db_conn'))
                  output_result = f"Compiled NPC: {npc_full_path}"
              else:
                  output_result = f"Error: NPC file not found: {npc_full_path}"
          else:
              # Compile all NPCs in the directory. This would typically involve iterating and loading.
              # For simplicity in this Jinx, we just acknowledge the directory.
              # A more robust implementation would loop through .npc files and compile them.
              abs_npc_team_dir = os.path.abspath(os.path.expanduser(npc_team_dir))
              if os.path.exists(abs_npc_team_dir):
                  output_result = f"Acknowledged compilation for all NPCs in directory: {abs_npc_team_dir}"
                  # Example of loading a Team and setting the compiled_npc_object to its forenpc if available
                  # team = Team(team_path=abs_npc_team_dir, db_conn=context.get('db_conn'))
                  # if team.forenpc:
                  #     compiled_npc_object = team.forenpc
              else:
                  output_result = f"Error: NPC team directory not found: {npc_team_dir}"
      except Exception as e:
          traceback.print_exc()
          output_result = f"Error compiling: {e}"

      context['output'] = output_result
      context['messages'] = output_messages
      if compiled_npc_object:
          context['compiled_npc_object'] = compiled_npc_object # Store the compiled NPC object if any
jinx_name: help
description: Show help for commands, NPCs, or Jinxs
inputs:
  - topic: null
steps:
  - name: show_help
    engine: python
    code: |
      # With no topic: lists every jinx visible to the active NPC plus the
      # common CLI flags. With a topic: shows that jinx's description/inputs.
      # NOTE(review): `npc` is assumed to be injected by the jinx engine as the
      # active NPC object — confirm against the runner.
      import json
      from npcsh._state import CANONICAL_ARGS, get_argument_help

      topic = context.get('topic')

      if not topic:
          output_lines = ["# Available Commands\n\n"]

          # Merge order matters: NPC-local jinxs override same-named team jinxs.
          all_jinxs = {}
          if hasattr(npc, 'team') and npc.team and hasattr(npc.team, 'jinxs_dict'):
              all_jinxs.update(npc.team.jinxs_dict)
          if hasattr(npc, 'jinxs_dict') and npc.jinxs_dict:
              all_jinxs.update(npc.jinxs_dict)

          for cmd in sorted(all_jinxs.keys()):
              jinx_obj = all_jinxs[cmd]
              desc = getattr(jinx_obj, 'description', 'No description')
              output_lines.append(f"/{cmd} - {desc}\n\n")

          arg_help_map = get_argument_help()
          if arg_help_map:
              output_lines.append("## Common Command-Line Flags\n\n")
              output_lines.append("The shortest unambiguous prefix works.\n")

              for arg in sorted(CANONICAL_ARGS):
                  aliases = arg_help_map.get(arg, [])
                  # Show the shortest alias as the short-flag form, e.g. "(-m)".
                  alias_str = f"(-{min(aliases, key=len)})" if aliases else ""
                  output_lines.append(f"--{arg:<20} {alias_str}\n")

          output = "".join(output_lines)
      else:
          # Specific topic: team jinxs are checked first, then NPC-local ones.
          jinx_obj = None
          if hasattr(npc, 'team') and npc.team and hasattr(npc.team, 'jinxs_dict'):
              jinx_obj = npc.team.jinxs_dict.get(topic)
          if not jinx_obj and hasattr(npc, 'jinxs_dict'):
              jinx_obj = npc.jinxs_dict.get(topic)

          if jinx_obj:
              output = f"## Help for Jinx: `/{topic}`\n\n"
              output += f"- **Description**: {jinx_obj.description}\n"
              if hasattr(jinx_obj, 'inputs') and jinx_obj.inputs:
                  output += f"- **Inputs**: {json.dumps(jinx_obj.inputs, indent=2)}\n"
          else:
              output = f"No help topic found for `{topic}`."
jinx_name: "init"
description: "Initialize NPC project"
inputs:
  - directory: "." # The directory where the NPC project should be initialized.
  - templates: "" # Optional templates to use for initialization.
  - context: "" # Optional context for project initialization.
  - model: "" # Optional LLM model to set as default for the project.
  - provider: "" # Optional LLM provider to set as default for the project.
steps:
  - name: "initialize_project"
    engine: "python"
    code: |
      # Thin wrapper over npcpy's initialize_npc_project; reports success or
      # the raised error as the step output.
      import os
      import traceback
      from npcpy.npc_compiler import initialize_npc_project

      directory = context.get('directory')
      templates = context.get('templates')
      context_param = context.get('context') # Renamed to avoid conflict with Jinx context
      model = context.get('model')
      provider = context.get('provider')
      output_messages = context.get('messages', [])

      output_result = ""
      try:
          initialize_npc_project(
              directory=directory,
              templates=templates,
              context=context_param, # Use the renamed context parameter
              model=model,
              provider=provider
          )
          output_result = f"NPC project initialized in {os.path.abspath(directory)}."
      except NameError:
          # Defensive: only reachable if the import above was somehow bypassed.
          output_result = "Init function (initialize_npc_project) not available."
      except Exception as e:
          traceback.print_exc()
          output_result = f"Error initializing project: {e}"

      context['output'] = output_result
      context['messages'] = output_messages
jinx_name: jinxs
description: Show available jinxs for the current NPC/Team
inputs: []
steps:
  - name: list_jinxs
    engine: python
    code: |
      # Lists team-level jinxs first, then NPC-local jinxs that were not
      # already listed at the team level. `npc` is injected by the jinx engine.
      seen = set()
      lines = ["Available Jinxs:\n"]

      team = getattr(npc, 'team', None)
      team_jinxs = getattr(team, 'jinxs_dict', None) if team else None
      if team_jinxs:
          lines.append("\n--- Team Jinxs ---\n")
          for name, jinx in sorted(team_jinxs.items()):
              lines.append(f"- /{name}: {getattr(jinx, 'description', 'No description available.')}\n")
              seen.add(name)

      npc_jinxs = getattr(npc, 'jinxs_dict', None)
      if npc_jinxs:
          # Header is emitted even if every NPC jinx duplicates a team jinx,
          # matching the section-per-source layout.
          lines.append(f"\n--- NPC Jinxs for {npc.name} ---\n")
          for name, jinx in sorted(npc_jinxs.items()):
              if name in seen:
                  continue
              lines.append(f"- /{name}: {getattr(jinx, 'description', 'No description available.')}\n")
              seen.add(name)

      output = "".join(lines) if seen else "No jinxs available for the current context."
jinx_name: "set"
description: "Set configuration values"
inputs:
  - key: "" # The configuration key to set.
  - value: "" # The value to set for the configuration key.
steps:
  - name: "set_config_value"
    engine: "python"
    code: |
      # Persists a single key/value pair via npcsh's config setter, falling
      # back to a print-only mock when npcsh._state cannot be imported.
      import traceback
      # Assuming set_npcsh_config_value is accessible
      try:
          from npcsh._state import set_npcsh_config_value
      except ImportError:
          def set_npcsh_config_value(key, value):
              print(f"Mock: Setting config '{key}' to '{value}'")
              # In a real scenario, this might write to a config file or global state
              pass

      key = context.get('key')
      value = context.get('value')
      output_messages = context.get('messages', [])

      output_result = ""
      # NOTE(review): falsy values ("" or None) are rejected here, so clearing a
      # key with `/set key=` is not possible — confirm this is intended.
      if not key or not value:
          context['output'] = "Usage: /set <key>=<value>"
          context['messages'] = output_messages
          exit()

      try:
          set_npcsh_config_value(key, value)
          output_result = f"Configuration value '{key}' set."
      except NameError:
          # Defensive: only reachable if both the import and the mock failed.
          output_result = "Set function (set_npcsh_config_value) not available."
      except Exception as e:
          traceback.print_exc()
          output_result = f"Error setting configuration '{key}': {e}"

      context['output'] = output_result
      context['messages'] = output_messages
jinx_name: edit_file
description: Examines a file, determines what changes are needed, and applies those
  changes.
inputs:
  - file_path
  - edit_instructions
  - backup: true
steps:
  - name: "edit_file"
    engine: "python"
    code: |
      # Asks the NPC's LLM for a structured list of edits (replace / insert /
      # delete) and applies them textually to the file, optionally writing a
      # .bak backup first. Inputs arrive via jinja template substitution.
      import json
      import os
      from npcpy.llm_funcs import get_llm_response

      file_path = os.path.expanduser("{{ file_path }}")
      edit_instructions = "{{ edit_instructions }}"
      backup_str = "{{ backup }}"
      create_backup = backup_str.lower() not in ('false', 'no', '0', '')

      with open(file_path, 'r') as f:
          original_content = f.read()

      if create_backup:
          backup_path = file_path + ".bak"
          with open(backup_path, 'w') as f:
              f.write(original_content)

      prompt = """You are a code editing assistant. Analyze this file and make the requested changes.

      File content:
      """ + original_content + """

      Edit instructions: """ + edit_instructions + """

      Return a JSON object with these fields:
      1. "modifications": An array of modification objects, where each object has:
         - "type": One of "replace", "insert_after", "insert_before", or "delete"
         - "target": For "insert_after" and "insert_before", the text to insert after/before
           For "delete", the text to delete
         - "original": For "replace", the text to be replaced
         - "replacement": For "replace", the text to replace with
         - "insertion": For "insert_after" and "insert_before", the text to insert
      2. "explanation": Brief explanation of the changes made
      """

      response = get_llm_response(prompt, model=npc.model, provider=npc.provider, npc=npc, format="json")

      result = response.get("response", {})
      # FIX: some providers return the JSON payload as a string rather than a
      # parsed dict; normalize so the .get() calls below cannot raise.
      if isinstance(result, str):
          try:
              result = json.loads(result)
          except ValueError:
              result = {}
      if not isinstance(result, dict):
          result = {}
      modifications = result.get("modifications", [])
      explanation = result.get("explanation", "No explanation provided")

      updated_content = original_content
      changes_applied = 0

      for mod in modifications:
          # FIX: the LLM may emit malformed entries or omit fields; the original
          # code raised TypeError on `None in updated_content`. Skip instead.
          if not isinstance(mod, dict):
              continue
          mod_type = mod.get("type")

          if mod_type == "replace":
              original = mod.get("original")
              replacement = mod.get("replacement")
              # `replacement` may legitimately be "" (delete-via-replace).
              if original and replacement is not None and original in updated_content:
                  updated_content = updated_content.replace(original, replacement)
                  changes_applied += 1

          elif mod_type == "insert_after":
              target = mod.get("target")
              insertion = mod.get("insertion")
              if target and insertion is not None and target in updated_content:
                  updated_content = updated_content.replace(target, target + insertion)
                  changes_applied += 1

          elif mod_type == "insert_before":
              target = mod.get("target")
              insertion = mod.get("insertion")
              if target and insertion is not None and target in updated_content:
                  updated_content = updated_content.replace(target, insertion + target)
                  changes_applied += 1

          elif mod_type == "delete":
              target = mod.get("target")
              if target and target in updated_content:
                  updated_content = updated_content.replace(target, "")
                  changes_applied += 1

      with open(file_path, 'w') as f:
          f.write(updated_content)

      output = "Applied " + str(changes_applied) + " changes to " + file_path + "\n\n" + explanation
jinx_name: load_file
description: Loads and returns the contents of a file using npcpy's file loaders
inputs:
  - file_path
steps:
  - name: "load_file"
    engine: "python"
    code: |
      # Loads a file through npcpy's loader (which chunks content) and returns
      # either the joined content or an error string via `output`.
      import os
      from npcpy.data.load import load_file_contents

      file_path = os.path.expanduser("{{ file_path }}")

      if not os.path.exists(file_path):
          output = f"Error: File not found at {file_path}"
      else:
          try:
              # load_file_contents returns chunks (default chunk_size=250).
              loaded = load_file_contents(file_path)
          except Exception as e:
              output = f"Error reading file {file_path}: {str(e)}"
          else:
              if not isinstance(loaded, list):
                  output = f"File: {file_path}\n\n{loaded}"
              elif loaded and loaded[0].startswith("Error"):
                  # The loader signals its own failures as an "Error..." chunk.
                  output = loaded[0]
              else:
                  file_content = "\n".join(loaded)
                  output = f"File: {file_path}\n\n{file_content}"
jinx_name: "ots"
description: "Take screenshot and analyze with vision model. Usage: /ots <prompt>"
inputs:
  - prompt
  - image_paths_args: ""
  - vmodel: ""
  - vprovider: ""
steps:
  - name: "analyze_screenshot_or_image"
    engine: "python"
    code: |
      # Sends images (user-supplied paths, or a fresh screenshot when none are
      # given) plus a prompt to a vision-capable LLM and stores the response.
      import os
      import traceback
      from npcpy.llm_funcs import get_llm_response
      from npcpy.data.image import capture_screenshot

      # `or` defaults keep missing/None context values from propagating.
      user_prompt = context.get('prompt') or ""
      image_paths_args_str = context.get('image_paths_args') or ""
      vision_model = context.get('vmodel') or ""
      vision_provider = context.get('vprovider') or ""
      stream_output = context.get('stream') or False
      api_url = context.get('api_url') or ""
      api_key = context.get('api_key') or ""
      output_messages = context.get('messages', [])
      current_npc = context.get('npc')

      # Resolve explicit image paths; nonexistent paths are silently dropped.
      image_paths = []
      if image_paths_args_str.strip():
          for img_path_arg in image_paths_args_str.split(','):
              full_path = os.path.abspath(os.path.expanduser(img_path_arg.strip()))
              if os.path.exists(full_path):
                  image_paths.append(full_path)

      # No usable images: fall back to capturing a screenshot.
      # NOTE(review): full=False presumably captures a region/active window
      # rather than the full screen — confirm against capture_screenshot.
      if not image_paths:
          screenshot_info = capture_screenshot(full=False)
          if screenshot_info and "file_path" in screenshot_info:
              image_paths.append(screenshot_info["file_path"])
              print(f"📸 Screenshot captured: {screenshot_info.get('filename', os.path.basename(screenshot_info['file_path']))}")

      # Model/provider resolution: explicit input, then the active NPC's
      # settings, then local-ollama defaults.
      if not vision_model:
          vision_model = getattr(current_npc, 'model', 'gemma3:4b')

      if not vision_provider:
          vision_provider = getattr(current_npc, 'provider', 'ollama')

      response_data = get_llm_response(
          prompt=user_prompt,
          model=vision_model,
          provider=vision_provider,
          messages=output_messages,
          images=image_paths,
          stream=stream_output,
          npc=current_npc,
          api_url=api_url or None,
          api_key=api_key or None
      )

      context['output'] = response_data.get('response', 'No response received')
      context['messages'] = response_data.get('messages', output_messages)
      # Record which model/provider actually served the request.
      context['model'] = vision_model
      context['provider'] = vision_provider