npcsh 0.1.2__py3-none-any.whl → 1.1.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (143)
  1. npcsh/_state.py +3508 -0
  2. npcsh/alicanto.py +65 -0
  3. npcsh/build.py +291 -0
  4. npcsh/completion.py +206 -0
  5. npcsh/config.py +163 -0
  6. npcsh/corca.py +50 -0
  7. npcsh/execution.py +185 -0
  8. npcsh/guac.py +46 -0
  9. npcsh/mcp_helpers.py +357 -0
  10. npcsh/mcp_server.py +299 -0
  11. npcsh/npc.py +323 -0
  12. npcsh/npc_team/alicanto.npc +2 -0
  13. npcsh/npc_team/alicanto.png +0 -0
  14. npcsh/npc_team/corca.npc +12 -0
  15. npcsh/npc_team/corca.png +0 -0
  16. npcsh/npc_team/corca_example.png +0 -0
  17. npcsh/npc_team/foreman.npc +7 -0
  18. npcsh/npc_team/frederic.npc +6 -0
  19. npcsh/npc_team/frederic4.png +0 -0
  20. npcsh/npc_team/guac.png +0 -0
  21. npcsh/npc_team/jinxs/code/python.jinx +11 -0
  22. npcsh/npc_team/jinxs/code/sh.jinx +34 -0
  23. npcsh/npc_team/jinxs/code/sql.jinx +16 -0
  24. npcsh/npc_team/jinxs/modes/alicanto.jinx +194 -0
  25. npcsh/npc_team/jinxs/modes/corca.jinx +249 -0
  26. npcsh/npc_team/jinxs/modes/guac.jinx +317 -0
  27. npcsh/npc_team/jinxs/modes/plonk.jinx +214 -0
  28. npcsh/npc_team/jinxs/modes/pti.jinx +170 -0
  29. npcsh/npc_team/jinxs/modes/spool.jinx +161 -0
  30. npcsh/npc_team/jinxs/modes/wander.jinx +186 -0
  31. npcsh/npc_team/jinxs/modes/yap.jinx +262 -0
  32. npcsh/npc_team/jinxs/npc_studio/npc-studio.jinx +77 -0
  33. npcsh/npc_team/jinxs/utils/agent.jinx +17 -0
  34. npcsh/npc_team/jinxs/utils/chat.jinx +44 -0
  35. npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
  36. npcsh/npc_team/jinxs/utils/compress.jinx +140 -0
  37. npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
  38. npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
  39. npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
  40. npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
  41. npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
  42. npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
  43. npcsh/npc_team/jinxs/utils/edit_file.jinx +94 -0
  44. npcsh/npc_team/jinxs/utils/load_file.jinx +35 -0
  45. npcsh/npc_team/jinxs/utils/ots.jinx +61 -0
  46. npcsh/npc_team/jinxs/utils/roll.jinx +68 -0
  47. npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
  48. npcsh/npc_team/jinxs/utils/search.jinx +130 -0
  49. npcsh/npc_team/jinxs/utils/serve.jinx +26 -0
  50. npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
  51. npcsh/npc_team/jinxs/utils/trigger.jinx +61 -0
  52. npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
  53. npcsh/npc_team/jinxs/utils/vixynt.jinx +144 -0
  54. npcsh/npc_team/kadiefa.npc +3 -0
  55. npcsh/npc_team/kadiefa.png +0 -0
  56. npcsh/npc_team/npcsh.ctx +18 -0
  57. npcsh/npc_team/npcsh_sibiji.png +0 -0
  58. npcsh/npc_team/plonk.npc +2 -0
  59. npcsh/npc_team/plonk.png +0 -0
  60. npcsh/npc_team/plonkjr.npc +2 -0
  61. npcsh/npc_team/plonkjr.png +0 -0
  62. npcsh/npc_team/sibiji.npc +3 -0
  63. npcsh/npc_team/sibiji.png +0 -0
  64. npcsh/npc_team/spool.png +0 -0
  65. npcsh/npc_team/yap.png +0 -0
  66. npcsh/npcsh.py +296 -112
  67. npcsh/parsing.py +118 -0
  68. npcsh/plonk.py +54 -0
  69. npcsh/pti.py +54 -0
  70. npcsh/routes.py +139 -0
  71. npcsh/spool.py +48 -0
  72. npcsh/ui.py +199 -0
  73. npcsh/wander.py +62 -0
  74. npcsh/yap.py +50 -0
  75. npcsh-1.1.13.data/data/npcsh/npc_team/agent.jinx +17 -0
  76. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.jinx +194 -0
  77. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.npc +2 -0
  78. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.png +0 -0
  79. npcsh-1.1.13.data/data/npcsh/npc_team/build.jinx +65 -0
  80. npcsh-1.1.13.data/data/npcsh/npc_team/chat.jinx +44 -0
  81. npcsh-1.1.13.data/data/npcsh/npc_team/cmd.jinx +44 -0
  82. npcsh-1.1.13.data/data/npcsh/npc_team/compile.jinx +50 -0
  83. npcsh-1.1.13.data/data/npcsh/npc_team/compress.jinx +140 -0
  84. npcsh-1.1.13.data/data/npcsh/npc_team/corca.jinx +249 -0
  85. npcsh-1.1.13.data/data/npcsh/npc_team/corca.npc +12 -0
  86. npcsh-1.1.13.data/data/npcsh/npc_team/corca.png +0 -0
  87. npcsh-1.1.13.data/data/npcsh/npc_team/corca_example.png +0 -0
  88. npcsh-1.1.13.data/data/npcsh/npc_team/edit_file.jinx +94 -0
  89. npcsh-1.1.13.data/data/npcsh/npc_team/foreman.npc +7 -0
  90. npcsh-1.1.13.data/data/npcsh/npc_team/frederic.npc +6 -0
  91. npcsh-1.1.13.data/data/npcsh/npc_team/frederic4.png +0 -0
  92. npcsh-1.1.13.data/data/npcsh/npc_team/guac.jinx +317 -0
  93. npcsh-1.1.13.data/data/npcsh/npc_team/guac.png +0 -0
  94. npcsh-1.1.13.data/data/npcsh/npc_team/help.jinx +52 -0
  95. npcsh-1.1.13.data/data/npcsh/npc_team/init.jinx +41 -0
  96. npcsh-1.1.13.data/data/npcsh/npc_team/jinxs.jinx +32 -0
  97. npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.npc +3 -0
  98. npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.png +0 -0
  99. npcsh-1.1.13.data/data/npcsh/npc_team/load_file.jinx +35 -0
  100. npcsh-1.1.13.data/data/npcsh/npc_team/npc-studio.jinx +77 -0
  101. npcsh-1.1.13.data/data/npcsh/npc_team/npcsh.ctx +18 -0
  102. npcsh-1.1.13.data/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  103. npcsh-1.1.13.data/data/npcsh/npc_team/ots.jinx +61 -0
  104. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.jinx +214 -0
  105. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.npc +2 -0
  106. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.png +0 -0
  107. npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.npc +2 -0
  108. npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.png +0 -0
  109. npcsh-1.1.13.data/data/npcsh/npc_team/pti.jinx +170 -0
  110. npcsh-1.1.13.data/data/npcsh/npc_team/python.jinx +11 -0
  111. npcsh-1.1.13.data/data/npcsh/npc_team/roll.jinx +68 -0
  112. npcsh-1.1.13.data/data/npcsh/npc_team/sample.jinx +56 -0
  113. npcsh-1.1.13.data/data/npcsh/npc_team/search.jinx +130 -0
  114. npcsh-1.1.13.data/data/npcsh/npc_team/serve.jinx +26 -0
  115. npcsh-1.1.13.data/data/npcsh/npc_team/set.jinx +40 -0
  116. npcsh-1.1.13.data/data/npcsh/npc_team/sh.jinx +34 -0
  117. npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.npc +3 -0
  118. npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.png +0 -0
  119. npcsh-1.1.13.data/data/npcsh/npc_team/sleep.jinx +116 -0
  120. npcsh-1.1.13.data/data/npcsh/npc_team/spool.jinx +161 -0
  121. npcsh-1.1.13.data/data/npcsh/npc_team/spool.png +0 -0
  122. npcsh-1.1.13.data/data/npcsh/npc_team/sql.jinx +16 -0
  123. npcsh-1.1.13.data/data/npcsh/npc_team/trigger.jinx +61 -0
  124. npcsh-1.1.13.data/data/npcsh/npc_team/usage.jinx +33 -0
  125. npcsh-1.1.13.data/data/npcsh/npc_team/vixynt.jinx +144 -0
  126. npcsh-1.1.13.data/data/npcsh/npc_team/wander.jinx +186 -0
  127. npcsh-1.1.13.data/data/npcsh/npc_team/yap.jinx +262 -0
  128. npcsh-1.1.13.data/data/npcsh/npc_team/yap.png +0 -0
  129. npcsh-1.1.13.dist-info/METADATA +522 -0
  130. npcsh-1.1.13.dist-info/RECORD +135 -0
  131. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/WHEEL +1 -1
  132. npcsh-1.1.13.dist-info/entry_points.txt +9 -0
  133. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info/licenses}/LICENSE +1 -1
  134. npcsh/command_history.py +0 -81
  135. npcsh/helpers.py +0 -36
  136. npcsh/llm_funcs.py +0 -295
  137. npcsh/main.py +0 -5
  138. npcsh/modes.py +0 -343
  139. npcsh/npc_compiler.py +0 -124
  140. npcsh-0.1.2.dist-info/METADATA +0 -99
  141. npcsh-0.1.2.dist-info/RECORD +0 -14
  142. npcsh-0.1.2.dist-info/entry_points.txt +0 -2
  143. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,194 @@
+ jinx_name: alicanto
+ description: Deep research mode - multi-perspective exploration with gold insights and cliff warnings
+ npc: forenpc
+ inputs:
+   - query: null
+   - num_npcs: 5
+   - depth: 3
+   - model: null
+   - provider: null
+   - max_steps: 20
+   - skip_research: true
+   - exploration: 0.3
+   - creativity: 0.5
+   - format: report
+
+ steps:
+   - name: alicanto_research
+     engine: python
+     code: |
+       import os
+       from termcolor import colored
+
+       from npcpy.llm_funcs import get_llm_response
+       from npcpy.data.web import search_web
+       from npcpy.npc_compiler import NPC
+
+       npc = context.get('npc')
+       team = context.get('team')
+       messages = context.get('messages', [])
+
+       query = context.get('query')
+       num_npcs = int(context.get('num_npcs', 5))
+       depth = int(context.get('depth', 3))
+       max_steps = int(context.get('max_steps', 20))
+       skip_research = context.get('skip_research', True)
+       exploration = float(context.get('exploration', 0.3))
+       creativity = float(context.get('creativity', 0.5))
+       output_format = context.get('format', 'report')
+
+       model = context.get('model') or (npc.model if npc else 'gemini-1.5-pro')
+       provider = context.get('provider') or (npc.provider if npc else 'gemini')
+
+       if not query:
+           context['output'] = """Usage: /alicanto <research query>
+
+       Options:
+         --num-npcs N      Number of research perspectives (default: 5)
+         --depth N         Research depth (default: 3)
+         --max-steps N     Maximum research steps (default: 20)
+         --exploration F   Exploration factor 0-1 (default: 0.3)
+         --creativity F    Creativity factor 0-1 (default: 0.5)
+         --format FORMAT   Output: report|summary|full (default: report)
+
+       Example: /alicanto What are the latest advances in quantum computing?"""
+           context['messages'] = messages
+           exit()
+
+       print(f"""
+        █████╗ ██╗     ██╗ ██████╗ █████╗ ███╗   ██╗████████╗ ██████╗
+       ██╔══██╗██║     ██║██╔════╝██╔══██╗████╗  ██║╚══██╔══╝██╔═══██╗
+       ███████║██║     ██║██║     ███████║██╔██╗ ██║   ██║   ██║   ██║
+       ██╔══██║██║     ██║██║     ██╔══██║██║╚██╗██║   ██║   ██║   ██║
+       ██║  ██║███████╗██║╚██████╗██║  ██║██║ ╚████║   ██║   ╚██████╔╝
+       ╚═╝  ╚═╝╚══════╝╚═╝ ╚═════╝╚═╝  ╚═╝╚═╝  ╚═══╝   ╚═╝    ╚═════╝
+
+       Deep Research Mode
+       Query: {query}
+       Perspectives: {num_npcs} | Depth: {depth} | Max Steps: {max_steps}
+       """)
+
+       # Generate research perspectives
+       perspectives_prompt = f"""Generate {num_npcs} distinct research perspectives for investigating: "{query}"
+
+       For each perspective, provide:
+       1. Name (a descriptive title)
+       2. Approach (how this perspective would investigate)
+       3. Key questions to explore
+
+       Return as a numbered list."""
+
+       print(colored("Generating research perspectives...", "cyan"))
+       resp = get_llm_response(
+           perspectives_prompt,
+           model=model,
+           provider=provider,
+           npc=npc
+       )
+       perspectives = str(resp.get('response', ''))
+       print(perspectives)
+
+       # Conduct web research if not skipped
+       research_findings = ""
+       if not skip_research:
+           print(colored("\nConducting web research...", "cyan"))
+           try:
+               search_results = search_web(query, n_results=5)
+               if search_results:
+                   research_findings = "\n\nWeb Research Findings:\n"
+                   for i, result in enumerate(search_results[:5], 1):
+                       title = result.get('title', 'No title')
+                       snippet = result.get('snippet', result.get('body', ''))[:200]
+                       research_findings += f"\n{i}. {title}\n   {snippet}...\n"
+                   print(colored(f"Found {len(search_results)} sources", "green"))
+           except Exception as e:
+               print(colored(f"Web search error: {e}", "yellow"))
+
+       # Multi-step exploration from each perspective
+       all_insights = []
+       gold_insights = []  # Key valuable findings
+       cliff_warnings = []  # Potential pitfalls or caveats
+
+       for step in range(min(depth, max_steps)):
+           print(colored(f"\n--- Research Depth {step + 1}/{depth} ---", "cyan"))
+
+           explore_prompt = f"""Research query: "{query}"
+
+       Perspectives generated:
+       {perspectives}
+
+       {research_findings}
+
+       Previous insights: {all_insights[-3:] if all_insights else 'None yet'}
+
+       For depth level {step + 1}:
+       1. Explore deeper implications from each perspective
+       2. Identify GOLD insights (valuable, non-obvious findings) - mark with [GOLD]
+       3. Identify CLIFF warnings (pitfalls, caveats, risks) - mark with [CLIFF]
+       4. Connect insights across perspectives
+
+       Exploration factor: {exploration} (higher = more diverse exploration)
+       Creativity factor: {creativity} (higher = more novel connections)"""
+
+           resp = get_llm_response(
+               explore_prompt,
+               model=model,
+               provider=provider,
+               temperature=creativity,
+               npc=npc
+           )
+
+           step_insights = str(resp.get('response', ''))
+           print(step_insights)
+
+           # Extract gold and cliff markers
+           if '[GOLD]' in step_insights:
+               gold_insights.extend([line.strip() for line in step_insights.split('\n') if '[GOLD]' in line])
+           if '[CLIFF]' in step_insights:
+               cliff_warnings.extend([line.strip() for line in step_insights.split('\n') if '[CLIFF]' in line])
+
+           all_insights.append(step_insights)
+
+       # Generate final synthesis
+       print(colored("\n--- Synthesizing Research ---", "cyan"))
+
+       synthesis_prompt = f"""Synthesize research on: "{query}"
+
+       All insights gathered:
+       {chr(10).join(all_insights)}
+
+       Gold insights identified:
+       {chr(10).join(gold_insights) if gold_insights else 'None explicitly marked'}
+
+       Cliff warnings identified:
+       {chr(10).join(cliff_warnings) if cliff_warnings else 'None explicitly marked'}
+
+       Generate a {output_format} that:
+       1. Summarizes key findings
+       2. Highlights the most valuable insights (gold)
+       3. Notes important caveats and risks (cliffs)
+       4. Provides actionable conclusions"""
+
+       resp = get_llm_response(
+           synthesis_prompt,
+           model=model,
+           provider=provider,
+           npc=npc
+       )
+
+       final_report = str(resp.get('response', ''))
+       print("\n" + "="*60)
+       print(colored("ALICANTO RESEARCH REPORT", "green", attrs=['bold']))
+       print("="*60)
+       print(final_report)
+
+       context['output'] = final_report
+       context['messages'] = messages
+       context['alicanto_result'] = {
+           'query': query,
+           'perspectives': perspectives,
+           'insights': all_insights,
+           'gold': gold_insights,
+           'cliffs': cliff_warnings,
+           'report': final_report
+       }
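A note on the [GOLD]/[CLIFF] handling above: it is a plain line filter over the model's response text. Factored out on its own, the pattern is the snippet below (the helper name is illustrative, not part of the package):

    def extract_marked(text: str, marker: str) -> list:
        # Keep only lines that carry the given inline marker, e.g. '[GOLD]' or '[CLIFF]'.
        return [line.strip() for line in text.split('\n') if marker in line]

    gold_insights.extend(extract_marked(step_insights, '[GOLD]'))
    cliff_warnings.extend(extract_marked(step_insights, '[CLIFF]'))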
@@ -0,0 +1,2 @@
+ name: alicanto
+ primary_directive: You are Alicanto the mythical bird. You have been spotted and it is your job to lead users to explore the world.
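These .npc profiles are small YAML files: a name plus a primary_directive. Elsewhere in this diff they are loaded via npcpy.npc_compiler.NPC(file=...), so a minimal load would plausibly look like the sketch below (whether db_conn may be omitted is an assumption; the compile jinx later in this diff passes it explicitly):

    from npcpy.npc_compiler import NPC

    # Load the alicanto profile from its .npc file (db_conn omitted here; assumed optional).
    alicanto = NPC(file='npcsh/npc_team/alicanto.npc')
    print(alicanto.name)  # expected: 'alicanto'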
@@ -0,0 +1,65 @@
+ jinx_name: "build"
+ description: "Build deployment artifacts for NPC team"
+ inputs:
+   - target: "flask" # The type of deployment target (e.g., flask, docker, cli, static).
+   - output: "./build" # The output directory for built artifacts.
+   - team: "./npc_team" # The path to the NPC team directory.
+   - port: 5337 # The port for flask server builds.
+   - cors: "" # Comma-separated CORS origins for flask server builds.
+ steps:
+   - name: "execute_build"
+     engine: "python"
+     code: |
+       import os
+
+       # Assume these build functions are available in the execution environment
+       # from a larger project context, e.g., from npcpy.build_funcs
+       try:
+           from npcpy.build_funcs import (
+               build_flask_server,
+               build_docker_compose,
+               build_cli_executable,
+               build_static_site,
+           )
+       except ImportError:
+           # Provide mock functions for demonstration or error handling
+           def build_flask_server(config, **kwargs): return {"output": f"Mock build flask: {config}", "messages": []}
+           def build_docker_compose(config, **kwargs): return {"output": f"Mock build docker: {config}", "messages": []}
+           def build_cli_executable(config, **kwargs): return {"output": f"Mock build cli: {config}", "messages": []}
+           def build_static_site(config, **kwargs): return {"output": f"Mock build static: {config}", "messages": []}
+
+       target = context.get('target')
+       output_dir = context.get('output')
+       team_path = context.get('team')
+       port = context.get('port')
+       cors_origins_str = context.get('cors')
+
+       cors_origins = [origin.strip() for origin in cors_origins_str.split(',')] if cors_origins_str.strip() else None
+
+       build_config = {
+           'team_path': os.path.abspath(os.path.expanduser(team_path)),
+           'output_dir': os.path.abspath(os.path.expanduser(output_dir)),
+           'target': target,
+           'port': port,
+           'cors_origins': cors_origins,
+       }
+
+       builders = {
+           'flask': build_flask_server,
+           'docker': build_docker_compose,
+           'cli': build_cli_executable,
+           'static': build_static_site,
+       }
+
+       output_messages = context.get('messages', [])
+       output_result = ""
+
+       if target not in builders:
+           output_result = f"Unknown target: {target}. Available: {list(builders.keys())}"
+       else:
+           result = builders[target](build_config, messages=output_messages)
+           output_result = result.get('output', 'Build command executed.')
+           output_messages = result.get('messages', output_messages)  # Update messages from builder call
+
+       context['output'] = output_result
+       context['messages'] = output_messages
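The cors input above collapses an empty flag to None and otherwise splits on commas. As a standalone helper (the function name is illustrative; the logic matches the line in the jinx):

    def parse_cors(cors_origins_str: str):
        # Whitespace-only input means no CORS origins were configured.
        if not cors_origins_str.strip():
            return None
        return [origin.strip() for origin in cors_origins_str.split(',')]

    parse_cors("")                                      # -> None
    parse_cors("http://localhost:3000, https://a.dev")  # -> ['http://localhost:3000', 'https://a.dev']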
@@ -0,0 +1,44 @@
+ jinx_name: chat
+ description: Simple chat mode - LLM conversation without tool execution
+ inputs:
+   - query: null
+   - model: null
+   - provider: null
+   - stream: true
+
+ steps:
+   - name: chat_response
+     engine: python
+     code: |
+       from npcpy.llm_funcs import get_llm_response
+
+       npc = context.get('npc')
+       messages = context.get('messages', [])
+       query = context.get('query', '')
+       stream = context.get('stream', True)
+
+       model = context.get('model') or (npc.model if npc else None)
+       provider = context.get('provider') or (npc.provider if npc else None)
+
+       if not query:
+           context['output'] = ''
+           context['messages'] = messages
+       else:
+           response = get_llm_response(
+               query,
+               model=model,
+               provider=provider,
+               npc=npc,
+               stream=stream,
+               messages=messages
+           )
+
+           context['output'] = response.get('response', '')
+           context['messages'] = response.get('messages', messages)
+
+           # Track usage
+           if 'usage' in response and npc and hasattr(npc, 'shared_context'):
+               usage = response['usage']
+               npc.shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
+               npc.shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
+               npc.shared_context['turn_count'] += 1
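The usage-tracking block assumes npc.shared_context already holds the three counters; an NPC constructed without them would raise KeyError on the += lines. A defensive variant (a sketch, not what the wheel ships) seeds them first:

    # Hypothetical hardening of the tracking block above.
    ctx = npc.shared_context
    for key in ('session_input_tokens', 'session_output_tokens', 'turn_count'):
        ctx.setdefault(key, 0)
    ctx['session_input_tokens'] += usage.get('input_tokens', 0)
    ctx['session_output_tokens'] += usage.get('output_tokens', 0)
    ctx['turn_count'] += 1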
@@ -0,0 +1,44 @@
+ jinx_name: cmd
+ description: Command mode - LLM generates and executes shell commands
+ inputs:
+   - query: null
+   - model: null
+   - provider: null
+   - stream: true
+
+ steps:
+   - name: cmd_execute
+     engine: python
+     code: |
+       from npcpy.llm_funcs import execute_llm_command
+
+       npc = context.get('npc')
+       messages = context.get('messages', [])
+       query = context.get('query', '')
+       stream = context.get('stream', True)
+
+       model = context.get('model') or (npc.model if npc else None)
+       provider = context.get('provider') or (npc.provider if npc else None)
+
+       if not query:
+           context['output'] = ''
+           context['messages'] = messages
+       else:
+           response = execute_llm_command(
+               query,
+               model=model,
+               provider=provider,
+               npc=npc,
+               stream=stream,
+               messages=messages
+           )
+
+           context['output'] = response.get('response', '')
+           context['messages'] = response.get('messages', messages)
+
+           # Track usage
+           if 'usage' in response and npc and hasattr(npc, 'shared_context'):
+               usage = response['usage']
+               npc.shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
+               npc.shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
+               npc.shared_context['turn_count'] += 1
@@ -0,0 +1,50 @@
+ jinx_name: "compile"
+ description: "Compile NPC profiles"
+ inputs:
+   - npc_file_path: "" # Optional path to a specific NPC file to compile.
+   - npc_team_dir: "./npc_team" # Directory containing NPC profiles to compile, if no specific file is given.
+ steps:
+   - name: "compile_npcs"
+     engine: "python"
+     code: |
+       import os
+       import traceback
+       from npcpy.npc_compiler import NPC, Team # Assuming Team might be needed for full directory compilation
+
+       npc_file_path_arg = context.get('npc_file_path')
+       npc_team_dir = context.get('npc_team_dir')
+       output_messages = context.get('messages', [])
+
+       output_result = ""
+       compiled_npc_object = None
+
+       try:
+           if npc_file_path_arg and npc_file_path_arg.strip():
+               npc_full_path = os.path.abspath(os.path.expanduser(npc_file_path_arg))
+               if os.path.exists(npc_full_path):
+                   # Assuming NPC() constructor "compiles" it by loading its definition
+                   compiled_npc_object = NPC(file=npc_full_path, db_conn=context.get('db_conn'))
+                   output_result = f"Compiled NPC: {npc_full_path}"
+               else:
+                   output_result = f"Error: NPC file not found: {npc_full_path}"
+           else:
+               # Compile all NPCs in the directory. This would typically involve iterating and loading.
+               # For simplicity in this Jinx, we just acknowledge the directory.
+               # A more robust implementation would loop through .npc files and compile them.
+               abs_npc_team_dir = os.path.abspath(os.path.expanduser(npc_team_dir))
+               if os.path.exists(abs_npc_team_dir):
+                   output_result = f"Acknowledged compilation for all NPCs in directory: {abs_npc_team_dir}"
+                   # Example of loading a Team and setting the compiled_npc_object to its forenpc if available
+                   # team = Team(team_path=abs_npc_team_dir, db_conn=context.get('db_conn'))
+                   # if team.forenpc:
+                   #     compiled_npc_object = team.forenpc
+               else:
+                   output_result = f"Error: NPC team directory not found: {npc_team_dir}"
+       except Exception as e:
+           traceback.print_exc()
+           output_result = f"Error compiling: {e}"
+
+       context['output'] = output_result
+       context['messages'] = output_messages
+       if compiled_npc_object:
+           context['compiled_npc_object'] = compiled_npc_object # Store the compiled NPC object if any
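The directory branch above only acknowledges the team path; the loop its comments allude to might look like this sketch (same NPC constructor as the single-file branch; the *.npc glob pattern is an assumption):

    import glob

    compiled = []
    for npc_file in sorted(glob.glob(os.path.join(abs_npc_team_dir, '*.npc'))):
        try:
            compiled.append(NPC(file=npc_file, db_conn=context.get('db_conn')))
        except Exception as e:
            print(f"Failed to compile {npc_file}: {e}")
    output_result = f"Compiled {len(compiled)} NPC(s) from {abs_npc_team_dir}"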
@@ -0,0 +1,140 @@
+ jinx_name: "compress"
+ description: "Manages conversation and knowledge context. Defaults to compacting context. Use flags for other operations."
+ inputs:
+   - flush: "" # The number of recent messages to flush.
+   - sleep: False # If true, evolves the knowledge graph.
+   - dream: False # Used with --sleep. Runs creative synthesis.
+   - ops: "" # Used with --sleep. Comma-separated list of KG operations.
+   - model: "" # Used with --sleep. LLM model for KG evolution.
+   - provider: "" # Used with --sleep. LLM provider for KG evolution.
+ steps:
+   - name: "manage_context_and_memory"
+     engine: "python"
+     code: |
+       import os
+       import traceback
+       from npcpy.llm_funcs import breathe
+       from npcpy.memory.command_history import CommandHistory, load_kg_from_db, save_kg_to_db
+       from npcpy.memory.knowledge_graph import kg_sleep_process, kg_dream_process
+
+       # --- Get all inputs from context ---
+       flush_n_str = context.get('flush')
+       is_sleeping = context.get('sleep')
+       is_dreaming = context.get('dream')
+       operations_str = context.get('ops')
+       llm_model = context.get('model')
+       llm_provider = context.get('provider')
+       output_messages = context.get('messages', [])
+
+       USAGE = """Usage:
+       /compress                    (Compacts conversation context)
+       /compress --flush <number>   (Removes the last N messages)
+       /compress --sleep [...]      (Evolves the knowledge graph)
+           --dream                  (With --sleep: enables creative synthesis)
+           --ops "op1,op2"          (With --sleep: specifies KG operations)
+           --model <name>           (With --sleep: specifies LLM model)
+           --provider <name>        (With --sleep: specifies LLM provider)"""
+
+       # --- Argument Validation: Ensure mutual exclusivity ---
+       is_flushing = flush_n_str is not None and flush_n_str.strip() != ''
+       if is_sleeping and is_flushing:
+           context['output'] = f"Error: --sleep and --flush are mutually exclusive.\n{USAGE}"
+           context['messages'] = output_messages
+           exit()
+
+       # --- Dispatcher: Route to the correct functionality ---
+
+       # 1. SLEEP: Evolve the Knowledge Graph
+       if is_sleeping:
+           current_npc = context.get('npc')
+           current_team = context.get('team')
+
+           # Parameter setup for KG process
+           operations_config = [op.strip() for op in operations_str.split(',')] if operations_str else None
+           if not llm_model and current_npc: llm_model = current_npc.model
+           if not llm_provider and current_npc: llm_provider = current_npc.provider
+           if not llm_model: llm_model = "gemini-1.5-pro"
+           if not llm_provider: llm_provider = "gemini"
+
+           team_name = current_team.name if current_team else "__none__"
+           npc_name = current_npc.name if current_npc else "__none__"
+           current_path = os.getcwd()
+           scope_str = f"Team: '{team_name}', NPC: '{npc_name}', Path: '{current_path}'"
+
+           command_history = None
+           try:
+               db_path = os.getenv("NPCSH_DB_PATH", os.path.expanduser("~/npcsh_history.db"))
+               command_history = CommandHistory(db_path)
+               engine = command_history.engine
+               current_kg = load_kg_from_db(engine, team_name, npc_name, current_path)
+
+               if not current_kg or not current_kg.get('facts'):
+                   context['output'] = f"Knowledge graph for the current scope is empty. Nothing to process.\n- Scope: {scope_str}"
+                   exit()
+
+               original_facts = len(current_kg.get('facts', []))
+               original_concepts = len(current_kg.get('concepts', []))
+
+               evolved_kg, _ = kg_sleep_process(existing_kg=current_kg, model=llm_model, provider=llm_provider, npc=current_npc, operations_config=operations_config)
+               process_type = "Sleep"
+
+               if is_dreaming:
+                   evolved_kg, _ = kg_dream_process(existing_kg=evolved_kg, model=llm_model, provider=llm_provider, npc=current_npc)
+                   process_type += " & Dream"
+
+               save_kg_to_db(engine, evolved_kg, team_name, npc_name, current_path)
+
+               new_facts = len(evolved_kg.get('facts', []))
+               new_concepts = len(evolved_kg.get('concepts', []))
+
+               context['output'] = (f"{process_type} process complete.\n"
+                                    f"- Facts: {original_facts} -> {new_facts} ({new_facts - original_facts:+})\n"
+                                    f"- Concepts: {original_concepts} -> {new_concepts} ({new_concepts - original_concepts:+})")
+           except Exception as e:
+               traceback.print_exc()
+               context['output'] = f"Error during KG evolution: {e}"
+           finally:
+               if command_history: command_history.close()
+               context['messages'] = output_messages
+
+       # 2. FLUSH: Remove messages from context
+       elif is_flushing:
+           try:
+               n = int(flush_n_str)
+               if n <= 0:
+                   context['output'] = "Error: Number of messages to flush must be positive."
+                   exit()
+           except ValueError:
+               context['output'] = f"Error: Invalid number '{flush_n_str}'. {USAGE}"
+               exit()
+
+           messages_list = list(output_messages)
+           original_len = len(messages_list)
+           final_messages = []
+
+           if messages_list and messages_list[0].get("role") == "system":
+               system_message = messages_list.pop(0)
+               num_to_remove = min(n, len(messages_list))
+               final_messages = [system_message] + messages_list[:-num_to_remove]
+           else:
+               num_to_remove = min(n, original_len)
+               final_messages = messages_list[:-num_to_remove]
+
+           removed_count = original_len - len(final_messages)
+           context['output'] = f"Flushed {removed_count} message(s). Context is now {len(final_messages)} messages."
+           context['messages'] = final_messages
+
+       # 3. DEFAULT: Compact conversation context
+       else:
+           try:
+               result = breathe(**context)
+               if isinstance(result, dict):
+                   context['output'] = result.get('output', 'Context compressed.')
+                   context['messages'] = result.get('messages', output_messages)
+               else:
+                   context['output'] = "Context compression process initiated."
+                   context['messages'] = output_messages
+           except Exception as e:
+               traceback.print_exc()
+               context['output'] = f"Error during context compression: {e}"
+               context['messages'] = output_messages
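The flush branch above keeps a leading system message and drops the last N of the remaining messages. A worked example of the same slicing, with illustrative values:

    messages = [
        {"role": "system", "content": "You are sibiji."},
        {"role": "user", "content": "hi"},
        {"role": "assistant", "content": "hello"},
        {"role": "user", "content": "one more thing"},
    ]
    n = 2
    system_message = messages[0]
    rest = messages[1:]
    # Drop the last min(n, len(rest)) messages, never the system message.
    final = [system_message] + rest[:-min(n, len(rest))]
    # final -> the system message plus {"role": "user", "content": "hi"}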