npcsh-0.1.2-py3-none-any.whl → npcsh-1.1.13-py3-none-any.whl

This diff shows the content of publicly available package versions as released to their public registry. It is provided for informational purposes only and reflects the changes between the two package versions as published.
Files changed (143)
  1. npcsh/_state.py +3508 -0
  2. npcsh/alicanto.py +65 -0
  3. npcsh/build.py +291 -0
  4. npcsh/completion.py +206 -0
  5. npcsh/config.py +163 -0
  6. npcsh/corca.py +50 -0
  7. npcsh/execution.py +185 -0
  8. npcsh/guac.py +46 -0
  9. npcsh/mcp_helpers.py +357 -0
  10. npcsh/mcp_server.py +299 -0
  11. npcsh/npc.py +323 -0
  12. npcsh/npc_team/alicanto.npc +2 -0
  13. npcsh/npc_team/alicanto.png +0 -0
  14. npcsh/npc_team/corca.npc +12 -0
  15. npcsh/npc_team/corca.png +0 -0
  16. npcsh/npc_team/corca_example.png +0 -0
  17. npcsh/npc_team/foreman.npc +7 -0
  18. npcsh/npc_team/frederic.npc +6 -0
  19. npcsh/npc_team/frederic4.png +0 -0
  20. npcsh/npc_team/guac.png +0 -0
  21. npcsh/npc_team/jinxs/code/python.jinx +11 -0
  22. npcsh/npc_team/jinxs/code/sh.jinx +34 -0
  23. npcsh/npc_team/jinxs/code/sql.jinx +16 -0
  24. npcsh/npc_team/jinxs/modes/alicanto.jinx +194 -0
  25. npcsh/npc_team/jinxs/modes/corca.jinx +249 -0
  26. npcsh/npc_team/jinxs/modes/guac.jinx +317 -0
  27. npcsh/npc_team/jinxs/modes/plonk.jinx +214 -0
  28. npcsh/npc_team/jinxs/modes/pti.jinx +170 -0
  29. npcsh/npc_team/jinxs/modes/spool.jinx +161 -0
  30. npcsh/npc_team/jinxs/modes/wander.jinx +186 -0
  31. npcsh/npc_team/jinxs/modes/yap.jinx +262 -0
  32. npcsh/npc_team/jinxs/npc_studio/npc-studio.jinx +77 -0
  33. npcsh/npc_team/jinxs/utils/agent.jinx +17 -0
  34. npcsh/npc_team/jinxs/utils/chat.jinx +44 -0
  35. npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
  36. npcsh/npc_team/jinxs/utils/compress.jinx +140 -0
  37. npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
  38. npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
  39. npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
  40. npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
  41. npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
  42. npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
  43. npcsh/npc_team/jinxs/utils/edit_file.jinx +94 -0
  44. npcsh/npc_team/jinxs/utils/load_file.jinx +35 -0
  45. npcsh/npc_team/jinxs/utils/ots.jinx +61 -0
  46. npcsh/npc_team/jinxs/utils/roll.jinx +68 -0
  47. npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
  48. npcsh/npc_team/jinxs/utils/search.jinx +130 -0
  49. npcsh/npc_team/jinxs/utils/serve.jinx +26 -0
  50. npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
  51. npcsh/npc_team/jinxs/utils/trigger.jinx +61 -0
  52. npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
  53. npcsh/npc_team/jinxs/utils/vixynt.jinx +144 -0
  54. npcsh/npc_team/kadiefa.npc +3 -0
  55. npcsh/npc_team/kadiefa.png +0 -0
  56. npcsh/npc_team/npcsh.ctx +18 -0
  57. npcsh/npc_team/npcsh_sibiji.png +0 -0
  58. npcsh/npc_team/plonk.npc +2 -0
  59. npcsh/npc_team/plonk.png +0 -0
  60. npcsh/npc_team/plonkjr.npc +2 -0
  61. npcsh/npc_team/plonkjr.png +0 -0
  62. npcsh/npc_team/sibiji.npc +3 -0
  63. npcsh/npc_team/sibiji.png +0 -0
  64. npcsh/npc_team/spool.png +0 -0
  65. npcsh/npc_team/yap.png +0 -0
  66. npcsh/npcsh.py +296 -112
  67. npcsh/parsing.py +118 -0
  68. npcsh/plonk.py +54 -0
  69. npcsh/pti.py +54 -0
  70. npcsh/routes.py +139 -0
  71. npcsh/spool.py +48 -0
  72. npcsh/ui.py +199 -0
  73. npcsh/wander.py +62 -0
  74. npcsh/yap.py +50 -0
  75. npcsh-1.1.13.data/data/npcsh/npc_team/agent.jinx +17 -0
  76. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.jinx +194 -0
  77. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.npc +2 -0
  78. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.png +0 -0
  79. npcsh-1.1.13.data/data/npcsh/npc_team/build.jinx +65 -0
  80. npcsh-1.1.13.data/data/npcsh/npc_team/chat.jinx +44 -0
  81. npcsh-1.1.13.data/data/npcsh/npc_team/cmd.jinx +44 -0
  82. npcsh-1.1.13.data/data/npcsh/npc_team/compile.jinx +50 -0
  83. npcsh-1.1.13.data/data/npcsh/npc_team/compress.jinx +140 -0
  84. npcsh-1.1.13.data/data/npcsh/npc_team/corca.jinx +249 -0
  85. npcsh-1.1.13.data/data/npcsh/npc_team/corca.npc +12 -0
  86. npcsh-1.1.13.data/data/npcsh/npc_team/corca.png +0 -0
  87. npcsh-1.1.13.data/data/npcsh/npc_team/corca_example.png +0 -0
  88. npcsh-1.1.13.data/data/npcsh/npc_team/edit_file.jinx +94 -0
  89. npcsh-1.1.13.data/data/npcsh/npc_team/foreman.npc +7 -0
  90. npcsh-1.1.13.data/data/npcsh/npc_team/frederic.npc +6 -0
  91. npcsh-1.1.13.data/data/npcsh/npc_team/frederic4.png +0 -0
  92. npcsh-1.1.13.data/data/npcsh/npc_team/guac.jinx +317 -0
  93. npcsh-1.1.13.data/data/npcsh/npc_team/guac.png +0 -0
  94. npcsh-1.1.13.data/data/npcsh/npc_team/help.jinx +52 -0
  95. npcsh-1.1.13.data/data/npcsh/npc_team/init.jinx +41 -0
  96. npcsh-1.1.13.data/data/npcsh/npc_team/jinxs.jinx +32 -0
  97. npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.npc +3 -0
  98. npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.png +0 -0
  99. npcsh-1.1.13.data/data/npcsh/npc_team/load_file.jinx +35 -0
  100. npcsh-1.1.13.data/data/npcsh/npc_team/npc-studio.jinx +77 -0
  101. npcsh-1.1.13.data/data/npcsh/npc_team/npcsh.ctx +18 -0
  102. npcsh-1.1.13.data/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  103. npcsh-1.1.13.data/data/npcsh/npc_team/ots.jinx +61 -0
  104. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.jinx +214 -0
  105. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.npc +2 -0
  106. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.png +0 -0
  107. npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.npc +2 -0
  108. npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.png +0 -0
  109. npcsh-1.1.13.data/data/npcsh/npc_team/pti.jinx +170 -0
  110. npcsh-1.1.13.data/data/npcsh/npc_team/python.jinx +11 -0
  111. npcsh-1.1.13.data/data/npcsh/npc_team/roll.jinx +68 -0
  112. npcsh-1.1.13.data/data/npcsh/npc_team/sample.jinx +56 -0
  113. npcsh-1.1.13.data/data/npcsh/npc_team/search.jinx +130 -0
  114. npcsh-1.1.13.data/data/npcsh/npc_team/serve.jinx +26 -0
  115. npcsh-1.1.13.data/data/npcsh/npc_team/set.jinx +40 -0
  116. npcsh-1.1.13.data/data/npcsh/npc_team/sh.jinx +34 -0
  117. npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.npc +3 -0
  118. npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.png +0 -0
  119. npcsh-1.1.13.data/data/npcsh/npc_team/sleep.jinx +116 -0
  120. npcsh-1.1.13.data/data/npcsh/npc_team/spool.jinx +161 -0
  121. npcsh-1.1.13.data/data/npcsh/npc_team/spool.png +0 -0
  122. npcsh-1.1.13.data/data/npcsh/npc_team/sql.jinx +16 -0
  123. npcsh-1.1.13.data/data/npcsh/npc_team/trigger.jinx +61 -0
  124. npcsh-1.1.13.data/data/npcsh/npc_team/usage.jinx +33 -0
  125. npcsh-1.1.13.data/data/npcsh/npc_team/vixynt.jinx +144 -0
  126. npcsh-1.1.13.data/data/npcsh/npc_team/wander.jinx +186 -0
  127. npcsh-1.1.13.data/data/npcsh/npc_team/yap.jinx +262 -0
  128. npcsh-1.1.13.data/data/npcsh/npc_team/yap.png +0 -0
  129. npcsh-1.1.13.dist-info/METADATA +522 -0
  130. npcsh-1.1.13.dist-info/RECORD +135 -0
  131. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/WHEEL +1 -1
  132. npcsh-1.1.13.dist-info/entry_points.txt +9 -0
  133. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info/licenses}/LICENSE +1 -1
  134. npcsh/command_history.py +0 -81
  135. npcsh/helpers.py +0 -36
  136. npcsh/llm_funcs.py +0 -295
  137. npcsh/main.py +0 -5
  138. npcsh/modes.py +0 -343
  139. npcsh/npc_compiler.py +0 -124
  140. npcsh-0.1.2.dist-info/METADATA +0 -99
  141. npcsh-0.1.2.dist-info/RECORD +0 -14
  142. npcsh-0.1.2.dist-info/entry_points.txt +0 -2
  143. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/top_level.txt +0 -0
npcsh/npc_team/jinxs/utils/roll.jinx
@@ -0,0 +1,68 @@
+ jinx_name: "roll"
+ description: "Generate a video from a text prompt."
+ inputs:
+   - prompt: "" # Required text prompt for video generation.
+   - vgmodel: "" # Video generation model to use. Defaults to NPCSH_VIDEO_GEN_MODEL or NPC's model.
+   - vgprovider: "" # Video generation provider to use. Defaults to NPCSH_VIDEO_GEN_PROVIDER or NPC's provider.
+   - num_frames: 125 # Number of frames for the video.
+   - width: 256 # Width of the video.
+   - height: 256 # Height of the video.
+   - output_path: "output.mp4" # Output file path for the video.
+ steps:
+   - name: "generate_video"
+     engine: "python"
+     code: |
+       import traceback
+       from npcpy.llm_funcs import gen_video
+       # Assuming NPCSH_VIDEO_GEN_MODEL and NPCSH_VIDEO_GEN_PROVIDER are accessible
+
+       prompt = context.get('prompt')
+       num_frames = int(context.get('num_frames', 125)) # Ensure int type
+       width = int(context.get('width', 256)) # Ensure int type
+       height = int(context.get('height', 256)) # Ensure int type
+       output_path = context.get('output_path')
+       video_gen_model = context.get('vgmodel')
+       video_gen_provider = context.get('vgprovider')
+       output_messages = context.get('messages', [])
+       current_npc = context.get('npc')
+
+       if not prompt or not prompt.strip():
+           context['output'] = "Usage: /roll <your prompt>"
+           context['messages'] = output_messages
+           exit()
+
+       # Fallback for model/provider if not explicitly set in Jinx inputs
+       if not video_gen_model and current_npc and current_npc.model:
+           video_gen_model = current_npc.model
+       if not video_gen_provider and current_npc and current_npc.provider:
+           video_gen_provider = current_npc.provider
+
+       # Final fallbacks (these would ideally come from npcsh._state config)
+       if not video_gen_model:
+           video_gen_model = "stable-video-diffusion" # Example default
+       if not video_gen_provider:
+           video_gen_provider = "diffusers" # Example default
+
+       try:
+           result = gen_video(
+               prompt=prompt,
+               model=video_gen_model,
+               provider=video_gen_provider,
+               npc=current_npc,
+               num_frames=num_frames,
+               width=width,
+               height=height,
+               output_path=output_path,
+               **context.get('api_kwargs', {}) # Assuming api_kwargs might be passed
+           )
+
+           if isinstance(result, dict):
+               context['output'] = result.get('output', 'Video generated.')
+               context['messages'] = result.get('messages', output_messages)
+           else:
+               context['output'] = str(result)
+               context['messages'] = output_messages
+       except Exception as e:
+           traceback.print_exc()
+           context['output'] = f"Error generating video: {e}"
+           context['messages'] = output_messages
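
Going by the usage string embedded in the step ("Usage: /roll <your prompt>"), the jinx is driven by a slash command. A hypothetical invocation, with num_frames, width, height, and output_path falling back to the declared defaults (125 frames at 256x256, written to output.mp4):

    /roll a timelapse of storm clouds rolling over a mountain ridge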
npcsh/npc_team/jinxs/utils/sample.jinx
@@ -0,0 +1,56 @@
+ jinx_name: "sample"
+ description: "Send a prompt directly to the LLM."
+ inputs:
+   - prompt: "" # Required text prompt to send to the LLM.
+   - model: "" # LLM model to use. Defaults to NPC's model.
+   - provider: "" # LLM provider to use. Defaults to NPC's provider.
+ steps:
+   - name: "send_prompt_to_llm"
+     engine: "python"
+     code: |
+       import traceback
+       from npcpy.llm_funcs import get_llm_response
+
+       prompt = context.get('prompt')
+       llm_model = context.get('model')
+       llm_provider = context.get('provider')
+       output_messages = context.get('messages', [])
+       current_npc = context.get('npc')
+
+       if not prompt or not prompt.strip():
+           context['output'] = "Usage: /sample <your prompt> [-m --model] model [-p --provider] provider"
+           context['messages'] = output_messages
+           exit()
+
+       # Fallback for model/provider if not explicitly set in Jinx inputs
+       if not llm_model and current_npc and current_npc.model:
+           llm_model = current_npc.model
+       if not llm_provider and current_npc and current_npc.provider:
+           llm_provider = current_npc.provider
+
+       # Final fallbacks (these would ideally come from npcsh._state config)
+       if not llm_model: llm_model = "gemini-1.5-pro" # Example default
+       if not llm_provider: llm_provider = "gemini" # Example default
+
+       try:
+           result = get_llm_response(
+               prompt=prompt,
+               model=llm_model,
+               provider=llm_provider,
+               npc=current_npc,
+               **{k:v for k,v in context.items() if k not in ['messages', 'prompt', 'model', 'provider']} # Pass other context
+           )
+
+           if isinstance(result, dict):
+               context['output'] = result.get('response')
+               context['messages'] = result.get('messages', output_messages)
+               context['model'] = llm_model
+               context['provider'] = llm_provider
+           else:
+               context['output'] = str(result)
+               context['messages'] = output_messages
+
+       except Exception as e:
+           traceback.print_exc()
+           context['output'] = f"Error sampling LLM: {e}"
+           context['messages'] = output_messages
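
The usage string above indicates that the model and provider inputs can be overridden per call; a hypothetical invocation:

    /sample explain the tradeoffs of connection pooling -m gemini-1.5-pro -p gemini

Note that the dict comprehension in the call forwards everything else left in context (other than messages, prompt, model, and provider) to get_llm_response as keyword arguments.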
npcsh/npc_team/jinxs/utils/search.jinx
@@ -0,0 +1,130 @@
+ jinx_name: "search"
+ description: >
+   Executes a search across various sources.
+   Usage:
+   /search <query> (Default: Web Search)
+   /search --memory <query> (Search approved memories)
+   /search --kg <query> (Search the knowledge graph)
+   /search --rag [-f <paths>] <query> (Execute a RAG search)
+   /search --brainblast <query> (Advanced history search)
+ inputs:
+   - query: ""
+   - sprovider: ""
+   - memory: false
+   - kg: false
+   - rag: false
+   - brainblast: false
+   - file_paths: ""
+   - history_db_path: "~/npcsh_history.db"
+   - vector_db_path: "~/npcsh_chroma.db"
+   - emodel: ""
+   - eprovider: ""
+ steps:
+   - name: "execute_unified_search"
+     engine: "python"
+     code: |
+       import os
+       import traceback
+
+       # Access query from context
+       query = context.get('query')
+       if not query or not query.strip():
+           context['output'] = "Usage: /search [--memory|--kg|--rag|--brainblast] <query>"
+       else:
+           # state is available as a GLOBAL variable (from extra_globals)
+           # Access it directly, not from context
+           try:
+               current_state = state # This should work now
+           except NameError:
+               context['output'] = "Error: Shell state not available in jinx context"
+               raise
+
+           current_npc = current_state.npc
+           current_team = current_state.team
+
+           npc_name = getattr(current_npc, 'name', '__none__') if current_npc else '__none__'
+           team_name = getattr(current_team, 'name', '__none__') if current_team else '__none__'
+           current_path = os.getcwd()
+           db_path = os.path.expanduser(context.get("history_db_path") or "~/.npcsh/npcsh_history.db")
+
+           try:
+               cmd_history = CommandHistory(db_path)
+
+               if context.get('memory'):
+                   memories = get_relevant_memories(
+                       command_history=cmd_history,
+                       npc_name=npc_name,
+                       team_name=team_name,
+                       path=current_path,
+                       query=query,
+                       max_memories=10,
+                       state=current_state # Pass the state object
+                   )
+                   print(memories)
+
+                   if not memories:
+                       output = f"No memories found for query: '{query}'"
+                   else:
+                       output = f"Found {len(memories)} memories:\n\n" + "\n".join(
+                           f"{i}. [{mem.get('timestamp', 'unknown')}] {mem.get('final_memory') or mem.get('initial_memory')}"
+                           for i, mem in enumerate(memories, 1)
+                       )
+
+               elif context.get('kg'):
+                   facts = search_kg_facts(
+                       cmd_history,
+                       npc_name,
+                       team_name,
+                       current_path,
+                       query
+                   )
+                   print(facts)
+
+                   if not facts:
+                       output = f"No KG facts found for query: '{query}'"
+                   else:
+                       output = f"Found {len(facts)} KG facts:\n\n" + "\n".join(
+                           f"{i}. {fact.get('statement')}" for i, fact in enumerate(facts, 1)
+                       )
+
+               elif context.get('rag'):
+                   file_paths_str = context.get('file_paths', '')
+                   file_paths = [os.path.abspath(os.path.expanduser(p.strip())) for p in file_paths_str.split(',') if p.strip()]
+                   emodel = context.get('emodel') or current_state.embedding_model
+                   eprovider = context.get('eprovider') or current_state.embedding_provider
+
+                   file_contents = []
+                   for path in file_paths:
+                       chunks = load_file_contents(path)
+                       basename = os.path.basename(path)
+                       file_contents.extend([f"{basename}: {chunk}" for chunk in chunks])
+
+                   result = execute_rag_command(
+                       command=query,
+                       vector_db_path=os.path.expanduser(context.get('vector_db_path') or "~/.npcsh/npcsh_chroma.db"),
+                       embedding_model=emodel,
+                       embedding_provider=eprovider,
+                       file_contents=file_contents or None
+                   )
+                   print(result)
+                   output = result.get('response', 'No response from RAG.')
+
+               elif context.get('brainblast'):
+                   result = execute_brainblast_command(
+                       command=query,
+                       command_history=cmd_history,
+                       **context
+                   )
+                   print(result)
+                   output = result.get('output', 'Brainblast search executed.')
+
+               else:
+                   # Default to web search
+                   provider = context.get('sprovider') or current_state.search_provider
+                   results = search_web(query, provider=provider)
+                   output = "\n".join([f"- {res}" for res in results]) if results else "No web results found."
+
+           except Exception as e:
+               output = f"An error occurred in the search jinx: {e}\n{traceback.format_exc()}"
+
+           context['output'] = output
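
The comments in this step are the one place this diff documents the jinx execution model: state arrives as a global injected by the engine (via extra_globals) rather than through context. A minimal Python sketch of that model, under stated assumptions (run_python_step is a hypothetical name; the real engine, presumably somewhere in npcsh/_state.py, may differ):

    # Hypothetical sketch: run a jinx "python" step body with an injected
    # `context` dict plus extra globals such as `state`.
    def run_python_step(code: str, context: dict, extra_globals: dict) -> dict:
        step_globals = {'context': context, **extra_globals}
        try:
            exec(code, step_globals)  # the step writes its result into context['output']
        except SystemExit:
            pass  # lets a bare exit() act as an early return from the step
        return context

Such an engine would also explain the bare exit() calls in the other jinxes: they end the step early without terminating the shell.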
npcsh/npc_team/jinxs/utils/serve.jinx
@@ -0,0 +1,26 @@
+ jinx_name: "serve"
+ description: "Serve an NPC Team"
+ inputs:
+   - port: 5337 # The port to run the Flask server on.
+   - cors: "" # Comma-separated CORS origins.
+ steps:
+   - name: "start_flask_server"
+     engine: "python"
+     code: |
+       from npcpy.serve import start_flask_server
+
+       port = context.get('port')
+       cors_str = context.get('cors')
+       output_messages = context.get('messages', [])
+
+       cors_origins = None
+       if cors_str and cors_str.strip():
+           cors_origins = [origin.strip() for origin in cors_str.split(",")]
+
+       start_flask_server(
+           port=int(port), # Ensure port is an integer
+           cors_origins=cors_origins,
+       )
+
+       context['output'] = "NPC Team server started. Execution of this jinx will pause until the server is stopped."
+       context['messages'] = output_messages
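
As the parsing above implies, the cors input is split on commas, so a hypothetical value of "http://localhost:3000, http://localhost:5173" becomes ['http://localhost:3000', 'http://localhost:5173'], while an empty string leaves cors_origins as None. Since start_flask_server blocks, the final two context assignments only run once the server shuts down, which is what the output message warns about.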
npcsh/npc_team/jinxs/utils/sleep.jinx
@@ -0,0 +1,116 @@
+ jinx_name: "sleep"
+ description: "Evolve knowledge graph. Use --dream to also run creative synthesis."
+ inputs:
+   - dream: False # Boolean flag to also run creative synthesis (dream process).
+   - ops: "" # Comma-separated list of operations to configure KG sleep process.
+   - model: "" # LLM model to use for KG evolution. Defaults to NPC's model.
+   - provider: "" # LLM provider to use for KG evolution. Defaults to NPC's provider.
+ steps:
+   - name: "evolve_knowledge_graph"
+     engine: "python"
+     code: |
+       import os
+       import traceback
+       from npcpy.memory.command_history import CommandHistory, load_kg_from_db, save_kg_to_db
+       from npcpy.memory.knowledge_graph import kg_sleep_process, kg_dream_process
+       # Assuming render_markdown is available if needed for logging progress
+
+       is_dreaming = context.get('dream')
+       operations_str = context.get('ops')
+       llm_model = context.get('model')
+       llm_provider = context.get('provider')
+       output_messages = context.get('messages', [])
+       current_npc = context.get('npc')
+       current_team = context.get('team')
+
+       operations_config = None
+       if operations_str and isinstance(operations_str, str):
+           operations_config = [op.strip() for op in operations_str.split(',')]
+
+       # Fallback for model/provider if not explicitly set in Jinx inputs
+       if not llm_model and current_npc and current_npc.model:
+           llm_model = current_npc.model
+       if not llm_provider and current_npc and current_npc.provider:
+           llm_provider = current_npc.provider
+
+       # Final fallbacks (these would ideally come from npcsh._state config)
+       if not llm_model: llm_model = "gemini-1.5-pro" # Example default
+       if not llm_provider: llm_provider = "gemini" # Example default
+
+       team_name = current_team.name if current_team else "__none__"
+       npc_name = current_npc.name if current_npc else "__none__"
+       current_path = os.getcwd()
+       scope_str = f"Team: '{team_name}', NPC: '{npc_name}', Path: '{current_path}'"
+
+       # Assume render_markdown exists
+       # render_markdown(f"- Checking knowledge graph for scope: {scope_str}")
+
+       command_history = None
+       try:
+           db_path = os.getenv("NPCSH_DB_PATH", os.path.expanduser("~/npcsh_history.db"))
+           command_history = CommandHistory(db_path)
+           engine = command_history.engine
+       except Exception as e:
+           context['output'] = f"Error connecting to history database for KG access: {e}"
+           context['messages'] = output_messages
+           exit()
+
+       output_result = ""
+       try:
+           current_kg = load_kg_from_db(engine, team_name, npc_name, current_path)
+
+           if not current_kg or not current_kg.get('facts'):
+               output_msg = f"Knowledge graph for the current scope is empty. Nothing to process.\n"
+               output_msg += f" - Scope Checked: {scope_str}\n\n"
+               output_msg += "**Hint:** Have a conversation or run some commands first to build up knowledge in this specific context. The KG is unique to each combination of Team, NPC, and directory."
+               context['output'] = output_msg
+               context['messages'] = output_messages
+               exit()
+
+           original_facts = len(current_kg.get('facts', []))
+           original_concepts = len(current_kg.get('concepts', []))
+
+           process_type = "Sleep"
+           ops_display = f"with operations: {operations_config}" if operations_config else "with random operations"
+           # render_markdown(f"- Initiating sleep process {ops_display}")
+
+           evolved_kg, _ = kg_sleep_process(
+               existing_kg=current_kg,
+               model=llm_model,
+               provider=llm_provider,
+               npc=current_npc,
+               operations_config=operations_config
+           )
+
+           if is_dreaming:
+               process_type += " & Dream"
+               # render_markdown(f"- Initiating dream process on the evolved KG...")
+               evolved_kg, _ = kg_dream_process(
+                   existing_kg=evolved_kg,
+                   model=llm_model,
+                   provider=llm_provider,
+                   npc=current_npc
+               )
+
+           save_kg_to_db(engine, evolved_kg, team_name, npc_name, current_path) # Changed conn to engine
+
+           new_facts = len(evolved_kg.get('facts', []))
+           new_concepts = len(evolved_kg.get('concepts', []))
+
+           output_result = f"{process_type} process complete.\n"
+           output_result += f"- Facts: {original_facts} -> {new_facts} ({new_facts - original_facts:+})\n"
+           output_result += f"- Concepts: {original_concepts} -> {new_concepts} ({new_concepts - original_concepts:+})"
+
+           print('Evolved facts:', evolved_kg.get('facts'))
+           print('Evolved concepts:', evolved_kg.get('concepts'))
+
+           context['output'] = output_result
+           context['messages'] = output_messages
+
+       except Exception as e:
+           traceback.print_exc()
+           context['output'] = f"Error during KG evolution process: {e}"
+           context['messages'] = output_messages
+       finally:
+           if command_history: # Check if it was successfully initialized
+               command_history.close()
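
The :+ format spec in the summary prints signed deltas: a run that grows the graph from 12 to 17 facts renders as "- Facts: 12 -> 17 (+5)", and an operation that prunes concepts shows a negative delta such as "(-3)".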
npcsh/npc_team/jinxs/utils/trigger.jinx
@@ -0,0 +1,61 @@
+ jinx_name: "trigger"
+ description: "Creates a persistent listener (--listen) or a scheduled task (--cron)."
+ inputs:
+   - listen: "" # The description for a persistent, event-driven listener.
+   - cron: "" # The description for a scheduled, time-based task.
+ steps:
+   - name: "execute_command"
+     engine: "python"
+     code: |
+       import traceback
+       from npcpy.work.trigger import execute_trigger_command # For --listen
+       from npcpy.work.plan import execute_plan_command # For --cron
+
+       listen_description = context.get('listen')
+       cron_description = context.get('cron')
+       output_messages = context.get('messages', [])
+
+       USAGE = 'Usage: /trigger --listen "<description>" OR /trigger --cron "<description>"'
+
+       # Determine which command was used and set the appropriate variables
+       subcommand = None
+       description = None
+       executor_func = None
+
+       # --- Argument Validation ---
+       # Ensure mutual exclusivity
+       if listen_description and cron_description:
+           context['output'] = f"Error: --listen and --cron are mutually exclusive. {USAGE}"
+           context['messages'] = output_messages
+           exit()
+
+       # --- Command Dispatch ---
+       if listen_description:
+           subcommand = 'listen'
+           description = listen_description
+           executor_func = execute_trigger_command
+       elif cron_description:
+           subcommand = 'cron'
+           description = cron_description
+           executor_func = execute_plan_command
+       else:
+           # Handle case where no arguments were provided
+           context['output'] = f"Error: You must provide either --listen or --cron. {USAGE}"
+           context['messages'] = output_messages
+           exit()
+
+       # --- Execution ---
+       try:
+           result = executor_func(command=description, **context)
+
+           if isinstance(result, dict):
+               output_key = 'Listener' if subcommand == 'listen' else 'Cron job'
+               context['output'] = result.get('output', f'{output_key} created successfully.')
+               context['messages'] = result.get('messages', output_messages)
+           else:
+               context['output'] = str(result)
+               context['messages'] = output_messages
+       except Exception as e:
+           traceback.print_exc()
+           context['output'] = f"Error creating {subcommand}: {e}"
+           context['messages'] = output_messages
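
Per the USAGE string, the two flags are mutually exclusive and dispatch to different npcpy backends (execute_trigger_command for --listen, execute_plan_command for --cron); hypothetical invocations:

    /trigger --listen "when a new csv lands in ~/data, summarize it"
    /trigger --cron "every weekday at 9am, check for failed jobs"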
npcsh/npc_team/jinxs/utils/usage.jinx
@@ -0,0 +1,33 @@
+ jinx_name: usage
+ description: Display current session token usage and cost
+ inputs: []
+ steps:
+   - name: show_usage
+     engine: python
+     code: |
+       state = context.get('state')
+       if not state:
+           output = "No state available"
+       else:
+           inp = getattr(state, 'session_input_tokens', 0)
+           out = getattr(state, 'session_output_tokens', 0)
+           cost = getattr(state, 'session_cost_usd', 0.0)
+           turns = getattr(state, 'turn_count', 0)
+           total = inp + out
+
+           def fmt(n):
+               return f"{n/1000:.1f}k" if n >= 1000 else str(n)
+
+           def fmt_cost(c):
+               if c == 0:
+                   return "free (local)"
+               elif c < 0.01:
+                   return f"${c:.4f}"
+               else:
+                   return f"${c:.2f}"
+
+           output = f"Session Usage\n"
+           output += f"Tokens: {fmt(inp)} in / {fmt(out)} out ({fmt(total)} total)\n"
+           output += f"Cost: {fmt_cost(cost)}\n"
+           output += f"Turns: {turns}"
+       context['output'] = output
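
A quick trace of the two formatters: fmt(950) returns "950" and fmt(12458) returns "12.5k"; fmt_cost(0) returns "free (local)", fmt_cost(0.0032) returns "$0.0032", and fmt_cost(1.5) returns "$1.50".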
npcsh/npc_team/jinxs/utils/vixynt.jinx
@@ -0,0 +1,144 @@
+ jinx_name: "vixynt"
+ description: "Generates images from text descriptions or edits existing ones."
+ inputs:
+   - prompt
+   - model: null
+   - provider: null
+   - output_name: null
+   - attachments: null
+   - n_images: null
+   - height: null
+   - width: null
+ steps:
+   - name: "generate_or_edit_image"
+     engine: "python"
+     code: |
+       import os
+       import base64
+       from io import BytesIO
+       from datetime import datetime
+       from PIL import Image
+       from npcpy.llm_funcs import gen_image
+
+       # Extract inputs from context with proper type conversion
+       image_prompt = str(context.get('prompt', '')).strip()
+       output_name = context.get('output_name')
+       attachments_str = context.get('attachments')
+
+       # Handle integer inputs - they may come as strings or ints
+       try:
+           n_images = int(context.get('n_images', 1))
+       except (ValueError, TypeError):
+           n_images = 1
+
+       try:
+           height = int(context.get('height', 1024))
+       except (ValueError, TypeError):
+           height = 1024
+
+       try:
+           width = int(context.get('width', 1024))
+       except (ValueError, TypeError):
+           width = 1024
+
+       # Get model and provider, prioritizing context, then NPC, then environment variables
+       model = context.get('model')
+       provider = context.get('provider')
+
+       # Use NPC's model/provider as fallback
+       if not model and npc and hasattr(npc, 'model') and npc.model:
+           model = npc.model
+       if not provider and npc and hasattr(npc, 'provider') and npc.provider:
+           provider = npc.provider
+
+       # Fallback to environment variables
+       if not model:
+           model = os.getenv('NPCSH_IMAGE_GEN_MODEL')
+       if not provider:
+           provider = os.getenv('NPCSH_IMAGE_GEN_PROVIDER')
+
+       # Final hardcoded fallbacks if nothing else is set
+       if not model:
+           model = "runwayml/stable-diffusion-v1-5"
+       if not provider:
+           provider = "diffusers"
+
+       # Parse attachments
+       input_images = []
+       if attachments_str and str(attachments_str).strip():
+           input_images = [p.strip() for p in str(attachments_str).split(',')]
+
+       output_messages = context.get('messages', [])
+
+       if not image_prompt:
+           output = "Error: No prompt provided for image generation."
+       else:
+           try:
+               # Generate image(s)
+               result = gen_image(
+                   prompt=image_prompt,
+                   model=model,
+                   provider=provider,
+                   npc=npc,
+                   height=height,
+                   width=width,
+                   n_images=n_images,
+                   input_images=input_images if input_images else None
+               )
+
+               # Ensure we have a list of images
+               if not isinstance(result, list):
+                   images_list = [result] if result is not None else []
+               else:
+                   images_list = result
+
+               saved_files = []
+               html_image_tags = [] # This list will store the raw HTML <img> tags
+
+               for i, image in enumerate(images_list):
+                   if image is None:
+                       continue
+
+                   # Determine output filename
+                   if output_name and str(output_name).strip():
+                       base_name, ext = os.path.splitext(os.path.expanduser(str(output_name)))
+                       if not ext:
+                           ext = ".png"
+                       current_output_file = f"{base_name}_{i}{ext}" if len(images_list) > 1 else f"{base_name}{ext}"
+                   else:
+                       os.makedirs(os.path.expanduser("~/.npcsh/images/"), exist_ok=True)
+                       current_output_file = (
+                           os.path.expanduser("~/.npcsh/images/")
+                           + f"image_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{i}.png"
+                       )
+
+                   # Save image to file
+                   image.save(current_output_file)
+                   saved_files.append(current_output_file)
+
+                   # Convert image to base64 and create an HTML <img> tag
+                   with open(current_output_file, 'rb') as f:
+                       img_data = base64.b64encode(f.read()).decode()
+                   # Using raw HTML <img> tag with data URI
+                   html_image_tags.append(f'<img src="data:image/png;base64,{img_data}" alt="Generated Image {i+1}" style="max-width: 100%; display: block; margin-top: 10px;">')
+
+               if saved_files:
+                   output_text_message = f"Image(s) generated and saved to: {', '.join(saved_files)}"
+                   if input_images:
+                       output_text_message = f"Image(s) edited and saved to: {', '.join(saved_files)}"
+
+                   output = output_text_message # Keep the text message clean
+                   output += f"\n\nThe image files have been saved and are ready to view."
+                   output += "\n\n" + "\n".join(html_image_tags) # Append all HTML <img> tags to the output
+               else:
+                   output = "No images were generated."
+
+           except Exception as e:
+               import traceback
+               traceback.print_exc()
+               output = f"Error {'editing' if input_images else 'generating'} image: {str(e)}"
+
+       context['output'] = output
+       context['messages'] = output_messages
+       context['model'] = model
+       context['provider'] = provider
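
Supplying attachments flips the jinx from generation to editing: the comma-separated paths are parsed into input_images, forwarded to gen_image, and the success message switches from "generated" to "edited". A hypothetical invocation (the argument syntax is assumed, not confirmed by this diff):

    /vixynt a watercolor lighthouse at dusk n_images=2 output_name=lighthouse.png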
npcsh/npc_team/kadiefa.npc
@@ -0,0 +1,3 @@
+ name: kadiefa
+ primary_directive: |
+   You are kadiefa, the exploratory snow leopard. You love to find new paths and to explore hidden gems. You go into caverns no cat has ventured into before. You climb peaks that others call crazy. You are at the height of your power. Your role is to lead the way for users to explore complex research questions and to think outside of the box.
npcsh/npc_team/kadiefa.png: Binary file
npcsh/npc_team/npcsh.ctx
@@ -0,0 +1,18 @@
+ context: |
+   The npcsh NPC team is devoted to providing a safe and helpful
+   environment for users where they can work and be as successful as possible.
+   npcsh is a command-line tool that makes it easy for users to harness
+   the power of LLMs from a command line shell. npcsh is a command line toolkit consisting of several programs.
+ databases:
+   - ~/npcsh_history.db
+ mcp_servers:
+   - ~/.npcsh/mcp_server.py
+ use_global_jinxs: true
+ forenpc: sibiji
+ preferences:
+   - If you come up with an idea, it is critical that you also provide a way to validate the idea.
+   - Never change function names unless requested. Keep things idempotent.
+   - If plots are requested for python code, prefer to use matplotlib. Do not ever use seaborn.
+   - Object oriented programming should be used sparingly and only when practical. Otherwise, opt for functional implementations.
+   - Never write unit tests unless explicitly requested.
+   - If we want you to write tests, we mean we want you to write example use cases that show how the code works.
npcsh/npc_team/npcsh_sibiji.png: Binary file
npcsh/npc_team/plonk.npc
@@ -0,0 +1,2 @@
+ name: plonk
+ primary_directive: You are the superior automation specialist of the NPC team.
npcsh/npc_team/plonk.png: Binary file