npcsh 1.0.20__py3-none-any.whl → 1.0.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. npcsh/_state.py +6 -5
  2. npcsh/corca.py +146 -129
  3. npcsh/guac.py +394 -119
  4. npcsh/npc_team/alicanto.npc +2 -0
  5. npcsh/npc_team/alicanto.png +0 -0
  6. npcsh/npc_team/corca.npc +13 -0
  7. npcsh/npc_team/corca.png +0 -0
  8. npcsh/npc_team/foreman.npc +7 -0
  9. npcsh/npc_team/frederic.npc +6 -0
  10. npcsh/npc_team/frederic4.png +0 -0
  11. npcsh/npc_team/guac.png +0 -0
  12. npcsh/npc_team/jinxs/bash_executer.jinx +20 -0
  13. npcsh/npc_team/jinxs/edit_file.jinx +94 -0
  14. npcsh/npc_team/jinxs/image_generation.jinx +29 -0
  15. npcsh/npc_team/jinxs/internet_search.jinx +31 -0
  16. npcsh/npc_team/jinxs/python_executor.jinx +11 -0
  17. npcsh/npc_team/jinxs/screen_cap.jinx +25 -0
  18. npcsh/npc_team/kadiefa.npc +3 -0
  19. npcsh/npc_team/kadiefa.png +0 -0
  20. npcsh/npc_team/npcsh.ctx +18 -0
  21. npcsh/npc_team/npcsh_sibiji.png +0 -0
  22. npcsh/npc_team/plonk.npc +2 -0
  23. npcsh/npc_team/plonk.png +0 -0
  24. npcsh/npc_team/plonkjr.npc +2 -0
  25. npcsh/npc_team/plonkjr.png +0 -0
  26. npcsh/npc_team/sibiji.npc +3 -0
  27. npcsh/npc_team/sibiji.png +0 -0
  28. npcsh/npc_team/spool.png +0 -0
  29. npcsh/npc_team/yap.png +0 -0
  30. npcsh-1.0.22.data/data/npcsh/npc_team/alicanto.npc +2 -0
  31. npcsh-1.0.22.data/data/npcsh/npc_team/alicanto.png +0 -0
  32. npcsh-1.0.22.data/data/npcsh/npc_team/bash_executer.jinx +20 -0
  33. npcsh-1.0.22.data/data/npcsh/npc_team/corca.npc +13 -0
  34. npcsh-1.0.22.data/data/npcsh/npc_team/corca.png +0 -0
  35. npcsh-1.0.22.data/data/npcsh/npc_team/edit_file.jinx +94 -0
  36. npcsh-1.0.22.data/data/npcsh/npc_team/foreman.npc +7 -0
  37. npcsh-1.0.22.data/data/npcsh/npc_team/frederic.npc +6 -0
  38. npcsh-1.0.22.data/data/npcsh/npc_team/frederic4.png +0 -0
  39. npcsh-1.0.22.data/data/npcsh/npc_team/guac.png +0 -0
  40. npcsh-1.0.22.data/data/npcsh/npc_team/image_generation.jinx +29 -0
  41. npcsh-1.0.22.data/data/npcsh/npc_team/internet_search.jinx +31 -0
  42. npcsh-1.0.22.data/data/npcsh/npc_team/kadiefa.npc +3 -0
  43. npcsh-1.0.22.data/data/npcsh/npc_team/kadiefa.png +0 -0
  44. npcsh-1.0.22.data/data/npcsh/npc_team/npcsh.ctx +18 -0
  45. npcsh-1.0.22.data/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  46. npcsh-1.0.22.data/data/npcsh/npc_team/plonk.npc +2 -0
  47. npcsh-1.0.22.data/data/npcsh/npc_team/plonk.png +0 -0
  48. npcsh-1.0.22.data/data/npcsh/npc_team/plonkjr.npc +2 -0
  49. npcsh-1.0.22.data/data/npcsh/npc_team/plonkjr.png +0 -0
  50. npcsh-1.0.22.data/data/npcsh/npc_team/python_executor.jinx +11 -0
  51. npcsh-1.0.22.data/data/npcsh/npc_team/screen_cap.jinx +25 -0
  52. npcsh-1.0.22.data/data/npcsh/npc_team/sibiji.npc +3 -0
  53. npcsh-1.0.22.data/data/npcsh/npc_team/sibiji.png +0 -0
  54. npcsh-1.0.22.data/data/npcsh/npc_team/spool.png +0 -0
  55. npcsh-1.0.22.data/data/npcsh/npc_team/yap.png +0 -0
  56. {npcsh-1.0.20.dist-info → npcsh-1.0.22.dist-info}/METADATA +8 -3
  57. npcsh-1.0.22.dist-info/RECORD +73 -0
  58. npcsh-1.0.20.dist-info/RECORD +0 -21
  59. {npcsh-1.0.20.dist-info → npcsh-1.0.22.dist-info}/WHEEL +0 -0
  60. {npcsh-1.0.20.dist-info → npcsh-1.0.22.dist-info}/entry_points.txt +0 -0
  61. {npcsh-1.0.20.dist-info → npcsh-1.0.22.dist-info}/licenses/LICENSE +0 -0
  62. {npcsh-1.0.20.dist-info → npcsh-1.0.22.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,2 @@
+ name: alicanto
+ primary_directive: You are Alicanto the mythical bird. You have been spotted and it is your job to lead users to explore the world.
Binary file
@@ -0,0 +1,13 @@
+ name: corca
+ primary_directive: |
+   You are corca, a distinguished member of the NPC team.
+   Your expertise is in the area of software development and
+   you have a knack for thinking through problems carefully.
+   You favor solutions that prioritize simplicity and clarity, and
+   you ought always to consider how a suggestion may increase rather than reduce tech debt
+   unnecessarily. Now, the key is in this last term, "unnecessarily".
+   You must distinguish carefully and, when in doubt, opt to ask for further
+   information or clarification with concrete, clear options that make it
+   easy for a user to choose.
+ model: gpt-4o-mini
+ provider: openai
Binary file
@@ -0,0 +1,7 @@
+ name: foreman
+ primary_directive: You are the foreman of an NPC team. It is your duty
+   to delegate tasks to your team members or to other specialized teams
+   in order to complete the project. You are responsible for the
+   completion of the project and the safety of your team members.
+ model: gpt-4o-mini
+ provider: openai
@@ -0,0 +1,6 @@
+ name: frederic
+ primary_directive: |
+   You are frederic the polar bear. Your job is to help users think through problems and
+   to provide straightforward ways forward on them. Cut through the ice
+   to get to what matters and keep things simple. You are to respond in a
+   witty tone like Richard Feynman but with the romantic timbre of Frederic Chopin.
Binary file
Binary file
@@ -0,0 +1,20 @@
+ jinx_name: bash_executor
+ description: Execute bash commands. Should be used to grep file contents, list directories, or explore information to answer user questions more practically.
+ inputs:
+   - bash_command
+   - user_request
+ steps:
+   - engine: python
+     code: |
+       import subprocess
+       import os
+       cmd = '{{bash_command}}'  # rendered bash command from the jinx input
+       def run_command(cmd):
+           process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+           stdout, stderr = process.communicate()
+           if stderr:
+               print(f"Error: {stderr.decode('utf-8')}")
+               return stderr
+           return stdout
+       result = run_command(cmd)
+       output = result.decode('utf-8')
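The step above shells out with subprocess.Popen(..., shell=True), so the rendered command runs verbatim through the shell, and any bash_command containing a single quote would break the '{{bash_command}}' template. A minimal standalone sketch of the helper's behavior, with a hypothetical ls -la input:

import subprocess

def run_command(cmd):
    # Mirror of the jinx helper: run through the shell, capture both
    # streams, and return the stderr bytes when the command wrote errors.
    process = subprocess.Popen(cmd, shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    return stderr if stderr else stdout

print(run_command("ls -la").decode("utf-8"))  # hypothetical input command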
@@ -0,0 +1,94 @@
+ jinx_name: file_editor
+ description: Examines a file, determines what changes are needed, and applies those
+   changes.
+ inputs:
+   - file_path
+   - edit_instructions
+   - backup: true
+ steps:
+   - name: "edit_file"
+     engine: "python"
+     code: |
+       import os
+       from npcpy.llm_funcs import get_llm_response
+
+       # Get inputs
+       file_path = os.path.expanduser("{{ file_path }}")
+       edit_instructions = "{{ edit_instructions }}"
+       backup_str = "{{ backup }}"
+       create_backup = backup_str.lower() not in ('false', 'no', '0', '')
+
+       # Read file content
+       with open(file_path, 'r') as f:
+           original_content = f.read()
+
+       # Create backup if requested
+       if create_backup:
+           backup_path = file_path + ".bak"
+           with open(backup_path, 'w') as f:
+               f.write(original_content)
+
+       # Make the prompt for the LLM
+       prompt = """You are a code editing assistant. Analyze this file and make the requested changes.
+
+       File content:
+       """ + original_content + """
+
+       Edit instructions: """ + edit_instructions + """
+
+       Return a JSON object with these fields:
+       1. "modifications": An array of modification objects, where each object has:
+          - "type": One of "replace", "insert_after", "insert_before", or "delete"
+          - "target": For "insert_after" and "insert_before", the text to insert after/before.
+            For "delete", the text to delete
+          - "original": For "replace", the text to be replaced
+          - "replacement": For "replace", the text to replace with
+          - "insertion": For "insert_after" and "insert_before", the text to insert
+       2. "explanation": Brief explanation of the changes made
+       """
+       # Get the LLM response with JSON formatting
+       response = get_llm_response(prompt, model=npc.model, provider=npc.provider, npc=npc, format="json")
+
+       result = response.get("response", {})
+       modifications = result.get("modifications", [])
+       explanation = result.get("explanation", "No explanation provided")
+
+       # Apply modifications
+       updated_content = original_content
+       changes_applied = 0
+
+       for mod in modifications:
+           print(mod)
+           mod_type = mod.get("type")
+
+           if mod_type == "replace":
+               original = mod.get("original")
+               replacement = mod.get("replacement")
+               if original in updated_content:
+                   updated_content = updated_content.replace(original, replacement)
+                   changes_applied += 1
+
+           elif mod_type == "insert_after":
+               target = mod.get("target")
+               insertion = mod.get("insertion")
+               if target in updated_content:
+                   updated_content = updated_content.replace(target, target + insertion)
+                   changes_applied += 1
+
+           elif mod_type == "insert_before":
+               target = mod.get("target")
+               insertion = mod.get("insertion")
+               if target in updated_content:
+                   updated_content = updated_content.replace(target, insertion + target)
+                   changes_applied += 1
+
+           elif mod_type == "delete":
+               target = mod.get("target")
+               if target in updated_content:
+                   updated_content = updated_content.replace(target, "")
+                   changes_applied += 1
+
+       with open(file_path, 'w') as f:
+           f.write(updated_content)
+
+       output = "Applied " + str(changes_applied) + " changes to " + file_path + "\n\n" + explanation
@@ -0,0 +1,29 @@
+ jinx_name: image_generation_jinx
+ description: 'Generates images based on a text prompt. The prompt must be specified to facilitate the user''s goal and should NOT be a verbatim replication of their request unless asked. The output name must be a path relative to the current directory. Do NOT use placeholders; specify ./descriptive_image_name.png. The model must be specified if the user indicates a specific model or provider. For example, if they want gemini-2.5-flash you should specify gemini-2.5-flash as the model and gemini as the provider. If they specify gpt-image-1 you should specify gpt-image-1 as the model and openai as the provider. Only leave the default if the user does not indicate a model preference.'
+ inputs:
+   - prompt
+   - output_name
+   - model: runwayml/stable-diffusion-v1-5
+   - provider: diffusers
+ steps:
+   - engine: "python"
+     code: |
+       image_prompt = '{{prompt}}'.strip()
+       from npcpy.llm_funcs import gen_image
+       # Generate the image
+       pil_image = gen_image(
+           image_prompt,
+           npc=npc,
+           model='{{model}}',  # adjust the model as needed
+           provider='{{provider}}'
+       )
+       image_generated = pil_image is not None
+       # save the image only when generation succeeded
+       output_name = '{{output_name}}'
+       if not output_name.endswith('.png'):
+           output_name = f'{output_name}.png'
+       if image_generated:
+           pil_image.save(output_name)
+           output = output_name
+       else:
+           output = 'Image generation failed.'
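Following the description's model/provider pairing rule, a hypothetical rendering of the step for a user who asked for gpt-image-1 (gen_image is the call used in the hunk above; the prompt and output name are invented):

from npcpy.llm_funcs import gen_image

pil_image = gen_image(
    "a watercolor lighthouse at dawn",  # rewritten prompt, not the verbatim request
    npc=None,                           # hypothetical: outside the jinx runtime there is no NPC
    model="gpt-image-1",
    provider="openai",
)
if pil_image:
    pil_image.save("./watercolor_lighthouse.png")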
@@ -0,0 +1,31 @@
+ jinx_name: internet_search
+ description: Searches the web for information based on a query in order to verify
+   timely details (e.g. current events) or to corroborate information in uncertain
+   situations. Should mainly be used when users specifically request a search;
+   otherwise an LLM's basic knowledge should be sufficient. The query must be written
+   as a search query; it should not be a direct parroting of the user's input text. Returns an LLM summary, so no post-summary is required.
+ inputs:
+   - query
+   - provider: ''
+ steps:
+   - engine: "python"
+     code: |
+       from npcpy.data.web import search_web
+       from npcsh._state import NPCSH_SEARCH_PROVIDER
+       query = "{{ query }}"
+       provider = '{{ provider }}'
+       if provider.strip() != '':
+           results = search_web(query, num_results=5, provider=provider)
+       else:
+           results = search_web(query, num_results=5, provider=NPCSH_SEARCH_PROVIDER)
+
+       # keep the provider-specific results; do not re-run the search here
+       print('QUERY in jinx', query)
+       print('RESULTS in jinx', results)
+   - engine: "natural"
+     code: |
+       Using the following information extracted from the web:
+
+       {{ results }}
+
+       Answer the user's question: {{ query }}
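The provider fallback in the python step collapses to a one-liner; a compact sketch under the same imports (the query is invented):

from npcpy.data.web import search_web
from npcsh._state import NPCSH_SEARCH_PROVIDER

query = "current weather in Boston"    # hypothetical search query
provider = ""                          # empty string -> use the configured default
results = search_web(query, num_results=5,
                     provider=provider.strip() or NPCSH_SEARCH_PROVIDER)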
@@ -0,0 +1,11 @@
+ jinx_name: python_executor
+ description: Execute scripts with Python. You must set the ultimate result as the "output"
+   variable. It MUST be a string.
+   Do not add unnecessary print statements.
+   This jinx is intended for executing code snippets that are not
+   accomplished by other jinxes. Use it only when the others are insufficient.
+ inputs:
+   - code
+ steps:
+   - code: '{{code}}'
+     engine: python
@@ -0,0 +1,25 @@
+ jinx_name: screen_cap
+ description: Captures the whole screen and sends the image for analysis
+ inputs:
+   - prompt
+ steps:
+   - engine: "python"
+     code: |
+       import os
+       from npcpy.data.image import capture_screenshot
+       from npcpy.llm_funcs import get_llm_response
+       out = capture_screenshot(full=True)
+       prompt = "{{prompt}}"
+       # Combine the user's request with instructions for reading the screenshot
+       analysis_prompt = prompt + "\n\nAttached is a screenshot of my screen currently. Please use this to evaluate the situation. If the user asked for you to explain what's on their screen or something similar, they are referring to the details contained within the attached image."
+       llm_response = get_llm_response(
+           prompt=analysis_prompt,
+           model=npc.model if npc else None,
+           provider=npc.provider if npc else None,
+           api_url=npc.api_url if npc else None,
+           api_key=npc.api_key if npc else None,
+           npc=npc,
+           images=[out['file_path']],
+       )
+       output = llm_response['response']
+
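The same capture-then-describe flow outside the jinx runtime, as a minimal sketch; capture_screenshot returning a dict with a 'file_path' key is implied by the step above, and the model/provider pair here is hypothetical:

from npcpy.data.image import capture_screenshot
from npcpy.llm_funcs import get_llm_response

shot = capture_screenshot(full=True)      # dict with 'file_path', per the step above
reply = get_llm_response(
    prompt="What window is currently focused?",  # hypothetical user prompt
    model="gpt-4o-mini",                         # hypothetical model/provider
    provider="openai",
    images=[shot["file_path"]],
)
print(reply["response"])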
@@ -0,0 +1,3 @@
+ name: kadiefa
+ primary_directive: |
+   You are kadiefa, the exploratory snow leopard. You love to find new paths and to explore hidden gems. You go into caverns no cat has ventured into before. You climb peaks that others call crazy. You are at the height of your power. Your role is to lead the way for users to explore complex research questions and to think outside of the box.
Binary file
@@ -0,0 +1,18 @@
+ context: |
+   The npcsh NPC team is devoted to providing a safe and helpful
+   environment for users where they can work and be as successful as possible.
+   npcsh is a command-line tool that makes it easy for users to harness
+   the power of LLMs from a shell; it is a toolkit consisting of several programs.
+ databases:
+   - ~/npcsh_history.db
+ mcp_servers:
+   - ~/.npcsh/mcp_server.py
+ use_global_jinxs: true
+ forenpc: sibiji
+ preferences:
+   - If you come up with an idea, it is critical that you also provide a way to validate the idea.
+   - Never change function names unless requested. Keep things idempotent.
+   - If plots are requested for Python code, prefer matplotlib. Do not ever use seaborn.
+   - Object-oriented programming should be used sparingly and only when practical. Otherwise, opt for functional implementations.
+   - Never write unit tests unless explicitly requested.
+   - If we ask you to write tests, we mean example use cases that show how the code works.
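The .ctx content above is YAML-shaped, so the team configuration can be inspected with a standard parser; a minimal sketch assuming a local copy of the file (path hypothetical):

import yaml  # assumes PyYAML is installed

with open("npc_team/npcsh.ctx") as f:  # hypothetical local path
    ctx = yaml.safe_load(f)

print(ctx["forenpc"])          # expected: sibiji
for rule in ctx["preferences"]:
    print("-", rule)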
Binary file
@@ -0,0 +1,2 @@
+ name: plonk
+ primary_directive: You are the superior automation specialist of the NPC team.
Binary file
@@ -0,0 +1,2 @@
+ name: plonkjr
+ primary_directive: You are the junior automation specialist of the NPC team.
Binary file
@@ -0,0 +1,3 @@
+ name: sibiji
+ primary_directive: You are a foundational AI assistant. Your role is to provide support and information. Respond to queries concisely and accurately. Help users with code and other processes.
+ jinxs: "*"
Binary file
Binary file
npcsh/npc_team/yap.png ADDED
Binary file
@@ -0,0 +1,2 @@
+ name: alicanto
+ primary_directive: You are Alicanto the mythical bird. You have been spotted and it is your job to lead users to explore the world.
@@ -0,0 +1,20 @@
+ jinx_name: bash_executor
+ description: Execute bash commands. Should be used to grep file contents, list directories, or explore information to answer user questions more practically.
+ inputs:
+   - bash_command
+   - user_request
+ steps:
+   - engine: python
+     code: |
+       import subprocess
+       import os
+       cmd = '{{bash_command}}'  # rendered bash command from the jinx input
+       def run_command(cmd):
+           process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+           stdout, stderr = process.communicate()
+           if stderr:
+               print(f"Error: {stderr.decode('utf-8')}")
+               return stderr
+           return stdout
+       result = run_command(cmd)
+       output = result.decode('utf-8')
@@ -0,0 +1,13 @@
+ name: corca
+ primary_directive: |
+   You are corca, a distinguished member of the NPC team.
+   Your expertise is in the area of software development and
+   you have a knack for thinking through problems carefully.
+   You favor solutions that prioritize simplicity and clarity, and
+   you ought always to consider how a suggestion may increase rather than reduce tech debt
+   unnecessarily. Now, the key is in this last term, "unnecessarily".
+   You must distinguish carefully and, when in doubt, opt to ask for further
+   information or clarification with concrete, clear options that make it
+   easy for a user to choose.
+ model: gpt-4o-mini
+ provider: openai
@@ -0,0 +1,94 @@
+ jinx_name: file_editor
+ description: Examines a file, determines what changes are needed, and applies those
+   changes.
+ inputs:
+   - file_path
+   - edit_instructions
+   - backup: true
+ steps:
+   - name: "edit_file"
+     engine: "python"
+     code: |
+       import os
+       from npcpy.llm_funcs import get_llm_response
+
+       # Get inputs
+       file_path = os.path.expanduser("{{ file_path }}")
+       edit_instructions = "{{ edit_instructions }}"
+       backup_str = "{{ backup }}"
+       create_backup = backup_str.lower() not in ('false', 'no', '0', '')
+
+       # Read file content
+       with open(file_path, 'r') as f:
+           original_content = f.read()
+
+       # Create backup if requested
+       if create_backup:
+           backup_path = file_path + ".bak"
+           with open(backup_path, 'w') as f:
+               f.write(original_content)
+
+       # Make the prompt for the LLM
+       prompt = """You are a code editing assistant. Analyze this file and make the requested changes.
+
+       File content:
+       """ + original_content + """
+
+       Edit instructions: """ + edit_instructions + """
+
+       Return a JSON object with these fields:
+       1. "modifications": An array of modification objects, where each object has:
+          - "type": One of "replace", "insert_after", "insert_before", or "delete"
+          - "target": For "insert_after" and "insert_before", the text to insert after/before.
+            For "delete", the text to delete
+          - "original": For "replace", the text to be replaced
+          - "replacement": For "replace", the text to replace with
+          - "insertion": For "insert_after" and "insert_before", the text to insert
+       2. "explanation": Brief explanation of the changes made
+       """
+       # Get the LLM response with JSON formatting
+       response = get_llm_response(prompt, model=npc.model, provider=npc.provider, npc=npc, format="json")
+
+       result = response.get("response", {})
+       modifications = result.get("modifications", [])
+       explanation = result.get("explanation", "No explanation provided")
+
+       # Apply modifications
+       updated_content = original_content
+       changes_applied = 0
+
+       for mod in modifications:
+           print(mod)
+           mod_type = mod.get("type")
+
+           if mod_type == "replace":
+               original = mod.get("original")
+               replacement = mod.get("replacement")
+               if original in updated_content:
+                   updated_content = updated_content.replace(original, replacement)
+                   changes_applied += 1
+
+           elif mod_type == "insert_after":
+               target = mod.get("target")
+               insertion = mod.get("insertion")
+               if target in updated_content:
+                   updated_content = updated_content.replace(target, target + insertion)
+                   changes_applied += 1
+
+           elif mod_type == "insert_before":
+               target = mod.get("target")
+               insertion = mod.get("insertion")
+               if target in updated_content:
+                   updated_content = updated_content.replace(target, insertion + target)
+                   changes_applied += 1
+
+           elif mod_type == "delete":
+               target = mod.get("target")
+               if target in updated_content:
+                   updated_content = updated_content.replace(target, "")
+                   changes_applied += 1
+
+       with open(file_path, 'w') as f:
+           f.write(updated_content)
+
+       output = "Applied " + str(changes_applied) + " changes to " + file_path + "\n\n" + explanation
@@ -0,0 +1,7 @@
+ name: foreman
+ primary_directive: You are the foreman of an NPC team. It is your duty
+   to delegate tasks to your team members or to other specialized teams
+   in order to complete the project. You are responsible for the
+   completion of the project and the safety of your team members.
+ model: gpt-4o-mini
+ provider: openai
@@ -0,0 +1,6 @@
+ name: frederic
+ primary_directive: |
+   You are frederic the polar bear. Your job is to help users think through problems and
+   to provide straightforward ways forward on them. Cut through the ice
+   to get to what matters and keep things simple. You are to respond in a
+   witty tone like Richard Feynman but with the romantic timbre of Frederic Chopin.
@@ -0,0 +1,29 @@
+ jinx_name: image_generation_jinx
+ description: 'Generates images based on a text prompt. The prompt must be specified to facilitate the user''s goal and should NOT be a verbatim replication of their request unless asked. The output name must be a path relative to the current directory. Do NOT use placeholders; specify ./descriptive_image_name.png. The model must be specified if the user indicates a specific model or provider. For example, if they want gemini-2.5-flash you should specify gemini-2.5-flash as the model and gemini as the provider. If they specify gpt-image-1 you should specify gpt-image-1 as the model and openai as the provider. Only leave the default if the user does not indicate a model preference.'
+ inputs:
+   - prompt
+   - output_name
+   - model: runwayml/stable-diffusion-v1-5
+   - provider: diffusers
+ steps:
+   - engine: "python"
+     code: |
+       image_prompt = '{{prompt}}'.strip()
+       from npcpy.llm_funcs import gen_image
+       # Generate the image
+       pil_image = gen_image(
+           image_prompt,
+           npc=npc,
+           model='{{model}}',  # adjust the model as needed
+           provider='{{provider}}'
+       )
+       image_generated = pil_image is not None
+       # save the image only when generation succeeded
+       output_name = '{{output_name}}'
+       if not output_name.endswith('.png'):
+           output_name = f'{output_name}.png'
+       if image_generated:
+           pil_image.save(output_name)
+           output = output_name
+       else:
+           output = 'Image generation failed.'
@@ -0,0 +1,31 @@
+ jinx_name: internet_search
+ description: Searches the web for information based on a query in order to verify
+   timely details (e.g. current events) or to corroborate information in uncertain
+   situations. Should mainly be used when users specifically request a search;
+   otherwise an LLM's basic knowledge should be sufficient. The query must be written
+   as a search query; it should not be a direct parroting of the user's input text. Returns an LLM summary, so no post-summary is required.
+ inputs:
+   - query
+   - provider: ''
+ steps:
+   - engine: "python"
+     code: |
+       from npcpy.data.web import search_web
+       from npcsh._state import NPCSH_SEARCH_PROVIDER
+       query = "{{ query }}"
+       provider = '{{ provider }}'
+       if provider.strip() != '':
+           results = search_web(query, num_results=5, provider=provider)
+       else:
+           results = search_web(query, num_results=5, provider=NPCSH_SEARCH_PROVIDER)
+
+       # keep the provider-specific results; do not re-run the search here
+       print('QUERY in jinx', query)
+       print('RESULTS in jinx', results)
+   - engine: "natural"
+     code: |
+       Using the following information extracted from the web:
+
+       {{ results }}
+
+       Answer the user's question: {{ query }}
@@ -0,0 +1,3 @@
+ name: kadiefa
+ primary_directive: |
+   You are kadiefa, the exploratory snow leopard. You love to find new paths and to explore hidden gems. You go into caverns no cat has ventured into before. You climb peaks that others call crazy. You are at the height of your power. Your role is to lead the way for users to explore complex research questions and to think outside of the box.
@@ -0,0 +1,18 @@
+ context: |
+   The npcsh NPC team is devoted to providing a safe and helpful
+   environment for users where they can work and be as successful as possible.
+   npcsh is a command-line tool that makes it easy for users to harness
+   the power of LLMs from a shell; it is a toolkit consisting of several programs.
+ databases:
+   - ~/npcsh_history.db
+ mcp_servers:
+   - ~/.npcsh/mcp_server.py
+ use_global_jinxs: true
+ forenpc: sibiji
+ preferences:
+   - If you come up with an idea, it is critical that you also provide a way to validate the idea.
+   - Never change function names unless requested. Keep things idempotent.
+   - If plots are requested for Python code, prefer matplotlib. Do not ever use seaborn.
+   - Object-oriented programming should be used sparingly and only when practical. Otherwise, opt for functional implementations.
+   - Never write unit tests unless explicitly requested.
+   - If we ask you to write tests, we mean example use cases that show how the code works.
@@ -0,0 +1,2 @@
+ name: plonk
+ primary_directive: You are the superior automation specialist of the NPC team.
@@ -0,0 +1,2 @@
+ name: plonkjr
+ primary_directive: You are the junior automation specialist of the NPC team.
@@ -0,0 +1,11 @@
+ jinx_name: python_executor
+ description: Execute scripts with Python. You must set the ultimate result as the "output"
+   variable. It MUST be a string.
+   Do not add unnecessary print statements.
+   This jinx is intended for executing code snippets that are not
+   accomplished by other jinxes. Use it only when the others are insufficient.
+ inputs:
+   - code
+ steps:
+   - code: '{{code}}'
+     engine: python
@@ -0,0 +1,25 @@
+ jinx_name: screen_cap
+ description: Captures the whole screen and sends the image for analysis
+ inputs:
+   - prompt
+ steps:
+   - engine: "python"
+     code: |
+       import os
+       from npcpy.data.image import capture_screenshot
+       from npcpy.llm_funcs import get_llm_response
+       out = capture_screenshot(full=True)
+       prompt = "{{prompt}}"
+       # Combine the user's request with instructions for reading the screenshot
+       analysis_prompt = prompt + "\n\nAttached is a screenshot of my screen currently. Please use this to evaluate the situation. If the user asked for you to explain what's on their screen or something similar, they are referring to the details contained within the attached image."
+       llm_response = get_llm_response(
+           prompt=analysis_prompt,
+           model=npc.model if npc else None,
+           provider=npc.provider if npc else None,
+           api_url=npc.api_url if npc else None,
+           api_key=npc.api_key if npc else None,
+           npc=npc,
+           images=[out['file_path']],
+       )
+       output = llm_response['response']
+
@@ -0,0 +1,3 @@
+ name: sibiji
+ primary_directive: You are a foundational AI assistant. Your role is to provide support and information. Respond to queries concisely and accurately. Help users with code and other processes.
+ jinxs: "*"