npcsh 1.1.5__py3-none-any.whl → 1.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. npcsh/_state.py +483 -336
  2. npcsh/npc_team/jinxs/code/sh.jinx +0 -1
  3. npcsh/npc_team/jinxs/code/sql.jinx +1 -3
  4. npcsh/npc_team/jinxs/utils/npc-studio.jinx +33 -38
  5. npcsh/npc_team/jinxs/utils/ots.jinx +34 -65
  6. npcsh/npc_team/jinxs/utils/search.jinx +130 -0
  7. npcsh/npc_team/jinxs/utils/vixynt.jinx +33 -45
  8. npcsh/routes.py +32 -14
  9. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/npc-studio.jinx +33 -38
  10. npcsh-1.1.7.data/data/npcsh/npc_team/ots.jinx +61 -0
  11. npcsh-1.1.7.data/data/npcsh/npc_team/search.jinx +130 -0
  12. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/sh.jinx +0 -1
  13. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/sql.jinx +1 -3
  14. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/vixynt.jinx +33 -45
  15. {npcsh-1.1.5.dist-info → npcsh-1.1.7.dist-info}/METADATA +1 -10
  16. {npcsh-1.1.5.dist-info → npcsh-1.1.7.dist-info}/RECORD +65 -73
  17. npcsh/npc_team/jinxs/utils/search/brainblast.jinx +0 -51
  18. npcsh/npc_team/jinxs/utils/search/kg_search.jinx +0 -43
  19. npcsh/npc_team/jinxs/utils/search/memory_search.jinx +0 -36
  20. npcsh/npc_team/jinxs/utils/search/rag.jinx +0 -70
  21. npcsh/npc_team/jinxs/utils/search/search.jinx +0 -192
  22. npcsh-1.1.5.data/data/npcsh/npc_team/brainblast.jinx +0 -51
  23. npcsh-1.1.5.data/data/npcsh/npc_team/kg_search.jinx +0 -43
  24. npcsh-1.1.5.data/data/npcsh/npc_team/memory_search.jinx +0 -36
  25. npcsh-1.1.5.data/data/npcsh/npc_team/ots.jinx +0 -92
  26. npcsh-1.1.5.data/data/npcsh/npc_team/rag.jinx +0 -70
  27. npcsh-1.1.5.data/data/npcsh/npc_team/search.jinx +0 -192
  28. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/alicanto.jinx +0 -0
  29. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/alicanto.npc +0 -0
  30. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/alicanto.png +0 -0
  31. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/breathe.jinx +0 -0
  32. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/build.jinx +0 -0
  33. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/compile.jinx +0 -0
  34. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/corca.jinx +0 -0
  35. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/corca.npc +0 -0
  36. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/corca.png +0 -0
  37. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/corca_example.png +0 -0
  38. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
  39. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/flush.jinx +0 -0
  40. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/foreman.npc +0 -0
  41. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/frederic.npc +0 -0
  42. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/frederic4.png +0 -0
  43. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/guac.jinx +0 -0
  44. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/guac.png +0 -0
  45. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/help.jinx +0 -0
  46. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/init.jinx +0 -0
  47. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/jinxs.jinx +0 -0
  48. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
  49. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/kadiefa.png +0 -0
  50. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  51. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  52. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/plan.jinx +0 -0
  53. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/plonk.jinx +0 -0
  54. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/plonk.npc +0 -0
  55. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/plonk.png +0 -0
  56. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
  57. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/plonkjr.png +0 -0
  58. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/pti.jinx +0 -0
  59. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/python.jinx +0 -0
  60. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/roll.jinx +0 -0
  61. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/sample.jinx +0 -0
  62. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/serve.jinx +0 -0
  63. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/set.jinx +0 -0
  64. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  65. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/sibiji.png +0 -0
  66. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/sleep.jinx +0 -0
  67. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/spool.jinx +0 -0
  68. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/spool.png +0 -0
  69. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/trigger.jinx +0 -0
  70. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/wander.jinx +0 -0
  71. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/yap.jinx +0 -0
  72. {npcsh-1.1.5.data → npcsh-1.1.7.data}/data/npcsh/npc_team/yap.png +0 -0
  73. {npcsh-1.1.5.dist-info → npcsh-1.1.7.dist-info}/WHEEL +0 -0
  74. {npcsh-1.1.5.dist-info → npcsh-1.1.7.dist-info}/entry_points.txt +0 -0
  75. {npcsh-1.1.5.dist-info → npcsh-1.1.7.dist-info}/licenses/LICENSE +0 -0
  76. {npcsh-1.1.5.dist-info → npcsh-1.1.7.dist-info}/top_level.txt +0 -0
@@ -1,43 +0,0 @@
- jinx_name: search_kg
- description: Search knowledge graph for relevant facts
- inputs:
- - query
- steps:
- - name: retrieve_facts
- engine: python
- code: |
- from npcpy.memory.command_history import load_kg_from_db
- import os
-
- kg = load_kg_from_db(
- command_history.engine,
- team.name if team else '__none__',
- npc.name if hasattr(npc, 'name') else '__none__',
- os.getcwd()
- )
-
- query_lower = '{{ query }}'.lower()
- matching_facts = []
-
- if kg and 'facts' in kg:
- for fact in kg['facts']:
- statement = fact.get('statement', '').lower()
- if query_lower in statement:
- matching_facts.append(fact)
-
- output = []
- for i, fact in enumerate(matching_facts[:10], 1):
- statement = fact.get('statement', '')
- fact_type = fact.get('type', 'unknown')
- output.append(f"{i}. [{fact_type}] {statement}")
-
- output = "\n".join(output) if output else "No facts found"
-
- - name: analyze_facts
- engine: natural
- code: |
- Knowledge graph facts for query "{{ query }}":
-
- {{ retrieve_facts }}
-
- Analyze how these facts relate to the query.
@@ -1,36 +0,0 @@
- jinx_name: search_memories
- description: Search through approved memories for relevant context
- inputs:
- - query
- steps:
- - name: retrieve_memories
- engine: python
- code: |
- from npcsh._state import get_relevant_memories
- import os
-
- memories = get_relevant_memories(
- command_history=command_history,
- npc_name=npc.name if hasattr(npc, 'name') else '__none__',
- team_name=team.name if team else '__none__',
- path=os.getcwd(),
- query='{{ query }}',
- max_memories=10,
- state=state
- )
-
- output = []
- for i, mem in enumerate(memories, 1):
- content = mem.get('final_memory', mem.get('initial_memory', ''))
- output.append(f"{i}. {content}")
-
- output = "\n".join(output) if output else "No memories found"
-
- - name: format_results
- engine: natural
- code: |
- Found memories for query "{{ query }}":
-
- {{ retrieve_memories }}
-
- Summarize the key points from these memories.
@@ -1,92 +0,0 @@
- jinx_name: "ots"
- description: "Take screenshot and analyze with vision model"
- inputs:
- - image_paths_args: "" # Optional comma-separated paths to image files for analysis.
- - prompt: "" # The prompt for the LLM about the image(s).
- - vmodel: "" # Vision model to use. Defaults to NPCSH_VISION_MODEL or NPC's model.
- - vprovider: "" # Vision model provider. Defaults to NPCSH_VISION_PROVIDER or NPC's provider.
- - stream: False # Whether to stream the output from the LLM.
- - api_url: "" # API URL for the LLM.
- - api_key: "" # API key for the LLM.
- steps:
- - name: "analyze_screenshot_or_image"
- engine: "python"
- code: |
- import os
- import traceback
- from npcpy.llm_funcs import get_llm_response
- from npcpy.data.image import capture_screenshot
- # Assuming NPCSH_VISION_MODEL and NPCSH_VISION_PROVIDER are accessible through _state or defaults
- # For simplicity in Jinx, we'll use fallbacks or assume context will provide
-
- image_paths_args_str = context.get('image_paths_args')
- user_prompt = context.get('prompt')
- vision_model = context.get('vmodel')
- vision_provider = context.get('vprovider')
- stream_output = context.get('stream')
- api_url = context.get('api_url')
- api_key = context.get('api_key')
- output_messages = context.get('messages', [])
- current_npc = context.get('npc')
-
- image_paths = []
- if image_paths_args_str and image_paths_args_str.strip():
- for img_path_arg in image_paths_args_str.split(','):
- full_path = os.path.abspath(os.path.expanduser(img_path_arg.strip()))
- if os.path.exists(full_path):
- image_paths.append(full_path)
- else:
- context['output'] = f"Error: Image file not found at {full_path}"
- context['messages'] = output_messages
- exit()
-
- if not image_paths:
- screenshot_info = capture_screenshot(full=False)
- if screenshot_info and "file_path" in screenshot_info:
- image_paths.append(screenshot_info["file_path"])
- print(f"Screenshot captured: {screenshot_info.get('filename', os.path.basename(screenshot_info['file_path']))}")
- else:
- context['output'] = "Error: Failed to capture screenshot."
- context['messages'] = output_messages
- exit()
-
- if not image_paths:
- context['output'] = "No valid images found or captured."
- context['messages'] = output_messages
- exit()
-
- if not user_prompt or not user_prompt.strip():
- # In a non-interactive Jinx, a default prompt is better than waiting for input
- user_prompt = "Describe the image(s)."
-
- # Fallback for model/provider if not explicitly set in Jinx inputs
- if not vision_model and current_npc and current_npc.model:
- vision_model = current_npc.model
- if not vision_provider and current_npc and current_npc.provider:
- vision_provider = current_npc.provider
-
- # Final fallbacks (these would ideally come from npcsh._state config)
- if not vision_model: vision_model = "gemini-1.5-pro-vision" # Example default
- if not vision_provider: vision_provider = "gemini" # Example default
-
- try:
- response_data = get_llm_response(
- prompt=user_prompt,
- model=vision_model,
- provider=vision_provider,
- messages=output_messages, # Pass current messages to LLM
- images=image_paths,
- stream=stream_output,
- npc=current_npc,
- api_url=api_url,
- api_key=api_key
- )
- context['output'] = response_data.get('response')
- context['messages'] = response_data.get('messages', output_messages)
- context['model'] = vision_model
- context['provider'] = vision_provider
-
- except Exception as e:
- traceback.print_exc()
- context['output'] = f"Error during /ots command: {e}"
- context['messages'] = output_messages
@@ -1,70 +0,0 @@
- jinx_name: "rag"
- description: "Execute a RAG command using ChromaDB embeddings with optional file input (-f/--file)"
- inputs:
- - query: "" # Required search query for RAG.
- - file_paths: "" # Optional comma-separated file paths to include in RAG.
- - vector_db_path: "~/npcsh_chroma.db" # Path to the ChromaDB vector database.
- - emodel: "" # Embedding model to use. Defaults to NPCSH_EMBEDDING_MODEL or NPC's model.
- - eprovider: "" # Embedding provider to use. Defaults to NPCSH_EMBEDDING_PROVIDER or NPC's provider.
- steps:
- - name: "execute_rag"
- engine: "python"
- code: |
- import os
- import traceback
- from npcpy.data.load import load_file_contents
- from npcpy.memory.search import execute_rag_command
- # Assuming NPCSH_EMBEDDING_MODEL and NPCSH_EMBEDDING_PROVIDER are accessible
-
- query = context.get('query')
- file_paths_str = context.get('file_paths')
- vector_db_path = context.get('vector_db_path')
- embedding_model = context.get('emodel')
- embedding_provider = context.get('eprovider')
- output_messages = context.get('messages', [])
- current_npc = context.get('npc')
-
- file_paths = []
- if file_paths_str and file_paths_str.strip():
- file_paths = [os.path.abspath(os.path.expanduser(p.strip())) for p in file_paths_str.split(',')]
-
- if not query and not file_paths:
- context['output'] = "Usage: /rag [-f file_path] <query>"
- context['messages'] = output_messages
- exit()
-
- # Fallback for model/provider if not explicitly set in Jinx inputs
- if not embedding_model and current_npc and current_npc.model:
- embedding_model = current_npc.model
- if not embedding_provider and current_npc and current_npc.provider:
- embedding_provider = current_npc.provider
-
- # Final fallbacks (these would ideally come from npcsh._state config)
- if not embedding_model: embedding_model = "nomic-ai/nomic-embed-text-v1.5" # Example default
- if not embedding_provider: embedding_provider = "ollama" # Example default
-
- try:
- file_contents = []
- for file_path in file_paths:
- try:
- chunks = load_file_contents(file_path)
- file_name = os.path.basename(file_path)
- file_contents.extend([f"[{file_name}] {chunk}" for chunk in chunks])
- except Exception as file_err:
- file_contents.append(f"Error processing file {file_path}: {str(file_err)}")
-
- result = execute_rag_command(
- command=query,
- vector_db_path=os.path.expanduser(vector_db_path),
- embedding_model=embedding_model,
- embedding_provider=embedding_provider,
- file_contents=file_contents if file_paths else None,
- **{k:v for k,v in context.items() if k not in ['messages', 'query', 'file_paths']} # Pass other context
- )
- context['output'] = result.get('response')
- context['messages'] = result.get('messages', output_messages)
-
- except Exception as e:
- traceback.print_exc()
- context['output'] = f"Error executing RAG command: {e}"
- context['messages'] = output_messages
@@ -1,192 +0,0 @@
- jinx_name: "search"
- description: "Execute web search or memory/KG search. Usage: /search [-m/-mem | -kg] <query>"
- inputs:
- - query: "" # Required search query.
- - search_type: "web" # Type of search: "web", "memory", or "kg".
- - sprovider: "" # Search provider for web search. Defaults to NPCSH_SEARCH_PROVIDER.
- - history_db_path: "~/npcsh_history.db" # Path to the command history database for memory/KG search.
- steps:
- - name: "execute_search"
- engine: "python"
- code: |
- import os
- import traceback
- from npcpy.data.web import search_web
- from npcpy.memory.command_history import CommandHistory, load_kg_from_db, save_kg_to_db # For KG search
- from npcsh._state import get_relevant_memories # For memory search
- # Assuming NPCSH_SEARCH_PROVIDER is accessible
-
- query = context.get('query')
- search_type = context.get('search_type')
- search_provider = context.get('sprovider')
- history_db_path = context.get('history_db_path')
- output_messages = context.get('messages', [])
- current_npc = context.get('npc')
- current_team = context.get('team')
-
- if not query or not query.strip():
- context['output'] = (
- "Usage:\n"
- " /search <query> - Web search\n"
- " /search -m <query> - Memory search\n"
- " /search -kg <query> - Knowledge graph search"
- )
- context['messages'] = output_messages
- exit()
-
- def search_memories(query_str, current_context, output_msgs):
- command_history = current_context.get('command_history')
- if not command_history:
- db_path = os.path.expanduser(current_context.get("history_db_path", "~/npcsh_history.db"))
- try:
- command_history = CommandHistory(db_path)
- current_context['command_history'] = command_history
- except Exception as e:
- return {"output": f"Error connecting to history: {e}", "messages": output_msgs}
-
- npc_name = current_context.get('npc_name', '__none__')
- team_name = current_context.get('team_name', '__none__')
- current_path = current_context.get('current_path', os.getcwd())
- state = current_context.get('state')
-
- try:
- memories = get_relevant_memories(
- command_history=command_history,
- npc_name=npc_name,
- team_name=team_name,
- path=current_path,
- query=query_str,
- max_memories=10,
- state=state
- )
-
- if not memories:
- output = f"No memories found for query: '{query_str}'"
- else:
- output = f"Found {len(memories)} memories:\n\n"
- for i, mem in enumerate(memories, 1):
- final_mem = (
- mem.get('final_memory') or
- mem.get('initial_memory')
- )
- timestamp = mem.get('timestamp', 'unknown')
- output += f"{i}. [{timestamp}] {final_mem}\n"
-
- return {"output": output, "messages": output_msgs}
-
- except Exception as e:
- traceback.print_exc()
- return {"output": f"Error searching memories: {e}", "messages": output_msgs}
-
- def search_knowledge_graph(query_str, current_context, output_msgs):
- command_history = current_context.get('command_history')
- if not command_history:
- db_path = os.path.expanduser(current_context.get("history_db_path", "~/npcsh_history.db"))
- try:
- command_history = CommandHistory(db_path)
- current_context['command_history'] = command_history
- except Exception as e:
- return {"output": f"Error connecting to history: {e}", "messages": output_msgs}
-
- npc_name = current_context.get('npc_name', '__none__')
- team_name = current_context.get('team_name', '__none__')
- current_path = current_context.get('current_path', os.getcwd())
-
- try:
- engine = command_history.engine
- kg = load_kg_from_db(
- engine,
- team_name,
- npc_name,
- current_path
- )
-
- if not kg or not kg.get('facts'):
- return {
- "output": (
- f"No knowledge graph found for current scope.\n"
- f"Scope: Team='{team_name}', "
- f"NPC='{npc_name}', Path='{current_path}'"
- ),
- "messages": output_msgs
- }
-
- query_lower = query_str.lower()
- matching_facts = []
- matching_concepts = []
-
- for fact in kg.get('facts', []):
- statement = fact.get('statement', '').lower()
- if query_lower in statement:
- matching_facts.append(fact)
-
- for concept in kg.get('concepts', []):
- name = concept.get('name', '').lower()
- desc = concept.get('description', '').lower()
- if query_lower in name or query_lower in desc:
- matching_concepts.append(concept)
-
- output = f"Knowledge Graph Search Results for '{query_str}':\n\n"
-
- if matching_facts:
- output += f"## Facts ({len(matching_facts)}):\n"
- for i, fact in enumerate(matching_facts, 1):
- output += f"{i}. {fact.get('statement')}\n"
- output += "\n"
-
- if matching_concepts:
- output += f"## Concepts ({len(matching_concepts)}):\n"
- for i, concept in enumerate(matching_concepts, 1):
- name = concept.get('name')
- desc = concept.get('description', '')
- output += f"{i}. {name}: {desc}\n"
-
- if not matching_facts and not matching_concepts:
- output += "No matching facts or concepts found."
-
- return {"output": output, "messages": output_msgs}
-
- except Exception as e:
- traceback.print_exc()
- return {"output": f"Error searching KG: {e}", "messages": output_msgs}
-
- def search_web_default(query_str, current_context, output_msgs):
- # Fallback for search_provider if not explicitly set in Jinx inputs
- current_search_provider = current_context.get('sprovider')
- # If NPCSH_SEARCH_PROVIDER is accessible, use it. Otherwise, a default.
- # For Jinx, let's just use a hardcoded default if not provided.
- if not current_search_provider:
- current_search_provider = "google" # Example default
-
- # Assuming render_markdown is accessible
- # render_markdown(f'- Searching {current_search_provider} for "{query_str}"') # Not directly supported in Jinx steps
-
- try:
- search_results = search_web(query_str, provider=current_search_provider)
- output = (
- "\n".join([f"- {res}" for res in search_results])
- if search_results
- else "No results found."
- )
- except Exception as e:
- traceback.print_exc()
- output = f"Error during web search: {e}"
-
- return {"output": output, "messages": output_msgs}
-
-
- # Populate npc_name, team_name, current_path for search functions
- context['npc_name'] = current_npc.name if isinstance(current_npc, type(None).__class__) else "__none__"
- context['team_name'] = current_team.name if current_team else "__none__"
- context['current_path'] = os.getcwd() # Or get from context if available
-
- final_result = None
- if search_type == 'memory':
- final_result = search_memories(query, context, output_messages)
- elif search_type == 'kg':
- final_result = search_knowledge_graph(query, context, output_messages)
- else:
- final_result = search_web_default(query, context, output_messages)
-
- context['output'] = final_result.get('output')
- context['messages'] = final_result.get('messages', output_messages)