npcsh 1.1.13__py3-none-any.whl → 1.1.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +491 -80
- npcsh/mcp_server.py +2 -1
- npcsh/npc.py +84 -32
- npcsh/npc_team/alicanto.npc +22 -1
- npcsh/npc_team/corca.npc +28 -9
- npcsh/npc_team/frederic.npc +25 -4
- npcsh/npc_team/guac.npc +22 -0
- npcsh/npc_team/jinxs/bin/nql.jinx +141 -0
- npcsh/npc_team/jinxs/bin/sync.jinx +230 -0
- {npcsh-1.1.13.data/data/npcsh/npc_team → npcsh/npc_team/jinxs/bin}/vixynt.jinx +8 -30
- npcsh/npc_team/jinxs/bin/wander.jinx +152 -0
- npcsh/npc_team/jinxs/lib/browser/browser_action.jinx +220 -0
- npcsh/npc_team/jinxs/lib/browser/browser_screenshot.jinx +40 -0
- npcsh/npc_team/jinxs/lib/browser/close_browser.jinx +14 -0
- npcsh/npc_team/jinxs/lib/browser/open_browser.jinx +43 -0
- npcsh/npc_team/jinxs/lib/computer_use/click.jinx +23 -0
- npcsh/npc_team/jinxs/lib/computer_use/key_press.jinx +26 -0
- npcsh/npc_team/jinxs/lib/computer_use/launch_app.jinx +37 -0
- npcsh/npc_team/jinxs/lib/computer_use/screenshot.jinx +23 -0
- npcsh/npc_team/jinxs/lib/computer_use/type_text.jinx +27 -0
- npcsh/npc_team/jinxs/lib/computer_use/wait.jinx +21 -0
- {npcsh-1.1.13.data/data/npcsh/npc_team → npcsh/npc_team/jinxs/lib/core}/edit_file.jinx +3 -3
- {npcsh-1.1.13.data/data/npcsh/npc_team → npcsh/npc_team/jinxs/lib/core}/load_file.jinx +1 -1
- npcsh/npc_team/jinxs/lib/core/paste.jinx +134 -0
- {npcsh-1.1.13.data/data/npcsh/npc_team → npcsh/npc_team/jinxs/lib/core}/search.jinx +2 -1
- npcsh/npc_team/jinxs/{code → lib/core}/sh.jinx +2 -8
- npcsh/npc_team/jinxs/{code → lib/core}/sql.jinx +1 -1
- npcsh/npc_team/jinxs/lib/orchestration/convene.jinx +232 -0
- npcsh/npc_team/jinxs/lib/orchestration/delegate.jinx +184 -0
- npcsh/npc_team/jinxs/lib/research/arxiv.jinx +76 -0
- npcsh/npc_team/jinxs/lib/research/paper_search.jinx +101 -0
- npcsh/npc_team/jinxs/lib/research/semantic_scholar.jinx +69 -0
- npcsh/npc_team/jinxs/{utils/core → lib/utils}/build.jinx +8 -8
- npcsh/npc_team/jinxs/lib/utils/jinxs.jinx +176 -0
- npcsh/npc_team/jinxs/lib/utils/shh.jinx +17 -0
- npcsh/npc_team/jinxs/lib/utils/switch.jinx +62 -0
- npcsh/npc_team/jinxs/lib/utils/switches.jinx +61 -0
- npcsh/npc_team/jinxs/lib/utils/teamviz.jinx +205 -0
- npcsh/npc_team/jinxs/lib/utils/verbose.jinx +17 -0
- npcsh/npc_team/kadiefa.npc +19 -1
- npcsh/npc_team/plonk.npc +26 -1
- npcsh/npc_team/plonkjr.npc +22 -1
- npcsh/npc_team/sibiji.npc +23 -2
- npcsh/npcsh.py +153 -39
- npcsh/ui.py +22 -1
- npcsh-1.1.15.data/data/npcsh/npc_team/alicanto.npc +23 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/arxiv.jinx +76 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/browser_action.jinx +220 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/browser_screenshot.jinx +40 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/build.jinx +8 -8
- npcsh-1.1.15.data/data/npcsh/npc_team/click.jinx +23 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/close_browser.jinx +14 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/convene.jinx +232 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/corca.npc +31 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/delegate.jinx +184 -0
- {npcsh/npc_team/jinxs/utils → npcsh-1.1.15.data/data/npcsh/npc_team}/edit_file.jinx +3 -3
- npcsh-1.1.15.data/data/npcsh/npc_team/frederic.npc +27 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/guac.npc +22 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/jinxs.jinx +176 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/kadiefa.npc +21 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/key_press.jinx +26 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/launch_app.jinx +37 -0
- {npcsh/npc_team/jinxs/utils → npcsh-1.1.15.data/data/npcsh/npc_team}/load_file.jinx +1 -1
- npcsh-1.1.15.data/data/npcsh/npc_team/nql.jinx +141 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/open_browser.jinx +43 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/paper_search.jinx +101 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/paste.jinx +134 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/plonk.npc +27 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/plonkjr.npc +23 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/screenshot.jinx +23 -0
- {npcsh/npc_team/jinxs/utils → npcsh-1.1.15.data/data/npcsh/npc_team}/search.jinx +2 -1
- npcsh-1.1.15.data/data/npcsh/npc_team/semantic_scholar.jinx +69 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/sh.jinx +2 -8
- npcsh-1.1.15.data/data/npcsh/npc_team/shh.jinx +17 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/sibiji.npc +24 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/sql.jinx +1 -1
- npcsh-1.1.15.data/data/npcsh/npc_team/switch.jinx +62 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/switches.jinx +61 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/sync.jinx +230 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/teamviz.jinx +205 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/type_text.jinx +27 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/verbose.jinx +17 -0
- {npcsh/npc_team/jinxs/utils → npcsh-1.1.15.data/data/npcsh/npc_team}/vixynt.jinx +8 -30
- npcsh-1.1.15.data/data/npcsh/npc_team/wait.jinx +21 -0
- npcsh-1.1.15.data/data/npcsh/npc_team/wander.jinx +152 -0
- {npcsh-1.1.13.dist-info → npcsh-1.1.15.dist-info}/METADATA +399 -58
- npcsh-1.1.15.dist-info/RECORD +170 -0
- npcsh-1.1.15.dist-info/entry_points.txt +19 -0
- npcsh-1.1.15.dist-info/top_level.txt +2 -0
- project/__init__.py +1 -0
- npcsh/npc_team/foreman.npc +0 -7
- npcsh/npc_team/jinxs/modes/alicanto.jinx +0 -194
- npcsh/npc_team/jinxs/modes/corca.jinx +0 -249
- npcsh/npc_team/jinxs/modes/guac.jinx +0 -317
- npcsh/npc_team/jinxs/modes/plonk.jinx +0 -214
- npcsh/npc_team/jinxs/modes/pti.jinx +0 -170
- npcsh/npc_team/jinxs/modes/wander.jinx +0 -186
- npcsh/npc_team/jinxs/utils/agent.jinx +0 -17
- npcsh/npc_team/jinxs/utils/core/jinxs.jinx +0 -32
- npcsh-1.1.13.data/data/npcsh/npc_team/agent.jinx +0 -17
- npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.jinx +0 -194
- npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.npc +0 -2
- npcsh-1.1.13.data/data/npcsh/npc_team/corca.jinx +0 -249
- npcsh-1.1.13.data/data/npcsh/npc_team/corca.npc +0 -12
- npcsh-1.1.13.data/data/npcsh/npc_team/foreman.npc +0 -7
- npcsh-1.1.13.data/data/npcsh/npc_team/frederic.npc +0 -6
- npcsh-1.1.13.data/data/npcsh/npc_team/guac.jinx +0 -317
- npcsh-1.1.13.data/data/npcsh/npc_team/jinxs.jinx +0 -32
- npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.npc +0 -3
- npcsh-1.1.13.data/data/npcsh/npc_team/plonk.jinx +0 -214
- npcsh-1.1.13.data/data/npcsh/npc_team/plonk.npc +0 -2
- npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.npc +0 -2
- npcsh-1.1.13.data/data/npcsh/npc_team/pti.jinx +0 -170
- npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.npc +0 -3
- npcsh-1.1.13.data/data/npcsh/npc_team/wander.jinx +0 -186
- npcsh-1.1.13.dist-info/RECORD +0 -135
- npcsh-1.1.13.dist-info/entry_points.txt +0 -9
- npcsh-1.1.13.dist-info/top_level.txt +0 -1
- /npcsh/npc_team/jinxs/{utils → bin}/roll.jinx +0 -0
- /npcsh/npc_team/jinxs/{utils → bin}/sample.jinx +0 -0
- /npcsh/npc_team/jinxs/{modes → bin}/spool.jinx +0 -0
- /npcsh/npc_team/jinxs/{modes → bin}/yap.jinx +0 -0
- /npcsh/npc_team/jinxs/{utils → lib/computer_use}/trigger.jinx +0 -0
- /npcsh/npc_team/jinxs/{utils → lib/core}/chat.jinx +0 -0
- /npcsh/npc_team/jinxs/{utils → lib/core}/cmd.jinx +0 -0
- /npcsh/npc_team/jinxs/{utils → lib/core}/compress.jinx +0 -0
- /npcsh/npc_team/jinxs/{utils → lib/core}/ots.jinx +0 -0
- /npcsh/npc_team/jinxs/{code → lib/core}/python.jinx +0 -0
- /npcsh/npc_team/jinxs/{utils → lib/core}/sleep.jinx +0 -0
- /npcsh/npc_team/jinxs/{utils/core → lib/utils}/compile.jinx +0 -0
- /npcsh/npc_team/jinxs/{utils/core → lib/utils}/help.jinx +0 -0
- /npcsh/npc_team/jinxs/{utils/core → lib/utils}/init.jinx +0 -0
- /npcsh/npc_team/jinxs/{utils → lib/utils}/serve.jinx +0 -0
- /npcsh/npc_team/jinxs/{utils/core → lib/utils}/set.jinx +0 -0
- /npcsh/npc_team/jinxs/{utils → lib/utils}/usage.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/chat.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/cmd.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/compile.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/compress.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/corca_example.png +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/help.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/init.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/ots.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/python.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/roll.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/sample.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/serve.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/set.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/sleep.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/spool.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/trigger.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/usage.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/yap.jinx +0 -0
- {npcsh-1.1.13.data → npcsh-1.1.15.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.13.dist-info → npcsh-1.1.15.dist-info}/WHEEL +0 -0
- {npcsh-1.1.13.dist-info → npcsh-1.1.15.dist-info}/licenses/LICENSE +0 -0
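Note on the file list above: the headline change in this release is a reorganization of the bundled jinxs. The flat modes/, utils/, utils/core/, and code/ directories are replaced by a bin/ directory of user-facing commands plus a lib/ tree grouped by domain; the old mode jinxs (alicanto, corca, guac, plonk, pti) are deleted outright, wander is rewritten as bin/wander.jinx, spool and yap move to bin/ unchanged, and the corresponding NPC definitions (alicanto.npc, corca.npc, guac.npc, plonk.npc) are expanded or added in their place. The console entry points roughly double (entry_points.txt grows from 9 to 19 lines), and the wheel now ships a second top-level package (project/__init__.py). Reading only from the paths listed above, the new jinx layout is roughly:

    npcsh/npc_team/jinxs/
        bin/                nql, sync, vixynt, wander, roll, sample, spool, yap
        lib/browser/        open_browser, browser_action, browser_screenshot, close_browser
        lib/computer_use/   click, key_press, launch_app, screenshot, type_text, wait, trigger
        lib/core/           chat, cmd, compress, edit_file, load_file, ots, paste, python, search, sh, sleep, sql
        lib/orchestration/  convene, delegate
        lib/research/       arxiv, paper_search, semantic_scholar
        lib/utils/          build, compile, help, init, jinxs, serve, set, shh, switch, switches, teamviz, usage, verbose

The deleted mode jinxs follow.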
--- a/npcsh/npc_team/jinxs/modes/pti.jinx
+++ /dev/null
@@ -1,170 +0,0 @@
-jinx_name: pti
-description: Pardon-The-Interruption - human-in-the-loop reasoning with think tags and interruptible streaming
-npc: frederic
-inputs:
-  - model: null
-  - provider: null
-  - files: null
-  - reasoning_model: null
-
-steps:
-  - name: pti_repl
-    engine: python
-    code: |
-      import os
-      import sys
-      from termcolor import colored
-
-      from npcpy.llm_funcs import get_llm_response
-      from npcpy.npc_sysenv import get_system_message, render_markdown
-      from npcpy.data.load import load_file_contents
-      from npcpy.data.text import rag_search
-
-      npc = context.get('npc')
-      team = context.get('team')
-      messages = context.get('messages', [])
-      files = context.get('files')
-
-      # PTI uses reasoning model for deeper thinking
-      model = context.get('reasoning_model') or context.get('model') or (npc.model if npc else None)
-      provider = context.get('provider') or (npc.provider if npc else None)
-
-      print("""
-      ██████╗ ████████╗██╗
-      ██╔══██╗╚══██╔══╝██║
-      ██████╔╝   ██║   ██║
-      ██╔═══╝    ██║   ██║
-      ██║        ██║   ██║
-      ╚═╝        ╚═╝   ╚═╝
-
-      Pardon-The-Interruption
-      Human-in-the-loop reasoning mode
-      """)
-
-      npc_name = npc.name if npc else "pti"
-      print(f"Entering PTI mode (NPC: {npc_name}). Type '/pq' to exit.")
-      print(" - AI will use <think> tags for step-by-step reasoning")
-      print(" - Use <request_for_input> to pause and ask questions")
-      print(" - Ctrl+C interrupts stream for immediate feedback")
-
-      # Load files if provided
-      loaded_content = {}
-      if files:
-          if isinstance(files, str):
-              files = [f.strip() for f in files.split(',')]
-          for file_path in files:
-              file_path = os.path.expanduser(file_path)
-              if os.path.exists(file_path):
-                  try:
-                      chunks = load_file_contents(file_path)
-                      loaded_content[file_path] = "\n".join(chunks)
-                      print(colored(f"Loaded: {file_path}", "green"))
-                  except Exception as e:
-                      print(colored(f"Error loading {file_path}: {e}", "red"))
-
-      # System message for PTI mode
-      pti_system = """You are an AI assistant in PTI (Pardon-The-Interruption) mode.
-
-      IMPORTANT INSTRUCTIONS:
-      1. Think step-by-step using <think>...</think> tags to show your reasoning
-      2. When you need more information from the user, use <request_for_input>your question</request_for_input>
-      3. Be thorough but concise in your reasoning
-      4. The user can interrupt at any time to provide guidance
-
-      Example:
-      <think>
-      Let me break this down...
-      Step 1: First I need to understand X
-      Step 2: Then consider Y
-      </think>
-
-      <request_for_input>
-      I notice you mentioned Z. Could you clarify what you mean by that?
-      </request_for_input>"""
-
-      if not messages or messages[0].get("role") != "system":
-          messages.insert(0, {"role": "system", "content": pti_system})
-
-      # REPL loop
-      user_input = None
-      while True:
-          try:
-              if not user_input:
-                  prompt_str = f"{npc_name}:pti> "
-                  user_input = input(prompt_str).strip()
-
-              if not user_input:
-                  user_input = None
-                  continue
-
-              if user_input.lower() == "/pq":
-                  print("Exiting PTI mode.")
-                  break
-
-              # Build prompt with file context
-              prompt_for_llm = user_input
-              if loaded_content:
-                  context_str = "\n".join([f"--- {fname} ---\n{content}" for fname, content in loaded_content.items()])
-                  prompt_for_llm += f"\n\nContext:\n{context_str}"
-
-              prompt_for_llm += "\n\nThink step-by-step using <think> tags. Use <request_for_input> when you need clarification."
-
-              messages.append({"role": "user", "content": user_input})
-
-              try:
-                  resp = get_llm_response(
-                      prompt_for_llm,
-                      model=model,
-                      provider=provider,
-                      messages=messages[:-1],  # Don't duplicate the user message
-                      stream=True,
-                      npc=npc
-                  )
-
-                  response_stream = resp.get('response')
-                  full_response = ""
-                  request_found = False
-
-                  # Stream the response
-                  for chunk in response_stream:
-                      chunk_content = ""
-                      if hasattr(chunk, 'choices') and chunk.choices:
-                          delta = chunk.choices[0].delta
-                          if hasattr(delta, 'content') and delta.content:
-                              chunk_content = delta.content
-                      elif isinstance(chunk, dict):
-                          chunk_content = chunk.get("message", {}).get("content", "")
-
-                      if chunk_content:
-                          print(chunk_content, end='', flush=True)
-                          full_response += chunk_content
-
-                          # Check for request_for_input
-                          if "</request_for_input>" in full_response:
-                              request_found = True
-                              break
-
-                  print()  # newline after stream
-
-                  messages.append({"role": "assistant", "content": full_response})
-                  user_input = None  # Reset for next iteration
-
-              except KeyboardInterrupt:
-                  print(colored("\n\n--- Interrupted ---", "yellow"))
-                  interrupt_input = input("Your feedback: ").strip()
-                  if interrupt_input:
-                      user_input = interrupt_input
-                  else:
-                      user_input = None
-                  continue
-
-          except KeyboardInterrupt:
-              print("\nUse '/pq' to exit or continue.")
-              user_input = None
-              continue
-          except EOFError:
-              print("\nExiting PTI mode.")
-              break
-
-      context['output'] = "Exited PTI mode."
-      context['messages'] = messages
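The core mechanism in the deleted pti jinx is watching the accumulated stream for a closing </request_for_input> tag and breaking out of the stream at that point so the user can answer. A minimal standalone sketch of that accumulate-and-stop pattern (the function and the fake stream here are illustrative, not part of the npcpy API):

    def stream_until_request(chunks, stop_tag="</request_for_input>"):
        # Accumulate streamed text, stopping early once stop_tag appears.
        full = ""
        for piece in chunks:
            print(piece, end="", flush=True)
            full += piece
            if stop_tag in full:
                return full, True  # pause here so the user can respond
        return full, False

    demo = ["<think>checking assumptions...</think> ",
            "<request_for_input>Which file should I edit?</request_for_input>"]
    text, paused = stream_until_request(demo)  # paused == True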
--- a/npcsh/npc_team/jinxs/modes/wander.jinx
+++ /dev/null
@@ -1,186 +0,0 @@
-jinx_name: wander
-description: Experimental wandering mode - creative exploration with varied temperatures and random events
-inputs:
-  - problem: null
-  - environment: null
-  - low_temp: 0.5
-  - high_temp: 1.9
-  - interruption_likelihood: 1.0
-  - sample_rate: 0.4
-  - n_streams: 5
-  - include_events: false
-  - num_events: 3
-  - model: null
-  - provider: null
-
-steps:
-  - name: wander_explore
-    engine: python
-    code: |
-      import os
-      import random
-      from termcolor import colored
-
-      from npcpy.llm_funcs import get_llm_response
-
-      npc = context.get('npc')
-      messages = context.get('messages', [])
-
-      problem = context.get('problem')
-      environment = context.get('environment')
-      low_temp = float(context.get('low_temp', 0.5))
-      high_temp = float(context.get('high_temp', 1.9))
-      interruption_likelihood = float(context.get('interruption_likelihood', 1.0))
-      sample_rate = float(context.get('sample_rate', 0.4))
-      n_streams = int(context.get('n_streams', 5))
-      include_events = context.get('include_events', False)
-      num_events = int(context.get('num_events', 3))
-
-      model = context.get('model') or (npc.model if npc else 'gemini-1.5-pro')
-      provider = context.get('provider') or (npc.provider if npc else 'gemini')
-
-      if not problem:
-          context['output'] = """Usage: /wander <problem to explore>
-
-      Options:
-        --environment DESC    Metaphorical environment for wandering
-        --low-temp F          Low temperature (default: 0.5)
-        --high-temp F         High temperature (default: 1.9)
-        --n-streams N         Number of exploration streams (default: 5)
-        --include-events      Add random events during wandering
-
-      Example: /wander How might we reimagine urban transportation?"""
-          context['messages'] = messages
-          exit()
-
-      print(f"""
-      ██╗    ██╗ █████╗ ███╗   ██╗██████╗ ███████╗██████╗
-      ██║    ██║██╔══██╗████╗  ██║██╔══██╗██╔════╝██╔══██╗
-      ██║ █╗ ██║███████║██╔██╗ ██║██║  ██║█████╗  ██████╔╝
-      ██║███╗██║██╔══██║██║╚██╗██║██║  ██║██╔══╝  ██╔══██╗
-      ╚███╔███╔╝██║  ██║██║ ╚████║██████╔╝███████╗██║  ██║
-       ╚══╝╚══╝  ╚═╝  ╚═╝╚═╝  ╚═══╝╚═════╝ ╚══════╝╚═╝  ╚═╝
-
-      Experimental Wandering Mode
-      Problem: {problem}
-      Temperature range: {low_temp} - {high_temp}
-      Streams: {n_streams}
-      """)
-
-      # Generate environment if not provided
-      if not environment:
-          env_prompt = f"""Create a rich, metaphorical environment for wandering through while thinking about:
-      "{problem}"
-
-      The environment should:
-      1. Have distinct regions or areas
-      2. Include various elements and features
-      3. Be metaphorically related to the problem
-      4. Be described in 3-5 sentences
-
-      Provide only the description, no framing."""
-
-          print(colored("Generating wandering environment...", "cyan"))
-          resp = get_llm_response(env_prompt, model=model, provider=provider, temperature=0.7, npc=npc)
-          environment = str(resp.get('response', 'A vast conceptual landscape stretches before you.'))
-          print(f"\n{environment}\n")
-
-      # Event types for random encounters
-      event_types = ["encounter", "discovery", "obstacle", "insight", "shift", "memory"]
-
-      all_insights = []
-      wandering_log = []
-
-      for stream_idx in range(n_streams):
-          # Alternate between low and high temperature
-          if stream_idx % 2 == 0:
-              temp = low_temp
-              mode = "focused"
-          else:
-              temp = high_temp
-              mode = "creative"
-
-          print(colored(f"\n--- Stream {stream_idx + 1}/{n_streams} ({mode}, temp={temp}) ---", "cyan"))
-
-          # Generate random event if enabled
-          event_context = ""
-          if include_events and random.random() < sample_rate:
-              event_type = random.choice(event_types)
-              event_prompt = f"""In the environment: {environment}
-
-      While exploring the problem "{problem}", generate a {event_type} event.
-      The event should be metaphorical and relate to the problem.
-      Describe it in 2-3 sentences."""
-
-              event_resp = get_llm_response(event_prompt, model=model, provider=provider, temperature=0.9, npc=npc)
-              event = str(event_resp.get('response', ''))
-              event_context = f"\n\nEvent ({event_type}): {event}"
-              print(colored(f"[{event_type.upper()}] {event[:100]}...", "yellow"))
-
-          # Main wandering exploration
-          wander_prompt = f"""You are wandering through: {environment}
-
-      Problem being explored: "{problem}"
-      {event_context}
-
-      Previous insights: {all_insights[-3:] if all_insights else 'Starting fresh'}
-
-      In this {mode} exploration (temperature {temp}):
-      1. Let your mind wander through the conceptual space
-      2. Make unexpected connections
-      3. Notice what emerges from the wandering
-      4. Share any insights, questions, or realizations
-
-      Think freely and explore."""
-
-          resp = get_llm_response(wander_prompt, model=model, provider=provider, temperature=temp, npc=npc)
-          stream_output = str(resp.get('response', ''))
-          print(stream_output)
-
-          all_insights.append(stream_output)
-          wandering_log.append({
-              "stream": stream_idx + 1,
-              "mode": mode,
-              "temperature": temp,
-              "event": event_context if include_events else None,
-              "insight": stream_output
-          })
-
-          # Random interruption
-          if random.random() < interruption_likelihood * 0.2:
-              print(colored("\n[Pause for reflection...]", "magenta"))
-              reflect_prompt = f"Briefly reflect on what's emerged so far about: {problem}"
-              reflect_resp = get_llm_response(reflect_prompt, model=model, provider=provider, temperature=0.4, npc=npc)
-              print(colored(str(reflect_resp.get('response', ''))[:200], "magenta"))
-
-      # Synthesis
-      print(colored("\n--- Synthesizing Wanderings ---", "cyan"))
-
-      synthesis_prompt = f"""After wandering through "{environment}" exploring "{problem}":
-
-      All insights gathered:
-      {chr(10).join(all_insights)}
-
-      Synthesize what emerged from this wandering:
-      1. Key themes that appeared
-      2. Unexpected connections made
-      3. New questions raised
-      4. Potential directions to explore further"""
-
-      resp = get_llm_response(synthesis_prompt, model=model, provider=provider, temperature=0.5, npc=npc)
-      synthesis = str(resp.get('response', ''))
-
-      print("\n" + "="*50)
-      print(colored("WANDERING SYNTHESIS", "green", attrs=['bold']))
-      print("="*50)
-      print(synthesis)
-
-      context['output'] = synthesis
-      context['messages'] = messages
-      context['wander_result'] = {
-          'problem': problem,
-          'environment': environment,
-          'log': wandering_log,
-          'insights': all_insights,
-          'synthesis': synthesis
-      }
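Note how wander schedules its streams: even indices run at the low "focused" temperature and odd indices at the high "creative" one, so a five-stream run alternates focused/creative/focused/creative/focused. That scheduling logic in isolation (names here are illustrative):

    def stream_schedule(n_streams, low_temp=0.5, high_temp=1.9):
        # Yield (stream number, mode, temperature), alternating by parity.
        for i in range(n_streams):
            if i % 2 == 0:
                yield i + 1, "focused", low_temp
            else:
                yield i + 1, "creative", high_temp

    for n, mode, temp in stream_schedule(5):
        print(f"stream {n}: {mode} @ temp={temp}")
    # stream 1: focused @ temp=0.5, stream 2: creative @ temp=1.9, ...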
--- a/npcsh/npc_team/jinxs/utils/agent.jinx
+++ /dev/null
@@ -1,17 +0,0 @@
-jinx_name: agent
-description: Provides an LLM response with tool use enabled.
-inputs:
-  - query
-  - auto_process_tool_calls: True
-  - use_core_tools: True
-steps:
-  - name: get_agent_response
-    engine: python
-    code: |
-      response = npc.get_llm_response(
-          request=query,
-          messages=context.get('messages', []),
-          auto_process_tool_calls={{ auto_process_tool_calls | default(True) }},
-          use_core_tools={{ use_core_tools | default(True) }}
-      )
-      output = response.get('response', '')
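The double braces in the step above ({{ auto_process_tool_calls | default(True) }}) are Jinja template syntax, which implies the jinx runner renders a step's code with the declared inputs before executing it. A minimal sketch of that render-then-execute idea using the jinja2 library (how npcpy actually performs the rendering may differ):

    from jinja2 import Template

    step_code = "auto = {{ auto_process_tool_calls | default(True) }}"
    print(Template(step_code).render(auto_process_tool_calls=False))  # auto = False
    print(Template(step_code).render())  # auto = True (the default filter fills unset inputs)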
--- a/npcsh/npc_team/jinxs/utils/core/jinxs.jinx
+++ /dev/null
@@ -1,32 +0,0 @@
-jinx_name: jinxs
-description: Show available jinxs for the current NPC/Team
-inputs: []
-steps:
-  - name: list_jinxs
-    engine: python
-    code: |
-      output_lines = ["Available Jinxs:\n"]
-      jinxs_listed = set()
-
-      if hasattr(npc, 'team') and npc.team:
-          team = npc.team
-
-          if hasattr(team, 'jinxs_dict') and team.jinxs_dict:
-              output_lines.append(f"\n--- Team Jinxs ---\n")
-              for name, jinx_obj in sorted(team.jinxs_dict.items()):
-                  desc = getattr(jinx_obj, 'description', 'No description available.')
-                  output_lines.append(f"- /{name}: {desc}\n")
-                  jinxs_listed.add(name)
-
-      if hasattr(npc, 'jinxs_dict') and npc.jinxs_dict:
-          output_lines.append(f"\n--- NPC Jinxs for {npc.name} ---\n")
-          for name, jinx_obj in sorted(npc.jinxs_dict.items()):
-              if name not in jinxs_listed:
-                  desc = getattr(jinx_obj, 'description', 'No description available.')
-                  output_lines.append(f"- /{name}: {desc}\n")
-                  jinxs_listed.add(name)
-
-      if not jinxs_listed:
-          output = "No jinxs available for the current context."
-      else:
-          output = "".join(output_lines)
--- a/npcsh-1.1.13.data/data/npcsh/npc_team/agent.jinx
+++ /dev/null
@@ -1,17 +0,0 @@
-jinx_name: agent
-description: Provides an LLM response with tool use enabled.
-inputs:
-  - query
-  - auto_process_tool_calls: True
-  - use_core_tools: True
-steps:
-  - name: get_agent_response
-    engine: python
-    code: |
-      response = npc.get_llm_response(
-          request=query,
-          messages=context.get('messages', []),
-          auto_process_tool_calls={{ auto_process_tool_calls | default(True) }},
-          use_core_tools={{ use_core_tools | default(True) }}
-      )
-      output = response.get('response', '')
--- a/npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.jinx
+++ /dev/null
@@ -1,194 +0,0 @@
-jinx_name: alicanto
-description: Deep research mode - multi-perspective exploration with gold insights and cliff warnings
-npc: forenpc
-inputs:
-  - query: null
-  - num_npcs: 5
-  - depth: 3
-  - model: null
-  - provider: null
-  - max_steps: 20
-  - skip_research: true
-  - exploration: 0.3
-  - creativity: 0.5
-  - format: report
-
-steps:
-  - name: alicanto_research
-    engine: python
-    code: |
-      import os
-      from termcolor import colored
-
-      from npcpy.llm_funcs import get_llm_response
-      from npcpy.data.web import search_web
-      from npcpy.npc_compiler import NPC
-
-      npc = context.get('npc')
-      team = context.get('team')
-      messages = context.get('messages', [])
-
-      query = context.get('query')
-      num_npcs = int(context.get('num_npcs', 5))
-      depth = int(context.get('depth', 3))
-      max_steps = int(context.get('max_steps', 20))
-      skip_research = context.get('skip_research', True)
-      exploration = float(context.get('exploration', 0.3))
-      creativity = float(context.get('creativity', 0.5))
-      output_format = context.get('format', 'report')
-
-      model = context.get('model') or (npc.model if npc else 'gemini-1.5-pro')
-      provider = context.get('provider') or (npc.provider if npc else 'gemini')
-
-      if not query:
-          context['output'] = """Usage: /alicanto <research query>
-
-      Options:
-        --num-npcs N       Number of research perspectives (default: 5)
-        --depth N          Research depth (default: 3)
-        --max-steps N      Maximum research steps (default: 20)
-        --exploration F    Exploration factor 0-1 (default: 0.3)
-        --creativity F     Creativity factor 0-1 (default: 0.5)
-        --format FORMAT    Output: report|summary|full (default: report)
-
-      Example: /alicanto What are the latest advances in quantum computing?"""
-          context['messages'] = messages
-          exit()
-
-      print(f"""
-       █████╗ ██╗     ██╗ ██████╗ █████╗ ███╗   ██╗████████╗ ██████╗
-      ██╔══██╗██║     ██║██╔════╝██╔══██╗████╗  ██║╚══██╔══╝██╔═══██╗
-      ███████║██║     ██║██║     ███████║██╔██╗ ██║   ██║   ██║   ██║
-      ██╔══██║██║     ██║██║     ██╔══██║██║╚██╗██║   ██║   ██║   ██║
-      ██║  ██║███████╗██║╚██████╗██║  ██║██║ ╚████║   ██║   ╚██████╔╝
-      ╚═╝  ╚═╝╚══════╝╚═╝ ╚═════╝╚═╝  ╚═╝╚═╝  ╚═══╝   ╚═╝    ╚═════╝
-
-      Deep Research Mode
-      Query: {query}
-      Perspectives: {num_npcs} | Depth: {depth} | Max Steps: {max_steps}
-      """)
-
-      # Generate research perspectives
-      perspectives_prompt = f"""Generate {num_npcs} distinct research perspectives for investigating: "{query}"
-
-      For each perspective, provide:
-      1. Name (a descriptive title)
-      2. Approach (how this perspective would investigate)
-      3. Key questions to explore
-
-      Return as a numbered list."""
-
-      print(colored("Generating research perspectives...", "cyan"))
-      resp = get_llm_response(
-          perspectives_prompt,
-          model=model,
-          provider=provider,
-          npc=npc
-      )
-      perspectives = str(resp.get('response', ''))
-      print(perspectives)
-
-      # Conduct web research if not skipped
-      research_findings = ""
-      if not skip_research:
-          print(colored("\nConducting web research...", "cyan"))
-          try:
-              search_results = search_web(query, n_results=5)
-              if search_results:
-                  research_findings = "\n\nWeb Research Findings:\n"
-                  for i, result in enumerate(search_results[:5], 1):
-                      title = result.get('title', 'No title')
-                      snippet = result.get('snippet', result.get('body', ''))[:200]
-                      research_findings += f"\n{i}. {title}\n   {snippet}...\n"
-                  print(colored(f"Found {len(search_results)} sources", "green"))
-          except Exception as e:
-              print(colored(f"Web search error: {e}", "yellow"))
-
-      # Multi-step exploration from each perspective
-      all_insights = []
-      gold_insights = []  # Key valuable findings
-      cliff_warnings = []  # Potential pitfalls or caveats
-
-      for step in range(min(depth, max_steps)):
-          print(colored(f"\n--- Research Depth {step + 1}/{depth} ---", "cyan"))
-
-          explore_prompt = f"""Research query: "{query}"
-
-      Perspectives generated:
-      {perspectives}
-
-      {research_findings}
-
-      Previous insights: {all_insights[-3:] if all_insights else 'None yet'}
-
-      For depth level {step + 1}:
-      1. Explore deeper implications from each perspective
-      2. Identify GOLD insights (valuable, non-obvious findings) - mark with [GOLD]
-      3. Identify CLIFF warnings (pitfalls, caveats, risks) - mark with [CLIFF]
-      4. Connect insights across perspectives
-
-      Exploration factor: {exploration} (higher = more diverse exploration)
-      Creativity factor: {creativity} (higher = more novel connections)"""
-
-          resp = get_llm_response(
-              explore_prompt,
-              model=model,
-              provider=provider,
-              temperature=creativity,
-              npc=npc
-          )
-
-          step_insights = str(resp.get('response', ''))
-          print(step_insights)
-
-          # Extract gold and cliff markers
-          if '[GOLD]' in step_insights:
-              gold_insights.extend([line.strip() for line in step_insights.split('\n') if '[GOLD]' in line])
-          if '[CLIFF]' in step_insights:
-              cliff_warnings.extend([line.strip() for line in step_insights.split('\n') if '[CLIFF]' in line])
-
-          all_insights.append(step_insights)
-
-      # Generate final synthesis
-      print(colored("\n--- Synthesizing Research ---", "cyan"))
-
-      synthesis_prompt = f"""Synthesize research on: "{query}"
-
-      All insights gathered:
-      {chr(10).join(all_insights)}
-
-      Gold insights identified:
-      {chr(10).join(gold_insights) if gold_insights else 'None explicitly marked'}
-
-      Cliff warnings identified:
-      {chr(10).join(cliff_warnings) if cliff_warnings else 'None explicitly marked'}
-
-      Generate a {output_format} that:
-      1. Summarizes key findings
-      2. Highlights the most valuable insights (gold)
-      3. Notes important caveats and risks (cliffs)
-      4. Provides actionable conclusions"""
-
-      resp = get_llm_response(
-          synthesis_prompt,
-          model=model,
-          provider=provider,
-          npc=npc
-      )
-
-      final_report = str(resp.get('response', ''))
-      print("\n" + "="*60)
-      print(colored("ALICANTO RESEARCH REPORT", "green", attrs=['bold']))
-      print("="*60)
-      print(final_report)
-
-      context['output'] = final_report
-      context['messages'] = messages
-      context['alicanto_result'] = {
-          'query': query,
-          'perspectives': perspectives,
-          'insights': all_insights,
-          'gold': gold_insights,
-          'cliffs': cliff_warnings,
-          'report': final_report
-      }
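alicanto's gold/cliff bookkeeping above is a plain substring scan over each response: any line containing [GOLD] or [CLIFF] is collected and fed back into the synthesis prompt. The same extraction as a self-contained helper (a sketch mirroring the deleted code, not an npcpy function):

    def extract_markers(text, markers=("[GOLD]", "[CLIFF]")):
        # Collect lines tagged with each marker, as the alicanto jinx did inline.
        found = {m: [] for m in markers}
        for line in text.splitlines():
            for m in markers:
                if m in line:
                    found[m].append(line.strip())
        return found

    sample = "[GOLD] error-corrected qubits now scale\nfiller\n[CLIFF] vendor benchmarks overstate gains"
    print(extract_markers(sample))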