npcsh 1.1.12__py3-none-any.whl → 1.1.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +700 -377
- npcsh/alicanto.py +54 -1153
- npcsh/completion.py +206 -0
- npcsh/config.py +163 -0
- npcsh/corca.py +35 -1462
- npcsh/execution.py +185 -0
- npcsh/guac.py +31 -1986
- npcsh/npc_team/jinxs/code/sh.jinx +11 -15
- npcsh/npc_team/jinxs/modes/alicanto.jinx +186 -80
- npcsh/npc_team/jinxs/modes/corca.jinx +243 -22
- npcsh/npc_team/jinxs/modes/guac.jinx +313 -42
- npcsh/npc_team/jinxs/modes/plonk.jinx +209 -48
- npcsh/npc_team/jinxs/modes/pti.jinx +167 -25
- npcsh/npc_team/jinxs/modes/spool.jinx +158 -37
- npcsh/npc_team/jinxs/modes/wander.jinx +179 -74
- npcsh/npc_team/jinxs/modes/yap.jinx +258 -21
- npcsh/npc_team/jinxs/utils/chat.jinx +39 -12
- npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
- npcsh/npc_team/jinxs/utils/search.jinx +3 -3
- npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
- npcsh/npcsh.py +76 -20
- npcsh/parsing.py +118 -0
- npcsh/plonk.py +41 -329
- npcsh/pti.py +41 -201
- npcsh/spool.py +34 -239
- npcsh/ui.py +199 -0
- npcsh/wander.py +54 -542
- npcsh/yap.py +38 -570
- npcsh-1.1.14.data/data/npcsh/npc_team/alicanto.jinx +194 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/chat.jinx +44 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/cmd.jinx +44 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/corca.jinx +249 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/guac.jinx +317 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/plonk.jinx +214 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/pti.jinx +170 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/search.jinx +3 -3
- npcsh-1.1.14.data/data/npcsh/npc_team/sh.jinx +34 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/spool.jinx +161 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/usage.jinx +33 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/wander.jinx +186 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/yap.jinx +262 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/METADATA +1 -1
- npcsh-1.1.14.dist-info/RECORD +135 -0
- npcsh-1.1.12.data/data/npcsh/npc_team/alicanto.jinx +0 -88
- npcsh-1.1.12.data/data/npcsh/npc_team/chat.jinx +0 -17
- npcsh-1.1.12.data/data/npcsh/npc_team/corca.jinx +0 -28
- npcsh-1.1.12.data/data/npcsh/npc_team/guac.jinx +0 -46
- npcsh-1.1.12.data/data/npcsh/npc_team/plonk.jinx +0 -53
- npcsh-1.1.12.data/data/npcsh/npc_team/pti.jinx +0 -28
- npcsh-1.1.12.data/data/npcsh/npc_team/sh.jinx +0 -38
- npcsh-1.1.12.data/data/npcsh/npc_team/spool.jinx +0 -40
- npcsh-1.1.12.data/data/npcsh/npc_team/wander.jinx +0 -81
- npcsh-1.1.12.data/data/npcsh/npc_team/yap.jinx +0 -25
- npcsh-1.1.12.dist-info/RECORD +0 -126
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/agent.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/build.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compile.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compress.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca_example.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/help.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/init.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/jinxs.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/load_file.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/ots.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/python.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/roll.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sample.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/serve.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/set.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sleep.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sql.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/trigger.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/vixynt.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/WHEEL +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/entry_points.txt +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/top_level.txt +0 -0
npcsh/alicanto.py
CHANGED
@@ -1,1164 +1,65 @@
-import json
-import requests
-import argparse
-import os
-import subprocess
-import tempfile
-import random
-import shutil
-from typing import List, Dict, Any, Optional, Tuple
-from datetime import datetime
-from dataclasses import dataclass, asdict, field
-from pathlib import Path
-from concurrent.futures import ThreadPoolExecutor
-
-
-try:
-    from datasets import load_dataset
-except:
-    load_dataset = None
-from sklearn.feature_extraction.text import TfidfVectorizer
-from sklearn.metrics.pairwise import cosine_similarity
-
-
-
-from npcpy.tools import auto_tools
-from npcpy.llm_funcs import get_llm_response
-from npcpy.data.web import search_web
-from npcpy.npc_compiler import NPC, Team
-from npcsh._state import NPCSH_CHAT_MODEL, NPCSH_CHAT_PROVIDER
-
-from litellm.exceptions import Timeout, ContextWindowExceededError
-import pandas as pd
-import numpy as np
-
-from npcsh.wander import perform_single_wandering
-
-@dataclass
-class ResearchStep:
-    step: int
-    thought: str
-    action: str
-    outcome: str
-
-@dataclass
-class SubAgentTrace:
-    hypothesis: str
-    agent_name: str
-    agent_persona: str
-    steps: List[ResearchStep] = field(default_factory=list)
-    final_files: Dict[str, str] = field(default_factory=dict)
-    was_successful: bool = False
-
-@dataclass
-class Paper:
-    title: str = ""
-    abstract: str = ""
-    introduction: List[str] = field(default_factory=list)
-    methods: List[str] = field(default_factory=list)
-    results: List[str] = field(default_factory=list)
-    discussion: List[str] = field(default_factory=list)
-
-def create_file(filename: str, content: str) -> str:
-    filepath = os.path.abspath(filename)
-    if os.path.exists(filepath):
-        return f"Error: File '{filename}' already exists. Use append_to_file or replace_in_file to modify."
-    os.makedirs(os.path.dirname(filepath), exist_ok=True)
-    with open(filepath, 'w') as f:
-        f.write(content)
-    return f"File '{filename}' created successfully."
-
-def append_to_file(filename: str, content: str) -> str:
-    filepath = os.path.abspath(filename)
-    if not os.path.exists(filepath):
-        return f"Error: File '{filename}' not found. Use create_file first."
-    with open(filepath, 'a') as f:
-        f.write("\n" + content)
-    return f"Content appended to '{filename}'."
-
-def replace_in_file(filename: str, old_content: str, new_content: str) -> str:
-    filepath = os.path.abspath(filename)
-    if not os.path.exists(filepath):
-        return f"Error: File '{filename}' not found."
-    with open(filepath, 'r') as f:
-        file_contents = f.read()
-    file_contents = file_contents.replace(old_content, new_content)
-    with open(filepath, 'w') as f:
-        f.write(file_contents)
-    return f"Content in '{filename}' replaced."
-
-def read_file(filename: str) -> str:
-    filepath = os.path.abspath(filename)
-    if not os.path.exists(filepath):
-        return f"Error: File '{filename}' not found."
-    with open(filepath, 'r') as f:
-        return f.read()
-
-def list_files(directory: str = ".") -> List[str]:
-    return os.listdir(directory)
-
-
-DATASET_CACHE = None
-SEARCH_INDEX = None
-
-def load_and_combine_datasets() -> pd.DataFrame:
-    all_papers = []
-
-    try:
-        research_papers = load_dataset("ta-datalab/research_papers", split="train")
-        for paper in research_papers:
-            all_papers.append({
-                'title': paper.get('title', ''),
-                'abstract': paper.get('abstract', ''),
-                'authors': paper.get('authors', []),
-                'year': paper.get('year', None),
-                'venue': paper.get('venue', ''),
-                'url': paper.get('url', ''),
-                'paperId': paper.get('id', ''),
-                'citationCount': 0,
-                'source': 'research_papers'
-            })
-    except Exception as e:
-        print(f"Failed to load ta-datalab/research_papers: {e}")
-
-    try:
-        ml_papers = load_dataset("CShorten/ML-ArXiv-Papers", split="train")
-        for paper in ml_papers:
-            all_papers.append({
-                'title': paper.get('title', ''),
-                'abstract': paper.get('abstract', ''),
-                'authors': paper.get('authors', '').split(', ') if paper.get('authors') else [],
-                'year': paper.get('year', None),
-                'venue': 'arXiv',
-                'url': paper.get('url', ''),
-                'paperId': paper.get('id', ''),
-                'citationCount': 0,
-                'source': 'ml_arxiv'
-            })
-    except Exception as e:
-        print(f"Failed to load CShorten/ML-ArXiv-Papers: {e}")
-
-
-    df = pd.DataFrame(all_papers)
-    df = df.dropna(subset=['title', 'abstract'])
-    df = df[df['abstract'].str.len() > 50]
-    return df
-
-def create_search_index(df: pd.DataFrame):
-    search_texts = df['title'].fillna('') + ' ' + df['abstract'].fillna('')
-    vectorizer = TfidfVectorizer(max_features=10000, stop_words='english', ngram_range=(1, 2))
-    tfidf_matrix = vectorizer.fit_transform(search_texts)
-    return {'vectorizer': vectorizer, 'tfidf_matrix': tfidf_matrix, 'dataframe': df}
-
-def initialize_dataset_search():
-    global DATASET_CACHE, SEARCH_INDEX
-    if DATASET_CACHE is None:
-        DATASET_CACHE = load_and_combine_datasets()
-    if SEARCH_INDEX is None:
-        SEARCH_INDEX = create_search_index(DATASET_CACHE)
-    return SEARCH_INDEX
-
-import time
-
-LAST_S2_REQUEST_TIME = 0
-S2_RATE_LIMIT_DELAY = 30
-
-def search_semantic_scholar(query: str, limit: int = 10) -> List[Dict[str, Any]]:
-    global LAST_S2_REQUEST_TIME
-
-    api_key = os.environ.get('S2_API_KEY')
-    if not api_key:
-        return []
-
-    current_time = time.time()
-    time_since_last = current_time - LAST_S2_REQUEST_TIME
-
-    if time_since_last < S2_RATE_LIMIT_DELAY:
-        sleep_time = S2_RATE_LIMIT_DELAY - time_since_last
-        print(f"Rate limiting: still need {sleep_time:.2f}s before S2 request")
-        return None
-
-    LAST_S2_REQUEST_TIME = time.time()
-
-    url = "https://api.semanticscholar.org/graph/v1/paper/search"
-    headers = {"x-api-key": api_key}
-    params = {
-        "query": query,
-        "limit": limit,
-        "fields": "title,abstract,authors,year,citationCount,url,tldr"
-    }
-    print('Semantic SCholar calls')
-    try:
-        response = requests.get(url, headers=headers, params=params,
-                                timeout=30)
-        print('semantic scholar response')
-        response.raise_for_status()
-        return response.json().get('data', [])
-    except requests.exceptions.RequestException as e:
-        print(f"Semantic Scholar API error: {e}")
-        return []
-
-def search_papers(query: str, limit: int = 10) -> List[Dict]:
-    s2_results = search_semantic_scholar(query, limit)
-    if s2_results:
-        return s2_results
-
-    search_index = initialize_dataset_search()
-    query_vector = search_index['vectorizer'].transform([query])
-    similarities = cosine_similarity(query_vector, search_index['tfidf_matrix']).flatten()
-    top_indices = similarities.argsort()[-limit:][::-1]
-    results = [search_index['dataframe'].iloc[idx].to_dict() for idx in top_indices if similarities[idx] > 0.01]
-    return results
-
-def execute_shell_command(command: str) -> Dict[str, Any]:
-    try:
-        result = subprocess.run(command, shell=True, capture_output=True, text=True, timeout=60)
-        return {
-            "success": result.returncode == 0,
-            "stdout": result.stdout,
-            "stderr": result.stderr
-        }
-    except Exception as e:
-        return {"success": False, "stderr": str(e)}
-
-def update_paper(paper_state: Paper, section: str, content: str) -> Paper:
-    if not hasattr(paper_state, section):
-        return paper_state
-    target_section = getattr(paper_state, section)
-    if isinstance(target_section, list):
-        target_section.append(content)
-    else:
-        setattr(paper_state, section, content)
-    return paper_state
-
-def get_creative_ideas_for_stuck_agent(
-    problem_description: str,
-    npc: NPC,
-    model: str,
-    provider: str
-) -> str:
-    print(f"\n--- SUB-AGENT {npc.name} IS STUCK, INITIATING WANDER ---")
-    _, _, raw_brainstorm, _, _ = perform_single_wandering(
-        problem=problem_description,
-        npc=npc,
-        model=model,
-        provider=provider
-    )
-    return raw_brainstorm
-
-
-@dataclass
-class FileProvenance:
-    filename: str
-    step_history: List[Tuple[int, str, str, str]] = field(default_factory=list)
-
-def get_filesystem_state() -> Dict[str, str]:
-    import hashlib
-    files = {}
-    for f in os.listdir("."):
-        if os.path.isfile(f):
-            with open(f, 'rb') as file:
-                content = file.read()
-            files[f] = hashlib.md5(content).hexdigest()[:8]
-    return files
-
-def summarize_step(thought: str,
-                   action: str,
-                   outcome: str,
-                   fs_before: Dict[str, str],
-                   fs_after: Dict[str, str],
-                   file_provenance: Dict[str, FileProvenance],
-                   step_num: int,
-                   model: str,
-                   provider: str,
-                   npc: NPC) -> str:
-
-    import hashlib
-    import os
-
-
-    current_files = {}
-    for f in os.listdir("."):
-        if os.path.isfile(f):
-            with open(f, 'rb') as file:
-                content = file.read()
-            current_files[f] = {
-                'size': len(content),
-                'checksum': hashlib.md5(content).hexdigest()[:8]
-            }
-
-
-    for f in fs_after:
-        if f not in file_provenance:
-            file_provenance[f] = FileProvenance(filename=f)
-
-        change_summary = ""
-        if f not in fs_before:
-            change_summary = f"Created with {current_files[f]['size']} bytes"
-            file_provenance[f].step_history.append((step_num, "CREATED", fs_after[f], change_summary))
-        elif fs_before.get(f) != fs_after[f]:
-            change_summary = f"Modified to {current_files[f]['size']} bytes"
-            file_provenance[f].step_history.append((step_num, "MODIFIED", fs_after[f], change_summary))
-
-
-    provenance_summary = []
-    for filename, prov in file_provenance.items():
-        history = "; ".join([f"Step {step}: {action} ({checksum}) - {changes}" for step, action, checksum, changes in prov.step_history])
-        provenance_summary.append(f"{filename}: {history}")
-
-    prompt = f"""AGENT'S REASONING: {thought}
-
-AGENT'S ACTION: {action}
-AGENT'S CLAIMED OUTCOME: {outcome}
-
-COMPLETE FILE PROVENANCE:
-{chr(10).join(provenance_summary)}
-
-CURRENT FILESYSTEM:
-Files: {list(current_files.keys())}
-Details: {current_files}
-
-Explain plainly what happened and whether the actions produced any measurable effects. If the agent thinks then it is likely time to direct it to
-carry out a specific action.
-
-Return JSON with "summary" and "next_step" keys.""" + """
-
-{
-"summary": " a summary of what they did and claimed and the extent to which it produced the intended outcome .",
-"next_step": "The concrete next step for the agent to carry out in their research.
-
-}
-"""
-
-    response = get_llm_response(prompt, model=model, provider=provider, npc=npc, format='json')
-    summary_data = response.get('response')
-
-    return summary_data
-
-
-
-
-from sqlalchemy import create_engine, Column, Integer, String, Text, DateTime
-from sqlalchemy.ext.declarative import declarative_base
-from sqlalchemy.orm import sessionmaker
-import csv
-import os
-from datetime import datetime
-
-Base = declarative_base()
-
-class AlicantoPersona(Base):
-    __tablename__ = 'alicanto_personas'
-
-    id = Column(Integer, primary_key=True, autoincrement=True)
-    name = Column(String(255))
-    birth_year = Column(Integer)
-    location = Column(Text)
-    leader = Column(Text)
-    interests = Column(Text)
-    worldview = Column(Text)
-    approach = Column(Text)
-    persona_text = Column(Text)
-    created_at = Column(DateTime, default=datetime.utcnow)
-
-def save_persona_to_databases(persona_data: dict):
-    """Save persona to both SQLite and CSV for persistence"""
-
-
-    db_path = os.path.expanduser("~/npcsh_history.db")
-    engine = create_engine(f'sqlite:///{db_path}')
-    Base.metadata.create_all(engine)
-    Session = sessionmaker(bind=engine)
-    session = Session()
-
-
-    persona = AlicantoPersona(
-        name=persona_data.get('name'),
-        birth_year=persona_data.get('birth_year'),
-        location=persona_data.get('location'),
-        leader=persona_data.get('leader'),
-        interests=json.dumps(persona_data.get('interests', [])),
-        worldview=persona_data.get('worldview'),
-        approach=persona_data.get('approach'),
-        persona_text=persona_data.get('persona_text')
-    )
-
-    session.add(persona)
-    session.commit()
-    session.close()
-
-
-    csv_dir = os.path.expanduser("~/.npcsh/npc_team")
-    os.makedirs(csv_dir, exist_ok=True)
-    csv_path = os.path.join(csv_dir, "alicanto_personas.csv")
-
-    file_exists = os.path.exists(csv_path)
-    with open(csv_path, 'a', newline='') as csvfile:
-        fieldnames = ['name', 'birth_year', 'location', 'leader', 'interests',
-                      'worldview', 'approach', 'persona_text', 'created_at']
-        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
-
-        if not file_exists:
-            writer.writeheader()
-
-        writer.writerow({
-            **persona_data,
-            'interests': json.dumps(persona_data.get('interests', [])),
-            'created_at': datetime.now().isoformat()
-        })
-
-def generate_sub_agent_personas(topic: str, num_agents: int, model: str, provider: str, npc: NPC) -> List[Dict[str, str]]:
-    personas = []
-    for i in range(num_agents):
-        birth_year = random.randint(-32665, 32665)
-        teen_year = birth_year + 16
-
-        json_template = """
-{
-"name": "culturally appropriate full name for someone born in """ + str(birth_year) + """",
-"location": "specific city/region where they were born in """ + str(birth_year) + """",
-"leader": "who ruled their region when they were 16 years old in """ + str(teen_year) + """",
-"interests": ["3-5 specific interests/obsessions they had as a teenager in """ + str(teen_year) + """"],
-"worldview": "one sentence describing their fundamental perspective shaped by growing up in that era",
-"approach": "how their historical background influences their way of thinking"
-}
-"""
-
-        prompt = f"Generate a unique persona for someone born in {birth_year}. Return JSON:\n{json_template}\n\nMake this person feel real and historically grounded. Consider: technological context, cultural movements, economic conditions, wars, discoveries happening in {teen_year}."
-
-
-        response = get_llm_response(
-            prompt,
-            model=model,
-            provider=provider,
-            npc=npc,
-            format='json'
-        )
-
-        new_persona = response.get('response')
-        if isinstance(new_persona, str):
-            new_persona = json.loads(new_persona)
-
-        persona_text = f"You are {new_persona.get('name')}, born {birth_year} in {new_persona.get('location')}, came of age under {new_persona.get('leader')}. Your interests were: {', '.join(new_persona.get('interests', []))}. {new_persona.get('worldview')} {new_persona.get('approach')}"
-
-
-        persona_data = {
-            'name': new_persona.get('name'),
-            'birth_year': birth_year,
-            'location': new_persona.get('location'),
-            'leader': new_persona.get('leader'),
-            'interests': new_persona.get('interests', []),
-            'worldview': new_persona.get('worldview'),
-            'approach': new_persona.get('approach'),
-            'persona_text': persona_text
-        }
-
-
-        save_persona_to_databases(persona_data)
-
-        personas.append({
-            "name": new_persona.get('name'),
-            "persona": persona_text
-        })
-
-    return personas
-
-
-def create_sub_agent(
-    model: str,
-    provider: str,
-    hypothesis: str,
-    name: str,
-    persona: str
-) -> NPC:
-
-    def wander_wrapper(problem_description: str) -> str:
-        return get_creative_ideas_for_stuck_agent(
-            problem_description,
-            agent,
-            model,
-            provider
-        )
-
-
-
-
-
-
-
-    tools = [
-        create_file,
-        append_to_file,
-        replace_in_file,
-        read_file,
-        list_files,
-        execute_shell_command,
-        search_papers,
-        wander_wrapper,
-        search_web
-    ]
-
-    agent = NPC(
-        name=name,
-        model=model,
-        provider=provider,
-        primary_directive=persona,
-        tools=tools
-    )
-
-    return agent
-
-
-
-def sub_agent_trace(hypothesis: str,
-                    persona: Dict[str, str],
-                    user_query: str,
-                    model: str,
-                    provider: str,
-                    max_steps: int = 50) -> SubAgentTrace:
-    agent_name = persona.get("name")
-    agent_persona = persona.get("persona")
-    agent = create_sub_agent(model, provider, hypothesis, agent_name, agent_persona)
-
-    trace = SubAgentTrace(hypothesis=hypothesis, agent_name=agent_name, agent_persona=agent_persona)
-    summarized_history = []
-    file_provenance = {}
-    created_files = set()
-    summary = {}
-
-    major_step = 0
-
-    while major_step < max_steps:
-        fs_before = get_filesystem_state()
-
-        provenance_summary = []
-        for filename, prov in file_provenance.items():
-            history = "; ".join([f"Step {step}: {action} ({checksum}) - {changes}" for step, action, checksum, changes in prov.step_history])
-            provenance_summary.append(f"{filename}: {history}")
-
-        history_str = "\n".join(summarized_history)
-        next_step_text = f"This is the next step suggested by your advisor. : BEGIN NEXT_STEP: {summary.get('next_step')} END NEXT STEP" if summary else ""
-
-        initial_prompt = f"""
-Test the following hypothesis: '{hypothesis}' as related to the user query: '{user_query}'.
-Only focus on your specific hypothesis, other agents are being tasked with other aspects of the problem.
-
-Use bash commands to carry out research through the execute_shell_command.
-Adjust files with `replace_in_file` and use `read_file` and `list_files` to verify file states and file creation.
-Create files with create_file()
-
-Test with execute_shell_command when needed
-Get unstuck with wander_wrapper
-
-When you have a definitive result, say RESEARCH_COMPLETE.
-
-FILE PROVENANCE HISTORY:
-{chr(10).join(provenance_summary)}
-
-CURRENT FILES: {list(fs_before.keys())}
-
-COMPLETE ACTION HISTORY:
-BEGIN HISTORY
-`
-{history_str}
-`
-END HISTORy
-
-What specific action will you take next to test your hypothesis?
-AVAILABLE TOOLS: create_file, append_to_file, replace_in_file, read_file, list_files, execute_shell_command, wander_wrapper, search_web .
-
-Do not repeat actions. Do not constantly think unless you need to brainstorm or wander. Use `execute_shell_command` for anything complicated beyond a simple file read, replace, create.
-Use `search_web` with provider of {os.environ.get('NPCSH_SEARCH_PROVIDER') } to look up items if you are struggling to understand why errors are happening with code execution.
-Do not waste time re-verifying the same package versins or libraries when you can explicitly look up usage patterns that are up to date. Do not assume that your generated code will be correct the first time or up to date
-amd if you are finding irreconcilable errors that you cannot seem to figure out locally then you need to search. For example, if you assume a python package you installed like `sqlite-vector' is importable like
-"from sqlite.vector" and keep running into import or module errors, it it probably because you need to look up the correct way to access the library. It may have been that you would need to import "sqlite_vector" or "sql_vector".
-There is no way to know this information a priori and instead of wasting time verifying pip installations, its better to look for actual usage patterns, either by inspecting the source code of the pip package itself or simply by
-searching the web.
-
-This should guide your next steps:
-
-`{next_step_text} `
-
-Your goal is to research. To set up experiments, create figures that can be included in a latex document report, and produce data outputs as well in csvs for verification and reusability and reproducibility.
-
-
-Do not use seaborn. On matplotlib plots, do not use grids or titles.
 """
-
-        print(f"\n{'='*80}")
-        print(f"AUTONOMOUS LOOP {major_step + 1} FOR {agent_name}")
-        print(f"{'='*80}")
-        print(f"HYPOTHESIS: {hypothesis}")
-        print(f"FILES BEFORE: {list(fs_before.keys())}")
-
-        messages = []
-        all_thoughts = []
-        all_actions = []
-        all_outcomes = []
-
-        for micro_step in range(11):
-            print(f"\n--- Micro-step {micro_step + 1}/4 ---")
-
-            if micro_step == 0:
-                current_prompt = initial_prompt
-                print("SENDING INITIAL RESEARCH PROMPT")
-            else:
-                current_prompt = "Continue your work. What's your next action?"
-                print(f"SENDING CONTINUATION PROMPT: '{current_prompt}'")
-            try:
-                response = agent.get_llm_response(current_prompt,
-                                                  messages=messages,
-                                                  auto_process_tool_calls=True)
-            except Timeout:
-                continue
-            except ContextWindowExceededError:
-                break
-            messages = response.get('messages', [])
-
-            thought = response.get('response')
-            if thought is None:
-                thought = ''
-                print("WARNING: No thought received from agent")
-            else:
-                print(f"AGENT THOUGHT: {thought[:200]}{'...' if len(thought) > 200 else ''}")
-            all_thoughts.append(thought)
-
-            if thought and "RESEARCH_COMPLETE" in thought.upper():
-                print(f"✓ RESEARCH COMPLETED at micro-step {micro_step + 1}")
-                break
-
-            if response.get('tool_results'):
-                tool_results = response['tool_results']
-                print(f"TOOLS USED: {len(tool_results)} tool(s)")
-
-                for i, res in enumerate(tool_results):
-                    tool_name = res.get('tool_name')
-                    args = res.get('arguments', {})
-                    result = res.get('result')
-
-                    print(f" Tool {i+1}: {tool_name}({args})")
-                    for arg, item in args.items():
-                        print(f" {arg}: {item}")
-                    if isinstance(result, str) and len(result) > 150:
-                        print(f" Result: {result[:150]}...")
-                    else:
-                        print(f" Result: {result}")
-
-                action_str = ", ".join([f"{res['tool_name']}({res.get('arguments', {})})" for res in tool_results])
-                outcomes = []
-
-                for res in tool_results:
-                    if res['tool_name'] in ['create_file', 'append_to_file', 'replace_in_file']:
-                        filename = res.get('arguments', {}).get('filename')
-                        if filename:
-                            created_files.add(filename)
-                            if os.path.exists(filename):
-                                trace.was_successful = True
-                                print(f" ✓ File created: {filename}")
-
-                    result_data = res.get('result')
-                    outcomes.append(str(result_data))
-
-                outcome_str = " | ".join(outcomes)
-                all_actions.append(action_str)
-                all_outcomes.append(outcome_str)
-            else:
-                print("NO TOOLS USED - Agent only provided reasoning")
-
-        fs_after = get_filesystem_state()
-        print(f"\nFILES AFTER: {list(fs_after.keys())}")
-
-        new_files = set(fs_after.keys()) - set(fs_before.keys())
-        if new_files:
-            print(f"NEW FILES CREATED: {list(new_files)}")
-
-        combined_thought = " ".join(all_thoughts)
-        combined_action = " | ".join(filter(None, all_actions))
-        combined_outcome = " | ".join(filter(None, all_outcomes))
-
-        print(f"\nCOMPRESSING AUTONOMOUS SESSION...")
-        print(f"THOUGHTS: {len(all_thoughts)} messages")
-        print(f"ACTIONS: {len(all_actions)} tool uses")
-
-        summary = summarize_step(combined_thought,
-                                 combined_action,
-                                 combined_outcome,
-                                 fs_before,
-                                 fs_after,
-                                 file_provenance,
-                                 major_step + 1,
-                                 model,
-                                 provider,
-                                 agent)
-
-        print(f"SUMMARY: {summary.get('summary', 'No summary')}")
-        print(f"NEXT STEP: {summary.get('next_step', 'No next step')}")
-
-        summarized_history.append(f"Step {major_step + 1}: {summary.get('summary')} ")
-
-        trace.steps.append(ResearchStep(
-            step=major_step + 1,
-            thought=combined_thought,
-            action=combined_action,
-            outcome=combined_outcome
-        ))
-
-        if combined_thought and "RESEARCH_COMPLETE" in combined_thought.upper():
-            print(f"✓ RESEARCH COMPLETED FOR {agent_name}")
-            break
-
-        major_step += 1
-
-    for filename in created_files:
-        if os.path.exists(filename):
-            trace.final_files[filename] = read_file(filename)
-
-    print(f"\nFINAL RESULTS FOR {agent_name}:")
-    print(f"SUCCESS: {trace.was_successful}")
-    print(f"FILES CREATED: {list(trace.final_files.keys())}")
-
-    return trace
-
-
-
-
-def save_trace_for_training(
-
-
-    traces: List[SubAgentTrace],
-    output_dir: str = "./alicanto_traces"
-):
-    os.makedirs(output_dir, exist_ok=True)
-    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    filename = f"trace_{timestamp}.csv"
-    filepath = os.path.join(output_dir, filename)
-
-    flattened_data = []
-    for trace in traces:
-        for step in trace.steps:
-            flattened_data.append({
-                "hypothesis": trace.hypothesis,
-                "agent_name": trace.agent_name,
-                "agent_persona": trace.agent_persona,
-                "was_successful": trace.was_successful,
-                "step": step.step,
-                "thought": step.thought,
-                "action": step.action,
-                "outcome": step.outcome,
-                "final_files": json.dumps(trace.final_files)
-            })
-
-    if not flattened_data:
-        return
-
-    df = pd.DataFrame(flattened_data)
-    df.to_csv(filepath, index=False)
-
-    print(f"Full research trace saved to {filepath}")
-    return filepath
-def compress_traces_for_synthesis(traces: List[SubAgentTrace], model: str, provider: str, npc: NPC) -> str:
-    compressed_summaries = []
-
-    for trace in traces:
-        steps_summary = []
-        for step in trace.steps[-3:]: # Only last 3 steps
-            if step.thought:
-                thought_short = step.thought[:100] + "..." if len(step.thought) > 100 else step.thought
-            else:
-                thought_short = "No thought recorded"
-
-            if step.action:
-                action_short = step.action[:100] + "..." if len(step.action) > 100 else step.action
-            else:
-                action_short = "No action taken"
-
-            steps_summary.append(f"Step {step.step}: {thought_short} | {action_short}")
-
-        files_created = list(trace.final_files.keys()) if trace.final_files else []
-
-        compressed_summaries.append({
-            "agent": trace.agent_name,
-            "hypothesis": trace.hypothesis,
-            "success": trace.was_successful,
-            "key_steps": steps_summary,
-            "files_created": files_created,
-            "final_file_count": len(files_created)
-        })
-
-    return json.dumps(compressed_summaries, indent=2)
-def format_paper_as_latex(paper: Paper, authors: List[str]) -> str:
-    author_string = ", ".join(authors)
-    introduction_content = "\n\n".join(paper.introduction)
-    methods_content = "\n\n".join(paper.methods)
-    results_content = "\n\n".join(paper.results)
-    discussion_content = "\n\n".join(paper.discussion)
-
-    return f"""
-\\documentclass{{article}}
-\\title{{{paper.title}}}
-\\author{{{author_string}}}
-\\date{{\\today}}
-\\begin{{document}}
-\\maketitle
-\\begin{{abstract}}
-{paper.abstract}
-\\end{{abstract}}
-\\section*{{Introduction}}
-{introduction_content}
-\\section*{{Methods}}
-{methods_content}
-\\section*{{Results}}
-{results_content}
-\\section*{{Discussion}}
-{discussion_content}
-\\end{{document}}
-"""
-
-def alicanto(
-    query: str,
-    num_agents: int = 3,
-    max_steps: int = 10,
-    model: str = NPCSH_CHAT_MODEL,
-    provider: str = NPCSH_CHAT_PROVIDER,
-    skip_research: bool = True,
-    **kwargs
-) -> None:
-
-    print("=== ALICANTO RESEARCH SYSTEM STARTING ===")
-    print(f"Query: {query}")
-
-    if skip_research:
-        print("SKIPPING RESEARCH - GOING DIRECTLY TO PAPER WRITING")
-    else:
-        print(f"Agents: {num_agents}, Max steps per agent: {max_steps}")
-
-    print(f"Model: {model}, Provider: {provider}")
-
-    def wander_wrapper_coordinator(problem_description: str) -> str:
-        return get_creative_ideas_for_stuck_agent(
-            problem_description,
-            alicanto_coordinator,
-            model,
-            provider
-        )
-
-    alicanto_coordinator = NPC(
-        name="Alicanto",
-        model=model,
-        provider=provider,
-        primary_directive="You are Alicanto the mythical bird. You research topics iteratively by writing to LaTeX files and searching for more information.",
-        tools=[
-            create_file,
-            append_to_file,
-            replace_in_file,
-            read_file,
-            list_files,
-            execute_shell_command,
-            search_papers,
-            search_web,
-            wander_wrapper_coordinator
-        ]
-    )
-
-    messages = []
-    summarized_history = []
-    file_provenance = {}
-
-    if not skip_research:
-        print("\n--- Step 1: Generating hypotheses and personas ---")
-
-        one_shot_example_hypotheses = """
-"example_input": "Investigate the impact of quantum annealing on protein folding.",
-"example_output": {
-"hypotheses": [
-"Implementing a quantum annealer simulation for a small peptide chain will identify lower energy states faster than a classical simulated annealing approach.",
-"The choice of qubit connectivity in the quantum annealer's topology significantly impacts the final folded state's accuracy for proteins with long-range interactions.",
-"Encoding the protein's residue interactions as a QUBO problem is feasible for structures up to 50 amino acids before qubit requirements become prohibitive."
-]
-}
-"""
-        hypotheses_prompt = f"""Based on the following research topic, generate a list of {num_agents} distinct, specific, and empirically testable hypotheses.
-
-TOPIC: "{query}"
-
-Return a JSON object with a single key "hypotheses" which is a list of strings.
+alicanto - Deep research mode CLI entry point
 
-
-{one_shot_example_hypotheses}
-
-Return ONLY the JSON object.
+This is a thin wrapper that executes the alicanto.jinx through the jinx mechanism.
 """
-
-
-
-            hypotheses_prompt,
-            model=model,
-            provider=provider,
-            npc=alicanto_coordinator,
-            format='json'
-        )
-
-        if not response or not response.get('response'):
-            print("ERROR: Failed to get hypotheses response")
-            return
-
-        hypotheses = response.get('response').get('hypotheses')
-        if not hypotheses:
-            print("ERROR: No hypotheses generated")
-            return
-
-        print(f"Generated {len(hypotheses)} hypotheses:")
-        for i, h in enumerate(hypotheses):
-            print(f" {i+1}. {h}")
-
-        print("\nGenerating agent personas...")
-        personas = generate_sub_agent_personas(
-            query,
-            num_agents,
-            model,
-            provider,
-            alicanto_coordinator
-        )
-
-        if not personas:
-            print("ERROR: No personas generated")
-            return
-
-        print(f"Generated {len(personas)} personas:")
-        for i, p in enumerate(personas):
-            print(f" {i+1}. {p.get('name')}: {p.get('persona')}")
-
-        print("\n--- Step 2: Delegating hypotheses to Sub-Agents for serial execution ---")
-
-        all_traces = []
-        for i, hypo in enumerate(hypotheses):
-            persona = personas[i % len(personas)]
-            print(f"\nStarting sub-agent {i+1}/{len(hypotheses)}")
-            trace = sub_agent_trace(
-                hypo,
-                persona,
-                query,
-                model,
-                provider,
-                max_steps
-            )
-            all_traces.append(trace)
-            print(f"Sub-agent {i+1} completed. Success: {trace.was_successful}")
-
-        print(f"\nAll sub-agents completed. Saving traces...")
-        save_trace_for_training(all_traces)
-        compressed_research = compress_traces_for_synthesis(all_traces, model, provider, alicanto_coordinator)
-
-        print("\n--- Step 3: Creating initial paper structure ---")
-
-        author_list = [trace.agent_name for trace in all_traces]
-        author_string = ", ".join(author_list)
-
-        initial_latex = f"""\\documentclass{{article}}
-\\title{{% TODO: TITLE}}
-\\author{{{author_string}}}
-\\date{{\\today}}
-\\begin{{document}}
-\\maketitle
-
-\\begin{{abstract}}
-% TODO: ABSTRACT
-\\end{{abstract}}
-
-\\section{{Introduction}}
-% TODO: INTRODUCTION
-
-\\section{{Methods}}
-% TODO: METHODS
-
-\\section{{Results}}
-% TODO: RESULTS
-
-\\section{{Discussion}}
-% TODO: DISCUSSION
-
-\\end{{document}}"""
-
-        create_file("paper.tex", initial_latex)
-    else:
-        print("\n--- Skipping research phase - loading existing data ---")
-
-        if os.path.exists("paper.tex"):
-            print("Found existing paper.tex")
-        else:
-            print("No existing paper.tex found, creating basic template...")
-            basic_latex = f"""\\documentclass{{article}}
-\\title{{{query.title()}}}
-\\author{{Research Team}}
-\\date{{\\today}}
-\\begin{{document}}
-\\maketitle
-
-\\begin{{abstract}}
-% TODO: ABSTRACT
-\\end{{abstract}}
-
-\\section{{Introduction}}
-% TODO: INTRODUCTION
-
-\\section{{Methods}}
-% TODO: METHODS
-
-\\section{{Results}}
-% TODO: RESULTS
-
-\\section{{Discussion}}
-% TODO: DISCUSSION
-
-\\end{{document}}"""
-            create_file("paper.tex", basic_latex)
-
-        compressed_research = f"Research topic: {query}. Previous research data should be available in local files."
-
-    print("\n--- Step 4: Iterative paper writing ---")
-
-    for section_round in range(25):
-        print(f"\n--- Section Round {section_round + 1} ---")
-
-        fs_before = get_filesystem_state()
-
-        provenance_summary = []
-        for filename, prov in file_provenance.items():
-            history = "; ".join([f"Step {step}: {action} ({checksum}) - {changes}" for step, action, checksum, changes in prov.step_history])
-            provenance_summary.append(f"{filename}: {history}")
-
-        history_str = "\n".join(summarized_history)
-        current_paper = read_file("paper.tex")
-
-        initial_prompt = f"""You are writing a research paper about: "{query}" located at ./paper.tex
-
-Research data from sub-agents: {compressed_research}
-
-Current paper content:
-{current_paper}
-
-FILE PROVENANCE HISTORY:
-{chr(10).join(provenance_summary)}
-
-COMPLETE ACTION HISTORY:
-BEGIN HISTORY
-{history_str}
-END HISTORY
-
-Ensure the paper contains the following sections and that they have a coherent narrative by the end of your work.
-work iteratively, so do not worry about making it all in one step.
-
-SECTIONS: Title, Abstract, Intro, Methods, Results, Discussion, Conclusions,
+import argparse
+import os
+import sys
 
-
+from npcsh._state import setup_shell
 
-You must ensure citations are properly included in your results and cited with the \cite{{author_year}} format , keeping in mind
-to also start and maintain a .bib file separate from any currently provided. be sure to reference this as well.
 
-
-
-
-
-
-
+def main():
+    parser = argparse.ArgumentParser(description="alicanto - Deep research with multiple perspectives")
+    parser.add_argument("query", nargs="*", help="Research query")
+    parser.add_argument("--model", "-m", type=str, help="LLM model to use")
+    parser.add_argument("--provider", "-p", type=str, help="LLM provider to use")
+    parser.add_argument("--num-npcs", type=int, default=5, help="Number of research perspectives")
+    parser.add_argument("--depth", type=int, default=3, help="Research depth")
+    parser.add_argument("--max-steps", type=int, default=20, help="Maximum research steps")
+    parser.add_argument("--exploration", type=float, default=0.3, help="Exploration factor (0-1)")
+    parser.add_argument("--creativity", type=float, default=0.5, help="Creativity factor (0-1)")
+    parser.add_argument("--format", type=str, default="report", choices=["report", "summary", "full"],
+                        help="Output format")
+    parser.add_argument("--with-research", action="store_true", help="Include web research")
+    args = parser.parse_args()
 
-
+    if not args.query:
+        parser.print_help()
+        sys.exit(1)
+
+    # Setup shell to get team and default NPC
+    command_history, team, default_npc = setup_shell()
+
+    if not team or "alicanto" not in team.jinxs_dict:
+        print("Error: alicanto jinx not found. Ensure npc_team/jinxs/modes/alicanto.jinx exists.")
+        sys.exit(1)
+
+    # Build context for jinx execution
+    context = {
+        "npc": default_npc,
+        "team": team,
+        "messages": [],
+        "query": " ".join(args.query),
+        "model": args.model,
+        "provider": args.provider,
+        "num_npcs": args.num_npcs,
+        "depth": args.depth,
+        "max_steps": args.max_steps,
+        "exploration": args.exploration,
+        "creativity": args.creativity,
+        "format": args.format,
+        "skip_research": not args.with_research,
+    }
 
-
+    # Execute the jinx
+    alicanto_jinx = team.jinxs_dict["alicanto"]
+    result = alicanto_jinx.execute(context=context, npc=default_npc)
 
-
+    if isinstance(result, dict) and result.get("output"):
+        print(result["output"])
 
-        all_thoughts = []
-        all_actions = []
-        all_outcomes = []
 
-
-
-
-        if micro_step == 0:
-            current_prompt = initial_prompt
-        else:
-            current_prompt = f"continue "
-
-        try:
-            response = alicanto_coordinator.get_llm_response(
-                current_prompt,
-                messages=messages,
-                auto_process_tool_calls=True
-            )
-            print('response: ', response['response'])
-            print('tool calls: ', response['tool_calls'])
-            print('tool results: ', response['tool_results'])
-
-            messages = response.get('messages', [])
-
-            thought = response.get('response') or "" # Handle None case
-            all_thoughts.append(thought)
-
-            if response.get('tool_results'):
-                tool_results = response['tool_results']
-                action_str = ", ".join([f"{res['tool_name']}({res.get('arguments', {})})" for res in tool_results])
-                outcomes = [str(res.get('result', '')) for res in tool_results]
-                outcome_str = " | ".join(outcomes)
-                all_actions.append(action_str)
-                all_outcomes.append(outcome_str)
-
-        except (Timeout, ContextWindowExceededError):
-            break
-        except Exception as e:
-            print(f"Error in micro-step: {e}")
-            break
-
-        fs_after = get_filesystem_state()
-
-        combined_thought = " ".join(filter(None, all_thoughts)) # Filter out None values
-        combined_action = " | ".join(filter(None, all_actions))
-        combined_outcome = " | ".join(filter(None, all_outcomes))
-
-        print(f"\nCOMPRESSING WRITING SESSION...")
-        print(f"THOUGHTS: {len(all_thoughts)} messages")
-        print(f"ACTIONS: {len(all_actions)} tool uses")
-
-        summary = summarize_step(combined_thought,
-                                 combined_action,
-                                 combined_outcome,
-                                 fs_before,
-                                 fs_after,
-                                 file_provenance,
-                                 section_round + 1,
-                                 model,
-                                 provider,
-                                 alicanto_coordinator)
-
-        print(f"SUMMARY: {summary.get('summary', 'No summary')}")
-        print(f"NEXT STEP: {summary.get('next_step', 'No next step')}")
-
-        summarized_history.append(f"Round {section_round + 1}: {summary.get('summary')} ")
-
-    final_paper = read_file("paper.tex")
-    print(f"\n{'='*60}")
-    print("FINAL RESEARCH PAPER (LATEX)")
-    print("="*60)
-    print(final_paper)
-    print(f"\nPaper saved as paper.tex")
-
-
-
-def main():
-    parser = argparse.ArgumentParser(description="Alicanto Multi-Agent Research System")
-    parser.add_argument("topic", help="Research topic to investigate")
-    parser.add_argument("--num-agents", type=int, default=3, help="Number of sub-agents to run.")
-    parser.add_argument("--max-steps", type=int, default=10, help="Maximum steps for each sub-agent.")
-    parser.add_argument("--model", default=NPCSH_CHAT_MODEL, help="LLM model to use")
-    parser.add_argument("--provider", default=NPCSH_CHAT_PROVIDER, help="LLM provider to use")
-    parser.add_argument("--skip-research", action="store_true", help="Skip research phase and go directly to paper writing")
-
-    args = parser.parse_args()
-
-    alicanto(
-        query=args.topic,
-        num_agents=args.num_agents,
-        max_steps=args.max_steps,
-        model=args.model,
-        provider=args.provider,
-        skip_research=args.skip_research
-    )
+if __name__ == "__main__":
+    main()
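
Usage note: a minimal sketch of driving the new 1.1.14 mode programmatically, mirroring the thin wrapper added above. The setup_shell() call, the team.jinxs_dict["alicanto"] lookup, and the execute(context=..., npc=...) signature are taken directly from the diff; the query string and parameter values below are illustrative assumptions only, not part of the package.

    # Hypothetical driver based on the new main() in npcsh/alicanto.py 1.1.14.
    # setup_shell(), team.jinxs_dict, and execute() are used as shown in the
    # diff above; the topic and numeric values are made-up examples.
    from npcsh._state import setup_shell

    command_history, team, default_npc = setup_shell()

    context = {
        "npc": default_npc,
        "team": team,
        "messages": [],
        "query": "impact of quantum annealing on protein folding",  # example topic
        "model": None,             # None when the flag is omitted, as in the CLI
        "provider": None,
        "num_npcs": 5,             # CLI default: number of research perspectives
        "depth": 3,                # CLI default: research depth
        "max_steps": 20,
        "exploration": 0.3,
        "creativity": 0.5,
        "format": "report",
        "skip_research": True,     # the CLI disables this only with --with-research
    }

    result = team.jinxs_dict["alicanto"].execute(context=context, npc=default_npc)
    if isinstance(result, dict) and result.get("output"):
        print(result["output"])

On the command line, this release replaces 1.1.12's --num-agents/--skip-research interface with --num-npcs, --depth, --exploration, --creativity, --format, and an opt-in --with-research flag (research is now skipped by default), assuming the packaged console script is still named alicanto.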