ursa_ai-0.9.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ursa/__init__.py +3 -0
- ursa/agents/__init__.py +32 -0
- ursa/agents/acquisition_agents.py +812 -0
- ursa/agents/arxiv_agent.py +429 -0
- ursa/agents/base.py +728 -0
- ursa/agents/chat_agent.py +60 -0
- ursa/agents/code_review_agent.py +341 -0
- ursa/agents/execution_agent.py +915 -0
- ursa/agents/hypothesizer_agent.py +614 -0
- ursa/agents/lammps_agent.py +465 -0
- ursa/agents/mp_agent.py +204 -0
- ursa/agents/optimization_agent.py +410 -0
- ursa/agents/planning_agent.py +219 -0
- ursa/agents/rag_agent.py +304 -0
- ursa/agents/recall_agent.py +54 -0
- ursa/agents/websearch_agent.py +196 -0
- ursa/cli/__init__.py +363 -0
- ursa/cli/hitl.py +516 -0
- ursa/cli/hitl_api.py +75 -0
- ursa/observability/metrics_charts.py +1279 -0
- ursa/observability/metrics_io.py +11 -0
- ursa/observability/metrics_session.py +750 -0
- ursa/observability/pricing.json +97 -0
- ursa/observability/pricing.py +321 -0
- ursa/observability/timing.py +1466 -0
- ursa/prompt_library/__init__.py +0 -0
- ursa/prompt_library/code_review_prompts.py +51 -0
- ursa/prompt_library/execution_prompts.py +50 -0
- ursa/prompt_library/hypothesizer_prompts.py +17 -0
- ursa/prompt_library/literature_prompts.py +11 -0
- ursa/prompt_library/optimization_prompts.py +131 -0
- ursa/prompt_library/planning_prompts.py +79 -0
- ursa/prompt_library/websearch_prompts.py +131 -0
- ursa/tools/__init__.py +0 -0
- ursa/tools/feasibility_checker.py +114 -0
- ursa/tools/feasibility_tools.py +1075 -0
- ursa/tools/run_command.py +27 -0
- ursa/tools/write_code.py +42 -0
- ursa/util/__init__.py +0 -0
- ursa/util/diff_renderer.py +128 -0
- ursa/util/helperFunctions.py +142 -0
- ursa/util/logo_generator.py +625 -0
- ursa/util/memory_logger.py +183 -0
- ursa/util/optimization_schema.py +78 -0
- ursa/util/parse.py +405 -0
- ursa_ai-0.9.1.dist-info/METADATA +304 -0
- ursa_ai-0.9.1.dist-info/RECORD +51 -0
- ursa_ai-0.9.1.dist-info/WHEEL +5 -0
- ursa_ai-0.9.1.dist-info/entry_points.txt +2 -0
- ursa_ai-0.9.1.dist-info/licenses/LICENSE +8 -0
- ursa_ai-0.9.1.dist-info/top_level.txt +1 -0
ursa/agents/hypothesizer_agent.py
@@ -0,0 +1,614 @@
import ast

# from langchain_community.tools import TavilySearchResults
# from textwrap import dedent
from datetime import datetime
from typing import Any, Literal, Mapping, TypedDict

from langchain.chat_models import BaseChatModel
from langchain_core.messages import HumanMessage, SystemMessage
from langgraph.graph import StateGraph

try:
    from ddgs import DDGS  # pip install ddgs (formerly duckduckgo-search)
except Exception:
    DDGS = None


from ..prompt_library.hypothesizer_prompts import (
    competitor_prompt,
    critic_prompt,
    hypothesizer_prompt,
)

# from langchain_core.runnables.graph import MermaidDrawMethod
from .base import BaseAgent

# --- ANSI color codes ---
GREEN = "\033[92m"
BLUE = "\033[94m"
RED = "\033[91m"
RESET = "\033[0m"


# Define our state schema
class HypothesizerState(TypedDict):
    question: str
    question_search_query: str
    current_iteration: int
    max_iterations: int
    agent1_solution: list[str]  # List to store each iteration of solutions
    agent2_critiques: list[str]  # List to store critiques
    agent3_perspectives: list[str]  # List to store competitor perspectives
    solution: str  # Refined solution
    summary_report: str  # the final summarized report
    visited_sites: list[str]
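    # Editor's note: each pass through the graph appends exactly one entry to
    # agent1_solution, agent2_critiques, and agent3_perspectives, so index i
    # across the three lists describes iteration i + 1; after two iterations,
    # agent1_solution[1] is the revision that answered agent2_critiques[0]
    # and agent3_perspectives[0].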


class HypothesizerAgent(BaseAgent):
    def __init__(
        self,
        llm: BaseChatModel,
        **kwargs,
    ):
        super().__init__(llm, **kwargs)
        self.hypothesizer_prompt = hypothesizer_prompt
        self.critic_prompt = critic_prompt
        self.competitor_prompt = competitor_prompt
        if DDGS is None:
            # The module-level import falls back to None, so fail loudly here
            # instead of raising a TypeError on DDGS() below.
            raise ImportError(
                "HypothesizerAgent requires the 'ddgs' package (pip install ddgs)."
            )
        self.search_tool = DDGS()
        # self.search_tool = TavilySearchResults(
        #     max_results=10, search_depth="advanced", include_answer=False
        # )

        self._action = self._build_graph()

    def agent1_generate_solution(
        self, state: HypothesizerState
    ) -> HypothesizerState:
        """Agent 1: Hypothesizer."""
        print(
            f"[iteration {state['current_iteration']}] Entering agent1_generate_solution."
        )

        current_iter = state["current_iteration"]
        user_content = f"Question: {state['question']}\n"

        if current_iter > 0:
            user_content += (
                f"\nPrevious solution: {state['agent1_solution'][-1]}"
            )
            user_content += f"\nCritique: {state['agent2_critiques'][-1]}"
            user_content += (
                f"\nCompetitor perspective: {state['agent3_perspectives'][-1]}"
            )
            user_content += (
                "\n\n**You must explicitly list how this new solution differs from the previous solution,** "
                "point by point, explaining what changes were made in response to the critique and competitor perspective."
                "\nAfterward, provide your updated solution."
            )
        else:
            user_content += "Research this problem and generate a solution."

        search_query = self.llm.invoke(
            f"Here is a problem description: {state['question']}. Turn it into a short query to be fed into a search engine."
        ).content
        if '"' in search_query:
            # If the model wrapped the query in quotes, keep only the quoted part.
            search_query = search_query.split('"')[1]
        raw_search_results = self.search_tool.text(
            search_query, backend="duckduckgo"
        )

        # Parse the results if possible, so we can collect URLs
        new_state = state.copy()
        new_state["question_search_query"] = search_query
        if "visited_sites" not in new_state:
            new_state["visited_sites"] = []

        try:
            if isinstance(raw_search_results, str):
                results_list = ast.literal_eval(raw_search_results)
            else:
                results_list = raw_search_results
            # Each item typically has "title", "body", and a URL field;
            # ddgs returns the URL under "href" (older tools used "link").
            for item in results_list:
                link = item.get("link") or item.get("href")
                if link:
                    # print(f"[DEBUG] Appending visited link: {link}")
                    new_state["visited_sites"].append(link)
        except (ValueError, SyntaxError, TypeError):
            # If it's not valid Python syntax or something else goes wrong
            print("[DEBUG] Could not parse search results as Python list.")
            print("[DEBUG] raw_search_results:", raw_search_results)

        user_content += f"\nSearch results: {raw_search_results}"

        # Provide a system message to define this agent's role
        messages = [
            SystemMessage(content=self.hypothesizer_prompt),
            HumanMessage(content=user_content),
        ]
        solution = self.llm.invoke(messages)

        new_state["agent1_solution"].append(solution.content)

        # Print the entire solution in green
        print(
            f"{GREEN}[Agent1 - Hypothesizer solution]\n{solution.content}{RESET}"
        )
        print(
            f"[iteration {state['current_iteration']}] Exiting agent1_generate_solution."
        )
        return new_state
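    # Editor's note: the try/except parsing block above reappears verbatim in
    # agent2_critique and agent3_competitor_perspective below. A shared helper
    # along these lines (a sketch, assuming ddgs-style result dicts) would
    # remove the duplication:
    #
    #     def _collect_links(self, results, visited: list[str]) -> None:
    #         if isinstance(results, str):
    #             results = ast.literal_eval(results)
    #         for item in results or []:
    #             link = item.get("link") or item.get("href")
    #             if link:
    #                 visited.append(link)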

    def agent2_critique(self, state: HypothesizerState) -> HypothesizerState:
        """Agent 2: Critic."""
        print(
            f"[iteration {state['current_iteration']}] Entering agent2_critique."
        )

        solution = state["agent1_solution"][-1]
        user_content = (
            f"Question: {state['question']}\n"
            f"Proposed solution: {solution}\n"
            "Provide a detailed critique of this solution. Identify potential flaws, assumptions, and areas for improvement."
        )

        fact_check_query = f"fact check {state['question_search_query']} solution effectiveness"

        raw_search_results = self.search_tool.text(
            fact_check_query, backend="duckduckgo"
        )

        # Parse the results if possible, so we can collect URLs
        new_state = state.copy()
        if "visited_sites" not in new_state:
            new_state["visited_sites"] = []

        try:
            if isinstance(raw_search_results, str):
                results_list = ast.literal_eval(raw_search_results)
            else:
                results_list = raw_search_results
            # Each item typically has "title", "body", and a URL field;
            # ddgs returns the URL under "href" (older tools used "link").
            for item in results_list:
                link = item.get("link") or item.get("href")
                if link:
                    # print(f"[DEBUG] Appending visited link: {link}")
                    new_state["visited_sites"].append(link)
        except (ValueError, SyntaxError, TypeError):
            # If it's not valid Python syntax or something else goes wrong
            print("[DEBUG] Could not parse search results as Python list.")
            print("[DEBUG] raw_search_results:", raw_search_results)

        fact_check_results = raw_search_results
        user_content += f"\nFact check results: {fact_check_results}"

        messages = [
            SystemMessage(content=self.critic_prompt),
            HumanMessage(content=user_content),
        ]
        critique = self.llm.invoke(messages)

        new_state["agent2_critiques"].append(critique.content)

        # Print the entire critique in blue
        print(f"{BLUE}[Agent2 - Critic]\n{critique.content}{RESET}")
        print(
            f"[iteration {state['current_iteration']}] Exiting agent2_critique."
        )
        return new_state

    def agent3_competitor_perspective(
        self, state: HypothesizerState
    ) -> HypothesizerState:
        """Agent 3: Competitor/Stakeholder Simulator."""
        print(
            f"[iteration {state['current_iteration']}] Entering agent3_competitor_perspective."
        )

        solution = state["agent1_solution"][-1]
        critique = state["agent2_critiques"][-1]

        user_content = (
            f"Question: {state['question']}\n"
            f"Proposed solution: {solution}\n"
            f"Critique: {critique}\n"
            "Simulate how a competitor, government agency, or other stakeholder might respond to this solution."
        )

        competitor_search_query = (
            f"competitor responses to {state['question_search_query']}"
        )

        raw_search_results = self.search_tool.text(
            competitor_search_query, backend="duckduckgo"
        )

        # Parse the results if possible, so we can collect URLs
        new_state = state.copy()
        if "visited_sites" not in new_state:
            new_state["visited_sites"] = []

        try:
            if isinstance(raw_search_results, str):
                results_list = ast.literal_eval(raw_search_results)
            else:
                results_list = raw_search_results
            # Each item typically has "title", "body", and a URL field;
            # ddgs returns the URL under "href" (older tools used "link").
            for item in results_list:
                link = item.get("link") or item.get("href")
                if link:
                    # print(f"[DEBUG] Appending visited link: {link}")
                    new_state["visited_sites"].append(link)
        except (ValueError, SyntaxError, TypeError):
            # If it's not valid Python syntax or something else goes wrong
            print("[DEBUG] Could not parse search results as Python list.")
            print("[DEBUG] raw_search_results:", raw_search_results)

        competitor_info = raw_search_results
        user_content += f"\nCompetitor information: {competitor_info}"

        messages = [
            SystemMessage(content=self.competitor_prompt),
            HumanMessage(content=user_content),
        ]
        perspective = self.llm.invoke(messages)

        new_state["agent3_perspectives"].append(perspective.content)

        # Print the entire perspective in red
        print(
            f"{RED}[Agent3 - Competitor/Stakeholder Perspective]\n{perspective.content}{RESET}"
        )
        print(
            f"[iteration {state['current_iteration']}] Exiting agent3_competitor_perspective."
        )
        return new_state

    def increment_iteration(
        self, state: HypothesizerState
    ) -> HypothesizerState:
        new_state = state.copy()
        new_state["current_iteration"] += 1
        print(
            f"[iteration {state['current_iteration']}] Iteration incremented to {new_state['current_iteration']}"
        )
        return new_state

    def generate_solution(self, state: HypothesizerState) -> HypothesizerState:
        """Generate the overall, refined solution based on all iterations."""
        print(
            f"[iteration {state['current_iteration']}] Entering generate_solution."
        )
        prompt = f"Original question: {state['question']}\n\n"
        prompt += "Evolution of solutions:\n"

        for i in range(state["max_iterations"]):
            prompt += f"\nIteration {i + 1}:\n"
            prompt += f"Solution: {state['agent1_solution'][i]}\n"
            prompt += f"Critique: {state['agent2_critiques'][i]}\n"
            prompt += (
                f"Competitor perspective: {state['agent3_perspectives'][i]}\n"
            )

        prompt += "\nBased on this iterative process, provide the overall, refined solution."

        print(
            f"[iteration {state['current_iteration']}] Generating overall solution with LLM..."
        )
        solution = self.llm.invoke(prompt)
        print(
            f"[iteration {state['current_iteration']}] Overall solution obtained. Preview:",
            solution.content[:200],
            "...",
        )

        new_state = state.copy()
        new_state["solution"] = solution.content

        print(
            f"[iteration {state['current_iteration']}] Exiting generate_solution."
        )
        return new_state
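    # Editor's note: this finalize prompt concatenates every solution,
    # critique, and perspective from every iteration, so its length grows
    # linearly with max_iterations and can approach the model's context
    # window on long runs.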

    def print_visited_sites(
        self, state: HypothesizerState
    ) -> HypothesizerState:
        new_state = state.copy()
        # all_sites = new_state.get("visited_sites", [])
        # print("[DEBUG] Visited Sites:")
        # for s in all_sites:
        #     print("  ", s)
        return new_state

    def summarize_process_as_latex(
        self, state: HypothesizerState
    ) -> HypothesizerState:
        """
        Summarize how the solution changed over time, referencing
        each iteration's critique and competitor perspective,
        then produce a final LaTeX document.
        """
        print("Entering summarize_process_as_latex.")
        # HypothesizerState does not carry "llm_model", so this falls back
        # to the default unless a caller added the key.
        llm_model = state.get("llm_model", "unknown_model")

        # Build a single string describing the entire iterative process
        iteration_details = ""
        for i, (sol, crit, comp) in enumerate(
            zip(
                state["agent1_solution"],
                state["agent2_critiques"],
                state["agent3_perspectives"],
            ),
            start=1,
        ):
            iteration_details += (
                f"\\subsection*{{Iteration {i}}}\n\n"
                f"\\textbf{{Solution:}}\\\\\n{sol}\n\n"
                f"\\textbf{{Critique:}}\\\\\n{crit}\n\n"
                f"\\textbf{{Competitor Perspective:}}\\\\\n{comp}\n\n"
            )

        # -----------------------------
        # Write iteration_details to disk as .txt
        # -----------------------------
        timestamp_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        txt_filename = (
            f"iteration_details_{llm_model}_{timestamp_str}_chat_history.txt"
        )
        with open(txt_filename, "w", encoding="utf-8") as f:
            f.write(iteration_details)

        print(f"Wrote iteration details to {txt_filename}.")

        # Prompt the LLM to produce a LaTeX doc
        # We'll just pass it as a single string to the LLM;
        # you could also do system+human messages if you prefer.
        prompt = f"""\
You are a system that produces a FULL LaTeX document.
Here is information about a multi-iteration process:

Original question: {state["question"]}

Below are the solutions, critiques, and competitor perspectives from each iteration:

{iteration_details}

The solution we arrived at was:

{state["solution"]}

Now produce a valid LaTeX document. Be sure to use a table of contents.
It must start with an Executive Summary (that may be multiple pages) which summarizes
the entire iterative process. Following that, we should include the solution in full,
not summarized, but reformatted for appropriate LaTeX. And then, finally (and this will be
quite long), we must take all the steps - solutions, critiques, and competitor perspectives -
and *NOT SUMMARIZE THEM* but merely reformat them for the reader. This will be in an Appendix
of the full content of the steps. Finally, include a listing of all of the websites we
used in our research.

You must ONLY RETURN LaTeX, nothing else. It must be valid LaTeX syntax!

Your output should start with:
\\documentclass{{article}}
\\usepackage[margin=1in]{{geometry}}
etc.

It must compile without errors under pdflatex.
"""

        # Now produce a valid LaTeX document that nicely summarizes this entire iterative process.
        # It must include the overall solution in full, not summarized, but reformatted for appropriate
        # LaTeX. The summarization is for the other steps.

        # all_visited_sites = state.get("visited_sites", [])
        # (Optional) remove duplicates by converting to a set, then back to a list
        # visited_sites_unique = list(set(all_visited_sites))
        # if visited_sites_unique:
        #     websites_latex = "\\section*{Websites Visited}\\begin{itemize}\n"
        #     for url in visited_sites_unique:
        #         print(f"We visited: {url}")
        #         # Use \url{} to handle special characters in URLs
        #         websites_latex += f"\\item \\url{{{url}}}\n"
        #     websites_latex += "\\end{itemize}\n\n"
        # else:
        #     # If no sites visited, or the list is empty
        #     websites_latex = (
        #         "\\section*{Websites Visited}\nNo sites were visited.\n\n"
        #     )
        # print(websites_latex)
        websites_latex = ""

        # Ask the LLM to produce *only* LaTeX content
        latex_response = self.llm.invoke(prompt)

        latex_doc = latex_response.content

        def inject_into_latex(original_tex: str, injection: str) -> str:
            """
            Find the last occurrence of '\\end{document}' in 'original_tex'
            and insert 'injection' right before it.
            If '\\end{document}' is not found, just append the injection at the end.
            """
            injection_index = original_tex.rfind(r"\end{document}")
            if injection_index == -1:
                # If the LLM didn't include \end{document}, just append
                return original_tex + "\n" + injection
            else:
                # Insert right before \end{document}
                return (
                    original_tex[:injection_index]
                    + "\n"
                    + injection
                    + "\n"
                    + original_tex[injection_index:]
                )
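        # Example (editor's note): with injection = "\\section*{Websites Visited} ...",
        # inject_into_latex would splice that section in just before
        # \end{document}. As shipped, websites_latex is "" (see above), so the
        # injection is effectively a no-op.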

        final_latex = inject_into_latex(latex_doc, websites_latex)

        new_state = state.copy()
        new_state["summary_report"] = final_latex

        print(
            f"[iteration {state['current_iteration']}] Received LaTeX from LLM. Preview:"
        )
        print(latex_response.content[:300], "...")
        print(
            f"[iteration {state['current_iteration']}] Exiting summarize_process_as_latex."
        )
        return new_state

    def _build_graph(self):
        # Initialize the graph
        graph = StateGraph(HypothesizerState)

        # Add nodes
        self.add_node(graph, self.agent1_generate_solution, "agent1")
        self.add_node(graph, self.agent2_critique, "agent2")
        self.add_node(graph, self.agent3_competitor_perspective, "agent3")
        self.add_node(graph, self.increment_iteration, "increment_iteration")
        self.add_node(graph, self.generate_solution, "finalize")
        self.add_node(graph, self.print_visited_sites, "print_sites")
        self.add_node(
            graph, self.summarize_process_as_latex, "summarize_as_latex"
        )
        # self.graph.add_node("compile_pdf", compile_summary_to_pdf)

        # Add simple edges for the known flow
        graph.add_edge("agent1", "agent2")
        graph.add_edge("agent2", "agent3")
        graph.add_edge("agent3", "increment_iteration")

        # Then from increment_iteration, we have a conditional:
        # If we 'continue', we go back to agent1
        # If we 'finish', we jump to the finalize node
        graph.add_conditional_edges(
            "increment_iteration",
            should_continue,
            {"continue": "agent1", "finish": "finalize"},
        )

        graph.add_edge("finalize", "summarize_as_latex")
        graph.add_edge("summarize_as_latex", "print_sites")
        # self.graph.add_edge("summarize_as_latex", "compile_pdf")
        # self.graph.add_edge("compile_pdf", "print_sites")

        # Set the entry point
        graph.set_entry_point("agent1")
        graph.set_finish_point("print_sites")

        return graph.compile(checkpointer=self.checkpointer)
        # self.action.get_graph().draw_mermaid_png(output_file_path="hypothesizer_agent_graph.png", draw_method=MermaidDrawMethod.PYPPETEER)
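        # Resulting topology (editor's note, read directly off the edges above):
        #
        #     agent1 -> agent2 -> agent3 -> increment_iteration
        #        ^                                 |
        #        +---------- "continue" ----------+
        #                                          | "finish"
        #                                          v
        #              finalize -> summarize_as_latex -> print_sites -> END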

    def _invoke(
        self, inputs: Mapping[str, Any], recursion_limit: int = 100000, **_
    ):
        config = self.build_config(
            recursion_limit=recursion_limit, tags=["graph"]
        )
        if "prompt" not in inputs:
            raise KeyError("'prompt' is a required argument")

        inputs = dict(inputs)  # copy so the caller's mapping isn't mutated
        inputs["question"] = inputs["prompt"]
        inputs["max_iterations"] = inputs.get("max_iterations", 3)
        inputs["current_iteration"] = 0
        inputs["agent1_solution"] = []
        inputs["agent2_critiques"] = []
        inputs["agent3_perspectives"] = []
        inputs["solution"] = ""

        return self._action.invoke(inputs, config)
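    # Minimal usage sketch (editor's note; assumes an OpenAI-backed chat model
    # and that BaseAgent.invoke forwards its inputs to _invoke):
    #
    #     from langchain.chat_models import init_chat_model
    #
    #     agent = HypothesizerAgent(init_chat_model("openai:gpt-4o-mini"))
    #     result = agent.invoke({"prompt": "Design a low-cost CO2 sensor.",
    #                            "max_iterations": 2})
    #     print(result["solution"])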


def should_continue(state: HypothesizerState) -> Literal["continue", "finish"]:
    if state["current_iteration"] >= state["max_iterations"]:
        print(
            f"[iteration {state['current_iteration']}] Reached max_iterations; finishing."
        )
        return "finish"
    else:
        print(
            f"[iteration {state['current_iteration']}] Still under max_iterations; continuing."
        )
        return "continue"


# def compile_summary_to_pdf(state: AgentState) -> AgentState:
#     """
#     Takes the LaTeX in state["summary_report"] and tries to compile it to a PDF
#     named with the model and timestamp, e.g.:
#     summary_report_gpt-5-mini_Mar_15_2025_8:59am.pdf
#     """
#     print(f"[DEBUG] Entering compile_summary_to_pdf.")

#     llm_model = state["llm_model"]

#     latex_code = state.get("summary_report", "")
#     if not latex_code:
#         print("[DEBUG] No LaTeX code found in summary_report.")
#         return state

#     # Create a dynamic filename using the LLM model name & a timestamp
#     # e.g. "summary_report_gpt-5-mini_Mar_15_2025_08:59AM.pdf"
#     # timestamp_str = datetime.now().strftime("%b_%d_%Y_%I:%M%p")
#     timestamp_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

#     pdf_filename = f"summary_report_{llm_model}_{timestamp_str}.pdf"

#     tex_filename = "summary_report.tex"
#     with open(tex_filename, "w", encoding="utf-8") as f:
#         f.write(latex_code)

#     try:
#         subprocess.run(["pdflatex", "-interaction=nonstopmode", tex_filename], check=True)
#         subprocess.run(["pdflatex", "-interaction=nonstopmode", tex_filename], check=True)
#     except subprocess.CalledProcessError as e:
#         print("Error compiling LaTeX:", e)

#     if os.path.exists("summary_report.pdf"):
#         os.rename("summary_report.pdf", pdf_filename)
#         print(f"[DEBUG] Successfully compiled PDF -> {pdf_filename}")
#     else:
#         print("[DEBUG] PDF compilation failed; no summary_report.pdf found.")

#     print("[DEBUG] Exiting compile_summary_to_pdf.")
#     return state

if __name__ == "__main__":
    # Create the agent. It needs a chat model; the model named here is only
    # an illustrative default, not a project requirement.
    from langchain.chat_models import init_chat_model

    hypothesizer_agent = HypothesizerAgent(
        init_chat_model("openai:gpt-4o-mini")
    )

    question = "Find a city with at least 10 vowels in its name."

    # Initialize the state. _invoke requires a "prompt" key and rebuilds the
    # iteration counters and history lists itself.
    initial_state = {
        "prompt": question,
        "max_iterations": 3,
    }

    print("Invoking the graph...")
    # Run the graph
    result = hypothesizer_agent.invoke(
        initial_state,
        {
            "recursion_limit": 999999,
            "configurable": {"thread_id": 42},
        },
    )
    summary_text = result["summary_report"]

    print("Graph invocation complete.")

    # Print the overall solution
    print("Overall Solution:")
    print(result["solution"])

    # print("Summarized Report:")
    # print(summary_text)