ursa-ai 0.0.3__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ursa-ai might be problematic. Click here for more details.

@@ -0,0 +1,597 @@
1
+ import ast
2
+
3
+ # from langchain_community.tools import TavilySearchResults
4
+ # from textwrap import dedent
5
+ from datetime import datetime
6
+ from typing import List, Literal, TypedDict
7
+
8
+ from langchain_community.tools import DuckDuckGoSearchResults
9
+ from langchain_core.language_models import BaseChatModel
10
+ from langchain_core.messages import HumanMessage, SystemMessage
11
+ from langgraph.graph import END, StateGraph
12
+
13
+ from ..prompt_library.hypothesizer_prompts import (
14
+ competitor_prompt,
15
+ critic_prompt,
16
+ hypothesizer_prompt,
17
+ )
18
+
19
+ # from langchain_core.runnables.graph import MermaidDrawMethod
20
+ from .base import BaseAgent
21
+
22
# --- ANSI color codes ---
# Used to color-code each agent's console output so interleaved logs are
# readable: green = hypothesizer, blue = critic, red = competitor.
GREEN = "\033[92m"
BLUE = "\033[94m"
RED = "\033[91m"
RESET = "\033[0m"
27
+
28
+
29
# Define our state schema
class HypothesizerState(TypedDict):
    """Shared state threaded through every node of the LangGraph graph."""

    question: str  # the user's original problem statement
    question_search_query: str  # short search query derived from `question`
    current_iteration: int  # 0-based counter, bumped by increment_iteration
    max_iterations: int  # loop stops once current_iteration reaches this
    agent1_solution: List[str]  # List to store each iteration of solutions
    agent2_critiques: List[str]  # List to store critiques
    agent3_perspectives: List[str]  # List to store competitor perspectives
    final_solution: str  # Final refined solution
    summary_report: str  # the final summarized report
    visited_sites: List[str]  # URLs collected from all search-tool calls
41
+
42
+
43
class HypothesizerAgent(BaseAgent):
    """Iterative hypothesize/critique/compete loop built on a LangGraph state machine.

    Three LLM "agents" take turns on a question: a hypothesizer proposes a
    solution, a critic reviews it, and a competitor/stakeholder simulator
    pushes back. After ``max_iterations`` rounds a final solution is produced
    and the whole process is summarized as a LaTeX report.
    """

    def __init__(self, llm: str | BaseChatModel = "openai/o3-mini", **kwargs):
        """Build prompts, the web-search tool, and compile the graph."""
        super().__init__(llm, **kwargs)
        self.hypothesizer_prompt = hypothesizer_prompt
        self.critic_prompt = critic_prompt
        self.competitor_prompt = competitor_prompt
        # DuckDuckGo is the key-less default; Tavily (commented out) is an
        # alternative if an API key is available.
        self.search_tool = DuckDuckGoSearchResults(
            output_format="json", num_results=10
        )
        # self.search_tool = TavilySearchResults(
        #     max_results=10, search_depth="advanced", include_answer=False
        # )

        self._initialize_agent()

    def _record_visited_sites(self, raw_search_results, new_state) -> None:
        """Best-effort: append every result "link" to new_state["visited_sites"].

        ``raw_search_results`` may be a string holding a Python/JSON-style
        list literal (DuckDuckGo's string output) or an already-parsed list
        of dicts. Parsing failures are logged and ignored so a malformed
        search response never aborts the graph.
        """
        if "visited_sites" not in new_state:
            new_state["visited_sites"] = []
        try:
            if isinstance(raw_search_results, str):
                results_list = ast.literal_eval(raw_search_results)
            else:
                results_list = raw_search_results
            # Each item typically might have "link", "title", "snippet"
            for item in results_list:
                link = item.get("link")
                if link:
                    print(f"[DEBUG] Appending visited link: {link}")
                    new_state["visited_sites"].append(link)
        except (ValueError, SyntaxError, TypeError):
            # If it's not valid Python syntax or something else goes wrong
            print("[DEBUG] Could not parse search results as Python list.")
            print("[DEBUG] raw_search_results:", raw_search_results)

    def agent1_generate_solution(
        self, state: HypothesizerState
    ) -> HypothesizerState:
        """Agent 1: Hypothesizer. Propose (or refine) a solution using web search."""
        print(
            f"[iteration {state['current_iteration']} - DEBUG] Entering agent1_generate_solution. Iteration: {state['current_iteration']}"
        )

        current_iter = state["current_iteration"]
        user_content = f"Question: {state['question']}\n"

        if current_iter > 0:
            # On refinement rounds, feed back the previous round's artifacts
            # and demand an explicit point-by-point diff of the changes.
            user_content += (
                f"\nPrevious solution: {state['agent1_solution'][-1]}"
            )
            user_content += f"\nCritique: {state['agent2_critiques'][-1]}"
            user_content += (
                f"\nCompetitor perspective: {state['agent3_perspectives'][-1]}"
            )
            user_content += (
                "\n\n**You must explicitly list how this new solution differs from the previous solution,** "
                "point by point, explaining what changes were made in response to the critique and competitor perspective."
                "\nAfterward, provide your updated solution."
            )
        else:
            user_content += "Research this problem and generate a solution."

        # Let the LLM condense the question into a search-engine query; if it
        # wrapped the query in quotes, keep only the quoted part.
        search_query = self.llm.invoke(
            f"Here is a problem description: {state['question']}. Turn it into a short query to be fed into a search engine."
        ).content
        if '"' in search_query:
            search_query = search_query.split('"')[1]
        raw_search_results = self.search_tool.invoke(search_query)

        # Parse the results if possible, so we can collect URLs
        new_state = state.copy()
        new_state["question_search_query"] = search_query
        self._record_visited_sites(raw_search_results, new_state)

        user_content += f"\nSearch results: {raw_search_results}"

        # Provide a system message to define this agent's role
        messages = [
            SystemMessage(content=self.hypothesizer_prompt),
            HumanMessage(content=user_content),
        ]
        solution = self.llm.invoke(messages)

        new_state["agent1_solution"].append(solution.content)

        # Print the entire solution in green
        print(
            f"{GREEN}[Agent1 - Hypothesizer solution]\n{solution.content}{RESET}"
        )
        print(
            f"[iteration {state['current_iteration']} - DEBUG] Exiting agent1_generate_solution."
        )
        return new_state

    def agent2_critique(self, state: HypothesizerState) -> HypothesizerState:
        """Agent 2: Critic. Fact-check and critique the latest solution."""
        print(
            f"[iteration {state['current_iteration']} - DEBUG] Entering agent2_critique."
        )

        solution = state["agent1_solution"][-1]
        user_content = (
            f"Question: {state['question']}\n"
            f"Proposed solution: {solution}\n"
            "Provide a detailed critique of this solution. Identify potential flaws, assumptions, and areas for improvement."
        )

        fact_check_query = f"fact check {state['question_search_query']} solution effectiveness"

        raw_search_results = self.search_tool.invoke(fact_check_query)

        # Parse the results if possible, so we can collect URLs
        new_state = state.copy()
        self._record_visited_sites(raw_search_results, new_state)

        fact_check_results = raw_search_results
        user_content += f"\nFact check results: {fact_check_results}"

        messages = [
            SystemMessage(content=self.critic_prompt),
            HumanMessage(content=user_content),
        ]
        critique = self.llm.invoke(messages)

        new_state["agent2_critiques"].append(critique.content)

        # Print the entire critique in blue
        print(f"{BLUE}[Agent2 - Critic]\n{critique.content}{RESET}")
        print(
            f"[iteration {state['current_iteration']} - DEBUG] Exiting agent2_critique."
        )
        return new_state

    def agent3_competitor_perspective(
        self, state: HypothesizerState
    ) -> HypothesizerState:
        """Agent 3: Competitor/Stakeholder Simulator."""
        print(
            f"[iteration {state['current_iteration']} - DEBUG] Entering agent3_competitor_perspective."
        )

        solution = state["agent1_solution"][-1]
        critique = state["agent2_critiques"][-1]

        user_content = (
            f"Question: {state['question']}\n"
            f"Proposed solution: {solution}\n"
            f"Critique: {critique}\n"
            "Simulate how a competitor, government agency, or other stakeholder might respond to this solution."
        )

        competitor_search_query = (
            f"competitor responses to {state['question_search_query']}"
        )

        raw_search_results = self.search_tool.invoke(competitor_search_query)

        # Parse the results if possible, so we can collect URLs
        new_state = state.copy()
        self._record_visited_sites(raw_search_results, new_state)

        competitor_info = raw_search_results
        user_content += f"\nCompetitor information: {competitor_info}"

        messages = [
            SystemMessage(content=self.competitor_prompt),
            HumanMessage(content=user_content),
        ]
        perspective = self.llm.invoke(messages)

        new_state["agent3_perspectives"].append(perspective.content)

        # Print the entire perspective in red
        print(
            f"{RED}[Agent3 - Competitor/Stakeholder Perspective]\n{perspective.content}{RESET}"
        )
        print(
            f"[iteration {state['current_iteration']} - DEBUG] Exiting agent3_competitor_perspective."
        )
        return new_state

    def increment_iteration(
        self, state: HypothesizerState
    ) -> HypothesizerState:
        """Advance the loop counter by one and return the updated state."""
        new_state = state.copy()
        new_state["current_iteration"] += 1
        print(
            f"[iteration {state['current_iteration']} - DEBUG] Iteration incremented to {new_state['current_iteration']}"
        )
        return new_state

    def generate_final_solution(
        self, state: HypothesizerState
    ) -> HypothesizerState:
        """Generate the final, refined solution based on all iterations."""
        print(
            f"[iteration {state['current_iteration']} - DEBUG] Entering generate_final_solution."
        )
        prompt = f"Original question: {state['question']}\n\n"
        prompt += "Evolution of solutions:\n"

        for i in range(state["max_iterations"]):
            prompt += f"\nIteration {i + 1}:\n"
            prompt += f"Solution: {state['agent1_solution'][i]}\n"
            prompt += f"Critique: {state['agent2_critiques'][i]}\n"
            prompt += (
                f"Competitor perspective: {state['agent3_perspectives'][i]}\n"
            )

        prompt += "\nBased on this iterative process, provide the final, refined solution."

        print(
            f"[iteration {state['current_iteration']} - DEBUG] Generating final solution with LLM..."
        )
        final_solution = self.llm.invoke(prompt)
        print(
            f"[iteration {state['current_iteration']} - DEBUG] Final solution obtained. Preview:",
            final_solution.content[:200],
            "...",
        )

        new_state = state.copy()
        new_state["final_solution"] = final_solution.content

        print(
            f"[iteration {state['current_iteration']} - DEBUG] Exiting generate_final_solution."
        )
        return new_state

    def print_visited_sites(
        self, state: HypothesizerState
    ) -> HypothesizerState:
        """Debug node: dump every URL collected during the run."""
        new_state = state.copy()
        all_sites = new_state.get("visited_sites", [])
        print("[DEBUG] Visited Sites:")
        for s in all_sites:
            print(" ", s)
        return new_state

    def summarize_process_as_latex(
        self, state: HypothesizerState
    ) -> HypothesizerState:
        """
        Summarize how the solution changed over time, referencing
        each iteration's critique and competitor perspective,
        then produce a final LaTeX document.
        """
        print("[DEBUG] Entering summarize_process_as_latex.")
        # NOTE(review): "llm_model" is not declared in HypothesizerState and is
        # never set by this graph, so this currently always falls back to
        # "unknown_model" — confirm intent with the state schema's owner.
        llm_model = state.get("llm_model", "unknown_model")

        # Build a single string describing the entire iterative process
        iteration_details = ""
        for i, (sol, crit, comp) in enumerate(
            zip(
                state["agent1_solution"],
                state["agent2_critiques"],
                state["agent3_perspectives"],
            ),
            start=1,
        ):
            iteration_details += (
                f"\\subsection*{{Iteration {i}}}\n\n"
                f"\\textbf{{Solution:}}\\\\\n{sol}\n\n"
                f"\\textbf{{Critique:}}\\\\\n{crit}\n\n"
                f"\\textbf{{Competitor Perspective:}}\\\\\n{comp}\n\n"
            )

        # -----------------------------
        # Write iteration_details to disk as .txt
        # -----------------------------
        timestamp_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        txt_filename = (
            f"iteration_details_{llm_model}_{timestamp_str}_chat_history.txt"
        )
        with open(txt_filename, "w", encoding="utf-8") as f:
            f.write(iteration_details)

        print(f"[DEBUG] Wrote iteration details to {txt_filename}.")

        # Prompt the LLM to produce a LaTeX doc
        # We'll just pass it as a single string to the LLM;
        # you could also do system+human messages if you prefer.
        prompt = f"""\
You are a system that produces a FULL LaTeX document.
Here is information about a multi-iteration process:

Original question: {state['question']}

Below are the solutions, critiques, and competitor perspectives from each iteration:

{iteration_details}

The final solution we arrived at was:

{state['final_solution']}

Now produce a valid LaTeX document. Be sure to use a table of contents.
It must start with an Executive Summary (that may be multiple pages) which summarizes
the entire iterative process. Following that, we should include the final solution in full,
not summarized, but reformatted for appropriate LaTeX. And then, finally (and this will be
quite long), we must take all the steps - solutions, critiques, and competitor perspectives
and *NOT SUMMARIZE THEM* but merely reformat them for the reader. This will be in an Appendix
of the full content of the steps. Finally, include a listing of all of the websites we
used in our research.

You must ONLY RETURN LaTeX, nothing else. It must be valid LaTeX syntax!

Your output should start with:
\\documentclass{{article}}
\\usepackage[margin=1in]{{geometry}}
etc.

It must compile without errors under pdflatex.
"""

        all_visited_sites = state.get("visited_sites", [])
        # (Optional) remove duplicates by converting to a set, then back to a list
        visited_sites_unique = list(set(all_visited_sites))
        if visited_sites_unique:
            websites_latex = "\\section*{Websites Visited}\\begin{itemize}\n"
            for url in visited_sites_unique:
                print(f"We visited: {url}")
                # Use \url{} to handle special characters in URLs
                websites_latex += f"\\item \\url{{{url}}}\n"
            websites_latex += "\\end{itemize}\n\n"
        else:
            # If no sites visited, or the list is empty
            websites_latex = (
                "\\section*{Websites Visited}\nNo sites were visited.\n\n"
            )
        print(websites_latex)

        # Ask the LLM to produce *only* LaTeX content
        latex_response = self.llm.invoke(prompt)

        latex_doc = latex_response.content

        def inject_into_latex(original_tex: str, injection: str) -> str:
            """
            Find the last occurrence of '\\end{document}' in 'original_tex'
            and insert 'injection' right before it.
            If '\\end{document}' is not found, just append the injection at the end.
            """
            injection_index = original_tex.rfind(r"\end{document}")
            if injection_index == -1:
                # If the LLM didn't include \end{document}, just append
                return original_tex + "\n" + injection
            else:
                # Insert right before \end{document}
                return (
                    original_tex[:injection_index]
                    + "\n"
                    + injection
                    + "\n"
                    + original_tex[injection_index:]
                )

        final_latex = inject_into_latex(latex_doc, websites_latex)

        new_state = state.copy()
        new_state["summary_report"] = final_latex

        print(
            f"[iteration {state['current_iteration']} - DEBUG] Received LaTeX from LLM. Preview:"
        )
        print(latex_response.content[:300], "...")
        print(
            f"[iteration {state['current_iteration']} - DEBUG] Exiting summarize_process_as_latex."
        )
        return new_state

    def _initialize_agent(self):
        """Wire up the LangGraph nodes/edges and compile the runnable graph."""
        # Initialize the graph
        self.graph = StateGraph(HypothesizerState)

        # Add nodes
        self.graph.add_node("agent1", self.agent1_generate_solution)
        self.graph.add_node("agent2", self.agent2_critique)
        self.graph.add_node("agent3", self.agent3_competitor_perspective)
        self.graph.add_node("increment_iteration", self.increment_iteration)
        self.graph.add_node("finalize", self.generate_final_solution)
        self.graph.add_node("print_sites", self.print_visited_sites)
        self.graph.add_node(
            "summarize_as_latex", self.summarize_process_as_latex
        )
        # self.graph.add_node("compile_pdf", compile_summary_to_pdf)

        # Add simple edges for the known flow
        self.graph.add_edge("agent1", "agent2")
        self.graph.add_edge("agent2", "agent3")
        self.graph.add_edge("agent3", "increment_iteration")

        # Then from increment_iteration, we have a conditional:
        # If we 'continue', we go back to agent1
        # If we 'finish', we jump to the finalize node
        self.graph.add_conditional_edges(
            "increment_iteration",
            should_continue,
            {"continue": "agent1", "finish": "finalize"},
        )

        self.graph.add_edge("finalize", "summarize_as_latex")
        self.graph.add_edge("summarize_as_latex", "print_sites")
        self.graph.add_edge("print_sites", END)
        # self.graph.add_edge("summarize_as_latex", "compile_pdf")
        # self.graph.add_edge("compile_pdf", "print_sites")

        # Set the entry point
        self.graph.set_entry_point("agent1")

        self.action = self.graph.compile(checkpointer=self.checkpointer)
        # self.action.get_graph().draw_mermaid_png(output_file_path="hypothesizer_agent_graph.png", draw_method=MermaidDrawMethod.PYPPETEER)

    def run(self, prompt, max_iter=3, recursion_limit=99999):
        """Run the full graph on ``prompt`` and return the final solution text.

        Args:
            prompt: the question/problem statement to work on.
            max_iter: number of hypothesize/critique/compete rounds.
            recursion_limit: LangGraph recursion ceiling for the invocation.
        """
        # Initialize the state
        initial_state = HypothesizerState(
            question=prompt,
            current_iteration=0,
            max_iterations=max_iter,
            agent1_solution=[],
            agent2_critiques=[],
            agent3_perspectives=[],
            final_solution="",
            visited_sites=[],
        )
        # BUG FIX: this previously invoked the module-level demo variable
        # `hypothesizer_agent` instead of `self`, raising NameError whenever
        # run() was called outside the __main__ script.
        result = self.action.invoke(
            initial_state,
            {
                "recursion_limit": recursion_limit,
                "configurable": {"thread_id": self.thread_id},
            },
        )
        return result["final_solution"]
507
+
508
+
509
+
510
def should_continue(state: HypothesizerState) -> Literal["continue", "finish"]:
    """Conditional-edge router: loop back to agent1 until max_iterations is hit."""
    if state["current_iteration"] >= state["max_iterations"]:
        print(
            f"[iteration {state['current_iteration']} - DEBUG] Reached max_iterations; finishing."
        )
        return "finish"
    print(
        f"[iteration {state['current_iteration']} - DEBUG] Still under max_iterations; continuing."
    )
    return "continue"
521
+
522
+
523
+ # def compile_summary_to_pdf(state: AgentState) -> AgentState:
524
+ # """
525
+ # Takes the LaTeX in state["summary_report"] and tries to compile it to a PDF
526
+ # named with the model and timestamp, e.g.:
527
+ # summary_report_gpt-4o-mini_Mar_15_2025_8:59am.pdf
528
+ # """
529
+ # print(f"[DEBUG] Entering compile_summary_to_pdf.")
530
+
531
+ # llm_model = state["llm_model"]
532
+
533
+
534
+ # latex_code = state.get("summary_report", "")
535
+ # if not latex_code:
536
+ # print("[DEBUG] No LaTeX code found in summary_report.")
537
+ # return state
538
+
539
+ # # Create a dynamic filename using the LLM model name & a timestamp
540
+ # # e.g. "summary_report_gpt-4o-mini_Mar_15_2025_08:59AM.pdf"
541
+ # # timestamp_str = datetime.now().strftime("%b_%d_%Y_%I:%M%p")
542
+ # timestamp_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
543
+
544
+ # pdf_filename = f"summary_report_{llm_model}_{timestamp_str}.pdf"
545
+
546
+ # tex_filename = "summary_report.tex"
547
+ # with open(tex_filename, "w", encoding="utf-8") as f:
548
+ # f.write(latex_code)
549
+
550
+ # try:
551
+ # subprocess.run(["pdflatex", "-interaction=nonstopmode", tex_filename], check=True)
552
+ # subprocess.run(["pdflatex", "-interaction=nonstopmode", tex_filename], check=True)
553
+ # except subprocess.CalledProcessError as e:
554
+ # print("Error compiling LaTeX:", e)
555
+
556
+ # if os.path.exists("summary_report.pdf"):
557
+ # os.rename("summary_report.pdf", pdf_filename)
558
+ # print(f"[DEBUG] Successfully compiled PDF -> {pdf_filename}")
559
+ # else:
560
+ # print("[DEBUG] PDF compilation failed; no summary_report.pdf found.")
561
+
562
+ # print("[DEBUG] Exiting compile_summary_to_pdf.")
563
+ # return state
564
+
565
+
566
if __name__ == "__main__":
    # Demo driver: build the agent and run one full hypothesize/critique loop.
    hypothesizer_agent = HypothesizerAgent()

    question = "Find a city with at least 10 vowels in its name."

    # Initialize the state
    initial_state = HypothesizerState(
        question=question,
        current_iteration=0,
        max_iterations=3,
        agent1_solution=[],
        agent2_critiques=[],
        agent3_perspectives=[],
        final_solution="",
    )

    print("[DEBUG] Invoking the graph...")
    # BUG FIX: this module-level script previously passed `self.thread_id`,
    # which raised NameError (`self` does not exist at module scope); use the
    # agent instance's thread id instead.
    result = hypothesizer_agent.action.invoke(
        initial_state,
        {
            "recursion_limit": 999999,
            "configurable": {"thread_id": hypothesizer_agent.thread_id},
        },
    )
    summary_text = result["summary_report"]

    print("[DEBUG] Graph invocation complete.")

    # Print the final solution
    print("Final Solution:")
    print(result["final_solution"])

    # print("Summarized Report:")
    # print(summary_text)