ursa-ai 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ursa-ai might be problematic.

@@ -6,9 +6,16 @@ from pathlib import Path
 from typing import Annotated, Any, Literal, Optional
 
 import coolname
-from langchain_community.tools import DuckDuckGoSearchResults # TavilySearchResults,
+from langchain_community.tools import (
+    DuckDuckGoSearchResults,
+) # TavilySearchResults,
 from langchain_core.language_models import BaseChatModel
-from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
+from langchain_core.messages import (
+    AIMessage,
+    HumanMessage,
+    SystemMessage,
+    ToolMessage,
+)
 from langchain_core.tools import InjectedToolCallId, tool
 from langgraph.graph import END, START, StateGraph
 from langgraph.graph.message import add_messages
@@ -49,7 +56,7 @@ class ExecutionAgent(BaseAgent)
     def __init__(
         self,
         llm: str | BaseChatModel = "openai/gpt-4o-mini",
-        agent_memory: Optional [Any | AgentMemory] = None,
+        agent_memory: Optional[Any | AgentMemory] = None,
         log_state: bool = False,
         **kwargs,
     ):
@@ -101,14 +108,17 @@ class ExecutionAgent(BaseAgent):
             new_state["symlinkdir"]["is_linked"] = True
 
         if type(new_state["messages"][0]) == SystemMessage:
-            new_state["messages"][0] = SystemMessage(content=self.executor_prompt)
+            new_state["messages"][0] = SystemMessage(
+                content=self.executor_prompt
+            )
         else:
             new_state["messages"] = [
                 SystemMessage(content=self.executor_prompt)
             ] + state["messages"]
         try:
             response = self.llm.invoke(
-                new_state["messages"], {"configurable": {"thread_id": self.thread_id}}
+                new_state["messages"],
+                {"configurable": {"thread_id": self.thread_id}},
             )
         except ContentPolicyViolationError as e:
             print("Error: ", e, " ", new_state["messages"][-1].content)
@@ -140,7 +150,7 @@ class ExecutionAgent(BaseAgent):
                 tool_strings.append(tool_name)
                 for y in tool["args"]:
                     tool_strings.append(
-                        f'Arg: {str(y)}\nValue: {str(tool["args"][y])}'
+                        f"Arg: {str(y)}\nValue: {str(tool['args'][y])}"
                     )
             memories.append("\n".join(tool_strings))
             memories.append(response.content)
@@ -188,7 +198,8 @@ class ExecutionAgent(BaseAgent):
             For reason: {safety_check.content}
             """
             console.print(
-                "[bold red][WARNING][/bold red] Command deemed unsafe:", query
+                "[bold red][WARNING][/bold red] Command deemed unsafe:",
+                query,
             )
             # and tell the user the reason
             console.print(
@@ -414,7 +425,9 @@ def edit_code(
     new_code_clean = _strip_fences(new_code)
 
     if old_code_clean not in content:
-        console.print(f"[yellow] ⚠️ 'old_code' not found in file'; no changes made.[/]")
+        console.print(
+            f"[yellow] ⚠️ 'old_code' not found in file'; no changes made.[/]"
+        )
         return f"No changes made to {filename}: 'old_code' not found in file."
 
     updated = content.replace(old_code_clean, new_code_clean, 1)
@@ -482,7 +495,9 @@ def command_safe(state: ExecutionState) -> Literal["safe", "unsafe"]:
 
 def main():
     execution_agent = ExecutionAgent()
-    problem_string = "Write and execute a python script to print the first 10 integers."
+    problem_string = (
+        "Write and execute a python script to print the first 10 integers."
+    )
     inputs = {
         "messages": [HumanMessage(content=problem_string)]
     } # , "workspace":"dummy_test"}
@@ -357,7 +357,7 @@ class HypothesizerAgent(BaseAgent):
         You are a system that produces a FULL LaTeX document.
         Here is information about a multi-iteration process:
 
-        Original question: {state['question']}
+        Original question: {state["question"]}
 
         Below are the solutions, critiques, and competitor perspectives from each iteration:
 
@@ -365,7 +365,7 @@ class HypothesizerAgent(BaseAgent):
 
         The final solution we arrived at was:
 
-        {state['final_solution']}
+        {state["final_solution"]}
 
         Now produce a valid LaTeX document. Be sure to use a table of contents.
         It must start with an Executive Summary (that may be multiple pages) which summarizes
@@ -487,7 +487,7 @@ class HypothesizerAgent(BaseAgent):
 
         self.action = self.graph.compile(checkpointer=self.checkpointer)
         # self.action.get_graph().draw_mermaid_png(output_file_path="hypothesizer_agent_graph.png", draw_method=MermaidDrawMethod.PYPPETEER)
-        
+
     def run(self, prompt, max_iter=3, recursion_limit=99999):
         # Initialize the state
         initial_state = HypothesizerState(
@@ -501,12 +501,15 @@ class HypothesizerAgent(BaseAgent):
         )
         # Run the graph
         result = hypothesizer_agent.action.invoke(
-            initial_state, {"recursion_limit": recursion_limit, "configurable": {"thread_id": self.thread_id}}
+            initial_state,
+            {
+                "recursion_limit": recursion_limit,
+                "configurable": {"thread_id": self.thread_id},
+            },
         )
         return result["final_solution"]
 
 
-
 def should_continue(state: HypothesizerState) -> Literal["continue", "finish"]:
     if state["current_iteration"] >= state["max_iterations"]:
         print(
@@ -583,7 +586,11 @@ if __name__ == "__main__":
     print("[DEBUG] Invoking the graph...")
     # Run the graph
     result = hypothesizer_agent.action.invoke(
-        initial_state, {"recursion_limit": 999999, "configurable": {"thread_id": self.thread_id}}
+        initial_state,
+        {
+            "recursion_limit": 999999,
+            "configurable": {"thread_id": self.thread_id},
+        },
     )
     summary_text = result["summary_report"]
 
ursa/agents/mp_agent.py CHANGED
@@ -8,7 +8,7 @@ from mp_api.client import MPRester
 from langchain.schema import Document
 
 import os
-import pymupdf 
+import pymupdf
 import requests
 import feedparser
 from PIL import Image
@@ -33,16 +33,16 @@ from openai import OpenAI
 from .base import BaseAgent
 
 
-
-
 client = OpenAI()
 
 embeddings = OpenAIEmbeddings()
 
+
 class PaperMetadata(TypedDict):
     arxiv_id: str
     full_text: str
 
+
 class PaperState(TypedDict, total=False):
     query: str
     context: str
@@ -59,12 +59,23 @@ def describe_image(image: Image.Image) -> str:
     response = client.chat.completions.create(
         model="gpt-4-vision-preview",
         messages=[
-            {"role": "system", "content": "You are a scientific assistant who explains plots and scientific diagrams."},
+            {
+                "role": "system",
+                "content": "You are a scientific assistant who explains plots and scientific diagrams.",
+            },
             {
                 "role": "user",
                 "content": [
-                    {"type": "text", "text": "Describe this scientific image or plot in detail."},
-                    {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_base64}"}}
+                    {
+                        "type": "text",
+                        "text": "Describe this scientific image or plot in detail.",
+                    },
+                    {
+                        "type": "image_url",
+                        "image_url": {
+                            "url": f"data:image/png;base64,{img_base64}"
+                        },
+                    },
                 ],
             },
         ],
@@ -73,7 +84,9 @@ def describe_image(image: Image.Image) -> str:
     return response.choices[0].message.content.strip()
 
 
-def extract_and_describe_images(pdf_path: str, max_images: int = 5) -> List[str]:
+def extract_and_describe_images(
+    pdf_path: str, max_images: int = 5
+) -> List[str]:
     doc = pymupdf.open(pdf_path)
     descriptions = []
     image_count = 0
@@ -94,16 +107,21 @@ def extract_and_describe_images(pdf_path: str, max_images: int = 5) -> List[str]
 
             try:
                 desc = describe_image(image)
-                descriptions.append(f"Page {page_index + 1}, Image {img_index + 1}: {desc}")
+                descriptions.append(
+                    f"Page {page_index + 1}, Image {img_index + 1}: {desc}"
+                )
             except Exception as e:
-                descriptions.append(f"Page {page_index + 1}, Image {img_index + 1}: [Error: {e}]")
+                descriptions.append(
+                    f"Page {page_index + 1}, Image {img_index + 1}: [Error: {e}]"
+                )
             image_count += 1
 
     return descriptions
 
 
 def remove_surrogates(text: str) -> str:
-    return re.sub(r'[\ud800-\udfff]', '', text)
+    return re.sub(r"[\ud800-\udfff]", "", text)
+
 
 
 class MaterialsProjectAgent(BaseAgent):
@@ -111,30 +129,30 @@ class MaterialsProjectAgent(BaseAgent):
         llm="openai/o3-mini",
         summarize: bool = True,
         max_results: int = 3,
-        database_path: str = 'mp_database',
-        summaries_path: str = 'mp_summaries',
-        vectorstore_path: str = 'mp_vectorstores',
-        **kwargs
+        database_path: str = "mp_database",
+        summaries_path: str = "mp_summaries",
+        vectorstore_path: str = "mp_vectorstores",
+        **kwargs,
     ):
         super().__init__(llm, **kwargs)
-        self.summarize        = summarize
-        self.max_results      = max_results
-        self.database_path    = database_path
-        self.summaries_path   = summaries_path
+        self.summarize = summarize
+        self.max_results = max_results
+        self.database_path = database_path
+        self.summaries_path = summaries_path
         self.vectorstore_path = vectorstore_path
-        
-        os.makedirs(self.database_path,  exist_ok=True)
-        os.makedirs(self.summaries_path, exist_ok=True)
+
+        os.makedirs(self.database_path, exist_ok=True)
+        os.makedirs(self.summaries_path, exist_ok=True)
         os.makedirs(self.vectorstore_path, exist_ok=True)
 
         self.embeddings = OpenAIEmbeddings() # or your preferred embedding
         self.graph = self._build_graph()
 
     def _fetch_node(self, state: Dict) -> Dict:
-        f   = state["query"]
-        els = f["elements"]          # e.g. ["Ga","In"]
-        bg  = (f["band_gap_min"], f["band_gap_max"])
-        e_above_hull = (0, 0)        # only on-hull (stable)
+        f = state["query"]
+        els = f["elements"]  # e.g. ["Ga","In"]
+        bg = (f["band_gap_min"], f["band_gap_max"])
+        e_above_hull = (0, 0)  # only on-hull (stable)
         mats = []
         with MPRester() as mpr:
             # get ALL matching materials…
@@ -142,7 +160,7 @@ class MaterialsProjectAgent(BaseAgent):
                 elements=els,
                 band_gap=bg,
                 energy_above_hull=e_above_hull,
-                is_stable=True # equivalent filter
+                is_stable=True, # equivalent filter
             )
             # …then take only the first `max_results`
             for doc in all_results[: self.max_results]:
@@ -161,11 +179,18 @@ class MaterialsProjectAgent(BaseAgent):
         """Build or load a Chroma vectorstore for a single material's description."""
         persist_dir = os.path.join(self.vectorstore_path, mid)
         if os.path.exists(persist_dir):
-            store = Chroma(persist_directory=persist_dir, embedding_function=self.embeddings)
+            store = Chroma(
+                persist_directory=persist_dir,
+                embedding_function=self.embeddings,
+            )
         else:
-            splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
+            splitter = RecursiveCharacterTextSplitter(
+                chunk_size=500, chunk_overlap=100
+            )
             docs = splitter.create_documents([text])
-            store = Chroma.from_documents(docs, self.embeddings, persist_directory=persist_dir)
+            store = Chroma.from_documents(
+                docs, self.embeddings, persist_directory=persist_dir
+            )
         return store.as_retriever(search_kwargs={"k": 5})
 
     def _summarize_node(self, state: Dict) -> Dict:
@@ -187,18 +212,25 @@ You are a materials-science assistant. Given the following metadata about a mate
             # flatten metadata to text
             text = "\n".join(f"{k}: {v}" for k, v in meta.items())
             # build or load summary
-            summary_file = os.path.join(self.summaries_path, f"{mid}_summary.txt")
+            summary_file = os.path.join(
+                self.summaries_path, f"{mid}_summary.txt"
+            )
             if os.path.exists(summary_file):
                 with open(summary_file) as f:
                     return i, f.read()
             # optional: vectorize & retrieve, but here we just summarize full text
             result = chain.invoke({"metadata": text})
-            with open(summary_file, 'w') as f:
+            with open(summary_file, "w") as f:
                 f.write(result)
             return i, result
 
-        with ThreadPoolExecutor(max_workers=min(8, len(state["materials"]))) as exe:
-            futures = [exe.submit(process, i, m) for i, m in enumerate(state["materials"])]
+        with ThreadPoolExecutor(
+            max_workers=min(8, len(state["materials"]))
+        ) as exe:
+            futures = [
+                exe.submit(process, i, m)
+                for i, m in enumerate(state["materials"])
+            ]
             for future in tqdm(futures, desc="Summarizing materials"):
                 i, summ = future.result()
                 summaries[i] = summ
@@ -208,8 +240,10 @@ You are a materials-science assistant. Given the following metadata about a mate
     def _aggregate_node(self, state: Dict) -> Dict:
         """Combine all summaries into a single, coherent answer."""
         combined = "\n\n----\n\n".join(
-            f"[{i+1}] {m['material_id']}\n\n{summary}"
-            for i, (m, summary) in enumerate(zip(state["materials"], state["summaries"]))
+            f"[{i + 1}] {m['material_id']}\n\n{summary}"
+            for i, (m, summary) in enumerate(
+                zip(state["materials"], state["summaries"])
+            )
         )
 
         prompt = ChatPromptTemplate.from_template("""
@@ -222,7 +256,9 @@ You are a materials-science assistant. Given the following metadata about a mate
 {context}
 """)
         chain = prompt | self.llm | StrOutputParser()
-        final = chain.invoke({"summaries": combined, "context": state["context"]})
+        final = chain.invoke(
+            {"summaries": combined, "context": state["context"]}
+        )
         return {**state, "final_summary": final}
 
     def _build_graph(self):
@@ -252,6 +288,6 @@ if __name__ == "__main__":
     agent = MaterialsProjectAgent()
     resp = agent.run(
         mp_query="LiFePO4",
-        context="What is its band gap and stability, and any synthesis challenges?"
+        context="What is its band gap and stability, and any synthesis challenges?",
     )
     print(resp)
@@ -44,7 +44,13 @@ class PlanningAgent(BaseAgent):
             messages[0] = SystemMessage(content=self.planner_prompt)
         else:
             messages = [SystemMessage(content=self.planner_prompt)] + messages
-        return {"messages": [self.llm.invoke(messages, {"configurable": {"thread_id": self.thread_id}})]}
+        return {
+            "messages": [
+                self.llm.invoke(
+                    messages, {"configurable": {"thread_id": self.thread_id}}
+                )
+            ]
+        }
 
     def formalize_node(self, state: PlanningState) -> PlanningState:
         cls_map = {"ai": HumanMessage, "human": AIMessage}
@@ -55,7 +61,9 @@ class PlanningAgent(BaseAgent):
         translated = [SystemMessage(content=self.formalize_prompt)] + translated
         for _ in range(10):
             try:
-                res = self.llm.invoke(translated, {"configurable": {"thread_id": self.thread_id}})
+                res = self.llm.invoke(
+                    translated, {"configurable": {"thread_id": self.thread_id}}
+                )
                 json_out = extract_json(res.content)
                 break
             except ValueError:
@@ -76,7 +84,9 @@ class PlanningAgent(BaseAgent):
             for msg in state["messages"][1:]
         ]
         translated = [SystemMessage(content=reflection_prompt)] + translated
-        res = self.llm.invoke(translated, {"configurable": {"thread_id": self.thread_id}})
+        res = self.llm.invoke(
+            translated, {"configurable": {"thread_id": self.thread_id}}
+        )
         return {"messages": [HumanMessage(content=res.content)]}
 
     def _initialize_agent(self):
@@ -99,10 +109,16 @@ class PlanningAgent(BaseAgent):
         # self.action = self.graph.compile(checkpointer=memory)
         self.action = self.graph.compile(checkpointer=self.checkpointer)
         # self.action.get_graph().draw_mermaid_png(output_file_path="planning_agent_graph.png", draw_method=MermaidDrawMethod.PYPPETEER)
-        
-    def run(self, prompt,recursion_limit=100):
+
+    def run(self, prompt, recursion_limit=100):
         initial_state = {"messages": [HumanMessage(content=prompt)]}
-        return self.action.invoke(initial_state, {"recursion_limit":recursion_limit, "configurable": {"thread_id": self.thread_id}})
+        return self.action.invoke(
+            initial_state,
+            {
+                "recursion_limit": recursion_limit,
+                "configurable": {"thread_id": self.thread_id},
+            },
+        )
 
 
 config = {"configurable": {"thread_id": "1"}}
@@ -4,10 +4,9 @@ from .base import BaseAgent
 
 class RecallAgent(BaseAgent):
     def __init__(self, llm, memory, **kwargs):
-
         super().__init__(llm, **kwargs)
         self.memorydb = memory
-        
+
     def remember(self, query):
         memories = self.memorydb.retrieve(query)
         summarize_query = f"""
@@ -56,25 +56,45 @@ class WebSearchAgent(BaseAgent):
         self.websearch_prompt = websearch_prompt
         self.reflection_prompt = reflection_prompt
         self.tools = [search_tool, process_content] # + cb_tools
-        self.has_internet = self._check_for_internet(kwargs.get("url","http://www.lanl.gov"))
+        self.has_internet = self._check_for_internet(
+            kwargs.get("url", "http://www.lanl.gov")
+        )
         self._initialize_agent()
 
     def review_node(self, state: WebSearchState) -> WebSearchState:
         if not self.has_internet:
-            return {"messages":[HumanMessage(content="No internet for WebSearch Agent so no research to review.")], "urls_visited": []}
-        
+            return {
+                "messages": [
+                    HumanMessage(
+                        content="No internet for WebSearch Agent so no research to review."
+                    )
+                ],
+                "urls_visited": [],
+            }
+
         translated = [SystemMessage(content=reflection_prompt)] + state[
             "messages"
         ]
-        res = self.llm.invoke(translated, {"configurable": {"thread_id": self.thread_id}})
+        res = self.llm.invoke(
+            translated, {"configurable": {"thread_id": self.thread_id}}
+        )
         return {"messages": [HumanMessage(content=res.content)]}
 
     def response_node(self, state: WebSearchState) -> WebSearchState:
         if not self.has_internet:
-            return {"messages":[HumanMessage(content="No internet for WebSearch Agent. No research carried out.")], "urls_visited": []}
+            return {
+                "messages": [
+                    HumanMessage(
+                        content="No internet for WebSearch Agent. No research carried out."
+                    )
+                ],
+                "urls_visited": [],
+            }
 
         messages = state["messages"] + [SystemMessage(content=summarize_prompt)]
-        response = self.llm.invoke(messages, {"configurable": {"thread_id": self.thread_id}})
+        response = self.llm.invoke(
+            messages, {"configurable": {"thread_id": self.thread_id}}
+        )
 
         urls_visited = []
         for message in messages:
@@ -82,7 +102,7 @@ class WebSearchAgent(BaseAgent):
             if "url" in message.tool_calls[0]["args"]:
                 urls_visited.append(message.tool_calls[0]["args"]["url"])
         return {"messages": [response.content], "urls_visited": urls_visited}
-    
+
     def _check_for_internet(self, url, timeout=2):
         """
         Checks for internet connectivity by attempting an HTTP GET request.
@@ -119,15 +139,28 @@ class WebSearchAgent(BaseAgent):
         )
         self.action = self.graph.compile(checkpointer=self.checkpointer)
         # self.action.get_graph().draw_mermaid_png(output_file_path="./websearch_agent_graph.png", draw_method=MermaidDrawMethod.PYPPETEER)
-        
+
     def run(self, prompt, recursion_limit=100):
         if not self.has_internet:
-            return {"messages":[HumanMessage(content="No internet for WebSearch Agent. No research carried out.")]}
+            return {
+                "messages": [
+                    HumanMessage(
+                        content="No internet for WebSearch Agent. No research carried out."
+                    )
+                ]
+            }
         inputs = {
             "messages": [HumanMessage(content=prompt)],
             "model": self.llm,
         }
-        return self.action.invoke(inputs, {"recursion_limit":recursion_limit, "configurable": {"thread_id": self.thread_id}})
+        return self.action.invoke(
+            inputs,
+            {
+                "recursion_limit": recursion_limit,
+                "configurable": {"thread_id": self.thread_id},
+            },
+        )
+
 
 
 def process_content(
@@ -150,7 +183,11 @@ def process_content(
     Carefully summarize the content in full detail, given the following context:
     {context}
     """
-    summarized_information = state["model"].invoke(content_prompt, {"configurable": {"thread_id": self.thread_id}}).content
+    summarized_information = (
+        state["model"]
+        .invoke(content_prompt, {"configurable": {"thread_id": self.thread_id}})
+        .content
+    )
     return summarized_information
 
 
@@ -176,7 +213,13 @@ def main():
         "messages": [HumanMessage(content=problem_string)],
         "model": model,
     }
-    result = websearcher.action.invoke(inputs, {"recursion_limit": 10000, "configurable": {"thread_id": self.thread_id}})
+    result = websearcher.action.invoke(
+        inputs,
+        {
+            "recursion_limit": 10000,
+            "configurable": {"thread_id": self.thread_id},
+        },
+    )
 
     colors = [BLUE, RED]
     for ii, x in enumerate(result["messages"][:-1]):
@@ -1,6 +1,5 @@
-
 def get_code_review_prompt(project_prompt, file_list):
-    return f'''
+    return f"""
     You are a responsible and efficient code review agent tasked with assessing if given files meet the goals of a project description.
 
     The project goals are:
@@ -26,10 +25,11 @@ def get_code_review_prompt(project_prompt, file_list):
     4. Immediately highlight and clearly communicate any steps that appear unclear, unsafe, or impractical before proceeding.
 
     Your goal is to ensure the implemented code addresses the plan accurately, safely, and transparently, maintaining accountability at each step.
-    '''
+    """
+
 
 def get_plan_review_prompt(project_prompt, file_list):
-    return f'''
+    return f"""
     You are a responsible and efficient code review agent tasked with assessing if given files meet the goals of a project description.
 
     The project goals are:
@@ -48,4 +48,4 @@ def get_plan_review_prompt(project_prompt, file_list):
     - Do any files appear dangerous, adversarial, or performing actions detrimental to the plan.
 
     Your goal is to provide that information in a clear, concise way for use by a code reviewer who will look over files in detail.
-    '''
+    """
@@ -6,7 +6,7 @@
 # You may execute system commands to carry out this plan, as long as they are safe commands.
 # '''
 
-executor_prompt = '''
+executor_prompt = """
 You are a responsible and efficient execution agent tasked with carrying out a provided plan designed to solve a specific problem.
 
 Your responsibilities are as follows:
@@ -23,9 +23,9 @@ Your responsibilities are as follows:
 4. Immediately highlight and clearly communicate any steps that appear unclear, unsafe, or impractical before proceeding.
 
 Your goal is to carry out the provided plan accurately, safely, and transparently, maintaining accountability at each step.
-'''
+"""
 
-summarize_prompt = '''
+summarize_prompt = """
 You are a summarizing agent. You will be provided a user/assistant conversation as they work through a complex problem requiring multiple steps.
 
 Your responsibilities is to write a condensed summary of the conversation.
@@ -33,4 +33,4 @@ Your responsibilities is to write a condensed summary of the conversation.
 - Ensure the summary responds to the goals of the original query.
 - Summarize all the work that was carried out to meet those goals
 - Highlight any places where those goals were not achieved and why.
-'''
+"""
@@ -1,11 +1,11 @@
-search_prompt = '''
+search_prompt = """
 You are an agent that is responsible for reviewing the literature to answer a specific question.
 Use the arxiv tool available to carry out this plan.
 You should perform a search through the arxiv database.
-'''
+"""
 
-summarize_prompt = '''
+summarize_prompt = """
 You are a summarizing agent.
 You should cite all the papers that were used for the arxiv review.
 You should give me the final summary from the literature review.
-'''
+"""
@@ -37,14 +37,14 @@ Please keep your plan concise yet sufficiently detailed so that it can be execut
 # reflection_prompt = '''
 # You are a critical reviewer being given a series of steps to solve a problem.
 
-# Provide detailed recommendations, including adding missing steps or removing 
+# Provide detailed recommendations, including adding missing steps or removing
 # superfluous steps. Ensure the proposed effort is appropriate for the problem.
 
-# In the end, decide if the current proposal should be approved or revised. 
+# In the end, decide if the current proposal should be approved or revised.
 # Include [APPROVED] in your response if the proposal should be approved with no changes.
 # '''
 
-reflection_prompt = '''
+reflection_prompt = """
 You are acting as a critical reviewer evaluating a series of steps proposed to solve a specific problem.
 
 Carefully review the proposed steps and provide detailed feedback based on the following criteria:
@@ -61,7 +61,7 @@ At the end of your feedback, clearly state your decision:
 
 - If the current proposal requires no changes, include "[APPROVED]" at the end of your response.
 - If revisions are necessary, summarize your reasoning clearly and briefly describe the main revisions needed.
-'''
+"""
 
 formalize_prompt = """
 Now that the step-by-step plan is finalized, format it into a series of steps in the form of a JSON array with objects having the following structure: