ursa-ai 0.2.7__py3-none-any.whl → 0.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ursa-ai might be problematic.

ursa/agents/__init__.py CHANGED
@@ -1,9 +1,9 @@
- from .planning_agent import PlanningAgent, PlanningState
- from .websearch_agent import WebSearchAgent, WebSearchState
- from .execution_agent import ExecutionAgent, ExecutionState
+ from .arxiv_agent import ArxivAgent, PaperMetadata, PaperState
+ from .base import BaseAgent, BaseChatModel
  from .code_review_agent import CodeReviewAgent, CodeReviewState
+ from .execution_agent import ExecutionAgent, ExecutionState
  from .hypothesizer_agent import HypothesizerAgent, HypothesizerState
- from .arxiv_agent import ArxivAgent, PaperState, PaperMetadata
- from .recall_agent import RecallAgent
- from .base import BaseAgent, BaseChatModel
  from .mp_agent import MaterialsProjectAgent
+ from .planning_agent import PlanningAgent, PlanningState
+ from .recall_agent import RecallAgent
+ from .websearch_agent import WebSearchAgent, WebSearchState
ursa/agents/arxiv_agent.py CHANGED
@@ -1,29 +1,29 @@
+ import base64
  import os
- import pymupdf
- import requests
- import feedparser
- from PIL import Image
+ import re
+ import statistics
+ from concurrent.futures import ThreadPoolExecutor, as_completed
  from io import BytesIO
- import base64
  from urllib.parse import quote
- from typing_extensions import TypedDict, List
- from concurrent.futures import ThreadPoolExecutor, as_completed
- from tqdm import tqdm
- import statistics
- import re
 
+ import feedparser
+ import pymupdf
+ import requests
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain_chroma import Chroma
  from langchain_community.document_loaders import PyPDFLoader
  from langchain_core.output_parsers import StrOutputParser
  from langchain_core.prompts import ChatPromptTemplate
- from langgraph.graph import StateGraph, END, START
- from langchain.text_splitter import RecursiveCharacterTextSplitter
- from langchain_chroma import Chroma
+ from langgraph.graph import StateGraph
+ from PIL import Image
+ from tqdm import tqdm
+ from typing_extensions import List, TypedDict
 
  from .base import BaseAgent
 
  try:
      from openai import OpenAI
- except:
+ except Exception:
      pass
 
  # embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
@@ -162,7 +162,7 @@ class ArxivAgent(BaseAgent):
              full_id = entry.id.split("/abs/")[-1]
              arxiv_id = full_id.split("/")[-1]
              title = entry.title.strip()
-             authors = ", ".join(author.name for author in entry.authors)
+             # authors = ", ".join(author.name for author in entry.authors)
              pdf_url = f"https://arxiv.org/pdf/{full_id}.pdf"
              pdf_filename = os.path.join(
                  self.database_path, f"{arxiv_id}.pdf"
@@ -313,7 +313,7 @@ class ArxivAgent(BaseAgent):
 
          if "papers" not in state or len(state["papers"]) == 0:
              print(
-                 f"No papers retrieved - bad query or network connection to ArXiv?"
+                 "No papers retrieved - bad query or network connection to ArXiv?"
              )
              return {**state, "summaries": None}
 
ursa/agents/base.py CHANGED
@@ -1,9 +1,7 @@
  from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_core.load import dumps
  from langchain_litellm import ChatLiteLLM
  from langgraph.checkpoint.base import BaseCheckpointSaver
- from langchain_core.load import dumps
-
- import json
 
 
  class BaseAgent:
ursa/agents/code_review_agent.py CHANGED
@@ -7,7 +7,7 @@ from langchain_core.messages import HumanMessage, SystemMessage
  from langchain_core.tools import tool
  from langgraph.graph import END, START, StateGraph
  from langgraph.graph.message import add_messages
- from langgraph.prebuilt import ToolNode, InjectedState
+ from langgraph.prebuilt import InjectedState, ToolNode
  from typing_extensions import TypedDict
 
  from ..prompt_library.code_review_prompts import (
@@ -349,7 +349,7 @@ def main():
      }
      result = (
          code_review_agent.action.invoke(initial_state),
-         {"configurable": {"thread_id": self.thread_id}},
+         {"configurable": {"thread_id": 42}},
      )
      for x in result["messages"]:
          print(x.content)
ursa/agents/execution_agent.py CHANGED
@@ -107,7 +107,7 @@ class ExecutionAgent(BaseAgent):
              # note that we've done the symlink now, so don't need to do it later
              new_state["symlinkdir"]["is_linked"] = True
 
-         if type(new_state["messages"][0]) == SystemMessage:
+         if isinstance(new_state["messages"][0], SystemMessage):
              new_state["messages"][0] = SystemMessage(
                  content=self.executor_prompt
              )
@@ -139,7 +139,7 @@ class ExecutionAgent(BaseAgent):
          memories = []
          # Handle looping through the messages
          for x in state["messages"]:
-             if not type(x) == AIMessage:
+             if not isinstance(x, AIMessage):
                  memories.append(x.content)
              elif not x.tool_calls:
                  memories.append(x.content)
@@ -426,7 +426,7 @@ def edit_code(
 
      if old_code_clean not in content:
          console.print(
-             f"[yellow] ⚠️ 'old_code' not found in file'; no changes made.[/]"
+             "[yellow] ⚠️ 'old_code' not found in file'; no changes made.[/]"
          )
          return f"No changes made to {filename}: 'old_code' not found in file."
 
@@ -483,7 +483,7 @@ def command_safe(state: ExecutionState) -> Literal["safe", "unsafe"]:
      index = -1
      message = state["messages"][index]
      # Loop through all the consecutive tool messages in reverse order
-     while type(message) == ToolMessage:
+     while isinstance(message, ToolMessage):
          if "[UNSAFE]" in message.content:
              return "unsafe"
 
ursa/agents/hypothesizer_agent.py CHANGED
@@ -96,7 +96,7 @@ class HypothesizerAgent(BaseAgent):
          new_state["visited_sites"] = []
 
          try:
-             if type(raw_search_results) == str:
+             if isinstance(raw_search_results, str):
                  results_list = ast.literal_eval(raw_search_results)
              else:
                  results_list = raw_search_results
@@ -154,7 +154,7 @@ class HypothesizerAgent(BaseAgent):
          new_state["visited_sites"] = []
 
          try:
-             if type(raw_search_results) == str:
+             if isinstance(raw_search_results, str):
                  results_list = ast.literal_eval(raw_search_results)
              else:
                  results_list = raw_search_results
@@ -217,7 +217,7 @@ class HypothesizerAgent(BaseAgent):
          new_state["visited_sites"] = []
 
          try:
-             if type(raw_search_results) == str:
+             if isinstance(raw_search_results, str):
                  results_list = ast.literal_eval(raw_search_results)
              else:
                  results_list = raw_search_results
@@ -589,7 +589,7 @@ if __name__ == "__main__":
          initial_state,
          {
              "recursion_limit": 999999,
-             "configurable": {"thread_id": self.thread_id},
+             "configurable": {"thread_id": 42},
          },
      )
      summary_text = result["summary_report"]
ursa/agents/mp_agent.py CHANGED
@@ -1,43 +1,19 @@
- import os
  import json
- from typing import List, Dict
- from concurrent.futures import ThreadPoolExecutor
- from tqdm import tqdm
-
- from mp_api.client import MPRester
- from langchain.schema import Document
-
  import os
- import pymupdf
- import requests
- import feedparser
- from PIL import Image
- from io import BytesIO
- import base64
- from urllib.parse import quote
- from typing_extensions import TypedDict, List
- from concurrent.futures import ThreadPoolExecutor, as_completed
- from tqdm import tqdm
  import re
+ from concurrent.futures import ThreadPoolExecutor
+ from typing import Dict
 
- from langchain_community.document_loaders import PyPDFLoader
  from langchain_core.output_parsers import StrOutputParser
  from langchain_core.prompts import ChatPromptTemplate
- from langgraph.graph import StateGraph, END, START
- from langchain.text_splitter import RecursiveCharacterTextSplitter
- from langchain_chroma import Chroma
- from langchain_openai import OpenAIEmbeddings
-
- from openai import OpenAI
+ from langgraph.graph import StateGraph
+ from mp_api.client import MPRester
+ from tqdm import tqdm
+ from typing_extensions import List, TypedDict
 
  from .base import BaseAgent
 
 
- client = OpenAI()
-
- embeddings = OpenAIEmbeddings()
-
-
  class PaperMetadata(TypedDict):
      arxiv_id: str
      full_text: str
@@ -51,74 +27,6 @@ class PaperState(TypedDict, total=False):
      final_summary: str
 
 
- def describe_image(image: Image.Image) -> str:
-     buffered = BytesIO()
-     image.save(buffered, format="PNG")
-     img_base64 = base64.b64encode(buffered.getvalue()).decode()
-
-     response = client.chat.completions.create(
-         model="gpt-4-vision-preview",
-         messages=[
-             {
-                 "role": "system",
-                 "content": "You are a scientific assistant who explains plots and scientific diagrams.",
-             },
-             {
-                 "role": "user",
-                 "content": [
-                     {
-                         "type": "text",
-                         "text": "Describe this scientific image or plot in detail.",
-                     },
-                     {
-                         "type": "image_url",
-                         "image_url": {
-                             "url": f"data:image/png;base64,{img_base64}"
-                         },
-                     },
-                 ],
-             },
-         ],
-         max_tokens=500,
-     )
-     return response.choices[0].message.content.strip()
-
-
- def extract_and_describe_images(
-     pdf_path: str, max_images: int = 5
- ) -> List[str]:
-     doc = pymupdf.open(pdf_path)
-     descriptions = []
-     image_count = 0
-
-     for page_index in range(len(doc)):
-         if image_count >= max_images:
-             break
-         page = doc[page_index]
-         images = page.get_images(full=True)
-
-         for img_index, img in enumerate(images):
-             if image_count >= max_images:
-                 break
-             xref = img[0]
-             base_image = doc.extract_image(xref)
-             image_bytes = base_image["image"]
-             image = Image.open(BytesIO(image_bytes))
-
-             try:
-                 desc = describe_image(image)
-                 descriptions.append(
-                     f"Page {page_index + 1}, Image {img_index + 1}: {desc}"
-                 )
-             except Exception as e:
-                 descriptions.append(
-                     f"Page {page_index + 1}, Image {img_index + 1}: [Error: {e}]"
-                 )
-             image_count += 1
-
-     return descriptions
-
-
  def remove_surrogates(text: str) -> str:
      return re.sub(r"[\ud800-\udfff]", "", text)
 
@@ -131,7 +39,6 @@ class MaterialsProjectAgent(BaseAgent):
          max_results: int = 3,
          database_path: str = "mp_database",
          summaries_path: str = "mp_summaries",
-         vectorstore_path: str = "mp_vectorstores",
          **kwargs,
      ):
          super().__init__(llm, **kwargs)
@@ -139,13 +46,10 @@ class MaterialsProjectAgent(BaseAgent):
          self.max_results = max_results
          self.database_path = database_path
          self.summaries_path = summaries_path
-         self.vectorstore_path = vectorstore_path
 
          os.makedirs(self.database_path, exist_ok=True)
          os.makedirs(self.summaries_path, exist_ok=True)
-         os.makedirs(self.vectorstore_path, exist_ok=True)
 
-         self.embeddings = OpenAIEmbeddings()  # or your preferred embedding
          self.graph = self._build_graph()
 
      def _fetch_node(self, state: Dict) -> Dict:
@@ -175,24 +79,6 @@
 
          return {**state, "materials": mats}
 
-     def _get_or_build_vectorstore(self, text: str, mid: str):
-         """Build or load a Chroma vectorstore for a single material's description."""
-         persist_dir = os.path.join(self.vectorstore_path, mid)
-         if os.path.exists(persist_dir):
-             store = Chroma(
-                 persist_directory=persist_dir,
-                 embedding_function=self.embeddings,
-             )
-         else:
-             splitter = RecursiveCharacterTextSplitter(
-                 chunk_size=500, chunk_overlap=100
-             )
-             docs = splitter.create_documents([text])
-             store = Chroma.from_documents(
-                 docs, self.embeddings, persist_directory=persist_dir
-             )
-         return store.as_retriever(search_kwargs={"k": 5})
-
      def _summarize_node(self, state: Dict) -> Dict:
          """Summarize each material via LLM over its metadata."""
          # prompt template
@@ -204,7 +90,6 @@ You are a materials-science assistant. Given the following metadata about a mate
          chain = prompt | self.llm | StrOutputParser()
 
          summaries = [None] * len(state["materials"])
-         relevancy = [0.0] * len(state["materials"])
 
          def process(i, mat):
              mid = mat["material_id"]
ursa/agents/planning_agent.py CHANGED
@@ -40,7 +40,7 @@ class PlanningAgent(BaseAgent):
 
      def generation_node(self, state: PlanningState) -> PlanningState:
          messages = state["messages"]
-         if type(messages[0]) == SystemMessage:
+         if isinstance(messages[0], SystemMessage):
              messages[0] = SystemMessage(content=self.planner_prompt)
          else:
              messages = [SystemMessage(content=self.planner_prompt)] + messages
ursa/agents/recall_agent.py CHANGED
@@ -1,4 +1,3 @@
- import os
  from .base import BaseAgent
 
 
ursa/agents/websearch_agent.py CHANGED
@@ -1,5 +1,3 @@
- import inspect
-
  # from langchain_community.tools import TavilySearchResults
  # from langchain_core.runnables.graph import MermaidDrawMethod
  from typing import Annotated, Any, List, Optional
@@ -9,7 +7,6 @@ from bs4 import BeautifulSoup
  from langchain_community.tools import DuckDuckGoSearchResults
  from langchain_core.language_models import BaseChatModel
  from langchain_core.messages import HumanMessage, SystemMessage, ToolMessage
- from langchain_core.tools import tool
  from langchain_openai import ChatOpenAI
  from langgraph.graph import END, START, StateGraph
  from langgraph.graph.message import add_messages
@@ -19,8 +16,8 @@ from typing_extensions import TypedDict
 
  from ..prompt_library.websearch_prompts import (
      reflection_prompt,
-     websearch_prompt,
      summarize_prompt,
+     websearch_prompt,
  )
  from .base import BaseAgent
 
@@ -41,6 +38,7 @@ class WebSearchState(TypedDict):
      remaining_steps: int
      is_last_step: bool
      model: Any
+     thread_id: Any
 
 
  # Adding the model to the state clumsily so that all "read" sources arent in the
@@ -113,8 +111,14 @@ class WebSearchAgent(BaseAgent):
          except (requests.ConnectionError, requests.Timeout):
              return False
 
+     def state_store_node(self, state: WebSearchState) -> WebSearchState:
+         state["thread_id"] = self.thread_id
+         return state
+         # return dict(**state, thread_id=self.thread_id)
+
      def _initialize_agent(self):
          self.graph = StateGraph(WebSearchState)
+         self.graph.add_node("state_store", self.state_store_node)
          self.graph.add_node(
              "websearch",
              create_react_agent(
@@ -128,7 +132,8 @@ class WebSearchAgent(BaseAgent):
          self.graph.add_node("review", self.review_node)
          self.graph.add_node("response", self.response_node)
 
-         self.graph.add_edge(START, "websearch")
+         self.graph.add_edge(START, "state_store")
+         self.graph.add_edge("state_store", "websearch")
          self.graph.add_edge("websearch", "review")
          self.graph.add_edge("response", END)
 
@@ -185,7 +190,9 @@ def process_content(
      """
      summarized_information = (
          state["model"]
-         .invoke(content_prompt, {"configurable": {"thread_id": self.thread_id}})
+         .invoke(
+             content_prompt, {"configurable": {"thread_id": state["thread_id"]}}
+         )
          .content
      )
      return summarized_information
@@ -217,7 +224,7 @@ def main():
          inputs,
          {
              "recursion_limit": 10000,
-             "configurable": {"thread_id": self.thread_id},
+             "configurable": {"thread_id": 42},
          },
      )
 
ursa/prompt_library/hypothesizer_prompts.py CHANGED
@@ -10,7 +10,7 @@ critic_prompt = dedent("""\
  You are Agent 2, a rigorous Critic who identifies flaws and areas for improvement.
  """)
 
- competitor_prompt = dedent(f"""\
+ competitor_prompt = dedent("""\
  You are Agent 3, taking on the role of a direct competitor to Agent 1 in this hypothetical situation.
  Acting as that competitor, and taking into account potential critiques from the critic, provide an honest
  assessment how you might *REALLY* counter the approach of Agent 1.
ursa/util/diff_renderer.py CHANGED
@@ -1,5 +1,5 @@
- import re
  import difflib
+ import re
  from dataclasses import dataclass
 
  from rich.console import Console, ConsoleOptions, RenderResult
ursa/util/memory_logger.py CHANGED
@@ -1,7 +1,7 @@
  import os
  import shutil
  from pathlib import Path
- from typing import Any, Dict, List, Optional, Sequence
+ from typing import Any, Dict, Optional, Sequence
 
  from langchain_chroma import Chroma
  from langchain_core.documents import Document
ursa_ai-0.2.9.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ursa-ai
- Version: 0.2.7
+ Version: 0.2.9
  Summary: Agents for science at LANL
  Author-email: Mike Grosskopf <mikegros@lanl.gov>, Nathan Debardeleben <ndebard@lanl.gov>, Rahul Somasundaram <rsomasundaram@lanl.gov>, Isaac Michaud <imichaud@lanl.gov>, Avanish Mishra <avanish@lanl.gov>, Arthur Lui <alui@lanl.gov>, Russell Bent <rbent@lanl.gov>, Earl Lawrence <earl@lanl.gov>
  License-Expression: BSD-3-Clause
ursa_ai-0.2.9.dist-info/RECORD ADDED
@@ -0,0 +1,26 @@
+ ursa/agents/__init__.py,sha256=PAPw6gjnmdA74HpiiYwtO5c6hYLgG_Yu22eu_nuGYKw,498
+ ursa/agents/arxiv_agent.py,sha256=A_HVbmYOtQfalntOKSsUIkFu10oN9T6kBAvCuAz2evg,14273
+ ursa/agents/base.py,sha256=uFhRLVzqhFbTZVA7IePKbUi03ATCXuvga7rzwaHy1B0,1321
+ ursa/agents/code_review_agent.py,sha256=aUDq5gT-jdl9Qs-Wewj2oz1d60xov9sN-DOYRfGNTU0,11550
+ ursa/agents/execution_agent.py,sha256=-At1EcKRHP9lYQ80jpqTPtQyPuQV-sIpf9J8LlEfWdA,16618
+ ursa/agents/hypothesizer_agent.py,sha256=rSLohNQz3xvEcL_DGTFivf9q5BlX1cqlLUcts4GJIjM,23309
+ ursa/agents/mp_agent.py,sha256=HTMAnv1yGs8vgRLGFFYHSbwOz24qdnB-if_JQSH3urQ,6002
+ ursa/agents/planning_agent.py,sha256=AKWQJ848RLPiwQGrvDNdN9lBlf3YI5qWmt2hqXnRGj8,5426
+ ursa/agents/recall_agent.py,sha256=bQk7ZJtiO5pj89A50OBDzAJ4G2F7ZdsMwmKnp1WWR7g,813
+ ursa/agents/websearch_agent.py,sha256=rCv4AWbqe5Us4FmuypM6jptri21nKoNg044ncsu9u3E,8014
+ ursa/prompt_library/code_review_prompts.py,sha256=-HuhwW9W_p2LDn44bXLntxLADHCOyl-2KIXxRHto66w,2444
+ ursa/prompt_library/execution_prompts.py,sha256=JBBmzVV0605uwFXNv0pxH0fXHqtmOgcDzabjpq3wt2A,2153
+ ursa/prompt_library/hypothesizer_prompts.py,sha256=ieupOF5tUy_u8actOjPbK-y5Qkrgw6EYxAfw6RXBebs,762
+ ursa/prompt_library/literature_prompts.py,sha256=zhBiN3Q-1Z2hp-hkXXp0T8Ipc-6YUM9gw85DjNu1F6I,421
+ ursa/prompt_library/planning_prompts.py,sha256=C8IfVc3ny_5-03bJZop2Yax7wfqS_UIdUGsTZSNQRC0,3534
+ ursa/prompt_library/websearch_prompts.py,sha256=n4DJaYn_lIYAVtdy00CCJjT-dLWhn2JNipYqMJAotdY,8846
+ ursa/tools/run_command.py,sha256=sQRuHtRyJYWEyL9dpW_Ukc-xQ5vmKKJK1i_6z3uKEfA,690
+ ursa/tools/write_code.py,sha256=DtCsUMZegYm0mk-HMPG5Zo3Ba1gbGfnXHsv1NZTdDs8,1220
+ ursa/util/diff_renderer.py,sha256=1L1q2qWWb8gLhR532-LgJn2TrqXDx0gUpPVOWD_sqeU,4086
+ ursa/util/memory_logger.py,sha256=-4jZkMFXLnABj9x_DMGEUySVPLZaI47HrLgK69Naxw0,5731
+ ursa/util/parse.py,sha256=M0cjyQWmjatxX4WbVmDRUiirTLyW-t_Aemlrlrsc5nA,2811
+ ursa_ai-0.2.9.dist-info/licenses/LICENSE,sha256=4Vr6_u2zTHIUvYjoOBg9ztDbfpV3hyCFv3mTCS87gYU,1482
+ ursa_ai-0.2.9.dist-info/METADATA,sha256=tPMX8nDJxo5ZbaOnpScf9wQrkTsUD73XrStyHxs81Eg,6848
+ ursa_ai-0.2.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ ursa_ai-0.2.9.dist-info/top_level.txt,sha256=OjA1gRYSUAeiXGnpqPC8iOOGfcjFO1IlP848qMnYSdY,5
+ ursa_ai-0.2.9.dist-info/RECORD,,
ursa_ai-0.2.7.dist-info/RECORD DELETED
@@ -1,26 +0,0 @@
- ursa/agents/__init__.py,sha256=fhuGhYRfOSJ5o2yxjOvdEqJFy2j-VWLDkhiNi3wYNRw,498
- ursa/agents/arxiv_agent.py,sha256=nLZ1huMREr-INSDv1RMmOM8a-DcLBjNBWLSi3oFI5hw,14274
- ursa/agents/base.py,sha256=kGbiGn8qu1eKQSv2Y9YZWEv8ngYsyUMTBkAxk8iD9R0,1334
- ursa/agents/code_review_agent.py,sha256=yVO7nzYI3o0k2HguFw3OSY1IrCy5W8V0YiriYAeviY4,11562
- ursa/agents/execution_agent.py,sha256=Hw7EZem8qYedXjeLi5RDPrPIhqlW1G-vE66MlZ7g1BY,16607
- ursa/agents/hypothesizer_agent.py,sha256=p3bLHyqsiGRwYS4nycYcwnpye2j1umWdaOYspGAFRU0,23309
- ursa/agents/mp_agent.py,sha256=Kv793S2x6gavdgBD68wxvTPNFKLDLyI0FSs8iXCkcVQ,9732
- ursa/agents/planning_agent.py,sha256=5KSRk_gDsUrv_6zSxd7CqXhhMCYtnlfNlxSI9tSbqzc,5422
- ursa/agents/recall_agent.py,sha256=UcNRZLbx3j3cHaLEZul4__KzWV4SnUhLTjX9GeoYbHM,823
- ursa/agents/websearch_agent.py,sha256=zDS4IF-WJgsvSmV42HEO582rt3zCh_fJjteh7VpSNe4,7715
- ursa/prompt_library/code_review_prompts.py,sha256=-HuhwW9W_p2LDn44bXLntxLADHCOyl-2KIXxRHto66w,2444
- ursa/prompt_library/execution_prompts.py,sha256=JBBmzVV0605uwFXNv0pxH0fXHqtmOgcDzabjpq3wt2A,2153
- ursa/prompt_library/hypothesizer_prompts.py,sha256=is1SpCbsUtsYsyWOFz3H6M4nCnfyOMPj2p0mOcaEucc,763
- ursa/prompt_library/literature_prompts.py,sha256=zhBiN3Q-1Z2hp-hkXXp0T8Ipc-6YUM9gw85DjNu1F6I,421
- ursa/prompt_library/planning_prompts.py,sha256=C8IfVc3ny_5-03bJZop2Yax7wfqS_UIdUGsTZSNQRC0,3534
- ursa/prompt_library/websearch_prompts.py,sha256=n4DJaYn_lIYAVtdy00CCJjT-dLWhn2JNipYqMJAotdY,8846
- ursa/tools/run_command.py,sha256=sQRuHtRyJYWEyL9dpW_Ukc-xQ5vmKKJK1i_6z3uKEfA,690
- ursa/tools/write_code.py,sha256=DtCsUMZegYm0mk-HMPG5Zo3Ba1gbGfnXHsv1NZTdDs8,1220
- ursa/util/diff_renderer.py,sha256=gHawyUtBLeOq32A25_etDSy-HXAPyZQrnzfYGtHoEIQ,4086
- ursa/util/memory_logger.py,sha256=Qu8JRjqvXvchnVh6s-91te_xnfOAK1fJDyf1DvsRWnI,5737
- ursa/util/parse.py,sha256=M0cjyQWmjatxX4WbVmDRUiirTLyW-t_Aemlrlrsc5nA,2811
- ursa_ai-0.2.7.dist-info/licenses/LICENSE,sha256=4Vr6_u2zTHIUvYjoOBg9ztDbfpV3hyCFv3mTCS87gYU,1482
- ursa_ai-0.2.7.dist-info/METADATA,sha256=93ph0QLYscdrN2SpPVlEVjVtUaxDBpIE2BYMyTTJAkY,6848
- ursa_ai-0.2.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- ursa_ai-0.2.7.dist-info/top_level.txt,sha256=OjA1gRYSUAeiXGnpqPC8iOOGfcjFO1IlP848qMnYSdY,5
- ursa_ai-0.2.7.dist-info/RECORD,,