ursa-ai 0.2.8__tar.gz → 0.2.9__tar.gz

This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.

Potentially problematic release.


This version of ursa-ai might be problematic.

Files changed (31)
  1. {ursa_ai-0.2.8/src/ursa_ai.egg-info → ursa_ai-0.2.9}/PKG-INFO +1 -1
  2. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/pyproject.toml +5 -3
  3. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/agents/__init__.py +6 -6
  4. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/agents/arxiv_agent.py +16 -16
  5. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/agents/base.py +1 -3
  6. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/agents/code_review_agent.py +2 -2
  7. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/agents/execution_agent.py +4 -4
  8. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/agents/hypothesizer_agent.py +4 -4
  9. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/agents/mp_agent.py +6 -11
  10. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/agents/planning_agent.py +1 -1
  11. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/agents/recall_agent.py +0 -1
  12. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/agents/websearch_agent.py +14 -7
  13. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/prompt_library/hypothesizer_prompts.py +1 -1
  14. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/util/memory_logger.py +1 -1
  15. {ursa_ai-0.2.8 → ursa_ai-0.2.9/src/ursa_ai.egg-info}/PKG-INFO +1 -1
  16. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/LICENSE +0 -0
  17. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/README.md +0 -0
  18. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/setup.cfg +0 -0
  19. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/prompt_library/code_review_prompts.py +0 -0
  20. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/prompt_library/execution_prompts.py +0 -0
  21. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/prompt_library/literature_prompts.py +0 -0
  22. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/prompt_library/planning_prompts.py +0 -0
  23. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/prompt_library/websearch_prompts.py +0 -0
  24. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/tools/run_command.py +0 -0
  25. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/tools/write_code.py +0 -0
  26. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/util/diff_renderer.py +1 -1
  27. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa/util/parse.py +0 -0
  28. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa_ai.egg-info/SOURCES.txt +0 -0
  29. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa_ai.egg-info/dependency_links.txt +0 -0
  30. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa_ai.egg-info/requires.txt +0 -0
  31. {ursa_ai-0.2.8 → ursa_ai-0.2.9}/src/ursa_ai.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ursa-ai
- Version: 0.2.8
+ Version: 0.2.9
  Summary: Agents for science at LANL
  Author-email: Mike Grosskopf <mikegros@lanl.gov>, Nathan Debardeleben <ndebard@lanl.gov>, Rahul Somasundaram <rsomasundaram@lanl.gov>, Isaac Michaud <imichaud@lanl.gov>, Avanish Mishra <avanish@lanl.gov>, Arthur Lui <alui@lanl.gov>, Russell Bent <rbent@lanl.gov>, Earl Lawrence <earl@lanl.gov>
  License-Expression: BSD-3-Clause
@@ -65,11 +65,13 @@ enabled = true
  line-length = 80

  [tool.ruff.lint]
- ignore = ["D100"]
- extend-select = ["I", "W505"] # "D"
- extend-unsafe-fixes = ["F401"]
+ extend-select = ["I"] # W505 (doc-line-too-long); D (pydocstyle)
  pydocstyle.convention = "numpy"
  pycodestyle.max-doc-length = 80
+ # extend-unfixable = ["F401"] # unused imports
+
+ [tool.ruff.lint.per-file-ignores]
+ "__init__.py" = ["F401"]

  # Ignore test file documentation linting.
  [tool.ruff.lint.extend-per-file-ignores]
@@ -1,9 +1,9 @@
- from .planning_agent import PlanningAgent, PlanningState
- from .websearch_agent import WebSearchAgent, WebSearchState
- from .execution_agent import ExecutionAgent, ExecutionState
+ from .arxiv_agent import ArxivAgent, PaperMetadata, PaperState
+ from .base import BaseAgent, BaseChatModel
  from .code_review_agent import CodeReviewAgent, CodeReviewState
+ from .execution_agent import ExecutionAgent, ExecutionState
  from .hypothesizer_agent import HypothesizerAgent, HypothesizerState
- from .arxiv_agent import ArxivAgent, PaperState, PaperMetadata
- from .recall_agent import RecallAgent
- from .base import BaseAgent, BaseChatModel
  from .mp_agent import MaterialsProjectAgent
+ from .planning_agent import PlanningAgent, PlanningState
+ from .recall_agent import RecallAgent
+ from .websearch_agent import WebSearchAgent, WebSearchState
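The re-exports in src/ursa/agents/__init__.py are now alphabetized, and the new "__init__.py" = ["F401"] entry in pyproject.toml keeps Ruff from flagging them as unused imports. The practical effect is that downstream code can keep importing agents from the package root; a minimal, illustrative consumer snippet (not taken from this release):

# Illustrative only: these names are re-exported by src/ursa/agents/__init__.py,
# so consumers do not need to know the individual module paths.
from ursa.agents import ExecutionAgent, PlanningAgent, WebSearchAgent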
@@ -1,29 +1,29 @@
+ import base64
  import os
- import pymupdf
- import requests
- import feedparser
- from PIL import Image
+ import re
+ import statistics
+ from concurrent.futures import ThreadPoolExecutor, as_completed
  from io import BytesIO
- import base64
  from urllib.parse import quote
- from typing_extensions import TypedDict, List
- from concurrent.futures import ThreadPoolExecutor, as_completed
- from tqdm import tqdm
- import statistics
- import re

+ import feedparser
+ import pymupdf
+ import requests
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain_chroma import Chroma
  from langchain_community.document_loaders import PyPDFLoader
  from langchain_core.output_parsers import StrOutputParser
  from langchain_core.prompts import ChatPromptTemplate
- from langgraph.graph import StateGraph, END, START
- from langchain.text_splitter import RecursiveCharacterTextSplitter
- from langchain_chroma import Chroma
+ from langgraph.graph import StateGraph
+ from PIL import Image
+ from tqdm import tqdm
+ from typing_extensions import List, TypedDict

  from .base import BaseAgent

  try:
  from openai import OpenAI
- except:
+ except Exception:
  pass

  # embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
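Besides re-sorting the imports, this hunk tightens the bare `except:` around the optional openai import to `except Exception:`, so the guard no longer swallows KeyboardInterrupt or SystemExit. A self-contained sketch of that optional-dependency pattern (the `OpenAI = None` fallback and the `make_client` helper are illustrative assumptions, not code from this release):

try:
    from openai import OpenAI  # optional dependency
except Exception:  # ImportError and similar; does not catch KeyboardInterrupt/SystemExit
    OpenAI = None  # assumed fallback so callers can test for availability


def make_client():
    # Hypothetical helper: fail only when the optional client is actually needed.
    if OpenAI is None:
        raise RuntimeError("The 'openai' package is not installed.")
    return OpenAI()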
@@ -162,7 +162,7 @@ class ArxivAgent(BaseAgent):
  full_id = entry.id.split("/abs/")[-1]
  arxiv_id = full_id.split("/")[-1]
  title = entry.title.strip()
- authors = ", ".join(author.name for author in entry.authors)
+ # authors = ", ".join(author.name for author in entry.authors)
  pdf_url = f"https://arxiv.org/pdf/{full_id}.pdf"
  pdf_filename = os.path.join(
  self.database_path, f"{arxiv_id}.pdf"
@@ -313,7 +313,7 @@ class ArxivAgent(BaseAgent):

  if "papers" not in state or len(state["papers"]) == 0:
  print(
- f"No papers retrieved - bad query or network connection to ArXiv?"
+ "No papers retrieved - bad query or network connection to ArXiv?"
  )
  return {**state, "summaries": None}

@@ -1,9 +1,7 @@
  from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_core.load import dumps
  from langchain_litellm import ChatLiteLLM
  from langgraph.checkpoint.base import BaseCheckpointSaver
- from langchain_core.load import dumps
-
- import json


  class BaseAgent:
@@ -7,7 +7,7 @@ from langchain_core.messages import HumanMessage, SystemMessage
  from langchain_core.tools import tool
  from langgraph.graph import END, START, StateGraph
  from langgraph.graph.message import add_messages
- from langgraph.prebuilt import ToolNode, InjectedState
+ from langgraph.prebuilt import InjectedState, ToolNode
  from typing_extensions import TypedDict

  from ..prompt_library.code_review_prompts import (
@@ -349,7 +349,7 @@ def main():
  }
  result = (
  code_review_agent.action.invoke(initial_state),
- {"configurable": {"thread_id": self.thread_id}},
+ {"configurable": {"thread_id": 42}},
  )
  for x in result["messages"]:
  print(x.content)
@@ -107,7 +107,7 @@ class ExecutionAgent(BaseAgent):
  # note that we've done the symlink now, so don't need to do it later
  new_state["symlinkdir"]["is_linked"] = True

- if type(new_state["messages"][0]) == SystemMessage:
+ if isinstance(new_state["messages"][0], SystemMessage):
  new_state["messages"][0] = SystemMessage(
  content=self.executor_prompt
  )
@@ -139,7 +139,7 @@ class ExecutionAgent(BaseAgent):
  memories = []
  # Handle looping through the messages
  for x in state["messages"]:
- if not type(x) == AIMessage:
+ if not isinstance(x, AIMessage):
  memories.append(x.content)
  elif not x.tool_calls:
  memories.append(x.content)
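This hunk and the ones below, together with the matching changes in hypothesizer_agent.py and planning_agent.py, swap exact-type comparisons such as `type(x) == AIMessage` for `isinstance`, which also matches subclasses of the message types. A small, self-contained illustration (the `TaggedAIMessage` subclass is hypothetical, used only to show the difference):

from langchain_core.messages import AIMessage


class TaggedAIMessage(AIMessage):
    """Hypothetical subclass, used only to demonstrate the two checks."""


msg = TaggedAIMessage(content="hello")
print(type(msg) == AIMessage)      # False: exact-type comparison ignores inheritance
print(isinstance(msg, AIMessage))  # True: isinstance also accepts subclasses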
@@ -426,7 +426,7 @@ def edit_code(

  if old_code_clean not in content:
  console.print(
- f"[yellow] ⚠️ 'old_code' not found in file'; no changes made.[/]"
+ "[yellow] ⚠️ 'old_code' not found in file'; no changes made.[/]"
  )
  return f"No changes made to {filename}: 'old_code' not found in file."

@@ -483,7 +483,7 @@ def command_safe(state: ExecutionState) -> Literal["safe", "unsafe"]:
  index = -1
  message = state["messages"][index]
  # Loop through all the consecutive tool messages in reverse order
- while type(message) == ToolMessage:
+ while isinstance(message, ToolMessage):
  if "[UNSAFE]" in message.content:
  return "unsafe"

@@ -96,7 +96,7 @@ class HypothesizerAgent(BaseAgent):
  new_state["visited_sites"] = []

  try:
- if type(raw_search_results) == str:
+ if isinstance(raw_search_results, str):
  results_list = ast.literal_eval(raw_search_results)
  else:
  results_list = raw_search_results
@@ -154,7 +154,7 @@ class HypothesizerAgent(BaseAgent):
  new_state["visited_sites"] = []

  try:
- if type(raw_search_results) == str:
+ if isinstance(raw_search_results, str):
  results_list = ast.literal_eval(raw_search_results)
  else:
  results_list = raw_search_results
@@ -217,7 +217,7 @@ class HypothesizerAgent(BaseAgent):
  new_state["visited_sites"] = []

  try:
- if type(raw_search_results) == str:
+ if isinstance(raw_search_results, str):
  results_list = ast.literal_eval(raw_search_results)
  else:
  results_list = raw_search_results
@@ -589,7 +589,7 @@ if __name__ == "__main__":
  initial_state,
  {
  "recursion_limit": 999999,
- "configurable": {"thread_id": self.thread_id},
+ "configurable": {"thread_id": 42},
  },
  )
  summary_text = result["summary_report"]
@@ -1,20 +1,15 @@
- import os
  import json
- from typing import List, Dict
- from concurrent.futures import ThreadPoolExecutor
- from tqdm import tqdm
-
- from mp_api.client import MPRester
-
  import os
- from typing_extensions import TypedDict, List
- from concurrent.futures import ThreadPoolExecutor
- from tqdm import tqdm
  import re
+ from concurrent.futures import ThreadPoolExecutor
+ from typing import Dict

  from langchain_core.output_parsers import StrOutputParser
  from langchain_core.prompts import ChatPromptTemplate
- from langgraph.graph import StateGraph, END, START
+ from langgraph.graph import StateGraph
+ from mp_api.client import MPRester
+ from tqdm import tqdm
+ from typing_extensions import List, TypedDict

  from .base import BaseAgent

@@ -40,7 +40,7 @@ class PlanningAgent(BaseAgent):

  def generation_node(self, state: PlanningState) -> PlanningState:
  messages = state["messages"]
- if type(messages[0]) == SystemMessage:
+ if isinstance(messages[0], SystemMessage):
  messages[0] = SystemMessage(content=self.planner_prompt)
  else:
  messages = [SystemMessage(content=self.planner_prompt)] + messages
@@ -1,4 +1,3 @@
- import os
  from .base import BaseAgent


@@ -1,5 +1,3 @@
- import inspect
-
  # from langchain_community.tools import TavilySearchResults
  # from langchain_core.runnables.graph import MermaidDrawMethod
  from typing import Annotated, Any, List, Optional
@@ -9,7 +7,6 @@ from bs4 import BeautifulSoup
  from langchain_community.tools import DuckDuckGoSearchResults
  from langchain_core.language_models import BaseChatModel
  from langchain_core.messages import HumanMessage, SystemMessage, ToolMessage
- from langchain_core.tools import tool
  from langchain_openai import ChatOpenAI
  from langgraph.graph import END, START, StateGraph
  from langgraph.graph.message import add_messages
@@ -19,8 +16,8 @@ from typing_extensions import TypedDict

  from ..prompt_library.websearch_prompts import (
  reflection_prompt,
- websearch_prompt,
  summarize_prompt,
+ websearch_prompt,
  )
  from .base import BaseAgent

@@ -41,6 +38,7 @@ class WebSearchState(TypedDict):
  remaining_steps: int
  is_last_step: bool
  model: Any
+ thread_id: Any


  # Adding the model to the state clumsily so that all "read" sources arent in the
@@ -113,8 +111,14 @@ class WebSearchAgent(BaseAgent):
  except (requests.ConnectionError, requests.Timeout):
  return False

+ def state_store_node(self, state: WebSearchState) -> WebSearchState:
+ state["thread_id"] = self.thread_id
+ return state
+ # return dict(**state, thread_id=self.thread_id)
+
  def _initialize_agent(self):
  self.graph = StateGraph(WebSearchState)
+ self.graph.add_node("state_store", self.state_store_node)
  self.graph.add_node(
  "websearch",
  create_react_agent(
@@ -128,7 +132,8 @@ class WebSearchAgent(BaseAgent):
  self.graph.add_node("review", self.review_node)
  self.graph.add_node("response", self.response_node)

- self.graph.add_edge(START, "websearch")
+ self.graph.add_edge(START, "state_store")
+ self.graph.add_edge("state_store", "websearch")
  self.graph.add_edge("websearch", "review")
  self.graph.add_edge("response", END)

@@ -185,7 +190,9 @@ def process_content(
  """
  summarized_information = (
  state["model"]
- .invoke(content_prompt, {"configurable": {"thread_id": self.thread_id}})
+ .invoke(
+ content_prompt, {"configurable": {"thread_id": state["thread_id"]}}
+ )
  .content
  )
  return summarized_information
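The websearch_agent.py hunks above fix a latent bug: process_content is a module-level function, so the old `self.thread_id` reference could never resolve. The new state_store_node copies the agent's thread_id into the graph state before the search node runs, and process_content now reads it back from `state`. A trimmed-down, self-contained sketch of that pattern (the DemoState and DemoAgent names are illustrative, not the actual WebSearchAgent code):

from typing import Any

from langgraph.graph import END, START, StateGraph
from typing_extensions import TypedDict


class DemoState(TypedDict):
    # Stand-in for WebSearchState, keeping only the field relevant here.
    thread_id: Any


class DemoAgent:
    def __init__(self, thread_id: Any):
        self.thread_id = thread_id

    def state_store_node(self, state: DemoState) -> DemoState:
        # Stash the agent-level thread_id in graph state so later nodes and
        # plain module-level functions (which have no `self`) can read it.
        state["thread_id"] = self.thread_id
        return state


agent = DemoAgent(thread_id=42)
graph = StateGraph(DemoState)
graph.add_node("state_store", agent.state_store_node)
graph.add_edge(START, "state_store")
graph.add_edge("state_store", END)
app = graph.compile()
print(app.invoke({"thread_id": None}))  # -> {'thread_id': 42}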
@@ -217,7 +224,7 @@ def main():
  inputs,
  {
  "recursion_limit": 10000,
- "configurable": {"thread_id": self.thread_id},
+ "configurable": {"thread_id": 42},
  },
  )

@@ -10,7 +10,7 @@ critic_prompt = dedent("""\
  You are Agent 2, a rigorous Critic who identifies flaws and areas for improvement.
  """)

- competitor_prompt = dedent(f"""\
+ competitor_prompt = dedent("""\
  You are Agent 3, taking on the role of a direct competitor to Agent 1 in this hypothetical situation.
  Acting as that competitor, and taking into account potential critiques from the critic, provide an honest
  assessment how you might *REALLY* counter the approach of Agent 1.
@@ -1,7 +1,7 @@
  import os
  import shutil
  from pathlib import Path
- from typing import Any, Dict, List, Optional, Sequence
+ from typing import Any, Dict, Optional, Sequence

  from langchain_chroma import Chroma
  from langchain_core.documents import Document
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ursa-ai
- Version: 0.2.8
+ Version: 0.2.9
  Summary: Agents for science at LANL
  Author-email: Mike Grosskopf <mikegros@lanl.gov>, Nathan Debardeleben <ndebard@lanl.gov>, Rahul Somasundaram <rsomasundaram@lanl.gov>, Isaac Michaud <imichaud@lanl.gov>, Avanish Mishra <avanish@lanl.gov>, Arthur Lui <alui@lanl.gov>, Russell Bent <rbent@lanl.gov>, Earl Lawrence <earl@lanl.gov>
  License-Expression: BSD-3-Clause
File without changes
File without changes
File without changes
@@ -1,5 +1,5 @@
- import re
  import difflib
+ import re
  from dataclasses import dataclass

  from rich.console import Console, ConsoleOptions, RenderResult
File without changes