ursa-ai 0.2.9__tar.gz → 0.2.11__tar.gz
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
This version of ursa-ai might be problematic.
- {ursa_ai-0.2.9/src/ursa_ai.egg-info → ursa_ai-0.2.11}/PKG-INFO +1 -1
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/pyproject.toml +1 -4
- ursa_ai-0.2.11/src/ursa/agents/__init__.py +17 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/agents/arxiv_agent.py +22 -27
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/agents/mp_agent.py +4 -3
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/util/memory_logger.py +17 -8
- {ursa_ai-0.2.9 → ursa_ai-0.2.11/src/ursa_ai.egg-info}/PKG-INFO +1 -1
- ursa_ai-0.2.9/src/ursa/agents/__init__.py +0 -9
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/LICENSE +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/README.md +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/setup.cfg +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/agents/base.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/agents/code_review_agent.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/agents/execution_agent.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/agents/hypothesizer_agent.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/agents/planning_agent.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/agents/recall_agent.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/agents/websearch_agent.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/prompt_library/code_review_prompts.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/prompt_library/execution_prompts.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/prompt_library/hypothesizer_prompts.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/prompt_library/literature_prompts.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/prompt_library/planning_prompts.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/prompt_library/websearch_prompts.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/tools/run_command.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/tools/write_code.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/util/diff_renderer.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/util/parse.py +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa_ai.egg-info/SOURCES.txt +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa_ai.egg-info/dependency_links.txt +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa_ai.egg-info/requires.txt +0 -0
- {ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa_ai.egg-info/top_level.txt +0 -0

{ursa_ai-0.2.9/src/ursa_ai.egg-info → ursa_ai-0.2.11}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ursa-ai
-Version: 0.2.9
+Version: 0.2.11
 Summary: Agents for science at LANL
 Author-email: Mike Grosskopf <mikegros@lanl.gov>, Nathan Debardeleben <ndebard@lanl.gov>, Rahul Somasundaram <rsomasundaram@lanl.gov>, Isaac Michaud <imichaud@lanl.gov>, Avanish Mishra <avanish@lanl.gov>, Arthur Lui <alui@lanl.gov>, Russell Bent <rbent@lanl.gov>, Earl Lawrence <earl@lanl.gov>
 License-Expression: BSD-3-Clause
{ursa_ai-0.2.9 → ursa_ai-0.2.11}/pyproject.toml

@@ -63,15 +63,12 @@ enabled = true
 
 [tool.ruff]
 line-length = 80
+preview = true
 
 [tool.ruff.lint]
 extend-select = ["I"] # W505 (doc-line-too-long); D (pydocstyle)
 pydocstyle.convention = "numpy"
 pycodestyle.max-doc-length = 80
-# extend-unfixable = ["F401"] # unused imports
-
-[tool.ruff.lint.per-file-ignores]
-"__init__.py" = ["F401"]
 
 # Ignore test file documentation linting.
 [tool.ruff.lint.extend-per-file-ignores]
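The only addition here is `preview = true`, which opts Ruff into its preview lint rules and formatting style; that is presumably what produces the bracket-hugging `invoke({...})` layout seen in the arxiv_agent.py and mp_agent.py hunks below, and dropping the `"__init__.py" = ["F401"]` ignore lines up with the explicit re-exports in the new `__init__.py`. A minimal sketch of the stylistic difference, with a stand-in `invoke` so it runs on its own (not an excerpt from the package):

```python
def invoke(payload: dict) -> dict:
    # Stand-in for chain.invoke, only so this sketch is runnable.
    return payload

query, context = "quantum error correction", "recent literature"

# Pre-0.2.11 layout: the sole dict argument sits on its own indentation level.
result_old_style = invoke(
    {
        "query": query,
        "context": context,
    }
)

# 0.2.11 layout: the braces hug the call parentheses (Ruff preview style).
result_new_style = invoke({
    "query": query,
    "context": context,
})

assert result_old_style == result_new_style
```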
ursa_ai-0.2.11/src/ursa/agents/__init__.py (new file)

@@ -0,0 +1,17 @@
+from .arxiv_agent import ArxivAgent as ArxivAgent
+from .arxiv_agent import PaperMetadata as PaperMetadata
+from .arxiv_agent import PaperState as PaperState
+from .base import BaseAgent as BaseAgent
+from .base import BaseChatModel as BaseChatModel
+from .code_review_agent import CodeReviewAgent as CodeReviewAgent
+from .code_review_agent import CodeReviewState as CodeReviewState
+from .execution_agent import ExecutionAgent as ExecutionAgent
+from .execution_agent import ExecutionState as ExecutionState
+from .hypothesizer_agent import HypothesizerAgent as HypothesizerAgent
+from .hypothesizer_agent import HypothesizerState as HypothesizerState
+from .mp_agent import MaterialsProjectAgent as MaterialsProjectAgent
+from .planning_agent import PlanningAgent as PlanningAgent
+from .planning_agent import PlanningState as PlanningState
+from .recall_agent import RecallAgent as RecallAgent
+from .websearch_agent import WebSearchAgent as WebSearchAgent
+from .websearch_agent import WebSearchState as WebSearchState
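The rewritten `__init__.py` uses the `import X as X` redundant-alias form, which linters and type checkers treat as an explicit re-export; that appears to be why the F401 per-file ignore could be removed from pyproject.toml above. A small usage sketch (assumes ursa-ai 0.2.11 and its dependencies are installed):

```python
# The agents can now be imported directly from the subpackage.
from ursa.agents import ArxivAgent, ExecutionAgent, PlanningAgent

# The classes themselves still live in their defining modules.
print(ArxivAgent.__module__)      # ursa.agents.arxiv_agent
print(ExecutionAgent.__module__)  # ursa.agents.execution_agent
print(PlanningAgent.__module__)   # ursa.agents.planning_agent
```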
{ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/agents/arxiv_agent.py

@@ -211,12 +211,10 @@ class ArxivAgent(BaseAgent):
         except Exception as e:
             full_text = f"Error loading paper: {e}"
 
-        papers.append(
-            {
-                "arxiv_id": arxiv_id,
-                "full_text": full_text,
-            }
-        )
+        papers.append({
+            "arxiv_id": arxiv_id,
+            "full_text": full_text,
+        })
 
         return papers
 
@@ -279,28 +277,23 @@ class ArxivAgent(BaseAgent):
             )
 
             if relevant_docs_with_scores:
-                score = sum(
-                    s for _, s in relevant_docs_with_scores
-                ) / len(relevant_docs_with_scores)
+                score = sum([
+                    s for _, s in relevant_docs_with_scores
+                ]) / len(relevant_docs_with_scores)
                 relevancy_scores[i] = abs(1.0 - score)
             else:
                 relevancy_scores[i] = 0.0
 
-            retrieved_content = "\n\n".join(
-                [
-                    doc.page_content
-                    for doc, _ in relevant_docs_with_scores
-                ]
-            )
+            retrieved_content = "\n\n".join([
+                doc.page_content for doc, _ in relevant_docs_with_scores
+            ])
             else:
                 retrieved_content = cleaned_text
 
-            summary = chain.invoke(
-                {
-                    "retrieved_content": retrieved_content,
-                    "context": state["context"],
-                }
-            )
+            summary = chain.invoke({
+                "retrieved_content": retrieved_content,
+                "context": state["context"],
+            })
 
         except Exception as e:
             summary = f"Error summarizing paper: {e}"
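The change above is purely stylistic (list-comprehension brackets hugging the call), but the scoring logic it touches is easy to restate: the retriever's distance-like scores are averaged and folded into `abs(1.0 - mean)`. A self-contained sketch with made-up `(doc, score)` pairs:

```python
# Made-up (document, score) pairs standing in for relevant_docs_with_scores.
relevant_docs_with_scores = [("doc-a", 0.2), ("doc-b", 0.4)]

if relevant_docs_with_scores:
    score = sum([
        s for _, s in relevant_docs_with_scores
    ]) / len(relevant_docs_with_scores)
    relevancy = abs(1.0 - score)  # 0.7 for the sample values
else:
    relevancy = 0.0

print(relevancy)
```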
@@ -376,9 +369,10 @@ class ArxivAgent(BaseAgent):
 
         chain = prompt | self.llm | StrOutputParser()
 
-        final_summary = chain.invoke(
-            {"Summaries": combined, "context": state["context"]}
-        )
+        final_summary = chain.invoke({
+            "Summaries": combined,
+            "context": state["context"],
+        })
 
         with open(self.summaries_path + "/final_summary.txt", "w") as f:
             f.write(final_summary)
@@ -406,9 +400,10 @@ class ArxivAgent(BaseAgent):
         return graph
 
     def run(self, arxiv_search_query: str, context: str) -> str:
-        result = self.graph.invoke(
-            {"query": arxiv_search_query, "context": context}
-        )
+        result = self.graph.invoke({
+            "query": arxiv_search_query,
+            "context": context,
+        })
 
         if self.summarize:
             return result.get("final_summary", "No summary generated.")
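For context, `run` wraps a single `graph.invoke` call and returns the final summary when summarization is enabled. A usage sketch follows; the constructor arguments are not shown in this diff, so the `...` placeholder is hypothetical rather than the real signature:

```python
from ursa.agents import ArxivAgent

agent = ArxivAgent(...)  # constructor arguments not shown in this diff

# run() forwards the query and context to the graph and, per the hunk above,
# returns the final summary when summarization is enabled.
summary = agent.run(
    arxiv_search_query="all:superconducting qubits",
    context="Summarize error-correction results relevant to our experiment.",
)
print(summary)
```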
{ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/agents/mp_agent.py

@@ -141,9 +141,10 @@ You are a materials-science assistant. Given the following metadata about a mate
 {context}
         """)
         chain = prompt | self.llm | StrOutputParser()
-        final = chain.invoke(
-            {"summaries": combined, "context": state["context"]}
-        )
+        final = chain.invoke({
+            "summaries": combined,
+            "context": state["context"],
+        })
         return {**state, "final_summary": final}
 
     def _build_graph(self):
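The `prompt | self.llm | StrOutputParser()` pipeline is standard LangChain runnable composition, so `chain.invoke({...})` simply fills the template variables and passes the result through the model and parser. A small sketch of what the dict feeds into, using a made-up template and leaving the chat model out (the diff does not show how `self.llm` is configured):

```python
from langchain_core.prompts import ChatPromptTemplate

# Hypothetical template with the same two variables used above.
prompt = ChatPromptTemplate.from_template(
    "You are a materials-science assistant.\n\n{summaries}\n\nContext:\n{context}"
)

# chain.invoke({...}) starts by rendering the prompt with these keys.
messages = prompt.invoke({
    "summaries": "Per-material summaries go here.",
    "context": "Candidate battery cathode materials.",
})
print(messages)
```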
{ursa_ai-0.2.9 → ursa_ai-0.2.11}/src/ursa/util/memory_logger.py

@@ -26,20 +26,30 @@ class AgentMemory:
     * Requires `langchain-chroma`, and `chromadb`.
     """
 
+    @classmethod
+    def get_db_path(cls, path: Optional[str | Path]) -> Path:
+        match path:
+            case None:
+                return Path.home() / ".cache" / "ursa" / "rag" / "db"
+            case str():
+                return Path(str)
+            case Path():
+                return path
+            case _:
+                raise TypeError(
+                    f"Type of path is `{type(path)}` "
+                    "but `Optional[str | Path]` was expected."
+                )
+
     def __init__(
         self,
         embedding_model,
         path: Optional[str | Path] = None,
         collection_name: str = "agent_memory",
     ) -> None:
-        self.path = (
-            Path(path)
-            if path
-            else Path(__file__).resolve().parent / "agent_memory_db"
-        )
+        self.path = self.get_db_path(path)
         self.collection_name = collection_name
         self.path.mkdir(parents=True, exist_ok=True)
-
         self.embeddings = embedding_model
 
         # If a DB already exists, load it; otherwise defer creation until `build_index`.
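With this change the memory database defaults to a per-user cache directory instead of a folder inside the installed package. A usage sketch (the embeddings object is a placeholder; any embedding model accepted by `AgentMemory` should work per its docstring):

```python
from pathlib import Path

from ursa.util.memory_logger import AgentMemory

embeddings = ...  # placeholder for an embedding model instance

# With path=None the store now resolves to ~/.cache/ursa/rag/db.
memory = AgentMemory(embedding_model=embeddings)
print(memory.path)

# Passing a Path pins the store to an explicit location instead.
pinned = AgentMemory(embedding_model=embeddings, path=Path("/tmp/ursa_memory_db"))
print(pinned.path)
```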
@@ -165,8 +175,7 @@ def delete_database(path: Optional[str | Path] = None):
         Where the on-disk Chroma DB is for deleting. If *None*, a folder called
         ``agent_memory_db`` is created in the package’s base directory.
     """
-
-    db_path = Path(path) if path else Path("~/.cache/ursa/rag/db/")
+    db_path = AgentMemory.get_db_path(path)
     if os.path.exists(db_path):
         shutil.rmtree(db_path)
         print(f"Database: {db_path} has been deleted.")
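`delete_database` now resolves its target through the same `AgentMemory.get_db_path` helper, so a bare call removes the shared default store. A short sketch:

```python
from pathlib import Path

from ursa.util.memory_logger import delete_database

# No argument: removes ~/.cache/ursa/rag/db if it exists.
delete_database()

# Or point it at a specific store (using a Path, matching get_db_path's Path case).
delete_database(Path("/tmp/ursa_memory_db"))
```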
{ursa_ai-0.2.9 → ursa_ai-0.2.11/src/ursa_ai.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ursa-ai
-Version: 0.2.9
+Version: 0.2.11
 Summary: Agents for science at LANL
 Author-email: Mike Grosskopf <mikegros@lanl.gov>, Nathan Debardeleben <ndebard@lanl.gov>, Rahul Somasundaram <rsomasundaram@lanl.gov>, Isaac Michaud <imichaud@lanl.gov>, Avanish Mishra <avanish@lanl.gov>, Arthur Lui <alui@lanl.gov>, Russell Bent <rbent@lanl.gov>, Earl Lawrence <earl@lanl.gov>
 License-Expression: BSD-3-Clause
ursa_ai-0.2.9/src/ursa/agents/__init__.py (deleted)

@@ -1,9 +0,0 @@
-from .arxiv_agent import ArxivAgent, PaperMetadata, PaperState
-from .base import BaseAgent, BaseChatModel
-from .code_review_agent import CodeReviewAgent, CodeReviewState
-from .execution_agent import ExecutionAgent, ExecutionState
-from .hypothesizer_agent import HypothesizerAgent, HypothesizerState
-from .mp_agent import MaterialsProjectAgent
-from .planning_agent import PlanningAgent, PlanningState
-from .recall_agent import RecallAgent
-from .websearch_agent import WebSearchAgent, WebSearchState
The remaining 24 files listed above (+0 -0) are unchanged between 0.2.9 and 0.2.11.