aiagents4pharma 1.29.0__py3-none-any.whl → 1.30.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiagents4pharma/talk2scholars/agents/__init__.py +1 -0
- aiagents4pharma/talk2scholars/agents/main_agent.py +18 -10
- aiagents4pharma/talk2scholars/agents/paper_download_agent.py +85 -0
- aiagents4pharma/talk2scholars/agents/pdf_agent.py +4 -10
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +18 -9
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/paper_download_agent/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +2 -2
- aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml +1 -0
- aiagents4pharma/talk2scholars/configs/config.yaml +2 -0
- aiagents4pharma/talk2scholars/configs/tools/download_arxiv_paper/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml +6 -1
- aiagents4pharma/talk2scholars/configs/tools/search/default.yaml +7 -1
- aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml +6 -1
- aiagents4pharma/talk2scholars/configs/tools/zotero_read/default.yaml +1 -1
- aiagents4pharma/talk2scholars/state/state_talk2scholars.py +4 -0
- aiagents4pharma/talk2scholars/tests/test_llm_main_integration.py +84 -53
- aiagents4pharma/talk2scholars/tests/test_main_agent.py +24 -0
- aiagents4pharma/talk2scholars/tests/test_paper_download_agent.py +142 -0
- aiagents4pharma/talk2scholars/tests/test_paper_download_tools.py +154 -0
- aiagents4pharma/talk2scholars/tests/test_question_and_answer_tool.py +79 -15
- aiagents4pharma/talk2scholars/tests/test_routing_logic.py +12 -8
- aiagents4pharma/talk2scholars/tests/test_s2_multi.py +27 -4
- aiagents4pharma/talk2scholars/tests/test_s2_search.py +19 -3
- aiagents4pharma/talk2scholars/tests/test_s2_single.py +27 -3
- aiagents4pharma/talk2scholars/tests/test_zotero_read.py +17 -10
- aiagents4pharma/talk2scholars/tools/paper_download/__init__.py +17 -0
- aiagents4pharma/talk2scholars/tools/paper_download/abstract_downloader.py +45 -0
- aiagents4pharma/talk2scholars/tools/paper_download/arxiv_downloader.py +115 -0
- aiagents4pharma/talk2scholars/tools/paper_download/download_arxiv_input.py +64 -0
- aiagents4pharma/talk2scholars/tools/pdf/question_and_answer.py +73 -26
- aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py +46 -22
- aiagents4pharma/talk2scholars/tools/s2/query_results.py +1 -1
- aiagents4pharma/talk2scholars/tools/s2/search.py +40 -12
- aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py +42 -16
- aiagents4pharma/talk2scholars/tools/zotero/zotero_read.py +33 -16
- aiagents4pharma/talk2scholars/tools/zotero/zotero_write.py +39 -7
- {aiagents4pharma-1.29.0.dist-info → aiagents4pharma-1.30.1.dist-info}/METADATA +2 -2
- {aiagents4pharma-1.29.0.dist-info → aiagents4pharma-1.30.1.dist-info}/RECORD +41 -32
- {aiagents4pharma-1.29.0.dist-info → aiagents4pharma-1.30.1.dist-info}/WHEEL +1 -1
- {aiagents4pharma-1.29.0.dist-info → aiagents4pharma-1.30.1.dist-info}/LICENSE +0 -0
- {aiagents4pharma-1.29.0.dist-info → aiagents4pharma-1.30.1.dist-info}/top_level.txt +0 -0
aiagents4pharma/talk2scholars/agents/main_agent.py

@@ -1,11 +1,14 @@
 #!/usr/bin/env python3

 """
-Main agent for
+Main agent module for initializing and running the Talk2Scholars application.

-This module
-
-
+This module sets up the hierarchical agent system using LangGraph and integrates
+various sub-agents for handling different tasks such as semantic scholar, zotero,
+PDF processing, and paper downloading.
+
+Functions:
+- get_app: Initializes and returns the LangGraph-based hierarchical agent system.
 """

 import logging
@@ -16,6 +19,8 @@ from langchain_core.language_models.chat_models import BaseChatModel
 from langgraph.checkpoint.memory import MemorySaver
 from ..agents.s2_agent import get_app as get_app_s2
 from ..agents.zotero_agent import get_app as get_app_zotero
+from ..agents.pdf_agent import get_app as get_app_pdf
+from ..agents.paper_download_agent import get_app as get_app_paper_download
 from ..state.state_talk2scholars import Talk2Scholars

 # Initialize logger
@@ -43,12 +48,13 @@ def get_app(uniq_id, llm_model: BaseChatModel):
     >>> app = get_app("thread_123")
     >>> result = app.invoke(initial_state)
     """
-    if llm_model
-        llm_model
-
-
-
-
+    if hasattr(llm_model, "model_name"):
+        if llm_model.model_name == "gpt-4o-mini":
+            llm_model = ChatOpenAI(
+                model="gpt-4o-mini",
+                temperature=0,
+                model_kwargs={"parallel_tool_calls": False},
+            )
     # Load hydra configuration
     logger.log(logging.INFO, "Launching Talk2Scholars with thread_id %s", uniq_id)
     with hydra.initialize(version_base=None, config_path="../configs/"):
@@ -62,6 +68,8 @@ def get_app(uniq_id, llm_model: BaseChatModel):
         [
             get_app_s2(uniq_id, llm_model), # semantic scholar
            get_app_zotero(uniq_id, llm_model), # zotero
+            get_app_pdf(uniq_id, llm_model), # pdf
+            get_app_paper_download(uniq_id, llm_model), # paper download
         ],
         model=llm_model,
         state_schema=Talk2Scholars,
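Usage note (not part of the diff): the supervisor returned by the updated `get_app` is driven as in the sketch below, which mirrors the package's own docstring and the commented-out integration test further down; the model settings, thread id, and query are illustrative assumptions.

    # Minimal usage sketch (assumptions: OPENAI_API_KEY is set; the thread id,
    # question, and model settings are illustrative, not taken from the package).
    from langchain_openai import ChatOpenAI
    from langchain_core.messages import HumanMessage
    from aiagents4pharma.talk2scholars.agents.main_agent import get_app
    from aiagents4pharma.talk2scholars.state.state_talk2scholars import Talk2Scholars

    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    app = get_app("thread_123", llm)  # compiled supervisor graph

    initial_state = Talk2Scholars(
        messages=[HumanMessage(content="Search for papers on transformers.")]
    )
    result = app.invoke(
        initial_state,
        {"configurable": {"thread_id": "thread_123"}},
    )
    print(result["messages"][-1].content)  # one-sentence summary from the supervisor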
aiagents4pharma/talk2scholars/agents/paper_download_agent.py (new file)

@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+"""
+This module defines the paper download agent that connects to the arXiv API to fetch
+paper details and PDFs. It is part of the Talk2Scholars project.
+"""
+
+import logging
+from typing import Any, Dict
+import hydra
+from langchain_core.language_models.chat_models import BaseChatModel
+from langgraph.graph import START, StateGraph
+from langgraph.prebuilt.chat_agent_executor import create_react_agent
+from langgraph.prebuilt.tool_node import ToolNode
+from langgraph.checkpoint.memory import MemorySaver
+from ..state.state_talk2scholars import Talk2Scholars
+from ..tools.paper_download import download_arxiv_paper
+from ..tools.s2.query_results import query_results
+
+# Initialize logger
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+def get_app(uniq_id, llm_model: BaseChatModel):
+    """
+    Initializes and returns the LangGraph application for the Talk2Scholars paper download agent.
+
+    Args:
+        uniq_id (str): A unique identifier for tracking the current session.
+        llm_model (BaseChatModel, optional): The language model to be used by the agent.
+            Defaults to ChatOpenAI(model="gpt-4o-mini", temperature=0.5).
+
+    Returns:
+        StateGraph: A compiled LangGraph application that enables the paper download agent to
+        process user queries and retrieve arXiv papers.
+    """
+
+    # Load Hydra configuration
+    logger.info("Loading Hydra configuration for Talk2Scholars paper download agent")
+    with hydra.initialize(version_base=None, config_path="../configs"):
+        cfg = hydra.compose(
+            config_name="config",
+            overrides=["agents/talk2scholars/paper_download_agent=default"],
+        )
+        cfg = cfg.agents.talk2scholars.paper_download_agent
+
+    # Define tools properly
+    tools = ToolNode([download_arxiv_paper, query_results])
+
+    # Define the model
+    logger.info("Using OpenAI model %s", llm_model)
+    model = create_react_agent(
+        llm_model,
+        tools=tools,
+        state_schema=Talk2Scholars,
+        prompt=cfg.paper_download_agent,
+        checkpointer=MemorySaver(),
+    )
+
+    def paper_download_agent_node(state: Talk2Scholars) -> Dict[str, Any]:
+        """
+        Processes the current state to fetch the arXiv paper.
+        """
+        logger.info("Creating paper download agent node with thread_id: %s", uniq_id)
+        result = model.invoke(state, {"configurable": {"thread_id": uniq_id}})
+        return result
+
+    # Define new graph
+    workflow = StateGraph(Talk2Scholars)
+
+    # Adding node for paper download agent
+    workflow.add_node("paper_download_agent", paper_download_agent_node)
+
+    # Entering into the agent
+    workflow.add_edge(START, "paper_download_agent")
+
+    # Memory management for states between graph runs
+    checkpointer = MemorySaver()
+
+    # Compile the graph
+    app = workflow.compile(checkpointer=checkpointer, name="agent_paper_download")
+
+    # Logging the information and returning the app
+    logger.info("Compiled the graph")
+    return app
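Usage note (not part of the diff): the new paper download agent can also be compiled and invoked on its own, as sketched below; the arXiv ID and thread id are illustrative assumptions, and in normal operation the supervisor routes to this agent automatically.

    # Hedged sketch: drive the paper download agent directly. The arXiv ID and
    # thread id are illustrative; the model mirrors the docstring default.
    from langchain_openai import ChatOpenAI
    from langchain_core.messages import HumanMessage
    from aiagents4pharma.talk2scholars.agents.paper_download_agent import get_app

    app = get_app("thread_123", ChatOpenAI(model="gpt-4o-mini", temperature=0))
    result = app.invoke(
        {"messages": [HumanMessage(content="Download the arXiv paper 1706.03762")]},
        {"configurable": {"thread_id": "thread_123"}},
    )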
aiagents4pharma/talk2scholars/agents/pdf_agent.py

@@ -26,10 +26,7 @@ logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)


-def get_app(
-    uniq_id,
-    llm_model: BaseChatModel
-):
+def get_app(uniq_id, llm_model: BaseChatModel):
     """
     Initializes and returns the LangGraph application for the PDF agent.

@@ -40,7 +37,7 @@ def get_app(

     Args:
         uniq_id (str): A unique identifier for the current conversation session or thread.
-        llm_model (BaseChatModel, optional): The language model instance to be used.
+        llm_model (BaseChatModel, optional): The language model instance to be used.
             Defaults to ChatOpenAI(model="gpt-4o-mini", temperature=0).

     Returns:
@@ -71,10 +68,7 @@ def get_app(
         Any: The response generated by the language model after processing the state.
         """
         logger.info("Creating Agent_PDF node with thread_id %s", uniq_id)
-        response = model.invoke(
-            state,
-            {"configurable": {"thread_id": uniq_id}}
-        )
+        response = model.invoke(state, {"configurable": {"thread_id": uniq_id}})
         return response

     # Define the tool node that includes the PDF QnA tool.
@@ -100,7 +94,7 @@ def get_app(
     checkpointer = MemorySaver()

     # Compile the graph into a runnable app.
-    app = workflow.compile(checkpointer=checkpointer)
+    app = workflow.compile(checkpointer=checkpointer, name="agent_pdf")
     logger.info("Compiled the PDF agent graph.")

     return app
aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml

@@ -1,13 +1,22 @@
 _target_: agents.main_agent.get_app
 temperature: 0
 system_prompt: >
-  You are
+  You are Talk2Scholars agent coordinating academic paper discovery
+  and analysis with help of the following agents:
+  1. Agent S2: This agent can be used to search and recommend papers
+  from Semantic Scholar. Use this agent when the user asks for
+  general paper/article searches and recommendations, or to retrieve information
+  from the last displayed results table or query abstract of last
+  displayed results.
+  2. Agent Zotero: This agent can be used to retrieve, display, and query
+  papers/articles from the Zotero library. Use this agent only when the user
+  explicitly asks for papers from Zotero. This tool can also be used to
+  save papers in the zotero library.
+  3. Agent PaperFetch: This agent can be used to download papers/articles
+  from ArXiv.
+  4. Agent PDFQuery: This agent can be used to query contents of an
+  uploaded or downloaded PDF/paper/article.

-
-
-
-  general paper searches and recommendations.
-  2. Zotero_agent: This agent can be used to retrieve, display, and query
-  papers from the Zotero library. Use this agent only when the user
-  explicitly asks for papers from Zotero. This tool can also be used to
-  save papers in under collections in the zotero library
+  Your final response should be a one sentence summary of the information
+  retrieved from the agents above. Do not repeat the information already
+  displayed to the user in the response of the agents.
aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml

@@ -15,5 +15,5 @@ s2_agent: >
   1. When user requests papers, use search/recommendation tools to find papers
   2. Use `display_results` tool to display the response from the search/recommendation tools
   3. Use `query_results` tool to query over the selected paper only when the user asks to
-  4. When the user wants recommendations, you can get the "
-  pass the "
+  4. When the user wants recommendations, you can get the "semantic_scholar_paper_id" using `query_results` tool in the "last_displayed_results" key, then
+  pass the "semantic_scholar_paper_id" to `search`, `single_paper_rec` or `multi_paper_rec` tools depending on the user's query. Do not use "arxiv_id"
aiagents4pharma/talk2scholars/configs/config.yaml

@@ -2,10 +2,12 @@ defaults:
   - _self_
   - agents/talk2scholars/main_agent: default
   - agents/talk2scholars/s2_agent: default
+  - agents/talk2scholars/paper_download_agent: default
   - agents/talk2scholars/zotero_agent: default
   - app/frontend: default
   - agents/talk2scholars/pdf_agent: default
   - tools/search: default
+  - tools/download_arxiv_paper: default
   - tools/single_paper_recommendation: default
   - tools/multi_paper_recommendation: default
   - tools/retrieve_semantic_scholar_paper_id: default
aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml

@@ -6,10 +6,15 @@ api_fields:
   - "title"
   - "abstract"
   - "year"
-  - "authors"
+  - "authors.name"
+  - "authors.authorId"
   - "citationCount"
   - "url"
   - "externalIds"
+  - "venue"
+  - "publicationVenue" # Full object, instead of specific subfields
+  - "journal" # Full object, instead of specific subfields
+  - "publicationDate"
 # Commented fields that could be added later if needed

 # Default headers and params
aiagents4pharma/talk2scholars/configs/tools/search/default.yaml

@@ -6,10 +6,16 @@ api_fields:
   - "title"
   - "abstract"
   - "year"
-  - "authors"
+  - "authors.name"
+  - "authors.authorId"
   - "citationCount"
   - "url"
   - "externalIds"
+  - "venue"
+  - "publicationVenue" # Full object, instead of specific subfields
+  - "journal" # Full object, instead of specific subfields
+  - "publicationDate"
+
 # Commented fields that could be added later if needed
 # - "publicationTypes"
 # - "openAccessPdf"
aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml

@@ -6,10 +6,15 @@ api_fields:
   - "title"
   - "abstract"
   - "year"
-  - "authors"
+  - "authors.name"
+  - "authors.authorId"
   - "citationCount"
   - "url"
   - "externalIds"
+  - "venue"
+  - "publicationVenue" # Full object, instead of specific subfields
+  - "journal" # Full object, instead of specific subfields
+  - "publicationDate"
 # Commented fields that could be added later if needed
 # - "publicationTypes"
 # - "openAccessPdf"
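Usage note (not part of the diff): field lists like the ones above are typically joined into the comma-separated `fields` parameter of the Semantic Scholar Graph API. The sketch below only illustrates that pattern; the endpoint, query, and limit are assumptions, not taken from the package's tool code.

    # Hedged sketch: sending a field list to the Semantic Scholar paper search endpoint.
    import requests

    api_fields = [
        "title", "abstract", "year", "authors.name", "authors.authorId",
        "citationCount", "url", "externalIds", "venue", "publicationVenue",
        "journal", "publicationDate",
    ]
    response = requests.get(
        "https://api.semanticscholar.org/graph/v1/paper/search",
        params={"query": "transformers", "fields": ",".join(api_fields), "limit": 5},
        timeout=10,
    )
    for paper in response.json().get("data", []):
        print(paper["title"], paper.get("publicationDate"))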
aiagents4pharma/talk2scholars/state/state_talk2scholars.py

@@ -9,6 +9,7 @@ across agent interactions.
 import logging
 from typing import Annotated, Any, Dict
 from langchain_core.language_models import BaseChatModel
+from langchain_core.embeddings import Embeddings
 from langgraph.prebuilt.chat_agent_executor import AgentState

 # Configure logging
@@ -54,6 +55,8 @@ class Talk2Scholars(AgentState):
         multi_papers (Dict[str, Any]): Stores multiple recommended papers from various sources.
         zotero_read (Dict[str, Any]): Stores the papers retrieved from Zotero.
         llm_model (BaseChatModel): The language model instance used for generating responses.
+        text_embedding_model (Embeddings): The text embedding model used for
+            similarity calculations.
     """

     # Agent state fields
@@ -63,3 +66,4 @@ class Talk2Scholars(AgentState):
     pdf_data: Annotated[Dict[str, Any], replace_dict]
     zotero_read: Annotated[Dict[str, Any], replace_dict]
     llm_model: BaseChatModel
+    text_embedding_model: Embeddings
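Usage note (not part of the diff): the new `text_embedding_model` field is populated alongside `llm_model` when the state is constructed. The sketch below assumes `OpenAIEmbeddings`; the package may wire a different `Embeddings` implementation at runtime.

    # Hedged sketch: constructing the extended Talk2Scholars state.
    from langchain_openai import ChatOpenAI, OpenAIEmbeddings
    from aiagents4pharma.talk2scholars.state.state_talk2scholars import Talk2Scholars

    state = Talk2Scholars(
        messages=[],
        llm_model=ChatOpenAI(model="gpt-4o-mini", temperature=0),
        text_embedding_model=OpenAIEmbeddings(model="text-embedding-3-small"),
    )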
aiagents4pharma/talk2scholars/tests/test_llm_main_integration.py

@@ -1,58 +1,89 @@
 """
 Integration tests for talk2scholars system with OpenAI.
+This test triggers all sub-agents by sending a conversation that covers:
+- Searching Semantic Scholar (S2 agent)
+- Retrieving Zotero results (Zotero agent)
+- Querying PDF content (PDF agent)
+- Downloading paper details from arXiv (Paper Download agent)
 """

-
-import pytest
-import hydra
-from langchain_openai import ChatOpenAI
-from langchain_core.messages import HumanMessage, AIMessage
-from ..agents.main_agent import get_app
-from ..state.state_talk2scholars import Talk2Scholars
+# This will be covered in the next pr.

-#
[old lines 14-58, the rest of the removed test body, are not legible in this diff extract]
+#
+# import os
+# import pytest
+# import hydra
+# from langchain_openai import ChatOpenAI
+# from langchain_core.messages import HumanMessage, AIMessage
+# from ..agents.main_agent import get_app
+# from ..state.state_talk2scholars import Talk2Scholars
+#
+# # pylint: disable=redefined-outer-name,too-few-public-methods
+#
+#
+# @pytest.mark.skipif(
+#     not os.getenv("OPENAI_API_KEY"), reason="Requires OpenAI API key to run"
+# )
+# def test_main_agent_real_llm():
+#     """
+#     Integration test for the Talk2Scholars system using a real OpenAI LLM.
+#     This test verifies that the supervisor correctly routes to all sub-agents by
+#     providing a conversation with queries intended to trigger each agent.
+#     """
+#     # Load Hydra configuration EXACTLY like in main_agent.py
+#     with hydra.initialize(version_base=None, config_path="../configs"):
+#         cfg = hydra.compose(
+#             config_name="config", overrides=["agents/talk2scholars/main_agent=default"]
+#         )
+#         hydra_cfg = cfg.agents.talk2scholars.main_agent
+#         assert hydra_cfg is not None, "Hydra config failed to load"
+#
+#     # Use the real OpenAI API (ensure OPENAI_API_KEY is set in environment)
+#     llm = ChatOpenAI(model="gpt-4o-mini", temperature=hydra_cfg.temperature)
+#
+#     # Initialize the main agent workflow (with real Hydra config)
+#     thread_id = "test_thread"
+#     app = get_app(thread_id, llm)
+#
+#     # Provide a multi-turn conversation intended to trigger all sub-agents:
+#     # - S2 agent: "Search Semantic Scholar for AI papers on transformers."
+#     # - Zotero agent: "Retrieve Zotero results for these papers."
+#     # - PDF agent: "Analyze the attached PDF and summarize its key findings."
+#     # - Paper Download agent: "Download the paper details from arXiv."
+#     initial_state = Talk2Scholars(
+#         messages=[
+#             HumanMessage(
+#                 content="Search Semantic Scholar for AI papers on transformers."
+#             ),
+#             HumanMessage(content="Also, retrieve Zotero results for these papers."),
+#             HumanMessage(
+#                 content="I have attached a PDF; analyze it and tell me the key findings."
+#             ),
+#             HumanMessage(content="Finally, download the paper from arXiv."),
+#         ]
+#     )
+#
+#     # Invoke the agent (which routes to the appropriate sub-agents)
+#     result = app.invoke(
+#         initial_state,
+#         {"configurable": {"config_id": thread_id, "thread_id": thread_id}},
+#     )
+#
+#     # Assert that the result contains messages and that the final message is valid.
+#     assert "messages" in result, "Expected 'messages' in the response"
+#     last_message = result["messages"][-1]
+#     assert isinstance(
+#         last_message, (HumanMessage, AIMessage, str)
+#     ), "Last message should be a valid response type"
+#
+#     # Concatenate message texts (if available) to perform keyword checks.
+#     output_text = " ".join(
+#         msg.content if hasattr(msg, "content") else str(msg)
+#         for msg in result["messages"]
+#     ).lower()
+#
+#     # Check for keywords that suggest each sub-agent was invoked.
+#     for keyword in ["semantic scholar", "zotero", "pdf", "arxiv"]:
+#         assert (
+#             keyword in output_text
+#         ), f"Expected keyword '{keyword}' in the output response"
aiagents4pharma/talk2scholars/tests/test_main_agent.py

@@ -65,6 +65,13 @@ def dummy_get_app_zotero(uniq_id, llm_model):
     return DummyWorkflow(supervisor_args={"agent": "zotero", "uniq_id": uniq_id})


+def dummy_get_app_pdf(uniq_id, llm_model):
+    """Return a DummyWorkflow for the PDF agent."""
+    dummy_get_app_pdf.called_uniq_id = uniq_id
+    dummy_get_app_pdf.called_llm_model = llm_model
+    return DummyWorkflow(supervisor_args={"agent": "pdf", "uniq_id": uniq_id})
+
+
 def dummy_create_supervisor(apps, model, state_schema, **kwargs):
     """Return a DummyWorkflow for the supervisor."""
     dummy_create_supervisor.called_kwargs = kwargs
@@ -136,6 +143,15 @@ def patch_hydra(monkeypatch):
     )


+def dummy_get_app_paper_download(uniq_id, llm_model):
+    """Return a DummyWorkflow for the paper download agent."""
+    dummy_get_app_paper_download.called_uniq_id = uniq_id
+    dummy_get_app_paper_download.called_llm_model = llm_model
+    return DummyWorkflow(
+        supervisor_args={"agent": "paper_download", "uniq_id": uniq_id}
+    )
+
+
 @pytest.fixture(autouse=True)
 def patch_sub_agents_and_supervisor(monkeypatch):
     """Patch the sub-agents and supervisor creation functions."""
@@ -146,6 +162,14 @@ def patch_sub_agents_and_supervisor(monkeypatch):
         "aiagents4pharma.talk2scholars.agents.main_agent.get_app_zotero",
         dummy_get_app_zotero,
     )
+    monkeypatch.setattr(
+        "aiagents4pharma.talk2scholars.agents.main_agent.get_app_pdf",
+        dummy_get_app_pdf,
+    )
+    monkeypatch.setattr(
+        "aiagents4pharma.talk2scholars.agents.main_agent.get_app_paper_download",
+        dummy_get_app_paper_download,
+    )
     monkeypatch.setattr(
         "aiagents4pharma.talk2scholars.agents.main_agent.create_supervisor",
         dummy_create_supervisor,
|