aiagents4pharma 1.27.2__py3-none-any.whl → 1.29.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the versions exactly as they appear in the public registry.
- aiagents4pharma/talk2scholars/agents/__init__.py +1 -0
- aiagents4pharma/talk2scholars/agents/main_agent.py +35 -209
- aiagents4pharma/talk2scholars/agents/pdf_agent.py +106 -0
- aiagents4pharma/talk2scholars/agents/s2_agent.py +10 -6
- aiagents4pharma/talk2scholars/agents/zotero_agent.py +12 -6
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/__init__.py +1 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +2 -48
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/pdf_agent/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +5 -28
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/zotero_agent/default.yaml +5 -21
- aiagents4pharma/talk2scholars/configs/config.yaml +3 -0
- aiagents4pharma/talk2scholars/configs/tools/__init__.py +2 -0
- aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml +1 -1
- aiagents4pharma/talk2scholars/configs/tools/question_and_answer/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/search/default.yaml +1 -1
- aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml +1 -1
- aiagents4pharma/talk2scholars/configs/tools/zotero_read/default.yaml +42 -1
- aiagents4pharma/talk2scholars/configs/tools/zotero_write/__inti__.py +3 -0
- aiagents4pharma/talk2scholars/state/state_talk2scholars.py +1 -0
- aiagents4pharma/talk2scholars/tests/test_main_agent.py +186 -111
- aiagents4pharma/talk2scholars/tests/test_pdf_agent.py +126 -0
- aiagents4pharma/talk2scholars/tests/test_question_and_answer_tool.py +186 -0
- aiagents4pharma/talk2scholars/tests/test_s2_display.py +74 -0
- aiagents4pharma/talk2scholars/tests/test_s2_multi.py +282 -0
- aiagents4pharma/talk2scholars/tests/test_s2_query.py +78 -0
- aiagents4pharma/talk2scholars/tests/test_s2_retrieve.py +65 -0
- aiagents4pharma/talk2scholars/tests/test_s2_search.py +266 -0
- aiagents4pharma/talk2scholars/tests/test_s2_single.py +274 -0
- aiagents4pharma/talk2scholars/tests/test_zotero_path.py +57 -0
- aiagents4pharma/talk2scholars/tests/test_zotero_read.py +412 -0
- aiagents4pharma/talk2scholars/tests/test_zotero_write.py +626 -0
- aiagents4pharma/talk2scholars/tools/__init__.py +1 -0
- aiagents4pharma/talk2scholars/tools/pdf/__init__.py +5 -0
- aiagents4pharma/talk2scholars/tools/pdf/question_and_answer.py +170 -0
- aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py +50 -34
- aiagents4pharma/talk2scholars/tools/s2/query_results.py +1 -1
- aiagents4pharma/talk2scholars/tools/s2/retrieve_semantic_scholar_paper_id.py +8 -8
- aiagents4pharma/talk2scholars/tools/s2/search.py +36 -23
- aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py +44 -38
- aiagents4pharma/talk2scholars/tools/zotero/__init__.py +2 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/__init__.py +5 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/zotero_path.py +63 -0
- aiagents4pharma/talk2scholars/tools/zotero/zotero_read.py +64 -19
- aiagents4pharma/talk2scholars/tools/zotero/zotero_write.py +247 -0
- {aiagents4pharma-1.27.2.dist-info → aiagents4pharma-1.29.0.dist-info}/METADATA +6 -5
- {aiagents4pharma-1.27.2.dist-info → aiagents4pharma-1.29.0.dist-info}/RECORD +49 -33
- aiagents4pharma/talk2scholars/tests/test_call_s2.py +0 -100
- aiagents4pharma/talk2scholars/tests/test_call_zotero.py +0 -94
- aiagents4pharma/talk2scholars/tests/test_s2_tools.py +0 -355
- aiagents4pharma/talk2scholars/tests/test_zotero_tool.py +0 -171
- {aiagents4pharma-1.27.2.dist-info → aiagents4pharma-1.29.0.dist-info}/LICENSE +0 -0
- {aiagents4pharma-1.27.2.dist-info → aiagents4pharma-1.29.0.dist-info}/WHEEL +0 -0
- {aiagents4pharma-1.27.2.dist-info → aiagents4pharma-1.29.0.dist-info}/top_level.txt +0 -0
aiagents4pharma/talk2scholars/configs/tools/zotero_read/default.yaml:

@@ -12,4 +12,45 @@ search_params:
 # Item Types and Limit
 zotero:
   max_limit: 100
-  filter_item_types:
+  filter_item_types:
+    [
+      "Artwork",
+      "Audio Recording",
+      "Bill",
+      "Blog Post",
+      "Book",
+      "Book Section",
+      "Case",
+      "Conference Paper",
+      "Dataset",
+      "Dictionary Entry",
+      "Document",
+      "E-mail",
+      "Encyclopedia Article",
+      "Film",
+      "Forum Post",
+      "Hearing",
+      "Instant Message",
+      "Interview",
+      "Journal Article",
+      "Letter",
+      "Magazine Article",
+      "Manuscript",
+      "Map",
+      "Newspaper Article",
+      "Patent",
+      "Podcast",
+      "Preprint",
+      "Presentation",
+      "Radio Broadcast",
+      "Report",
+      "Software",
+      "Standard",
+      "Statute",
+      "Thesis",
+      "TV Broadcast",
+      "Video Recording",
+      "Web Page",
+    ]
+
+  filter_excluded_types: ["attachment", "note", "annotation"]
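The new keys indicate that the Zotero read tool now restricts results to an explicit whitelist of item types and skips attachments, notes, and annotations. The actual filtering code lives in `tools/zotero/zotero_read.py` and is not part of this hunk; the snippet below is only a minimal sketch of how such a config could be applied, with the item structure and helper name invented for illustration.

```python
# Hypothetical sketch of applying the new Zotero type filters; the item dicts
# and the helper name are invented, not taken from zotero_read.py.
ALLOWED_TYPES = {"Journal Article", "Preprint", "Conference Paper"}  # subset of filter_item_types
EXCLUDED_TYPES = {"attachment", "note", "annotation"}                # filter_excluded_types

def keep_item(item: dict) -> bool:
    """Keep an item only if its type is allowed and not explicitly excluded."""
    item_type = item.get("data", {}).get("itemType", "")
    return item_type not in EXCLUDED_TYPES and item_type in ALLOWED_TYPES

items = [
    {"data": {"itemType": "Journal Article", "title": "Paper A"}},
    {"data": {"itemType": "attachment", "title": "Paper A (PDF)"}},
]
print([it["data"]["title"] for it in items if keep_item(it)])  # -> ['Paper A']
```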
aiagents4pharma/talk2scholars/state/state_talk2scholars.py:

@@ -60,5 +60,6 @@ class Talk2Scholars(AgentState):
     last_displayed_papers: Annotated[Dict[str, Any], replace_dict]
     papers: Annotated[Dict[str, Any], replace_dict]
     multi_papers: Annotated[Dict[str, Any], replace_dict]
+    pdf_data: Annotated[Dict[str, Any], replace_dict]
     zotero_read: Annotated[Dict[str, Any], replace_dict]
     llm_model: BaseChatModel
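The single added line gives the shared `Talk2Scholars` state a `pdf_data` slot, annotated with the same `replace_dict` reducer as the neighbouring fields. `replace_dict` itself is defined elsewhere in the package and is not shown in this diff; assuming from its name and usage that it replaces rather than merges the stored dictionary, the field behaves roughly as in this sketch.

```python
from typing import Annotated, Any, Dict, TypedDict

def replace_dict(existing: Dict[str, Any], new: Dict[str, Any]) -> Dict[str, Any]:
    """Assumed reducer semantics: the incoming dict replaces the stored one outright."""
    return new

class StateSketch(TypedDict):
    # Mirrors the shape of the field added to Talk2Scholars above.
    pdf_data: Annotated[Dict[str, Any], replace_dict]

# A node that returns {"pdf_data": {...}} would therefore overwrite, not merge,
# whatever an earlier PDF lookup stored under this key.
old = {"page": 1, "text": "old extract"}
new = {"page": 2, "text": "new extract"}
print(replace_dict(old, new))  # {'page': 2, 'text': 'new extract'}
```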
aiagents4pharma/talk2scholars/tests/test_main_agent.py:

@@ -3,119 +3,194 @@ Unit tests for main agent functionality.
 Tests the supervisor agent's routing logic and state management.
 """

-# pylint: disable=redefined-outer-name
 # pylint: disable=redefined-outer-name,too-few-public-methods
-
-from …
+
+from types import SimpleNamespace
 import pytest
-
-from …
-from …
-from …
+import hydra
+from langchain_core.language_models.chat_models import BaseChatModel
+from langchain_openai import ChatOpenAI
+from pydantic import Field
+from aiagents4pharma.talk2scholars.agents.main_agent import get_app
+
+# --- Dummy LLM Implementation ---
+
+
+class DummyLLM(BaseChatModel):
+    """A dummy language model implementation for testing purposes."""
+
+    model_name: str = Field(...)
+
+    def _generate(self, prompt, stop=None):
+        """Generate a response given a prompt."""
+        DummyLLM.called_prompt = prompt
+        return "dummy output"
+
+    @property
+    def _llm_type(self):
+        """Return the type of the language model."""
+        return "dummy"
+
+
+# --- Dummy Workflow and Sub-agent Functions ---
+
+
+class DummyWorkflow:
+    """A dummy workflow class that records arguments for verification."""
+
+    def __init__(self, supervisor_args=None):
+        """Initialize the workflow with the given supervisor arguments."""
+        self.supervisor_args = supervisor_args or {}
+        self.checkpointer = None
+        self.name = None
+
+    def compile(self, checkpointer, name):
+        """Compile the workflow with the given checkpointer and name."""
+        self.checkpointer = checkpointer
+        self.name = name
+        return self
+
+
+def dummy_get_app_s2(uniq_id, llm_model):
+    """Return a DummyWorkflow for the S2 agent."""
+    dummy_get_app_s2.called_uniq_id = uniq_id
+    dummy_get_app_s2.called_llm_model = llm_model
+    return DummyWorkflow(supervisor_args={"agent": "s2", "uniq_id": uniq_id})
+
+
+def dummy_get_app_zotero(uniq_id, llm_model):
+    """Return a DummyWorkflow for the Zotero agent."""
+    dummy_get_app_zotero.called_uniq_id = uniq_id
+    dummy_get_app_zotero.called_llm_model = llm_model
+    return DummyWorkflow(supervisor_args={"agent": "zotero", "uniq_id": uniq_id})
+
+
+def dummy_create_supervisor(apps, model, state_schema, **kwargs):
+    """Return a DummyWorkflow for the supervisor."""
+    dummy_create_supervisor.called_kwargs = kwargs
+    return DummyWorkflow(
+        supervisor_args={
+            "apps": apps,
+            "model": model,
+            "state_schema": state_schema,
+            **kwargs,
+        }
+    )
+
+
+# --- Dummy Hydra Configuration Setup ---
+
+
+class DummyHydraContext:
+    """A dummy context manager for mocking Hydra's initialize and compose functions."""
+
+    def __enter__(self):
+        """Return None when entering the context."""
+        return None
+
+    def __exit__(self, exc_type, exc_val, traceback):
+        """Exit function that does nothing."""
+        return None
+
+
+def dict_to_namespace(d):
+    """Convert a dictionary to a SimpleNamespace object."""
+    return SimpleNamespace(
+        **{
+            key: dict_to_namespace(val) if isinstance(val, dict) else val
+            for key, val in d.items()
+        }
+    )
+
+
+dummy_config = {
+    "agents": {
+        "talk2scholars": {"main_agent": {"system_prompt": "Dummy system prompt"}}
+    }
+}
+
+
+class DummyHydraCompose:
+    """A dummy class that returns a namespace from a dummy config dictionary."""
+
+    def __init__(self, config):
+        """Constructor that stores the dummy config."""
+        self.config = config
+
+    def __getattr__(self, item):
+        """Return a namespace from the dummy config."""
+        return dict_to_namespace(self.config.get(item, {}))
+
+
+# --- Pytest Fixtures to Patch Dependencies ---
+
+
+@pytest.fixture(autouse=True)
+def patch_hydra(monkeypatch):
+    """Patch the hydra.initialize and hydra.compose functions to return dummy objects."""
+    monkeypatch.setattr(
+        hydra, "initialize", lambda version_base, config_path: DummyHydraContext()
+    )
+    monkeypatch.setattr(
+        hydra, "compose", lambda config_name, overrides: DummyHydraCompose(dummy_config)
+    )


 @pytest.fixture(autouse=True)
-def …
-    """…
-    … (remaining lines of this removed fixture were not rendered in the source diff) …
-
-def …
-    """…
-    … (docstring lines not rendered in the source diff) …
-    """
-    mock_llm = Mock()
-    thread_id = "test_thread"
-
-    class MockRouter:
-        """Mock router class."""
-
-        next = random.choice(["s2_agent", "zotero_agent"])
-
-    with (
-        patch.object(mock_llm, "with_structured_output", return_value=mock_llm),
-        patch.object(mock_llm, "invoke", return_value=MockRouter()),
-    ):
-        supervisor_node = make_supervisor_node(mock_llm, thread_id)
-        mock_state = Talk2Scholars(messages=[HumanMessage(content="Find AI papers")])
-        result = supervisor_node(mock_state)
-
-        # Accept either "s2_agent" or "zotero_agent"
-        assert result.goto in ["s2_agent", "zotero_agent"]
-
-
-def test_supervisor_node_finish():
-    """Test that supervisor node correctly handles FINISH case."""
-    mock_llm = Mock()
-    thread_id = "test_thread"
-
-    class MockRouter:
-        """Mock router class."""
-
-        next = "FINISH"
-
-    class MockAIResponse:
-        """Mock AI response class."""
-
-        def __init__(self):
-            self.content = "Final AI Response"
-
-    with (
-        patch.object(mock_llm, "with_structured_output", return_value=mock_llm),
-        patch.object(mock_llm, "invoke", side_effect=[MockRouter(), MockAIResponse()]),
-    ):
-        supervisor_node = make_supervisor_node(mock_llm, thread_id)
-        mock_state = Talk2Scholars(messages=[HumanMessage(content="End conversation")])
-        result = supervisor_node(mock_state)
-        assert result.goto == END
-        assert "messages" in result.update
-        assert isinstance(result.update["messages"], AIMessage)
-        assert result.update["messages"].content == "Final AI Response"
+def patch_sub_agents_and_supervisor(monkeypatch):
+    """Patch the sub-agents and supervisor creation functions."""
+    monkeypatch.setattr(
+        "aiagents4pharma.talk2scholars.agents.main_agent.get_app_s2", dummy_get_app_s2
+    )
+    monkeypatch.setattr(
+        "aiagents4pharma.talk2scholars.agents.main_agent.get_app_zotero",
+        dummy_get_app_zotero,
+    )
+    monkeypatch.setattr(
+        "aiagents4pharma.talk2scholars.agents.main_agent.create_supervisor",
+        dummy_create_supervisor,
+    )
+
+
+# --- Test Cases ---
+
+
+def test_dummy_llm_generate():
+    """Test the dummy LLM's generate function."""
+    dummy = DummyLLM(model_name="test-model")
+    output = getattr(dummy, "_generate")("any prompt")
+    assert output == "dummy output"
+
+
+def test_dummy_llm_llm_type():
+    """Test the dummy LLM's _llm_type property."""
+    dummy = DummyLLM(model_name="test-model")
+    assert getattr(dummy, "_llm_type") == "dummy"
+
+
+def test_get_app_with_gpt4o_mini():
+    """
+    Test that get_app replaces a 'gpt-4o-mini' LLM with a new ChatOpenAI instance.
+    """
+    uniq_id = "test_thread"
+    dummy_llm = DummyLLM(model_name="gpt-4o-mini")
+    app = get_app(uniq_id, dummy_llm)
+
+    supervisor_args = getattr(app, "supervisor_args", {})
+    assert isinstance(supervisor_args.get("model"), ChatOpenAI)
+    assert supervisor_args.get("prompt") == "Dummy system prompt"
+    assert getattr(app, "name", "") == "Talk2Scholars_MainAgent"
+
+
+def test_get_app_with_other_model():
+    """
+    Test that get_app does not replace the LLM if its model_name is not 'gpt-4o-mini'.
+    """
+    uniq_id = "test_thread_2"
+    dummy_llm = DummyLLM(model_name="other-model")
+    app = get_app(uniq_id, dummy_llm)
+
+    supervisor_args = getattr(app, "supervisor_args", {})
+    assert supervisor_args.get("model") is dummy_llm
+    assert supervisor_args.get("prompt") == "Dummy system prompt"
+    assert getattr(app, "name", "") == "Talk2Scholars_MainAgent"
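The rewritten suite drives `get_app(uniq_id, llm_model)` end to end with Hydra, the two sub-agents, and `create_supervisor` mocked out. Outside the tests, the same entry point would be called roughly as below; the invocation pattern is inferred from these tests rather than documented here, and the model, thread id, and query are placeholders (a real OpenAI API key would be required).

```python
# Rough usage sketch of the main agent entry point exercised by the tests above.
import uuid

from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

from aiagents4pharma.talk2scholars.agents.main_agent import get_app

uniq_id = str(uuid.uuid4())
app = get_app(uniq_id, ChatOpenAI(model="gpt-4o-mini"))

result = app.invoke(
    {"messages": [HumanMessage(content="Find recent papers on tumor growth models")]},
    config={"configurable": {"thread_id": uniq_id}},
)
print(result["messages"][-1].content)
```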
aiagents4pharma/talk2scholars/tests/test_pdf_agent.py (new file):

@@ -0,0 +1,126 @@
+"""
+Unit Tests for the PDF agent.
+"""
+
+# pylint: disable=redefined-outer-name
+from unittest import mock
+import pytest
+from langchain_core.messages import HumanMessage, AIMessage
+from ..agents.pdf_agent import get_app
+from ..state.state_talk2scholars import Talk2Scholars
+
+
+@pytest.fixture(autouse=True)
+def mock_hydra_fixture():
+    """Mock Hydra configuration to prevent external dependencies."""
+    with mock.patch("hydra.initialize"), mock.patch("hydra.compose") as mock_compose:
+        # Create a mock configuration with a pdf_agent section.
+        cfg_mock = mock.MagicMock()
+        # The pdf_agent config will be accessed as cfg.agents.talk2scholars.pdf_agent in get_app.
+        cfg_mock.agents.talk2scholars.pdf_agent.some_property = "Test prompt"
+        mock_compose.return_value = cfg_mock
+        yield mock_compose
+
+
+@pytest.fixture
+def mock_tools_fixture():
+    """Mock PDF agent tools to prevent execution of real API calls."""
+    with (
+        mock.patch(
+            "aiagents4pharma.talk2scholars.agents.pdf_agent.question_and_answer_tool"
+        ) as mock_question_and_answer_tool,
+        mock.patch(
+            "aiagents4pharma.talk2scholars.agents.pdf_agent.query_results"
+        ) as mock_query_results,
+    ):
+        mock_question_and_answer_tool.return_value = {
+            "result": "Mock Question and Answer Result"
+        }
+        mock_query_results.return_value = {"result": "Mock Query Result"}
+        yield [mock_question_and_answer_tool, mock_query_results]
+
+
+@pytest.fixture
+def mock_llm():
+    """Provide a dummy language model to pass into get_app."""
+    return mock.Mock()
+
+
+@pytest.mark.usefixtures("mock_hydra_fixture")
+def test_pdf_agent_initialization(mock_llm):
+    """Test that PDF agent initializes correctly with mock configuration."""
+    thread_id = "test_thread"
+    with mock.patch(
+        "aiagents4pharma.talk2scholars.agents.pdf_agent.create_react_agent"
+    ) as mock_create:
+        mock_create.return_value = mock.Mock()
+        app = get_app(thread_id, mock_llm)
+        assert app is not None
+        assert mock_create.called
+
+
+def test_pdf_agent_invocation(mock_llm):
+    """Test that the PDF agent processes user input and returns a valid response."""
+    thread_id = "test_thread"
+    # Create a sample state with a human message.
+    mock_state = Talk2Scholars(
+        messages=[HumanMessage(content="Extract key data from PDF")]
+    )
+    with mock.patch(
+        "aiagents4pharma.talk2scholars.agents.pdf_agent.create_react_agent"
+    ) as mock_create:
+        mock_agent = mock.Mock()
+        mock_create.return_value = mock_agent
+        # Simulate a response from the PDF agent.
+        mock_agent.invoke.return_value = {
+            "messages": [
+                AIMessage(content="PDF content extracted successfully")
+            ],
+            "pdf_data": {"page": 1, "text": "Sample PDF text"},
+        }
+        app = get_app(thread_id, mock_llm)
+        result = app.invoke(
+            mock_state,
+            config={
+                "configurable": {
+                    "thread_id": thread_id,
+                    "checkpoint_ns": "test_ns",
+                    "checkpoint_id": "test_checkpoint",
+                }
+            },
+        )
+        assert "messages" in result
+        assert "pdf_data" in result
+        assert result["pdf_data"]["page"] == 1
+
+
+def test_pdf_agent_tools_assignment(request, mock_llm):
+    """Ensure that the correct tools are assigned to the PDF agent."""
+    thread_id = "test_thread"
+    mock_tools = request.getfixturevalue("mock_tools_fixture")
+    with (
+        mock.patch(
+            "aiagents4pharma.talk2scholars.agents.pdf_agent.create_react_agent"
+        ) as mock_create,
+        mock.patch(
+            "aiagents4pharma.talk2scholars.agents.pdf_agent.ToolNode"
+        ) as mock_toolnode,
+    ):
+        mock_agent = mock.Mock()
+        mock_create.return_value = mock_agent
+        mock_tool_instance = mock.Mock()
+        # For the PDF agent, we expect two tools: question_and_answer_tool and query_results.
+        mock_tool_instance.tools = mock_tools
+        mock_toolnode.return_value = mock_tool_instance
+        get_app(thread_id, mock_llm)
+        assert mock_toolnode.called
+        assert len(mock_tool_instance.tools) == 2
+
+
+def test_pdf_agent_hydra_failure(mock_llm):
+    """Test exception handling when Hydra fails to load config for PDF agent."""
+    thread_id = "test_thread"
+    with mock.patch("hydra.initialize", side_effect=Exception("Hydra error")):
+        with pytest.raises(Exception) as exc_info:
+            get_app(thread_id, mock_llm)
+        assert "Hydra error" in str(exc_info.value)