aiagents4pharma 1.27.2__py3-none-any.whl → 1.29.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. aiagents4pharma/talk2scholars/agents/__init__.py +1 -0
  2. aiagents4pharma/talk2scholars/agents/main_agent.py +35 -209
  3. aiagents4pharma/talk2scholars/agents/pdf_agent.py +106 -0
  4. aiagents4pharma/talk2scholars/agents/s2_agent.py +10 -6
  5. aiagents4pharma/talk2scholars/agents/zotero_agent.py +12 -6
  6. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/__init__.py +1 -0
  7. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +2 -48
  8. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/pdf_agent/__init__.py +3 -0
  9. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +5 -28
  10. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/zotero_agent/default.yaml +5 -21
  11. aiagents4pharma/talk2scholars/configs/config.yaml +3 -0
  12. aiagents4pharma/talk2scholars/configs/tools/__init__.py +2 -0
  13. aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml +1 -1
  14. aiagents4pharma/talk2scholars/configs/tools/question_and_answer/__init__.py +3 -0
  15. aiagents4pharma/talk2scholars/configs/tools/search/default.yaml +1 -1
  16. aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml +1 -1
  17. aiagents4pharma/talk2scholars/configs/tools/zotero_read/default.yaml +42 -1
  18. aiagents4pharma/talk2scholars/configs/tools/zotero_write/__inti__.py +3 -0
  19. aiagents4pharma/talk2scholars/state/state_talk2scholars.py +1 -0
  20. aiagents4pharma/talk2scholars/tests/test_main_agent.py +186 -111
  21. aiagents4pharma/talk2scholars/tests/test_pdf_agent.py +126 -0
  22. aiagents4pharma/talk2scholars/tests/test_question_and_answer_tool.py +186 -0
  23. aiagents4pharma/talk2scholars/tests/test_s2_display.py +74 -0
  24. aiagents4pharma/talk2scholars/tests/test_s2_multi.py +282 -0
  25. aiagents4pharma/talk2scholars/tests/test_s2_query.py +78 -0
  26. aiagents4pharma/talk2scholars/tests/test_s2_retrieve.py +65 -0
  27. aiagents4pharma/talk2scholars/tests/test_s2_search.py +266 -0
  28. aiagents4pharma/talk2scholars/tests/test_s2_single.py +274 -0
  29. aiagents4pharma/talk2scholars/tests/test_zotero_path.py +57 -0
  30. aiagents4pharma/talk2scholars/tests/test_zotero_read.py +412 -0
  31. aiagents4pharma/talk2scholars/tests/test_zotero_write.py +626 -0
  32. aiagents4pharma/talk2scholars/tools/__init__.py +1 -0
  33. aiagents4pharma/talk2scholars/tools/pdf/__init__.py +5 -0
  34. aiagents4pharma/talk2scholars/tools/pdf/question_and_answer.py +170 -0
  35. aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py +50 -34
  36. aiagents4pharma/talk2scholars/tools/s2/query_results.py +1 -1
  37. aiagents4pharma/talk2scholars/tools/s2/retrieve_semantic_scholar_paper_id.py +8 -8
  38. aiagents4pharma/talk2scholars/tools/s2/search.py +36 -23
  39. aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py +44 -38
  40. aiagents4pharma/talk2scholars/tools/zotero/__init__.py +2 -0
  41. aiagents4pharma/talk2scholars/tools/zotero/utils/__init__.py +5 -0
  42. aiagents4pharma/talk2scholars/tools/zotero/utils/zotero_path.py +63 -0
  43. aiagents4pharma/talk2scholars/tools/zotero/zotero_read.py +64 -19
  44. aiagents4pharma/talk2scholars/tools/zotero/zotero_write.py +247 -0
  45. {aiagents4pharma-1.27.2.dist-info → aiagents4pharma-1.29.0.dist-info}/METADATA +6 -5
  46. {aiagents4pharma-1.27.2.dist-info → aiagents4pharma-1.29.0.dist-info}/RECORD +49 -33
  47. aiagents4pharma/talk2scholars/tests/test_call_s2.py +0 -100
  48. aiagents4pharma/talk2scholars/tests/test_call_zotero.py +0 -94
  49. aiagents4pharma/talk2scholars/tests/test_s2_tools.py +0 -355
  50. aiagents4pharma/talk2scholars/tests/test_zotero_tool.py +0 -171
  51. {aiagents4pharma-1.27.2.dist-info → aiagents4pharma-1.29.0.dist-info}/LICENSE +0 -0
  52. {aiagents4pharma-1.27.2.dist-info → aiagents4pharma-1.29.0.dist-info}/WHEEL +0 -0
  53. {aiagents4pharma-1.27.2.dist-info → aiagents4pharma-1.29.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,170 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ question_and_answer: Tool for performing Q&A on PDF documents using retrieval augmented generation.
4
+
5
+ This module provides functionality to extract text from PDF binary data, split it into
6
+ chunks, retrieve relevant segments via a vector store, and generate an answer to a
7
+ user-provided question using a language model chain.
8
+ """
9
+
10
+ import io
11
+ import logging
12
+ from typing import Annotated, Dict, Any, List
13
+
14
+ from PyPDF2 import PdfReader
15
+ from pydantic import BaseModel, Field
16
+ import hydra
17
+
18
+ from langchain.chains.question_answering import load_qa_chain
19
+ from langchain.docstore.document import Document
20
+ from langchain.text_splitter import CharacterTextSplitter
21
+ from langchain_community.vectorstores import Annoy
22
+ from langchain_openai import OpenAIEmbeddings
23
+ from langchain_core.language_models.chat_models import BaseChatModel
24
+
25
+ from langchain_core.messages import ToolMessage
26
+ from langchain_core.tools import tool
27
+ from langchain_core.tools.base import InjectedToolCallId
28
+ from langgraph.types import Command
29
+ from langgraph.prebuilt import InjectedState
30
+
31
+ # Set up logging.
32
+ logging.basicConfig(level=logging.INFO)
33
+ logger = logging.getLogger(__name__)
34
+ logger.setLevel(logging.INFO)
35
+
36
# Load configuration using Hydra.
# NOTE(review): this runs at import time, so merely importing this module
# composes the Hydra config and emits a log line. The other tools changed in
# this release load their config inside the tool function instead — consider
# aligning (would require threading cfg into generate_answer).
with hydra.initialize(version_base=None, config_path="../../configs"):
    cfg = hydra.compose(config_name="config", overrides=["tools/question_and_answer=default"])
    # Narrow the composed config to this tool's section.
    cfg = cfg.tools.question_and_answer
    logger.info("Loaded Question and Answer tool configuration.")
41
+
42
class QuestionAndAnswerInput(BaseModel):
    """
    Input schema for the PDF Question and Answer tool.

    Attributes:
        question (str): The question to ask regarding the PDF content.
        tool_call_id (str): Unique identifier for the tool call, injected automatically.
        state (dict): Agent state, injected automatically; expected to carry the
            PDF data and the LLM model instance used by the tool.
    """
    question: str = Field(
        description="The question to ask regarding the PDF content."
    )
    tool_call_id: Annotated[str, InjectedToolCallId]
    # Injected LangGraph state; read (not validated) by the tool at call time.
    state: Annotated[dict, InjectedState]
55
+
56
def extract_text_from_pdf_data(pdf_bytes: bytes) -> str:
    """
    Extract text content from PDF binary data.

    This function uses PyPDF2 to read the provided PDF bytes and concatenates
    the text extracted from each page. Pages with no extractable text
    contribute an empty string (``extract_text`` may return ``None``).

    Args:
        pdf_bytes (bytes): The binary data of the PDF document.

    Returns:
        str: The complete text extracted from the PDF.
    """
    reader = PdfReader(io.BytesIO(pdf_bytes))
    # str.join builds the result in one pass; repeated `text += page_text`
    # is quadratic on documents with many pages.
    return "".join(page.extract_text() or "" for page in reader.pages)
75
+
76
def generate_answer(question: str, pdf_bytes: bytes, llm_model: BaseChatModel) -> Dict[str, Any]:
    """
    Generate an answer for a question using retrieval augmented generation on PDF content.

    Extracts the text from the PDF bytes, splits it into overlapping chunks,
    retrieves the most relevant chunks from an Annoy vector store built with
    OpenAI embeddings, and runs a question-answering chain (built from the
    provided llm_model) over the retrieved context.

    Args:
        question (str): The question to be answered.
        pdf_bytes (bytes): The binary content of the PDF document.
        llm_model (BaseChatModel): The language model instance to use for answering.

    Returns:
        Dict[str, Any]: A dictionary containing the answer generated by the language model.
    """
    full_text = extract_text_from_pdf_data(pdf_bytes)
    logger.info("Extracted text from PDF.")

    splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=cfg.chunk_size,
        chunk_overlap=cfg.chunk_overlap,
    )
    docs: List[Document] = [
        Document(page_content=piece) for piece in splitter.split_text(full_text)
    ]
    logger.info("Split PDF text into %d chunks.", len(docs))

    embeddings = OpenAIEmbeddings(openai_api_key=cfg.openai_api_key)
    store = Annoy.from_documents(docs, embeddings)
    relevant_docs = store.similarity_search(question, k=cfg.num_retrievals)
    logger.info("Retrieved %d relevant document chunks.", len(relevant_docs))

    # Build the QA chain with the provided llm_model and run it over the
    # retrieved context.
    chain = load_qa_chain(llm_model, chain_type=cfg.qa_chain_type)
    return chain.invoke(
        input={"input_documents": relevant_docs, "question": question}
    )
116
+
117
@tool(args_schema=QuestionAndAnswerInput)
def question_and_answer_tool(
    question: str,
    tool_call_id: Annotated[str, InjectedToolCallId],
    state: Annotated[dict, InjectedState],
) -> Dict[str, Any]:
    """
    Answer a question using PDF content stored in the state via retrieval augmented generation.

    This tool retrieves the PDF binary data from the state (under the key "pdf_data"), extracts its
    textual content, and generates an answer to the specified question. It also extracts the
    llm_model (of type BaseChatModel) from the state to use for answering.

    Args:
        question (str): The question regarding the PDF content.
        tool_call_id (str): Unique identifier for the current tool call.
        state (dict): A dictionary representing the current state, expected to contain PDF data
                      under the key "pdf_data" with a sub-key "pdf_object" for the binary content,
                      and a key "llm_model" holding the language model instance.

    Returns:
        Dict[str, Any]: A dictionary containing the generated answer or an error message.
            NOTE(review): the success path returns the QA-chain result dict, the two
            missing-PDF paths return a langgraph Command carrying an error ToolMessage,
            and the missing-LLM path returns a plain {"error": ...} dict — three
            different return shapes; consider unifying on Command.
    """
    logger.info("Starting PDF Question and Answer tool using PDF data from state.")
    pdf_state = state.get("pdf_data")
    if not pdf_state:
        error_msg = "No pdf_data found in state."
        logger.error(error_msg)
        # Surface the failure back to the agent as a ToolMessage.
        return Command(
            update={
                "messages": [
                    ToolMessage(content=error_msg, tool_call_id=tool_call_id)
                ]
            }
        )
    pdf_bytes = pdf_state.get("pdf_object")
    if not pdf_bytes:
        error_msg = "PDF binary data is missing in the pdf_data from state."
        logger.error(error_msg)
        return Command(
            update={
                "messages": [
                    ToolMessage(content=error_msg, tool_call_id=tool_call_id)
                ]
            }
        )
    # The LLM model is required; there is no fallback default if it is absent.
    llm_model = state.get("llm_model")
    if not llm_model:
        logger.error("Missing LLM model instance in state.")
        return {"error": "No LLM model found in state."}
    answer = generate_answer(question, pdf_bytes, llm_model)
    logger.info("Generated answer: %s", answer)
    return answer
@@ -16,6 +16,7 @@ from langchain_core.tools.base import InjectedToolCallId
16
16
  from langgraph.types import Command
17
17
  from pydantic import BaseModel, Field
18
18
 
19
+ # pylint: disable=R0914,R0912,R0915
19
20
 
20
21
  # Configure logging
21
22
  logging.basicConfig(level=logging.INFO)
@@ -26,7 +27,7 @@ class MultiPaperRecInput(BaseModel):
26
27
  """Input schema for multiple paper recommendations tool."""
27
28
 
28
29
  paper_ids: List[str] = Field(
29
- description=("List of Semantic Scholar Paper IDs to get recommendations for")
30
+ description="List of Semantic Scholar Paper IDs to get recommendations for"
30
31
  )
31
32
  limit: int = Field(
32
33
  default=2,
@@ -44,14 +45,6 @@ class MultiPaperRecInput(BaseModel):
44
45
  model_config = {"arbitrary_types_allowed": True}
45
46
 
46
47
 
47
- # Load hydra configuration
48
- with hydra.initialize(version_base=None, config_path="../../configs"):
49
- cfg = hydra.compose(
50
- config_name="config", overrides=["tools/multi_paper_recommendation=default"]
51
- )
52
- cfg = cfg.tools.multi_paper_recommendation
53
-
54
-
55
48
  @tool(args_schema=MultiPaperRecInput, parse_docstring=True)
56
49
  def get_multi_paper_recommendations(
57
50
  paper_ids: List[str],
@@ -73,7 +66,14 @@ def get_multi_paper_recommendations(
73
66
  Returns:
74
67
  Dict[str, Any]: The recommendations and related information.
75
68
  """
76
- logging.info(
69
+ # Load hydra configuration
70
+ with hydra.initialize(version_base=None, config_path="../../configs"):
71
+ cfg = hydra.compose(
72
+ config_name="config", overrides=["tools/multi_paper_recommendation=default"]
73
+ )
74
+ cfg = cfg.tools.multi_paper_recommendation
75
+ logger.info("Loaded configuration for multi-paper recommendation tool")
76
+ logger.info(
77
77
  "Starting multi-paper recommendations search with paper IDs: %s", paper_ids
78
78
  )
79
79
 
@@ -89,45 +89,61 @@ def get_multi_paper_recommendations(
89
89
  if year:
90
90
  params["year"] = year
91
91
 
92
- # Getting recommendations
93
- response = requests.post(
94
- endpoint,
95
- headers=headers,
96
- params=params,
97
- data=json.dumps(payload),
98
- timeout=cfg.request_timeout,
99
- )
100
- logging.info(
92
+ # Wrap API call in try/except to catch connectivity issues and validate response format
93
+ try:
94
+ response = requests.post(
95
+ endpoint,
96
+ headers=headers,
97
+ params=params,
98
+ data=json.dumps(payload),
99
+ timeout=cfg.request_timeout,
100
+ )
101
+ response.raise_for_status() # Raises HTTPError for bad responses
102
+ except requests.exceptions.RequestException as e:
103
+ logger.error(
104
+ "Failed to connect to Semantic Scholar API for multi-paper recommendations: %s",
105
+ e,
106
+ )
107
+ raise RuntimeError(
108
+ "Failed to connect to Semantic Scholar API. Please retry the same query."
109
+ ) from e
110
+
111
+ logger.info(
101
112
  "API Response Status for multi-paper recommendations: %s", response.status_code
102
113
  )
114
+ logger.info("Request params: %s", params)
103
115
 
104
116
  data = response.json()
105
- recommendations = data.get("recommendedPapers", [])
106
117
 
118
+ # Check for expected data format
119
+ if "recommendedPapers" not in data:
120
+ logger.error("Unexpected API response format: %s", data)
121
+ raise RuntimeError(
122
+ "Unexpected response from Semantic Scholar API. The results could not be "
123
+ "retrieved due to an unexpected format. "
124
+ "Please modify your search query and try again."
125
+ )
126
+
127
+ recommendations = data.get("recommendedPapers", [])
107
128
  if not recommendations:
108
- return Command(
109
- update={ # Place 'messages' inside 'update'
110
- "messages": [
111
- ToolMessage(
112
- content="No recommendations found based on multiple papers.",
113
- tool_call_id=tool_call_id,
114
- )
115
- ]
116
- }
129
+ logger.error(
130
+ "No recommendations returned from API for paper IDs: %s", paper_ids
131
+ )
132
+ raise RuntimeError(
133
+ "No recommendations were found for your query. Consider refining your search "
134
+ "by using more specific keywords or different terms."
117
135
  )
118
136
 
119
137
  # Create a dictionary to store the papers
120
138
  filtered_papers = {
121
139
  paper["paperId"]: {
122
- # "semantic_scholar_id": paper["paperId"], # Store Semantic Scholar ID
140
+ "paper_id": paper["paperId"],
123
141
  "Title": paper.get("title", "N/A"),
124
142
  "Abstract": paper.get("abstract", "N/A"),
125
143
  "Year": paper.get("year", "N/A"),
126
144
  "Citation Count": paper.get("citationCount", "N/A"),
127
145
  "URL": paper.get("url", "N/A"),
128
- # "arXiv_ID": paper.get("externalIds", {}).get(
129
- # "ArXiv", "N/A"
130
- # ), # Extract arXiv ID
146
+ "arxiv_id": paper.get("externalIds", {}).get("ArXiv", "N/A"),
131
147
  }
132
148
  for paper in recommendations
133
149
  if paper.get("title") and paper.get("authors")
@@ -156,7 +172,7 @@ def get_multi_paper_recommendations(
156
172
 
157
173
  return Command(
158
174
  update={
159
- "multi_papers": filtered_papers, # Now sending the dictionary directly
175
+ "multi_papers": filtered_papers, # Sending the dictionary directly
160
176
  "last_displayed_papers": "multi_papers",
161
177
  "messages": [
162
178
  ToolMessage(
@@ -44,7 +44,7 @@ def query_results(question: str, state: Annotated[dict, InjectedState]) -> str:
44
44
  raise NoPapersFoundError(
45
45
  "No papers found. A search needs to be performed first."
46
46
  )
47
- context_key = state.get("last_displayed_papers")
47
+ context_key = state.get("last_displayed_papers","pdf_data")
48
48
  dic_papers = state.get(context_key)
49
49
  df_papers = pd.DataFrame.from_dict(dic_papers, orient="index")
50
50
  df_agent = create_pandas_dataframe_agent(
@@ -19,14 +19,6 @@ from pydantic import Field
19
19
  logging.basicConfig(level=logging.INFO)
20
20
  logger = logging.getLogger(__name__)
21
21
 
22
- # Load hydra configuration
23
- with hydra.initialize(version_base=None, config_path="../../configs"):
24
- cfg = hydra.compose(
25
- config_name="config",
26
- overrides=["tools/retrieve_semantic_scholar_paper_id=default"],
27
- )
28
- cfg = cfg.tools.retrieve_semantic_scholar_paper_id
29
-
30
22
 
31
23
  @tool("retrieve_semantic_scholar_paper_id", parse_docstring=True)
32
24
  def retrieve_semantic_scholar_paper_id(
@@ -49,6 +41,14 @@ def retrieve_semantic_scholar_paper_id(
49
41
  Returns:
50
42
  ToolMessage: A message containing the paper ID.
51
43
  """
44
+ # Load hydra configuration
45
+ with hydra.initialize(version_base=None, config_path="../../configs"):
46
+ cfg = hydra.compose(
47
+ config_name="config",
48
+ overrides=["tools/retrieve_semantic_scholar_paper_id=default"],
49
+ )
50
+ cfg = cfg.tools.retrieve_semantic_scholar_paper_id
51
+ logger.info("Loaded configuration for Semantic Scholar paper ID retrieval tool")
52
52
  logger.info("Retrieving ID of paper with title: %s", paper_title)
53
53
  endpoint = cfg.api_endpoint
54
54
  params = {
@@ -37,12 +37,6 @@ class SearchInput(BaseModel):
37
37
  tool_call_id: Annotated[str, InjectedToolCallId]
38
38
 
39
39
 
40
- # Load hydra configuration
41
- with hydra.initialize(version_base=None, config_path="../../configs"):
42
- cfg = hydra.compose(config_name="config", overrides=["tools/search=default"])
43
- cfg = cfg.tools.search
44
-
45
-
46
40
  @tool("search_tool", args_schema=SearchInput, parse_docstring=True)
47
41
  def search_tool(
48
42
  query: str,
@@ -56,13 +50,18 @@ def search_tool(
56
50
  Args:
57
51
  query (str): The search query string to find academic papers.
58
52
  tool_call_id (Annotated[str, InjectedToolCallId]): The tool call ID.
59
- limit (int, optional): The maximum number of results to return. Defaults to 2.
53
+ limit (int, optional): The maximum number of results to return. Defaults to 5.
60
54
  year (str, optional): Year range for papers.
61
55
  Supports formats like "2024-", "-2024", "2024:2025". Defaults to None.
62
56
 
63
57
  Returns:
64
58
  The number of papers found on Semantic Scholar.
65
59
  """
60
+ # Load hydra configuration
61
+ with hydra.initialize(version_base=None, config_path="../../configs"):
62
+ cfg = hydra.compose(config_name="config", overrides=["tools/search=default"])
63
+ cfg = cfg.tools.search
64
+ logger.info("Loaded configuration for search tool")
66
65
  logger.info("Searching for papers on %s", query)
67
66
  endpoint = cfg.api_endpoint
68
67
  params = {
@@ -75,33 +74,47 @@ def search_tool(
75
74
  if year:
76
75
  params["year"] = year
77
76
 
78
- response = requests.get(endpoint, params=params, timeout=10)
77
+ # Wrap API call in try/except to catch connectivity issues
78
+ try:
79
+ response = requests.get(endpoint, params=params, timeout=10)
80
+ response.raise_for_status() # Raises HTTPError for bad responses
81
+ except requests.exceptions.RequestException as e:
82
+ logger.error("Failed to connect to Semantic Scholar API: %s", e)
83
+ raise RuntimeError(
84
+ "Failed to connect to Semantic Scholar API. Please retry the same query."
85
+ ) from e
86
+
79
87
  data = response.json()
88
+
89
+ # Check for expected data format
90
+ if "data" not in data:
91
+ logger.error("Unexpected API response format: %s", data)
92
+ raise RuntimeError(
93
+ "Unexpected response from Semantic Scholar API. The results could not be "
94
+ "retrieved due to an unexpected format. "
95
+ "Please modify your search query and try again."
96
+ )
97
+
80
98
  papers = data.get("data", [])
81
- logger.info("Received %d papers", len(papers))
82
99
  if not papers:
83
- return Command(
84
- update={ # Place 'messages' inside 'update'
85
- "messages": [
86
- ToolMessage(
87
- content="No papers found. Please try a different search query.",
88
- tool_call_id=tool_call_id,
89
- )
90
- ]
91
- }
100
+ logger.error(
101
+ "No papers returned from Semantic Scholar API for query: %s", query
92
102
  )
103
+ raise RuntimeError(
104
+ "No papers were found for your query. Consider refining your search "
105
+ "by using more specific keywords or different terms."
106
+ )
107
+
93
108
  # Create a dictionary to store the papers
94
109
  filtered_papers = {
95
110
  paper["paperId"]: {
96
- # "semantic_scholar_id": paper["paperId"], # Store Semantic Scholar ID
111
+ "paper_id": paper["paperId"],
97
112
  "Title": paper.get("title", "N/A"),
98
113
  "Abstract": paper.get("abstract", "N/A"),
99
114
  "Year": paper.get("year", "N/A"),
100
115
  "Citation Count": paper.get("citationCount", "N/A"),
101
116
  "URL": paper.get("url", "N/A"),
102
- # "arXiv_ID": paper.get("externalIds", {}).get(
103
- # "ArXiv", "N/A"
104
- # ), # Extract arXiv ID
117
+ "arxiv_id": paper.get("externalIds", {}).get("ArXiv", "N/A"),
105
118
  }
106
119
  for paper in papers
107
120
  if paper.get("title") and paper.get("authors")
@@ -129,7 +142,7 @@ def search_tool(
129
142
 
130
143
  return Command(
131
144
  update={
132
- "papers": filtered_papers, # Now sending the dictionary directly
145
+ "papers": filtered_papers, # Sending the dictionary directly
133
146
  "last_displayed_papers": "papers",
134
147
  "messages": [
135
148
  ToolMessage(
@@ -40,14 +40,6 @@ class SinglePaperRecInput(BaseModel):
40
40
  model_config = {"arbitrary_types_allowed": True}
41
41
 
42
42
 
43
- # Load hydra configuration
44
- with hydra.initialize(version_base=None, config_path="../../configs"):
45
- cfg = hydra.compose(
46
- config_name="config", overrides=["tools/single_paper_recommendation=default"]
47
- )
48
- cfg = cfg.tools.single_paper_recommendation
49
-
50
-
51
43
  @tool(args_schema=SinglePaperRecInput, parse_docstring=True)
52
44
  def get_single_paper_recommendations(
53
45
  paper_id: str,
@@ -56,19 +48,27 @@ def get_single_paper_recommendations(
56
48
  year: Optional[str] = None,
57
49
  ) -> Command[Any]:
58
50
  """
59
- Get recommendations for on a single paper using its Semantic Scholar ID.
51
+ Get recommendations for a single paper using its Semantic Scholar ID.
60
52
  No other ID types are supported.
61
53
 
62
54
  Args:
63
55
  paper_id (str): The Semantic Scholar Paper ID to get recommendations for.
64
56
  tool_call_id (Annotated[str, InjectedToolCallId]): The tool call ID.
65
- limit (int, optional): The maximum number of recommendations to return. Defaults to 2.
57
+ limit (int, optional): The maximum number of recommendations to return. Defaults to 5.
66
58
  year (str, optional): Year range for papers.
67
59
  Supports formats like "2024-", "-2024", "2024:2025". Defaults to None.
68
60
 
69
61
  Returns:
70
62
  Dict[str, Any]: The recommendations and related information.
71
63
  """
64
+ # Load hydra configuration
65
+ with hydra.initialize(version_base=None, config_path="../../configs"):
66
+ cfg = hydra.compose(
67
+ config_name="config",
68
+ overrides=["tools/single_paper_recommendation=default"],
69
+ )
70
+ cfg = cfg.tools.single_paper_recommendation
71
+ logger.info("Loaded configuration for single paper recommendation tool")
72
72
  logger.info(
73
73
  "Starting single paper recommendations search with paper ID: %s", paper_id
74
74
  )
@@ -84,48 +84,54 @@ def get_single_paper_recommendations(
84
84
  if year:
85
85
  params["year"] = year
86
86
 
87
- response = requests.get(endpoint, params=params, timeout=cfg.request_timeout)
88
- data = response.json()
89
- response = requests.get(endpoint, params=params, timeout=10)
90
- # print(f"API Response Status: {response.status_code}")
91
- logging.info(
87
+ # Wrap API call in try/except to catch connectivity issues and check response format
88
+ try:
89
+ response = requests.get(endpoint, params=params, timeout=cfg.request_timeout)
90
+ response.raise_for_status() # Raises HTTPError for bad responses
91
+ except requests.exceptions.RequestException as e:
92
+ logger.error(
93
+ "Failed to connect to Semantic Scholar API for recommendations: %s", e
94
+ )
95
+ raise RuntimeError(
96
+ "Failed to connect to Semantic Scholar API. Please retry the same query."
97
+ ) from e
98
+
99
+ logger.info(
92
100
  "API Response Status for recommendations of paper %s: %s",
93
101
  paper_id,
94
102
  response.status_code,
95
103
  )
96
- if response.status_code != 200:
97
- raise ValueError("Invalid paper ID or API error.")
98
- # print(f"Request params: {params}")
99
- logging.info("Request params: %s", params)
104
+ logger.info("Request params: %s", params)
100
105
 
101
106
  data = response.json()
102
- recommendations = data.get("recommendedPapers", [])
103
107
 
108
+ # Check for expected data format
109
+ if "recommendedPapers" not in data:
110
+ logger.error("Unexpected API response format: %s", data)
111
+ raise RuntimeError(
112
+ "Unexpected response from Semantic Scholar API. The results could not be "
113
+ "retrieved due to an unexpected format. "
114
+ "Please modify your search query and try again."
115
+ )
116
+
117
+ recommendations = data.get("recommendedPapers", [])
104
118
  if not recommendations:
105
- return Command(
106
- update={
107
- "papers": {},
108
- "messages": [
109
- ToolMessage(
110
- content=f"No recommendations found for {paper_id}.",
111
- tool_call_id=tool_call_id,
112
- )
113
- ],
114
- }
119
+ logger.error("No recommendations returned from API for paper: %s", paper_id)
120
+ raise RuntimeError(
121
+ "No recommendations were found for your query. Consider refining your search "
122
+ "by using more specific keywords or different terms."
115
123
  )
116
124
 
117
125
  # Extract paper ID and title from recommendations
118
126
  filtered_papers = {
119
127
  paper["paperId"]: {
120
- # "semantic_scholar_id": paper["paperId"], # Store Semantic Scholar ID
128
+ "paper_id": paper["paperId"],
121
129
  "Title": paper.get("title", "N/A"),
122
130
  "Abstract": paper.get("abstract", "N/A"),
123
131
  "Year": paper.get("year", "N/A"),
124
132
  "Citation Count": paper.get("citationCount", "N/A"),
125
133
  "URL": paper.get("url", "N/A"),
126
- # "arXiv_ID": paper.get("externalIds", {}).get(
127
- # "ArXiv", "N/A"
128
- # ), # Extract arXiv ID
134
+ "arxiv_id": paper.get("externalIds", {}).get("ArXiv", "N/A"),
129
135
  }
130
136
  for paper in recommendations
131
137
  if paper.get("title") and paper.get("authors")
@@ -143,10 +149,10 @@ def get_single_paper_recommendations(
143
149
  logger.info("Filtered %d papers", len(filtered_papers))
144
150
 
145
151
  content = (
146
- "Recommendations based on single paper were successful. "
147
- "Papers are attached as an artifact."
152
+ "Recommendations based on the single paper were successful. "
153
+ "Papers are attached as an artifact. "
154
+ "Here is a summary of the recommendations:\n"
148
155
  )
149
- content += " Here is a summary of the recommendations:\n"
150
156
  content += f"Number of papers found: {len(filtered_papers)}\n"
151
157
  content += f"Query Paper ID: {paper_id}\n"
152
158
  content += f"Year: {year}\n" if year else ""
@@ -154,7 +160,7 @@ def get_single_paper_recommendations(
154
160
 
155
161
  return Command(
156
162
  update={
157
- "papers": filtered_papers, # Now sending the dictionary directly
163
+ "papers": filtered_papers, # Sending the dictionary directly
158
164
  "last_displayed_papers": "papers",
159
165
  "messages": [
160
166
  ToolMessage(
@@ -3,3 +3,5 @@ Import statements
3
3
  """
4
4
 
5
5
  from . import zotero_read
6
+ from . import zotero_write
7
+ from . import utils
@@ -0,0 +1,5 @@
1
"""
Package initializer for the Zotero utils subpackage; re-exports zotero_path.
"""

from . import zotero_path