aiagents4pharma 1.15.2__py3-none-any.whl → 1.17.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. aiagents4pharma/__init__.py +1 -1
  2. aiagents4pharma/{talk2competitors → talk2scholars}/__init__.py +1 -1
  3. aiagents4pharma/{talk2competitors → talk2scholars}/agents/main_agent.py +42 -21
  4. aiagents4pharma/{talk2competitors → talk2scholars}/agents/s2_agent.py +41 -32
  5. aiagents4pharma/{talk2competitors/config → talk2scholars/configs}/__init__.py +3 -1
  6. aiagents4pharma/talk2scholars/configs/agents/__init__.py +5 -0
  7. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/__init__.py +6 -0
  8. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/__init__.py +3 -0
  9. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +39 -0
  10. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/__init__.py +3 -0
  11. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +68 -0
  12. aiagents4pharma/talk2scholars/configs/app/__init__.py +5 -0
  13. aiagents4pharma/talk2scholars/configs/app/frontend/__init__.py +3 -0
  14. aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml +33 -0
  15. aiagents4pharma/talk2scholars/configs/config.yaml +8 -0
  16. aiagents4pharma/talk2scholars/configs/tools/__init__.py +7 -0
  17. aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/__init__.py +3 -0
  18. aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml +19 -0
  19. aiagents4pharma/talk2scholars/configs/tools/search/__init__.py +3 -0
  20. aiagents4pharma/talk2scholars/configs/tools/search/default.yaml +19 -0
  21. aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/__init__.py +3 -0
  22. aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml +20 -0
  23. aiagents4pharma/{talk2competitors → talk2scholars}/state/__init__.py +1 -1
  24. aiagents4pharma/{talk2competitors/state/state_talk2competitors.py → talk2scholars/state/state_talk2scholars.py} +3 -3
  25. aiagents4pharma/{talk2competitors → talk2scholars}/tests/test_langgraph.py +72 -55
  26. aiagents4pharma/{talk2competitors → talk2scholars}/tools/s2/multi_paper_rec.py +13 -5
  27. aiagents4pharma/{talk2competitors → talk2scholars}/tools/s2/search.py +9 -3
  28. aiagents4pharma/{talk2competitors → talk2scholars}/tools/s2/single_paper_rec.py +13 -7
  29. {aiagents4pharma-1.15.2.dist-info → aiagents4pharma-1.17.0.dist-info}/METADATA +7 -6
  30. {aiagents4pharma-1.15.2.dist-info → aiagents4pharma-1.17.0.dist-info}/RECORD +38 -22
  31. aiagents4pharma/talk2competitors/config/config.py +0 -110
  32. /aiagents4pharma/{talk2competitors → talk2scholars}/agents/__init__.py +0 -0
  33. /aiagents4pharma/{talk2competitors → talk2scholars}/tests/__init__.py +0 -0
  34. /aiagents4pharma/{talk2competitors → talk2scholars}/tools/__init__.py +0 -0
  35. /aiagents4pharma/{talk2competitors → talk2scholars}/tools/s2/__init__.py +0 -0
  36. /aiagents4pharma/{talk2competitors → talk2scholars}/tools/s2/display_results.py +0 -0
  37. {aiagents4pharma-1.15.2.dist-info → aiagents4pharma-1.17.0.dist-info}/LICENSE +0 -0
  38. {aiagents4pharma-1.15.2.dist-info → aiagents4pharma-1.17.0.dist-info}/WHEEL +0 -0
  39. {aiagents4pharma-1.15.2.dist-info → aiagents4pharma-1.17.0.dist-info}/top_level.txt +0 -0
@@ -6,6 +6,6 @@ from . import (
6
6
  configs,
7
7
  talk2biomodels,
8
8
  talk2cells,
9
- talk2competitors,
9
+ talk2scholars,
10
10
  talk2knowledgegraphs,
11
11
  )
@@ -2,4 +2,4 @@
2
2
  This file is used to import all the modules in the package.
3
3
  """
4
4
 
5
- from . import agents, config, state, tests, tools
5
+ from . import agents, configs, state, tests, tools
@@ -1,12 +1,12 @@
1
1
  #!/usr/bin/env python3
2
2
 
3
3
  """
4
- Main agent for the talk2competitors app.
4
+ Main agent for the talk2scholars app.
5
5
  """
6
6
 
7
7
  import logging
8
- from typing import Literal
9
- from dotenv import load_dotenv
8
+ from typing import Literal, Any
9
+ import hydra
10
10
  from langchain_core.language_models.chat_models import BaseChatModel
11
11
  from langchain_core.messages import AIMessage
12
12
  from langchain_openai import ChatOpenAI
@@ -14,39 +14,42 @@ from langgraph.checkpoint.memory import MemorySaver
14
14
  from langgraph.graph import END, START, StateGraph
15
15
  from langgraph.types import Command
16
16
  from ..agents import s2_agent
17
- from ..config.config import config
18
- from ..state.state_talk2competitors import Talk2Competitors
17
+ from ..state.state_talk2scholars import Talk2Scholars
19
18
 
20
19
  logging.basicConfig(level=logging.INFO)
21
20
  logger = logging.getLogger(__name__)
22
21
 
23
- load_dotenv()
24
22
 
25
- def make_supervisor_node(llm: BaseChatModel) -> str:
23
+ def make_supervisor_node(llm: BaseChatModel, cfg: Any) -> str:
26
24
  """
27
25
  Creates a supervisor node following LangGraph patterns.
28
26
 
29
27
  Args:
30
28
  llm (BaseChatModel): The language model to use for generating responses.
29
+ cfg (Any): The configuration object.
31
30
 
32
31
  Returns:
33
32
  str: The supervisor node function.
34
33
  """
35
- # options = ["FINISH", "s2_agent"]
36
-
37
- def supervisor_node(state: Talk2Competitors) -> Command[Literal["s2_agent", "__end__"]]:
34
+ def supervisor_node(
35
+ state: Talk2Scholars,
36
+ ) -> Command[Literal["s2_agent", "__end__"]]:
38
37
  """
39
38
  Supervisor node that routes to appropriate sub-agents.
40
39
 
41
40
  Args:
42
- state (Talk2Competitors): The current state of the conversation.
41
+ state (Talk2Scholars): The current state of the conversation.
43
42
 
44
43
  Returns:
45
44
  Command[Literal["s2_agent", "__end__"]]: The command to execute next.
46
45
  """
47
- logger.info("Supervisor node called")
46
+ logger.info(
47
+ "Supervisor node called - Messages count: %d, Current Agent: %s",
48
+ len(state["messages"]),
49
+ state.get("current_agent", "None"),
50
+ )
48
51
 
49
- messages = [{"role": "system", "content": config.MAIN_AGENT_PROMPT}] + state[
52
+ messages = [{"role": "system", "content": cfg.state_modifier}] + state[
50
53
  "messages"
51
54
  ]
52
55
  response = llm.invoke(messages)
@@ -81,7 +84,8 @@ def make_supervisor_node(llm: BaseChatModel) -> str:
81
84
 
82
85
  return supervisor_node
83
86
 
84
- def get_app(thread_id: str, llm_model ='gpt-4o-mini') -> StateGraph:
87
+
88
+ def get_app(thread_id: str, llm_model="gpt-4o-mini") -> StateGraph:
85
89
  """
86
90
  Returns the langraph app with hierarchical structure.
87
91
 
@@ -91,20 +95,30 @@ def get_app(thread_id: str, llm_model ='gpt-4o-mini') -> StateGraph:
91
95
  Returns:
92
96
  The compiled langraph app.
93
97
  """
94
- def call_s2_agent(state: Talk2Competitors) -> Command[Literal["__end__"]]:
98
+
99
+ # Load hydra configuration
100
+ logger.log(logging.INFO, "Load Hydra configuration for Talk2Scholars main agent.")
101
+ with hydra.initialize(version_base=None, config_path="../../configs"):
102
+ cfg = hydra.compose(
103
+ config_name="config", overrides=["agents/talk2scholars/main_agent=default"]
104
+ )
105
+ cfg = cfg.agents.talk2scholars.main_agent
106
+ logger.info("Hydra configuration loaded with values: %s", cfg)
107
+
108
+ def call_s2_agent(state: Talk2Scholars) -> Command[Literal["__end__"]]:
95
109
  """
96
110
  Node for calling the S2 agent.
97
111
 
98
112
  Args:
99
- state (Talk2Competitors): The current state of the conversation.
113
+ state (Talk2Scholars): The current state of the conversation.
100
114
 
101
115
  Returns:
102
116
  Command[Literal["__end__"]]: The command to execute next.
103
117
  """
104
- logger.info("Calling S2 agent")
118
+ logger.info("Calling S2 agent with state: %s", state)
105
119
  app = s2_agent.get_app(thread_id, llm_model)
106
120
  response = app.invoke(state)
107
- logger.info("S2 agent completed")
121
+ logger.info("S2 agent completed with response: %s", response)
108
122
  return Command(
109
123
  goto=END,
110
124
  update={
@@ -114,10 +128,17 @@ def get_app(thread_id: str, llm_model ='gpt-4o-mini') -> StateGraph:
114
128
  "current_agent": "s2_agent",
115
129
  },
116
130
  )
117
- llm = ChatOpenAI(model=llm_model, temperature=0)
118
- workflow = StateGraph(Talk2Competitors)
119
131
 
120
- supervisor = make_supervisor_node(llm)
132
+ logger.log(
133
+ logging.INFO,
134
+ "Using OpenAI model %s with temperature %s",
135
+ llm_model,
136
+ cfg.temperature
137
+ )
138
+ llm = ChatOpenAI(model=llm_model, temperature=cfg.temperature)
139
+ workflow = StateGraph(Talk2Scholars)
140
+
141
+ supervisor = make_supervisor_node(llm, cfg)
121
142
  workflow.add_node("supervisor", supervisor)
122
143
  workflow.add_node("s2_agent", call_s2_agent)
123
144
 
@@ -1,59 +1,68 @@
1
- #/usr/bin/env python3
1
+ # /usr/bin/env python3
2
2
 
3
- '''
3
+ """
4
4
  Agent for interacting with Semantic Scholar
5
- '''
5
+ """
6
6
 
7
7
  import logging
8
- from dotenv import load_dotenv
8
+ import hydra
9
9
  from langchain_openai import ChatOpenAI
10
10
  from langgraph.graph import START, StateGraph
11
- from langgraph.prebuilt import create_react_agent
11
+ from langgraph.prebuilt import create_react_agent, ToolNode
12
12
  from langgraph.checkpoint.memory import MemorySaver
13
- from ..config.config import config
14
- from ..state.state_talk2competitors import Talk2Competitors
15
- # from ..tools.s2 import s2_tools
16
- from ..tools.s2.search import search_tool
17
- from ..tools.s2.display_results import display_results
18
- from ..tools.s2.single_paper_rec import get_single_paper_recommendations
19
- from ..tools.s2.multi_paper_rec import get_multi_paper_recommendations
13
+ from ..state.state_talk2scholars import Talk2Scholars
14
+ from ..tools.s2.search import search_tool as s2_search
15
+ from ..tools.s2.display_results import display_results as s2_display
16
+ from ..tools.s2.single_paper_rec import (
17
+ get_single_paper_recommendations as s2_single_rec,
18
+ )
19
+ from ..tools.s2.multi_paper_rec import get_multi_paper_recommendations as s2_multi_rec
20
20
 
21
- load_dotenv()
22
21
 
23
22
  # Initialize logger
24
23
  logging.basicConfig(level=logging.INFO)
25
24
  logger = logging.getLogger(__name__)
26
25
 
27
- def get_app(uniq_id, llm_model='gpt-4o-mini'):
28
- '''
26
+
27
+ def get_app(uniq_id, llm_model="gpt-4o-mini"):
28
+ """
29
29
  This function returns the langraph app.
30
- '''
31
- def agent_s2_node(state: Talk2Competitors):
32
- '''
30
+ """
31
+
32
+ def agent_s2_node(state: Talk2Scholars):
33
+ """
33
34
  This function calls the model.
34
- '''
35
+ """
35
36
  logger.log(logging.INFO, "Creating Agent_S2 node with thread_id %s", uniq_id)
36
37
  response = model.invoke(state, {"configurable": {"thread_id": uniq_id}})
37
38
  return response
38
39
 
40
+ # Load hydra configuration
41
+ logger.log(logging.INFO, "Load Hydra configuration for Talk2Scholars S2 agent.")
42
+ with hydra.initialize(version_base=None, config_path="../../configs"):
43
+ cfg = hydra.compose(
44
+ config_name="config", overrides=["agents/talk2scholars/s2_agent=default"]
45
+ )
46
+ cfg = cfg.agents.talk2scholars.s2_agent
47
+
39
48
  # Define the tools
40
- tools = [search_tool,
41
- display_results,
42
- get_single_paper_recommendations,
43
- get_multi_paper_recommendations]
49
+ tools = ToolNode([s2_search, s2_display, s2_single_rec, s2_multi_rec])
50
+
51
+ # Define the model
52
+ logger.log(logging.INFO, "Using OpenAI model %s", llm_model)
53
+ llm = ChatOpenAI(model=llm_model, temperature=cfg.temperature)
44
54
 
45
- # Create the LLM
46
- llm = ChatOpenAI(model=llm_model, temperature=0)
55
+ # Create the agent
47
56
  model = create_react_agent(
48
- llm,
49
- tools=tools,
50
- state_schema=Talk2Competitors,
51
- state_modifier=config.S2_AGENT_PROMPT,
52
- checkpointer=MemorySaver()
53
- )
57
+ llm,
58
+ tools=tools,
59
+ state_schema=Talk2Scholars,
60
+ state_modifier=cfg.s2_agent,
61
+ checkpointer=MemorySaver(),
62
+ )
54
63
 
55
64
  # Define a new graph
56
- workflow = StateGraph(Talk2Competitors)
65
+ workflow = StateGraph(Talk2Scholars)
57
66
 
58
67
  # Define the two nodes we will cycle between
59
68
  workflow.add_node("agent_s2", agent_s2_node)
@@ -2,4 +2,6 @@
2
2
  This package contains configuration settings and prompts used by various AI agents
3
3
  """
4
4
 
5
- from . import config
5
+ from . import agents
6
+ from . import tools
7
+ from . import app
@@ -0,0 +1,5 @@
1
+ """
2
+ Import all the modules in the package
3
+ """
4
+
5
+ from . import talk2scholars
@@ -0,0 +1,6 @@
1
+ """
2
+ Import all the modules in the package
3
+ """
4
+
5
+ from . import s2_agent
6
+ from . import main_agent
@@ -0,0 +1,3 @@
1
+ """
2
+ Import all the modules in the package
3
+ """
@@ -0,0 +1,39 @@
1
+ _target_: agents.main_agent.get_app
2
+ openai_api_key: ${oc.env:OPENAI_API_KEY}
3
+ openai_llms:
4
+ - "gpt-4o-mini"
5
+ - "gpt-4-turbo"
6
+ - "gpt-3.5-turbo"
7
+ temperature: 0
8
+ main_agent: >
9
+ "You are a supervisory AI agent that routes user queries to specialized tools.\n"
10
+ "Your task is to select the most appropriate tool based on the user's request.\n\n"
11
+ "Available tools and their capabilities:\n\n"
12
+ "1. semantic_scholar_agent:\n"
13
+ " - Search for academic papers and research\n"
14
+ " - Get paper recommendations\n"
15
+ " - Find similar papers\n"
16
+ " USE FOR: Any queries about finding papers, academic research, "
17
+ "or getting paper recommendations\n\n"
18
+ "ROUTING GUIDELINES:\n\n"
19
+ "ALWAYS route to semantic_scholar_agent for:\n"
20
+ "- Finding academic papers\n"
21
+ "- Searching research topics\n"
22
+ "- Getting paper recommendations\n"
23
+ "- Finding similar papers\n"
24
+ "- Any query about academic literature\n\n"
25
+ "Approach:\n"
26
+ "1. Identify the core need in the user's query\n"
27
+ "2. Select the most appropriate tool based on the guidelines above\n"
28
+ "3. If unclear, ask for clarification\n"
29
+ "4. For multi-step tasks, focus on the immediate next step\n\n"
30
+ "Remember:\n"
31
+ "- Be decisive in your tool selection\n"
32
+ "- Focus on the immediate task\n"
33
+ "- Default to semantic_scholar_agent for any paper-finding tasks\n"
34
+ "- Ask for clarification if the request is ambiguous\n\n"
35
+ "When presenting paper search results, always use this exact format:\n\n"
36
+ "Remember to:\n"
37
+ "- To always add the url\n"
38
+ "- Put URLs on the title line itself as markdown\n"
39
+ "- Maintain consistent spacing and formatting"
@@ -0,0 +1,3 @@
1
+ """
2
+ Import all the modules in the package
3
+ """
@@ -0,0 +1,68 @@
1
+ _target_: agents.s2_agent.get_app
2
+ openai_api_key: ${oc.env:OPENAI_API_KEY}
3
+ openai_llms:
4
+ - "gpt-4o-mini"
5
+ - "gpt-4-turbo"
6
+ - "gpt-3.5-turbo"
7
+ temperature: 0
8
+ s2_agent: >
9
+ "You are a specialized academic research assistant with access to the following tools:\n\n"
10
+ "1. search_papers:\n"
11
+ " USE FOR: General paper searches\n"
12
+ " - Enhances search terms automatically\n"
13
+ " - Adds relevant academic keywords\n"
14
+ " - Focuses on recent research when appropriate\n\n"
15
+ "2. get_single_paper_recommendations:\n"
16
+ " USE FOR: Finding papers similar to a specific paper\n"
17
+ " - Takes a single paper ID\n"
18
+ " - Returns related papers\n\n"
19
+ "3. get_multi_paper_recommendations:\n"
20
+ " USE FOR: Finding papers similar to multiple papers\n"
21
+ " - Takes multiple paper IDs\n"
22
+ " - Finds papers related to all inputs\n\n"
23
+ "GUIDELINES:\n\n"
24
+ "For paper searches:\n"
25
+ "- Enhance search terms with academic language\n"
26
+ "- Include field-specific terminology\n"
27
+ '- Add "recent" or "latest" when appropriate\n'
28
+ "- Keep queries focused and relevant\n\n"
29
+ "For paper recommendations:\n"
30
+ "- Identify paper IDs (40-character hexadecimal strings)\n"
31
+ "- Use single_paper_recommendations for one ID\n"
32
+ "- Use multi_paper_recommendations for multiple IDs\n\n"
33
+ "Best practices:\n"
34
+ "1. Start with a broad search if no paper IDs are provided\n"
35
+ "2. Look for paper IDs in user input\n"
36
+ "3. Enhance search terms for better results\n"
37
+ "4. Consider the academic context\n"
38
+ "5. Be prepared to refine searches based on feedback\n\n"
39
+ "Remember:\n"
40
+ "- Always select the most appropriate tool\n"
41
+ "- Enhance search queries naturally\n"
42
+ "- Consider academic context\n"
43
+ "- Focus on delivering relevant results\n\n"
44
+ "IMPORTANT GUIDELINES FOR PAPER RECOMMENDATIONS:\n\n"
45
+ "For Multiple Papers:\n"
46
+ "- When getting recommendations for multiple papers, always use "
47
+ "get_multi_paper_recommendations tool\n"
48
+ "- DO NOT call get_single_paper_recommendations multiple times\n"
49
+ "- Always pass all paper IDs in a single call to get_multi_paper_recommendations\n"
50
+ '- Use for queries like "find papers related to both/all papers" or '
51
+ '"find similar papers to these papers"\n\n'
52
+ "For Single Paper:\n"
53
+ "- Use get_single_paper_recommendations when focusing on one specific paper\n"
54
+ "- Pass only one paper ID at a time\n"
55
+ '- Use for queries like "find papers similar to this paper" or '
56
+ '"get recommendations for paper X"\n'
57
+ "- Do not use for multiple papers\n\n"
58
+ "Examples:\n"
59
+ '- For "find related papers for both papers":\n'
60
+ " ✓ Use get_multi_paper_recommendations with both paper IDs\n"
61
+ " × Don't make multiple calls to get_single_paper_recommendations\n\n"
62
+ '- For "find papers related to the first paper":\n'
63
+ " ✓ Use get_single_paper_recommendations with just that paper's ID\n"
64
+ " × Don't use get_multi_paper_recommendations\n\n"
65
+ "Remember:\n"
66
+ "- Be precise in identifying which paper ID to use for single recommendations\n"
67
+ "- Don't reuse previous paper IDs unless specifically requested\n"
68
+ "- For fresh paper recommendations, always use the original paper ID"
@@ -0,0 +1,5 @@
1
+ """
2
+ Import all the modules in the package
3
+ """
4
+
5
+ from . import frontend
@@ -0,0 +1,3 @@
1
+ """
2
+ Import all the modules in the package
3
+ """
@@ -0,0 +1,33 @@
1
+ # # Page configuration
2
+ # page:
3
+ # title: "Talk2Scholars"
4
+ # icon: "🤖"
5
+ # layout: "wide"
6
+
7
+ # Available LLM models
8
+ llm_models:
9
+ - "gpt-4o-mini"
10
+ - "gpt-4-turbo"
11
+ - "gpt-3.5-turbo"
12
+ # # Chat UI configuration
13
+ # chat:
14
+ # assistant_avatar: "🤖"
15
+ # user_avatar: "👩🏻‍💻"
16
+ # input_placeholder: "Say something ..."
17
+ # spinner_text: "Fetching response ..."
18
+
19
+ # # Feedback configuration
20
+ # feedback:
21
+ # type: "thumbs"
22
+ # text_label: "[Optional] Please provide an explanation"
23
+ # success_message: "Your feedback is on its way to the developers. Thank you!"
24
+ # success_icon: "🚀"
25
+
26
+ # # Layout configuration
27
+ # layout:
28
+ # column_ratio: [3, 7] # Ratio for main_col1 and main_col2
29
+ # chat_container_height: 575
30
+ # sidebar_container_height: 500
31
+ #
32
+ # # Project name prefix
33
+ # project_name_prefix: "Talk2Scholars-"
@@ -0,0 +1,8 @@
1
+ defaults:
2
+ - _self_
3
+ - agents/talk2scholars/main_agent: default
4
+ - agents/talk2scholars/s2_agent: default
5
+ - tools/search: default
6
+ - tools/single_paper_recommendation: default
7
+ - tools/multi_paper_recommendation: default
8
+ - app/frontend: default
@@ -0,0 +1,7 @@
1
+ """
2
+ Import all the modules in the package
3
+ """
4
+
5
+ from . import search
6
+ from . import single_paper_recommendation
7
+ from . import multi_paper_recommendation
@@ -0,0 +1,3 @@
1
+ """
2
+ Import all the modules in the package
3
+ """
@@ -0,0 +1,19 @@
1
+ api_endpoint: "https://api.semanticscholar.org/recommendations/v1/papers"
2
+ default_limit: 2
3
+ request_timeout: 10
4
+ api_fields:
5
+ - "paperId"
6
+ - "title"
7
+ - "abstract"
8
+ - "year"
9
+ - "authors"
10
+ - "citationCount"
11
+ - "url"
12
+
13
+ # Default headers and params
14
+ headers:
15
+ Content-Type: "application/json"
16
+
17
+ recommendation_params:
18
+ limit: ${.default_limit}
19
+ fields: ${.api_fields}
@@ -0,0 +1,3 @@
1
+ """
2
+ Import all the modules in the package
3
+ """
@@ -0,0 +1,19 @@
1
+ api_endpoint: "https://api.semanticscholar.org/graph/v1/paper/search"
2
+ default_limit: 2
3
+ request_timeout: 10
4
+ api_fields:
5
+ - "paperId"
6
+ - "title"
7
+ - "abstract"
8
+ - "year"
9
+ - "authors"
10
+ - "citationCount"
11
+ - "url"
12
+ # Commented fields that could be added later if needed
13
+ # - "publicationTypes"
14
+ # - "openAccessPdf"
15
+
16
+ # Default search parameters
17
+ search_params:
18
+ limit: ${.default_limit} # Reference to the default_limit above
19
+ fields: ${.api_fields} # Reference to the api_fields above
@@ -0,0 +1,3 @@
1
+ """
2
+ Import all the modules in the package
3
+ """
@@ -0,0 +1,20 @@
1
+ api_endpoint: "https://api.semanticscholar.org/recommendations/v1/papers/forpaper"
2
+ default_limit: 2
3
+ request_timeout: 10
4
+ api_fields:
5
+ - "paperId"
6
+ - "title"
7
+ - "abstract"
8
+ - "year"
9
+ - "authors"
10
+ - "citationCount"
11
+ - "url"
12
+ # Commented fields that could be added later if needed
13
+ # - "publicationTypes"
14
+ # - "openAccessPdf"
15
+
16
+ # Default recommendation parameters
17
+ recommendation_params:
18
+ limit: ${.default_limit} # Reference to the default_limit above
19
+ fields: ${.api_fields} # Reference to the api_fields above
20
+ from_pool: "all-cs" # Using all-cs pool as specified in docs
@@ -2,4 +2,4 @@
2
2
  This file is used to import all the modules in the package.
3
3
  '''
4
4
 
5
- from . import state_talk2competitors
5
+ from . import state_talk2scholars
@@ -1,5 +1,5 @@
1
1
  """
2
- This is the state file for the talk2comp agent.
2
+ This is the state file for the talk2scholars agent.
3
3
  """
4
4
 
5
5
  import logging
@@ -19,9 +19,9 @@ def replace_dict(existing: Dict[str, Any], new: Dict[str, Any]) -> Dict[str, Any
19
19
  return new
20
20
 
21
21
 
22
- class Talk2Competitors(AgentState):
22
+ class Talk2Scholars(AgentState):
23
23
  """
24
- The state for the talk2comp agent, inheriting from AgentState.
24
+ The state for the talk2scholars agent, inheriting from AgentState.
25
25
  """
26
26
 
27
27
  papers: Annotated[Dict[str, Any], replace_dict] # Changed from List to Dict
@@ -1,16 +1,18 @@
1
1
  """
2
- Unit and integration tests for Talk2Competitors system.
2
+ Unit and integration tests for Talk2Scholars system.
3
3
  Each test focuses on a single, specific functionality.
4
4
  Tests are deterministic and independent of each other.
5
5
  """
6
6
 
7
7
  from unittest.mock import Mock, patch
8
-
9
8
  import pytest
10
9
  from langchain_core.messages import AIMessage, HumanMessage
10
+ import hydra
11
+ from hydra.core.global_hydra import GlobalHydra
12
+ from omegaconf import DictConfig, OmegaConf
11
13
 
12
14
  from ..agents.main_agent import get_app, make_supervisor_node
13
- from ..state.state_talk2competitors import replace_dict
15
+ from ..state.state_talk2scholars import replace_dict, Talk2Scholars
14
16
  from ..tools.s2.display_results import display_results
15
17
  from ..tools.s2.multi_paper_rec import get_multi_paper_recommendations
16
18
  from ..tools.s2.search import search_tool
@@ -18,6 +20,42 @@ from ..tools.s2.single_paper_rec import get_single_paper_recommendations
18
20
 
19
21
  # pylint: disable=redefined-outer-name
20
22
 
23
+
24
+ @pytest.fixture(autouse=True)
25
+ def hydra_setup():
26
+ """Setup and cleanup Hydra for tests."""
27
+ GlobalHydra.instance().clear()
28
+ with hydra.initialize(version_base=None, config_path="../configs"):
29
+ yield
30
+
31
+
32
+ @pytest.fixture
33
+ def mock_cfg() -> DictConfig:
34
+ """Create a mock configuration for testing."""
35
+ config = {
36
+ "agents": {
37
+ "talk2scholars": {
38
+ "main_agent": {
39
+ "state_modifier": "Test prompt for main agent",
40
+ "temperature": 0,
41
+ },
42
+ "s2_agent": {
43
+ "temperature": 0,
44
+ "s2_agent": "Test prompt for s2 agent",
45
+ },
46
+ }
47
+ },
48
+ "tools": {
49
+ "search": {
50
+ "api_endpoint": "https://api.semanticscholar.org/graph/v1/paper/search",
51
+ "default_limit": 2,
52
+ "api_fields": ["paperId", "title", "abstract", "year", "authors"],
53
+ }
54
+ },
55
+ }
56
+ return OmegaConf.create(config)
57
+
58
+
21
59
  # Fixed test data for deterministic results
22
60
  MOCK_SEARCH_RESPONSE = {
23
61
  "data": [
@@ -45,27 +83,33 @@ MOCK_STATE_PAPER = {
45
83
 
46
84
 
47
85
  @pytest.fixture
48
- def initial_state():
86
+ def initial_state() -> Talk2Scholars:
49
87
  """Create a base state for tests"""
50
- return {
51
- "messages": [],
52
- "papers": {},
53
- "is_last_step": False,
54
- "current_agent": None,
55
- "llm_model": "gpt-4o-mini",
56
- }
88
+ return Talk2Scholars(
89
+ messages=[],
90
+ papers={},
91
+ is_last_step=False,
92
+ current_agent=None,
93
+ llm_model="gpt-4o-mini",
94
+ next="",
95
+ )
57
96
 
58
97
 
59
98
  class TestMainAgent:
60
99
  """Unit tests for main agent functionality"""
61
100
 
62
- def test_supervisor_routes_search_to_s2(self, initial_state):
101
+ def test_supervisor_routes_search_to_s2(
102
+ self, initial_state: Talk2Scholars, mock_cfg
103
+ ):
63
104
  """Verifies that search-related queries are routed to S2 agent"""
64
105
  llm_mock = Mock()
65
106
  llm_mock.invoke.return_value = AIMessage(content="Search initiated")
66
107
 
67
- supervisor = make_supervisor_node(llm_mock)
68
- state = initial_state.copy()
108
+ # Extract the main_agent config
109
+ supervisor = make_supervisor_node(
110
+ llm_mock, mock_cfg.agents.talk2scholars.main_agent
111
+ )
112
+ state = initial_state
69
113
  state["messages"] = [HumanMessage(content="search for papers")]
70
114
 
71
115
  result = supervisor(state)
@@ -73,13 +117,18 @@ class TestMainAgent:
73
117
  assert not result.update["is_last_step"]
74
118
  assert result.update["current_agent"] == "s2_agent"
75
119
 
76
- def test_supervisor_routes_general_to_end(self, initial_state):
120
+ def test_supervisor_routes_general_to_end(
121
+ self, initial_state: Talk2Scholars, mock_cfg
122
+ ):
77
123
  """Verifies that non-search queries end the conversation"""
78
124
  llm_mock = Mock()
79
125
  llm_mock.invoke.return_value = AIMessage(content="General response")
80
126
 
81
- supervisor = make_supervisor_node(llm_mock)
82
- state = initial_state.copy()
127
+ # Extract the main_agent config
128
+ supervisor = make_supervisor_node(
129
+ llm_mock, mock_cfg.agents.talk2scholars.main_agent
130
+ )
131
+ state = initial_state
83
132
  state["messages"] = [HumanMessage(content="What is ML?")]
84
133
 
85
134
  result = supervisor(state)
@@ -90,9 +139,9 @@ class TestMainAgent:
90
139
  class TestS2Tools:
91
140
  """Unit tests for individual S2 tools"""
92
141
 
93
- def test_display_results_shows_papers(self, initial_state):
142
+ def test_display_results_shows_papers(self, initial_state: Talk2Scholars):
94
143
  """Verifies display_results tool correctly returns papers from state"""
95
- state = initial_state.copy()
144
+ state = initial_state
96
145
  state["papers"] = MOCK_STATE_PAPER
97
146
  result = display_results.invoke(input={"state": state})
98
147
  assert result == MOCK_STATE_PAPER
@@ -199,40 +248,6 @@ class TestS2Tools:
199
248
  assert "papers" in result.update
200
249
  assert len(result.update["messages"]) == 1
201
250
 
202
- @patch("requests.get")
203
- def test_single_paper_rec_empty_response(self, mock_get):
204
- """Tests single paper recommendations with empty response"""
205
- mock_get.return_value.json.return_value = {"recommendedPapers": []}
206
- mock_get.return_value.status_code = 200
207
-
208
- result = get_single_paper_recommendations.invoke(
209
- input={
210
- "paper_id": "123",
211
- "limit": 1,
212
- "tool_call_id": "test123",
213
- "id": "test123",
214
- }
215
- )
216
- assert "papers" in result.update
217
- assert len(result.update["papers"]) == 0
218
-
219
- @patch("requests.post")
220
- def test_multi_paper_rec_empty_response(self, mock_post):
221
- """Tests multi-paper recommendations with empty response"""
222
- mock_post.return_value.json.return_value = {"recommendedPapers": []}
223
- mock_post.return_value.status_code = 200
224
-
225
- result = get_multi_paper_recommendations.invoke(
226
- input={
227
- "paper_ids": ["123", "456"],
228
- "limit": 1,
229
- "tool_call_id": "test123",
230
- "id": "test123",
231
- }
232
- )
233
- assert "papers" in result.update
234
- assert len(result.update["papers"]) == 0
235
-
236
251
 
237
252
  def test_state_replace_dict():
238
253
  """Verifies state dictionary replacement works correctly"""
@@ -244,11 +259,13 @@ def test_state_replace_dict():
244
259
 
245
260
 
246
261
  @pytest.mark.integration
247
- def test_end_to_end_search_workflow(initial_state):
262
+ def test_end_to_end_search_workflow(initial_state: Talk2Scholars, mock_cfg):
248
263
  """Integration test: Complete search workflow"""
249
264
  with (
250
265
  patch("requests.get") as mock_get,
251
266
  patch("langchain_openai.ChatOpenAI") as mock_llm,
267
+ patch("hydra.compose", return_value=mock_cfg),
268
+ patch("hydra.initialize"),
252
269
  ):
253
270
  mock_get.return_value.json.return_value = MOCK_SEARCH_RESPONSE
254
271
  mock_get.return_value.status_code = 200
@@ -258,7 +275,7 @@ def test_end_to_end_search_workflow(initial_state):
258
275
  mock_llm.return_value = llm_instance
259
276
 
260
277
  app = get_app("test_integration")
261
- test_state = initial_state.copy()
278
+ test_state = initial_state
262
279
  test_state["messages"] = [HumanMessage(content="search for ML papers")]
263
280
 
264
281
  config = {
@@ -8,7 +8,7 @@ multi_paper_rec: Tool for getting recommendations
8
8
  import json
9
9
  import logging
10
10
  from typing import Annotated, Any, Dict, List, Optional
11
-
11
+ import hydra
12
12
  import pandas as pd
13
13
  import requests
14
14
  from langchain_core.messages import ToolMessage
@@ -40,6 +40,14 @@ class MultiPaperRecInput(BaseModel):
40
40
  model_config = {"arbitrary_types_allowed": True}
41
41
 
42
42
 
43
+ # Load hydra configuration
44
+ with hydra.initialize(version_base=None, config_path="../../configs"):
45
+ cfg = hydra.compose(
46
+ config_name="config", overrides=["tools/multi_paper_recommendation=default"]
47
+ )
48
+ cfg = cfg.tools.multi_paper_recommendation
49
+
50
+
43
51
  @tool(args_schema=MultiPaperRecInput)
44
52
  def get_multi_paper_recommendations(
45
53
  paper_ids: List[str],
@@ -62,12 +70,12 @@ def get_multi_paper_recommendations(
62
70
  """
63
71
  logging.info("Starting multi-paper recommendations search.")
64
72
 
65
- endpoint = "https://api.semanticscholar.org/recommendations/v1/papers"
66
- headers = {"Content-Type": "application/json"}
73
+ endpoint = cfg.api_endpoint
74
+ headers = cfg.headers
67
75
  payload = {"positivePaperIds": paper_ids, "negativePaperIds": []}
68
76
  params = {
69
77
  "limit": min(limit, 500),
70
- "fields": "paperId,title,abstract,year,authors,citationCount,url",
78
+ "fields": ",".join(cfg.api_fields),
71
79
  }
72
80
 
73
81
  # Add year parameter if provided
@@ -80,7 +88,7 @@ def get_multi_paper_recommendations(
80
88
  headers=headers,
81
89
  params=params,
82
90
  data=json.dumps(payload),
83
- timeout=10,
91
+ timeout=cfg.request_timeout,
84
92
  )
85
93
  logging.info(
86
94
  "API Response Status for multi-paper recommendations: %s", response.status_code
@@ -6,7 +6,7 @@ This tool is used to search for academic papers on Semantic Scholar.
6
6
 
7
7
  import logging
8
8
  from typing import Annotated, Any, Dict, Optional
9
-
9
+ import hydra
10
10
  import pandas as pd
11
11
  import requests
12
12
  from langchain_core.messages import ToolMessage
@@ -34,6 +34,12 @@ class SearchInput(BaseModel):
34
34
  tool_call_id: Annotated[str, InjectedToolCallId]
35
35
 
36
36
 
37
+ # Load hydra configuration
38
+ with hydra.initialize(version_base=None, config_path="../../configs"):
39
+ cfg = hydra.compose(config_name="config", overrides=["tools/search=default"])
40
+ cfg = cfg.tools.search
41
+
42
+
37
43
  @tool(args_schema=SearchInput)
38
44
  def search_tool(
39
45
  query: str,
@@ -55,13 +61,13 @@ def search_tool(
55
61
  Dict[str, Any]: The search results and related information.
56
62
  """
57
63
  print("Starting paper search...")
58
- endpoint = "https://api.semanticscholar.org/graph/v1/paper/search"
64
+ endpoint = cfg.api_endpoint
59
65
  params = {
60
66
  "query": query,
61
67
  "limit": min(limit, 100),
62
68
  # "fields": "paperId,title,abstract,year,authors,
63
69
  # citationCount,url,publicationTypes,openAccessPdf",
64
- "fields": "paperId,title,abstract,year,authors,citationCount,url",
70
+ "fields": ",".join(cfg.api_fields),
65
71
  }
66
72
 
67
73
  # Add year parameter if provided
@@ -6,7 +6,7 @@ This tool is used to return recommendations for a single paper.
6
6
 
7
7
  import logging
8
8
  from typing import Annotated, Any, Dict, Optional
9
-
9
+ import hydra
10
10
  import pandas as pd
11
11
  import requests
12
12
  from langchain_core.messages import ToolMessage
@@ -41,6 +41,14 @@ class SinglePaperRecInput(BaseModel):
41
41
  model_config = {"arbitrary_types_allowed": True}
42
42
 
43
43
 
44
+ # Load hydra configuration
45
+ with hydra.initialize(version_base=None, config_path="../../configs"):
46
+ cfg = hydra.compose(
47
+ config_name="config", overrides=["tools/single_paper_recommendation=default"]
48
+ )
49
+ cfg = cfg.tools.single_paper_recommendation
50
+
51
+
44
52
  @tool(args_schema=SinglePaperRecInput)
45
53
  def get_single_paper_recommendations(
46
54
  paper_id: str,
@@ -63,20 +71,18 @@ def get_single_paper_recommendations(
63
71
  """
64
72
  logger.info("Starting single paper recommendations search.")
65
73
 
66
- endpoint = (
67
- f"https://api.semanticscholar.org/recommendations/v1/papers/forpaper/{paper_id}"
68
- )
74
+ endpoint = f"{cfg.api_endpoint}/{paper_id}"
69
75
  params = {
70
76
  "limit": min(limit, 500), # Max 500 per API docs
71
- "fields": "paperId,title,abstract,year,authors,citationCount,url",
72
- "from": "all-cs", # Using all-cs pool as specified in docs
77
+ "fields": ",".join(cfg.api_fields),
78
+ "from": cfg.recommendation_params.from_pool,
73
79
  }
74
80
 
75
81
  # Add year parameter if provided
76
82
  if year:
77
83
  params["year"] = year
78
84
 
79
- response = requests.get(endpoint, params=params, timeout=10)
85
+ response = requests.get(endpoint, params=params, timeout=cfg.request_timeout)
80
86
  data = response.json()
81
87
  papers = data.get("data", [])
82
88
  response = requests.get(endpoint, params=params, timeout=10)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: aiagents4pharma
3
- Version: 1.15.2
3
+ Version: 1.17.0
4
4
  Summary: AI Agents for drug discovery, drug development, and other pharmaceutical R&D
5
5
  Classifier: Programming Language :: Python :: 3
6
6
  Classifier: License :: OSI Approved :: MIT License
@@ -50,11 +50,10 @@ Requires-Dist: streamlit-feedback
50
50
  [![Talk2BioModels](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2biomodels.yml/badge.svg)](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2biomodels.yml)
51
51
  [![Talk2Cells](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2cells.yml/badge.svg)](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2cells.yml)
52
52
  [![Talk2KnowledgeGraphs](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2knowledgegraphs.yml/badge.svg)](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2knowledgegraphs.yml)
53
- [![Talk2Competitors](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2competitors.yml/badge.svg)](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2competitors.yml)
53
+ [![Talk2Scholars](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2scholars.yml/badge.svg)](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2scholars.yml)
54
54
  ![GitHub Release](https://img.shields.io/github/v/release/VirtualPatientEngine/AIAgents4Pharma)
55
55
  ![Python Version from PEP 621 TOML](https://img.shields.io/python/required-version-toml?tomlFilePath=https%3A%2F%2Fraw.githubusercontent.com%2FVirtualPatientEngine%2FAIAgents4Pharma%2Frefs%2Fheads%2Fmain%2Fpyproject.toml)
56
56
 
57
-
58
57
  <h1 align="center" style="border-bottom: none;">🤖 AIAgents4Pharma</h1>
59
58
 
60
59
  Welcome to **AIAgents4Pharma** – an open-source project by [Team VPE](https://github.com/VirtualPatientEngine) that brings together AI-driven tools to help researchers and pharma interact seamlessly with complex biological data.
@@ -64,7 +63,7 @@ Our toolkit currently consists of three intelligent agents, each designed to sim
64
63
  - **Talk2BioModels**: Engage directly with mathematical models in systems biology.
65
64
  - **Talk2Cells** _(Work in progress)_: Query and analyze sequencing data with ease.
66
65
  - **Talk2KnowledgeGraphs** _(Work in progress)_: Access and explore complex biological knowledge graphs for insightful data connections.
67
- - **Talk2Competitors** _(Coming soon)_: Get recommendations for articles related to your choice. Download, query, and write/retrieve them to your reference manager (currently supporting Zotero).
66
+ - **Talk2Scholars** _(Coming soon)_: Get recommendations for articles related to your choice. Download, query, and write/retrieve them to your reference manager (currently supporting Zotero).
68
67
 
69
68
  ---
70
69
 
@@ -87,7 +86,9 @@ Our toolkit currently consists of three intelligent agents, each designed to sim
87
86
 
88
87
  **Talk2KnowledgeGraphs** is an agent designed to enable interaction with biological knowledge graphs (KGs). KGs integrate vast amounts of structured biological data into a format that highlights relationships between entities, such as proteins, genes, and diseases.
89
88
 
90
- ### 4. Talk2Competitors _(Coming soon)_
89
+ ### 4. Talk2Scholars _(Work in Progress)_
90
+
91
+ Talk2Scholars is an AI-powered hierarchical agent system designed to revolutionize academic paper search and analysis. Through intelligent conversation, users can discover, analyze, and receive recommendations for academic papers using state-of-the-art natural language processing.
91
92
 
92
93
  ## Getting Started
93
94
 
@@ -198,7 +199,7 @@ Check out our [CONTRIBUTING.md](CONTRIBUTING.md) for more information.
198
199
  - **User Interface**: Interactive web UI for all agents.
199
200
  - **Talk2Cells**: Integration of sequencing data analysis tools.
200
201
  - **Talk2KnowledgeGraphs**: Interface for biological knowledge graph interaction.
201
- - **Talk2Competitors**: Interface for exploring articles
202
+ - **Talk2Scholars**: Interface for exploring articles
202
203
 
203
204
  We’re excited to bring AIAgents4Pharma to the bioinformatics and pharmaceutical research community. Together, let’s make data-driven biological research more accessible and insightful.
204
205
 
@@ -1,4 +1,4 @@
1
- aiagents4pharma/__init__.py,sha256=5muWWIg89VHPybfxonO_5xOMJPasKNsGdQRhozDaEmk,177
1
+ aiagents4pharma/__init__.py,sha256=nFCe1As_SuRkmhcdZVsU0aYjYccHxk1DbduLpy8XulY,174
2
2
  aiagents4pharma/configs/__init__.py,sha256=hNkSrXw1Ix1HhkGn_aaidr2coBYySfM0Hm_pMeRcX7k,76
3
3
  aiagents4pharma/configs/config.yaml,sha256=e0w2GOBVWcoPDtX-z4S6yKbv2rja5PfGRBhmTPVIXNU,161
4
4
  aiagents4pharma/configs/talk2biomodels/__init__.py,sha256=safyFKhkd5Wlirl9dMZIHWDLTpY2oLw9wjIM7ZtLIHk,88
@@ -57,22 +57,6 @@ aiagents4pharma/talk2cells/tools/__init__.py,sha256=38nK2a_lEFRjO3qD6Fo9a3983ZCY
57
57
  aiagents4pharma/talk2cells/tools/scp_agent/__init__.py,sha256=s7g0lyH1lMD9pcWHLPtwRJRvzmTh2II7DrxyLulpjmQ,163
58
58
  aiagents4pharma/talk2cells/tools/scp_agent/display_studies.py,sha256=6q59gh_NQaiOU2rn55A3sIIFKlXi4SK3iKgySvUDrtQ,600
59
59
  aiagents4pharma/talk2cells/tools/scp_agent/search_studies.py,sha256=MLe-twtFnOu-P8P9diYq7jvHBHbWFRRCZLcfpUzqPMg,2806
60
- aiagents4pharma/talk2competitors/__init__.py,sha256=haaikzND3c0Euqq86ndA4fl9q42aOop5rYG_8Zh1D-o,119
61
- aiagents4pharma/talk2competitors/agents/__init__.py,sha256=ykszlVGxz3egLHZAttlNoTPxIrnQJZYva_ssR8fwIFk,117
62
- aiagents4pharma/talk2competitors/agents/main_agent.py,sha256=UoHCpZd-HoeG0B6_gAF1cEP2OqMvrTuGe7MZDwL_u1U,3878
63
- aiagents4pharma/talk2competitors/agents/s2_agent.py,sha256=eTrhc4ZPvWOUWMHNYxK0WltsZedZUnAWNu-TeUa-ruk,2501
64
- aiagents4pharma/talk2competitors/config/__init__.py,sha256=HyM6paOpKZ5_tZnyVheSAFmxjT6Mb3PxvWKfP0rz-dE,113
65
- aiagents4pharma/talk2competitors/config/config.py,sha256=jd4ltMBJyTztm9wT7j3ujOyYxL2SXRgxQJ4OZUBmCG4,5387
66
- aiagents4pharma/talk2competitors/state/__init__.py,sha256=DzFjV3hZNes_pL4bDW2_8RsyK9BJcj6ejfBzU0KWn1k,106
67
- aiagents4pharma/talk2competitors/state/state_talk2competitors.py,sha256=GUl1ZfM77XsjIEu-3xy4dtvaiMTA1pXf6i1ozVcX5Gg,993
68
- aiagents4pharma/talk2competitors/tests/__init__.py,sha256=U3PsTiUZaUBD1IZanFGkDIOdFieDVJtGKQ5-woYUo8c,45
69
- aiagents4pharma/talk2competitors/tests/test_langgraph.py,sha256=sEROK1aU3wFqJhZohONVI6Pr7t1d3PSqs-4erVIyiJw,9283
70
- aiagents4pharma/talk2competitors/tools/__init__.py,sha256=YudBDRwaEzDnAcpxGZvEOfyh5-6xd51CTvTKTkywgXw,68
71
- aiagents4pharma/talk2competitors/tools/s2/__init__.py,sha256=9RQH3efTj6qkXk0ICKSc7Mzpkitt4gRGsQ1pGPrrREU,181
72
- aiagents4pharma/talk2competitors/tools/s2/display_results.py,sha256=B8JJGohi1Eyx8C3MhO_SiyQP3R6hPyUKJOAzcHmq3FU,584
73
- aiagents4pharma/talk2competitors/tools/s2/multi_paper_rec.py,sha256=FYLt47DAk6WOKfEk1Gj9zVvJGNyxA283PCp8IKW9U5M,4262
74
- aiagents4pharma/talk2competitors/tools/s2/search.py,sha256=pppjrQv5-8ep4fnqgTSBNgnbSnQsVIcNrRrH0p2TP1o,4025
75
- aiagents4pharma/talk2competitors/tools/s2/single_paper_rec.py,sha256=dAfUQxI7T5eu0eDxK8VAl7-JH0Wnw24CVkOQqwj-hXc,4810
76
60
  aiagents4pharma/talk2knowledgegraphs/__init__.py,sha256=4smVQoSMM6rflVnNkABqlDAAlSn4bYsq7rMVWjRGvis,103
77
61
  aiagents4pharma/talk2knowledgegraphs/datasets/__init__.py,sha256=L3gPuHskSegmtXskVrLIYr7FXe_ibKgJ2GGr1_Wok6k,173
78
62
  aiagents4pharma/talk2knowledgegraphs/datasets/biobridge_primekg.py,sha256=QlzDXmXREoa9MA6-GwzqRjdzndQeGBAF11Td6NFk_9Y,23426
@@ -98,8 +82,40 @@ aiagents4pharma/talk2knowledgegraphs/utils/embeddings/sentence_transformer.py,sh
98
82
  aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py,sha256=tW426knki2DBIHcWyF_K04iMMdbpIn_e_TpPmTgz2dI,113
99
83
  aiagents4pharma/talk2knowledgegraphs/utils/enrichments/enrichments.py,sha256=Bx8x6zzk5614ApWB90N_iv4_Y_Uq0-KwUeBwYSdQMU4,924
100
84
  aiagents4pharma/talk2knowledgegraphs/utils/enrichments/ollama.py,sha256=8eoxR-VHo0G7ReQIwje7xEhE-SJlHdef7_wJRpnvFIc,4116
101
- aiagents4pharma-1.15.2.dist-info/LICENSE,sha256=IcIbyB1Hyk5ZDah03VNQvJkbNk2hkBCDqQ8qtnCvB4Q,1077
102
- aiagents4pharma-1.15.2.dist-info/METADATA,sha256=k63iE9sbSZrZoraXM4ILVXy_bnrMR6iBENIBCEpE3nQ,8637
103
- aiagents4pharma-1.15.2.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
104
- aiagents4pharma-1.15.2.dist-info/top_level.txt,sha256=-AH8rMmrSnJtq7HaAObS78UU-cTCwvX660dSxeM7a0A,16
105
- aiagents4pharma-1.15.2.dist-info/RECORD,,
85
+ aiagents4pharma/talk2scholars/__init__.py,sha256=gphERyVKZHvOnMQsml7TIHlaIshHJ75R1J3FKExkfuY,120
86
+ aiagents4pharma/talk2scholars/agents/__init__.py,sha256=ykszlVGxz3egLHZAttlNoTPxIrnQJZYva_ssR8fwIFk,117
87
+ aiagents4pharma/talk2scholars/agents/main_agent.py,sha256=etPQUCjHtD-in-kD7Wg_UD6jRtCHj-mj41y03PYbAQM,4616
88
+ aiagents4pharma/talk2scholars/agents/s2_agent.py,sha256=0OlxNb8IEhd7JJNL8rRlr_U67iPgtGjbbAiJo9HFPdY,2737
89
+ aiagents4pharma/talk2scholars/configs/__init__.py,sha256=tf2gz8n7M4ko6xLdX_C925ELVIxoP6SgkPcbeh59ad4,151
90
+ aiagents4pharma/talk2scholars/configs/config.yaml,sha256=a3_jCFAmsVL6gZuvzoRe4jL94mQaSbp0CUXZDUtqhZA,254
91
+ aiagents4pharma/talk2scholars/configs/agents/__init__.py,sha256=yyh7PB2oY_JulnpSQCWS4wwCH_uzIdt47O2Ay48x_oU,75
92
+ aiagents4pharma/talk2scholars/configs/agents/talk2scholars/__init__.py,sha256=Tj4urOkjpu2cTlpJl0Fmr_18RZCR88vns-Gt-XquDzs,95
93
+ aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
94
+ aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml,sha256=77DLch0YDiGslm3HerBaEDqKj1jLqI_jusHFbCchJDU,1617
95
+ aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
96
+ aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml,sha256=6mgBvjifpHJ20kLwuwWr56UoZqmyf1luZnNWlYR6gnc,3135
97
+ aiagents4pharma/talk2scholars/configs/app/__init__.py,sha256=JoSZV6N669kGMv5zLDszwf0ZjcRHx9TJfIqGhIIdPXE,70
98
+ aiagents4pharma/talk2scholars/configs/app/frontend/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
99
+ aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml,sha256=BX-J1zQyb0QJ7hcOFOnkJ8aWoWbjK4WE2VG7OZTOyKU,821
100
+ aiagents4pharma/talk2scholars/configs/tools/__init__.py,sha256=w0BJK0MR6Et8Pw1htP8JV0Lr9F_N68CqvbpV14KBy_8,151
101
+ aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
102
+ aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml,sha256=70HSJ8WbS2Qbhur5FpuOPBjrea9g3TioM0gjGn6U1bE,369
103
+ aiagents4pharma/talk2scholars/configs/tools/search/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
104
+ aiagents4pharma/talk2scholars/configs/tools/search/default.yaml,sha256=NznRVqB6EamMfsFc5hj5s9ygzl6rPuFPiy9ikcpqp68,486
105
+ aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
106
+ aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml,sha256=4_gTvdVc-hf9GNxBKMGQd72s5h53Zy09j9qeZ9Fys04,578
107
+ aiagents4pharma/talk2scholars/state/__init__.py,sha256=S6SxlszIMZSIMJehjevPF9sKyR-PAwWb5TEdo6xWXE8,103
108
+ aiagents4pharma/talk2scholars/state/state_talk2scholars.py,sha256=nwNRKdhoTXAtBGMMp6coMyUCaQVOnoGNqyjpKKw_FVM,998
109
+ aiagents4pharma/talk2scholars/tests/__init__.py,sha256=U3PsTiUZaUBD1IZanFGkDIOdFieDVJtGKQ5-woYUo8c,45
110
+ aiagents4pharma/talk2scholars/tests/test_langgraph.py,sha256=36tWBmdewCZC8Yw1CCIpJxusGLCy21SVHhG1tR5C3TU,9605
111
+ aiagents4pharma/talk2scholars/tools/__init__.py,sha256=YudBDRwaEzDnAcpxGZvEOfyh5-6xd51CTvTKTkywgXw,68
112
+ aiagents4pharma/talk2scholars/tools/s2/__init__.py,sha256=9RQH3efTj6qkXk0ICKSc7Mzpkitt4gRGsQ1pGPrrREU,181
113
+ aiagents4pharma/talk2scholars/tools/s2/display_results.py,sha256=B8JJGohi1Eyx8C3MhO_SiyQP3R6hPyUKJOAzcHmq3FU,584
114
+ aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py,sha256=0Y3q8TkF_Phng9L7g1kk9Fhyit9UNitWurp03H0GZv8,4455
115
+ aiagents4pharma/talk2scholars/tools/s2/search.py,sha256=CcgFN7YuuQ9Vl1DJcldnnvPrswABKjNxeauK1rABps8,4176
116
+ aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py,sha256=irS-igdG8BZbVb0Z4VlIjzsyBlUfREd0v0_RlUM-0_U,4994
117
+ aiagents4pharma-1.17.0.dist-info/LICENSE,sha256=IcIbyB1Hyk5ZDah03VNQvJkbNk2hkBCDqQ8qtnCvB4Q,1077
118
+ aiagents4pharma-1.17.0.dist-info/METADATA,sha256=gGFNQ2C0aAjibBF_Dbge_hPOhBNmXdeoXpbcK_0fQs8,8906
119
+ aiagents4pharma-1.17.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
120
+ aiagents4pharma-1.17.0.dist-info/top_level.txt,sha256=-AH8rMmrSnJtq7HaAObS78UU-cTCwvX660dSxeM7a0A,16
121
+ aiagents4pharma-1.17.0.dist-info/RECORD,,
@@ -1,110 +0,0 @@
1
- """Configuration module for AI agents handling paper searches and recommendations."""
2
-
3
-
4
- # pylint: disable=R0903
5
- class Config:
6
- """Configuration class containing prompts for AI agents.
7
-
8
- This class stores prompt templates used by various AI agents in the system,
9
- particularly for academic paper searches and recommendations.
10
- """
11
-
12
- MAIN_AGENT_PROMPT = (
13
- "You are a supervisory AI agent that routes user queries to specialized tools.\n"
14
- "Your task is to select the most appropriate tool based on the user's request.\n\n"
15
- "Available tools and their capabilities:\n\n"
16
- "1. semantic_scholar_agent:\n"
17
- " - Search for academic papers and research\n"
18
- " - Get paper recommendations\n"
19
- " - Find similar papers\n"
20
- " USE FOR: Any queries about finding papers, academic research, "
21
- "or getting paper recommendations\n\n"
22
- "ROUTING GUIDELINES:\n\n"
23
- "ALWAYS route to semantic_scholar_agent for:\n"
24
- "- Finding academic papers\n"
25
- "- Searching research topics\n"
26
- "- Getting paper recommendations\n"
27
- "- Finding similar papers\n"
28
- "- Any query about academic literature\n\n"
29
- "Approach:\n"
30
- "1. Identify the core need in the user's query\n"
31
- "2. Select the most appropriate tool based on the guidelines above\n"
32
- "3. If unclear, ask for clarification\n"
33
- "4. For multi-step tasks, focus on the immediate next step\n\n"
34
- "Remember:\n"
35
- "- Be decisive in your tool selection\n"
36
- "- Focus on the immediate task\n"
37
- "- Default to semantic_scholar_agent for any paper-finding tasks\n"
38
- "- Ask for clarification if the request is ambiguous\n\n"
39
- "When presenting paper search results, always use this exact format:\n\n"
40
- "Remember to:\n"
41
- "- Always remember to add the url\n"
42
- "- Put URLs on the title line itself as markdown\n"
43
- "- Maintain consistent spacing and formatting"
44
- )
45
-
46
- S2_AGENT_PROMPT = (
47
- "You are a specialized academic research assistant with access to the following tools:\n\n"
48
- "1. search_papers:\n"
49
- " USE FOR: General paper searches\n"
50
- " - Enhances search terms automatically\n"
51
- " - Adds relevant academic keywords\n"
52
- " - Focuses on recent research when appropriate\n\n"
53
- "2. get_single_paper_recommendations:\n"
54
- " USE FOR: Finding papers similar to a specific paper\n"
55
- " - Takes a single paper ID\n"
56
- " - Returns related papers\n\n"
57
- "3. get_multi_paper_recommendations:\n"
58
- " USE FOR: Finding papers similar to multiple papers\n"
59
- " - Takes multiple paper IDs\n"
60
- " - Finds papers related to all inputs\n\n"
61
- "GUIDELINES:\n\n"
62
- "For paper searches:\n"
63
- "- Enhance search terms with academic language\n"
64
- "- Include field-specific terminology\n"
65
- '- Add "recent" or "latest" when appropriate\n'
66
- "- Keep queries focused and relevant\n\n"
67
- "For paper recommendations:\n"
68
- "- Identify paper IDs (40-character hexadecimal strings)\n"
69
- "- Use single_paper_recommendations for one ID\n"
70
- "- Use multi_paper_recommendations for multiple IDs\n\n"
71
- "Best practices:\n"
72
- "1. Start with a broad search if no paper IDs are provided\n"
73
- "2. Look for paper IDs in user input\n"
74
- "3. Enhance search terms for better results\n"
75
- "4. Consider the academic context\n"
76
- "5. Be prepared to refine searches based on feedback\n\n"
77
- "Remember:\n"
78
- "- Always select the most appropriate tool\n"
79
- "- Enhance search queries naturally\n"
80
- "- Consider academic context\n"
81
- "- Focus on delivering relevant results\n\n"
82
- "IMPORTANT GUIDELINES FOR PAPER RECOMMENDATIONS:\n\n"
83
- "For Multiple Papers:\n"
84
- "- When getting recommendations for multiple papers, always use "
85
- "get_multi_paper_recommendations tool\n"
86
- "- DO NOT call get_single_paper_recommendations multiple times\n"
87
- "- Always pass all paper IDs in a single call to get_multi_paper_recommendations\n"
88
- '- Use for queries like "find papers related to both/all papers" or '
89
- '"find similar papers to these papers"\n\n'
90
- "For Single Paper:\n"
91
- "- Use get_single_paper_recommendations when focusing on one specific paper\n"
92
- "- Pass only one paper ID at a time\n"
93
- '- Use for queries like "find papers similar to this paper" or '
94
- '"get recommendations for paper X"\n'
95
- "- Do not use for multiple papers\n\n"
96
- "Examples:\n"
97
- '- For "find related papers for both papers":\n'
98
- " ✓ Use get_multi_paper_recommendations with both paper IDs\n"
99
- " × Don't make multiple calls to get_single_paper_recommendations\n\n"
100
- '- For "find papers related to the first paper":\n'
101
- " ✓ Use get_single_paper_recommendations with just that paper's ID\n"
102
- " × Don't use get_multi_paper_recommendations\n\n"
103
- "Remember:\n"
104
- "- Be precise in identifying which paper ID to use for single recommendations\n"
105
- "- Don't reuse previous paper IDs unless specifically requested\n"
106
- "- For fresh paper recommendations, always use the original paper ID"
107
- )
108
-
109
-
110
- config = Config()