aiagents4pharma 1.20.0__py3-none-any.whl → 1.21.0__py3-none-any.whl

Files changed (29)
  1. aiagents4pharma/talk2biomodels/configs/config.yaml +5 -0
  2. aiagents4pharma/talk2scholars/agents/main_agent.py +90 -91
  3. aiagents4pharma/talk2scholars/agents/s2_agent.py +61 -17
  4. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +31 -10
  5. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +8 -16
  6. aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml +11 -9
  7. aiagents4pharma/talk2scholars/configs/config.yaml +1 -0
  8. aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml +2 -0
  9. aiagents4pharma/talk2scholars/configs/tools/retrieve_semantic_scholar_paper_id/__init__.py +3 -0
  10. aiagents4pharma/talk2scholars/configs/tools/search/default.yaml +1 -0
  11. aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml +1 -0
  12. aiagents4pharma/talk2scholars/state/state_talk2scholars.py +36 -7
  13. aiagents4pharma/talk2scholars/tests/test_llm_main_integration.py +58 -0
  14. aiagents4pharma/talk2scholars/tests/test_main_agent.py +98 -122
  15. aiagents4pharma/talk2scholars/tests/test_s2_agent.py +95 -29
  16. aiagents4pharma/talk2scholars/tests/test_s2_tools.py +158 -22
  17. aiagents4pharma/talk2scholars/tools/s2/__init__.py +4 -2
  18. aiagents4pharma/talk2scholars/tools/s2/display_results.py +60 -21
  19. aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py +35 -8
  20. aiagents4pharma/talk2scholars/tools/s2/query_results.py +61 -0
  21. aiagents4pharma/talk2scholars/tools/s2/retrieve_semantic_scholar_paper_id.py +79 -0
  22. aiagents4pharma/talk2scholars/tools/s2/search.py +34 -10
  23. aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py +39 -9
  24. {aiagents4pharma-1.20.0.dist-info → aiagents4pharma-1.21.0.dist-info}/METADATA +2 -2
  25. {aiagents4pharma-1.20.0.dist-info → aiagents4pharma-1.21.0.dist-info}/RECORD +28 -24
  26. aiagents4pharma/talk2scholars/tests/test_integration.py +0 -237
  27. {aiagents4pharma-1.20.0.dist-info → aiagents4pharma-1.21.0.dist-info}/LICENSE +0 -0
  28. {aiagents4pharma-1.20.0.dist-info → aiagents4pharma-1.21.0.dist-info}/WHEEL +0 -0
  29. {aiagents4pharma-1.20.0.dist-info → aiagents4pharma-1.21.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,5 @@
+ defaults:
+ - _self_
+ - agents/t2b_agent: default
+ - tools/ask_question: default
+ - tools/get_annotation: default
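
This new `defaults` list is composed by Hydra when the talk2biomodels package loads its configuration. A minimal sketch of how such a config is typically composed (the `config_path` below is illustrative, not taken from the package):

    import hydra

    # Compose configs/config.yaml; the defaults list above pulls in the
    # t2b_agent settings plus the ask_question and get_annotation tool configs.
    with hydra.initialize(version_base=None, config_path="configs"):
        cfg = hydra.compose(config_name="config")
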
@@ -6,28 +6,17 @@ Main agent for the talk2scholars app using ReAct pattern.
  This module implements a hierarchical agent system where a supervisor agent
  routes queries to specialized sub-agents. It follows the LangGraph patterns
  for multi-agent systems and implements proper state management.
-
- The main components are:
- 1. Supervisor node with ReAct pattern for intelligent routing.
- 2. S2 agent node for handling academic paper queries.
- 3. Shared state management via Talk2Scholars.
- 4. Hydra-based configuration system.
-
- Example:
- app = get_app("thread_123", "gpt-4o-mini")
- result = app.invoke({
- "messages": [("human", "Find papers about AI agents")]
- })
  """

  import logging
  from typing import Literal, Callable
+ from pydantic import BaseModel
  import hydra
  from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
  from langchain_openai import ChatOpenAI
  from langgraph.checkpoint.memory import MemorySaver
  from langgraph.graph import END, START, StateGraph
- from langgraph.prebuilt import create_react_agent
  from langgraph.types import Command
  from ..agents import s2_agent
  from ..state.state_talk2scholars import Talk2Scholars
@@ -39,13 +28,13 @@ logger = logging.getLogger(__name__)

  def get_hydra_config():
  """
- Loads and returns the Hydra configuration for the main agent.
+ Loads the Hydra configuration for the main agent.

- This function fetches the configuration settings for the Talk2Scholars
- agent, ensuring that all required parameters are properly initialized.
+ This function initializes the Hydra configuration system and retrieves the settings
+ for the `Talk2Scholars` agent, ensuring that all required parameters are loaded.

  Returns:
- Any: The configuration object for the main agent.
+ DictConfig: The configuration object containing parameters for the main agent.
  """
  with hydra.initialize(version_base=None, config_path="../configs"):
  cfg = hydra.compose(
@@ -54,116 +43,127 @@ def get_hydra_config():
  return cfg.agents.talk2scholars.main_agent


- def make_supervisor_node(llm: BaseChatModel, thread_id: str) -> Callable:
+ def make_supervisor_node(llm_model: BaseChatModel, thread_id: str) -> Callable:
  """
- Creates and returns a supervisor node for intelligent routing using the ReAct pattern.
+ Creates the supervisor node responsible for routing user queries to the appropriate sub-agents.

- This function initializes a supervisor agent that processes user queries and
- determines the appropriate sub-agent for further processing. It applies structured
- reasoning to manage conversations and direct queries based on context.
+ This function initializes the routing logic by leveraging the system and router prompts defined
+ in the Hydra configuration. The supervisor determines whether to
+ call a sub-agent (like `s2_agent`)
+ or directly generate a response using the language model.

  Args:
- llm (BaseChatModel): The language model used by the supervisor agent.
- thread_id (str): Unique identifier for the conversation session.
+ llm_model (BaseChatModel): The language model used for decision-making.
+ thread_id (str): Unique identifier for the current conversation session.

  Returns:
- Callable: A function that acts as the supervisor node in the LangGraph workflow.
-
- Example:
- supervisor = make_supervisor_node(llm, "thread_123")
- workflow.add_node("supervisor", supervisor)
+ Callable: The supervisor node function that processes user queries and
+ decides the next step.
  """
- logger.info("Loading Hydra configuration for Talk2Scholars main agent.")
  cfg = get_hydra_config()
- logger.info("Hydra configuration loaded with values: %s", cfg)
+ logger.info("Hydra configuration for Talk2Scholars main agent loaded: %s", cfg)
+ members = ["s2_agent"]
+ options = ["FINISH"] + members
+ # Define system prompt for general interactions
+ system_prompt = cfg.system_prompt
+ # Define router prompt for routing to sub-agents
+ router_prompt = cfg.router_prompt
+
+ class Router(BaseModel):
+ """Worker to route to next. If no workers needed, route to FINISH."""

- # Create the supervisor agent using the main agent's configuration
- supervisor_agent = create_react_agent(
- llm,
- tools=[], # Will add sub-agents later
- state_modifier=cfg.main_agent,
- state_schema=Talk2Scholars,
- checkpointer=MemorySaver(),
- )
+ next: Literal[*options]

  def supervisor_node(
  state: Talk2Scholars,
- ) -> Command[Literal["s2_agent", "__end__"]]:
+ ) -> Command:
  """
- Processes user queries and determines the next step in the conversation flow.
+ Handles the routing logic for the supervisor agent.

- This function examines the conversation state and decides whether to forward
- the query to a specialized sub-agent (e.g., S2 agent) or conclude the interaction.
+ This function determines the next agent to invoke based on the router prompt response.
+ If no further processing is required, it generates an AI response using the system prompt.

  Args:
- state (Talk2Scholars): The current state of the conversation, containing
- messages, papers, and metadata.
+ state (Talk2Scholars): The current conversation state, including messages
+ exchanged so far.

  Returns:
- Command: The next action to be executed, along with updated state data.
-
- Example:
- result = supervisor_node(current_state)
- next_step = result.goto
+ Command: A command dictating whether to invoke a sub-agent or generate a final response.
  """
- logger.info(
- "Supervisor node called - Messages count: %d",
- len(state["messages"]),
- )
-
- # Invoke the supervisor agent with configurable thread_id
- result = supervisor_agent.invoke(
- state, {"configurable": {"thread_id": thread_id}}
- )
- goto = "s2_agent"
- logger.info("Supervisor agent completed with result: %s", result)
-
+ messages = [SystemMessage(content=router_prompt)] + state["messages"]
+ structured_llm = llm_model.with_structured_output(Router)
+ response = structured_llm.invoke(messages)
+ goto = response.next
+ logger.info("Routing to: %s, Thread ID: %s", goto, thread_id)
+ if goto == "FINISH":
+ goto = END # Using END from langgraph.graph
+ # If no agents were called, and the last message was
+ # from the user, call the LLM to respond to the user
+ # with a slightly different system prompt.
+ if isinstance(messages[-1], HumanMessage):
+ response = llm_model.invoke(
+ [
+ SystemMessage(content=system_prompt),
+ ]
+ + messages[1:]
+ )
+ return Command(
+ goto=goto, update={"messages": AIMessage(content=response.content)}
+ )
+ # Go to the requested agent
  return Command(goto=goto)

  return supervisor_node


- def get_app(thread_id: str, llm_model: str = "gpt-4o-mini") -> StateGraph:
+ def get_app(
+ thread_id: str,
+ llm_model: BaseChatModel = ChatOpenAI(model="gpt-4o-mini", temperature=0),
+ ):
  """
- Initializes and returns the LangGraph application with a hierarchical agent system.
+ Initializes and returns the LangGraph-based hierarchical agent system.

- This function sets up the full agent architecture, including the supervisor
- and sub-agents, and compiles the LangGraph workflow for handling user queries.
+ This function constructs the agent workflow by defining nodes for the supervisor
+ and sub-agents. It compiles the graph using `StateGraph` to enable structured
+ conversational workflows.

  Args:
- thread_id (str): Unique identifier for the conversation session.
- llm_model (str, optional): The language model to be used. Defaults to "gpt-4o-mini".
+ thread_id (str): A unique session identifier for tracking conversation state.
+ llm_model (BaseChatModel, optional): The language model used for query processing.
+ Defaults to `ChatOpenAI(model="gpt-4o-mini", temperature=0)`.

  Returns:
- StateGraph: A compiled LangGraph application ready for query invocation.
+ StateGraph: A compiled LangGraph application that can process user queries.

  Example:
- app = get_app("thread_123")
- result = app.invoke(initial_state)
+ >>> app = get_app("thread_123")
+ >>> result = app.invoke(initial_state)
  """
  cfg = get_hydra_config()

  def call_s2_agent(
  state: Talk2Scholars,
- ) -> Command[Literal["supervisor", "__end__"]]:
+ ) -> Command[Literal["supervisor"]]:
  """
- Calls the Semantic Scholar (S2) agent to process academic paper queries.
+ Invokes the Semantic Scholar (S2) agent to retrieve relevant research papers.

- This function invokes the S2 agent, retrieves relevant research papers,
- and updates the conversation state accordingly.
+ This function calls the `s2_agent` and updates the conversation state with retrieved
+ academic papers. The agent uses Semantic Scholar's API to find papers based on
+ user queries.

  Args:
- state (Talk2Scholars): The current conversation state, including user queries
- and any previously retrieved papers.
+ state (Talk2Scholars): The current state of the conversation, containing messages
+ and any previous search results.

  Returns:
- Command: The next action to execute, along with updated messages and papers.
+ Command: A command to update the conversation state with the retrieved papers
+ and return control to the supervisor node.

  Example:
- result = call_s2_agent(current_state)
- next_step = result.goto
+ >>> result = call_s2_agent(current_state)
+ >>> next_step = result.goto
  """
- logger.info("Calling S2 agent with state: %s", state)
+ logger.info("Calling S2 agent")
  app = s2_agent.get_app(thread_id, llm_model)

  # Invoke the S2 agent, passing state,
@@ -177,31 +177,30 @@ def get_app(thread_id: str, llm_model: str = "gpt-4o-mini") -> StateGraph:
  }
  },
  )
- logger.info("S2 agent completed with response: %s", response)
-
+ logger.info("S2 agent completed with response")
  return Command(
- goto=END,
  update={
  "messages": response["messages"],
  "papers": response.get("papers", {}),
  "multi_papers": response.get("multi_papers", {}),
+ "last_displayed_papers": response.get("last_displayed_papers", {}),
  },
+ # Always return to supervisor
+ goto="supervisor",
  )

  # Initialize LLM
- logger.info("Using OpenAI model %s with temperature %s", llm_model, cfg.temperature)
- llm = ChatOpenAI(model=llm_model, temperature=cfg.temperature)
+ logger.info("Using model %s with temperature %s", llm_model, cfg.temperature)

  # Build the graph
  workflow = StateGraph(Talk2Scholars)
- supervisor = make_supervisor_node(llm, thread_id)
-
+ supervisor = make_supervisor_node(llm_model, thread_id)
+ # Add nodes
  workflow.add_node("supervisor", supervisor)
  workflow.add_node("s2_agent", call_s2_agent)
+ # Add edges
  workflow.add_edge(START, "supervisor")
- workflow.add_edge("s2_agent", END)
-
- # Compile the graph without initial state
+ # Compile the workflow
  app = workflow.compile(checkpointer=MemorySaver())
  logger.info("Main agent workflow compiled")
  return app
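
The supervisor in this release drops `create_react_agent`-based routing in favour of an LLM structured output constrained to a Pydantic `Router` schema. A minimal, self-contained sketch of that routing pattern, assuming an OpenAI chat model and an inline prompt (the package reads both from its Hydra config):

    from typing import Literal
    from pydantic import BaseModel
    from langchain_core.messages import SystemMessage, HumanMessage
    from langchain_openai import ChatOpenAI

    class Router(BaseModel):
        """Worker to route to next; FINISH ends the turn."""
        next: Literal["FINISH", "s2_agent"]

    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    router_prompt = "Route paper-search requests to s2_agent; otherwise respond with FINISH."
    messages = [SystemMessage(content=router_prompt),
                HumanMessage(content="Find papers about AI agents")]

    # Constrain the model's reply to the Router schema and read the chosen worker.
    decision = llm.with_structured_output(Router).invoke(messages)
    print(decision.next)  # expected: "s2_agent"
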
@@ -5,37 +5,80 @@ Agent for interacting with Semantic Scholar
  """

  import logging
+ from typing import Any, Dict
  import hydra
  from langchain_openai import ChatOpenAI
+ from langchain_core.language_models.chat_models import BaseChatModel
  from langgraph.graph import START, StateGraph
  from langgraph.prebuilt import create_react_agent, ToolNode
  from langgraph.checkpoint.memory import MemorySaver
  from ..state.state_talk2scholars import Talk2Scholars
  from ..tools.s2.search import search_tool as s2_search
  from ..tools.s2.display_results import display_results as s2_display
+ from ..tools.s2.query_results import query_results as s2_query_results
+ from ..tools.s2.retrieve_semantic_scholar_paper_id import (
+ retrieve_semantic_scholar_paper_id as s2_retrieve_id,
+ )
  from ..tools.s2.single_paper_rec import (
  get_single_paper_recommendations as s2_single_rec,
  )
  from ..tools.s2.multi_paper_rec import get_multi_paper_recommendations as s2_multi_rec

-
  # Initialize logger
  logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)


- def get_app(uniq_id, llm_model="gpt-4o-mini"):
+ def get_app(
+ uniq_id, llm_model: BaseChatModel = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+ ):
  """
- This function returns the langraph app.
+ Initializes and returns the LangGraph application for the Semantic Scholar (S2) agent.
+
+ This function sets up the S2 agent, which integrates various tools to search, retrieve,
+ and display research papers from Semantic Scholar. The agent follows the ReAct pattern
+ for structured interaction.
+
+ Args:
+ uniq_id (str): Unique identifier for the current conversation session.
+ llm_model (BaseChatModel, optional): The language model to be used by the agent.
+ Defaults to `ChatOpenAI(model="gpt-4o-mini", temperature=0)`.
+
+ Returns:
+ StateGraph: A compiled LangGraph application that enables the S2 agent to process
+ user queries and retrieve research papers.
+
+ Example:
+ >>> app = get_app("thread_123")
+ >>> result = app.invoke(initial_state)
  """

- def agent_s2_node(state: Talk2Scholars):
+ # def agent_s2_node(state: Talk2Scholars) -> Command[Literal["supervisor"]]:
+ def agent_s2_node(state: Talk2Scholars) -> Dict[str, Any]:
  """
- This function calls the model.
+ Processes the user query and retrieves relevant research papers.
+
+ This function calls the language model using the configured `ReAct` agent to analyze
+ the state and generate an appropriate response. The function then returns control
+ to the main supervisor.
+
+ Args:
+ state (Talk2Scholars): The current conversation state, including messages exchanged
+ and any previously retrieved research papers.
+
+ Returns:
+ Dict[str, Any]: A dictionary containing the updated conversation state.
+
+ Example:
+ >>> result = agent_s2_node(current_state)
+ >>> papers = result.get("papers", [])
  """
  logger.log(logging.INFO, "Creating Agent_S2 node with thread_id %s", uniq_id)
- response = model.invoke(state, {"configurable": {"thread_id": uniq_id}})
- return response
+ result = model.invoke(state, {"configurable": {"thread_id": uniq_id}})
+
+ return result
+
+ logger.log(logging.INFO, "thread_id, llm_model: %s, %s", uniq_id, llm_model)

  # Load hydra configuration
  logger.log(logging.INFO, "Load Hydra configuration for Talk2Scholars S2 agent.")
@@ -46,30 +89,31 @@ def get_app(uniq_id, llm_model="gpt-4o-mini"):
  cfg = cfg.agents.talk2scholars.s2_agent

  # Define the tools
- tools = ToolNode([s2_search, s2_display, s2_single_rec, s2_multi_rec])
+ tools = ToolNode(
+ [
+ s2_search,
+ s2_display,
+ s2_query_results,
+ s2_retrieve_id,
+ s2_single_rec,
+ s2_multi_rec,
+ ]
+ )

  # Define the model
  logger.log(logging.INFO, "Using OpenAI model %s", llm_model)
- llm = ChatOpenAI(model=llm_model, temperature=cfg.temperature)

  # Create the agent
  model = create_react_agent(
- llm,
+ llm_model,
  tools=tools,
  state_schema=Talk2Scholars,
- # prompt=cfg.s2_agent,
  state_modifier=cfg.s2_agent,
  checkpointer=MemorySaver(),
  )

- # Define a new graph
  workflow = StateGraph(Talk2Scholars)
-
- # Define the two nodes we will cycle between
  workflow.add_node("agent_s2", agent_s2_node)
-
- # Set the entrypoint as `agent`
- # This means that this node is the first one called
  workflow.add_edge(START, "agent_s2")

  # Initialize memory to persist state between graph runs
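
Both `get_app` functions now accept a `BaseChatModel` instance instead of a model-name string. A minimal usage sketch for the compiled S2 agent, assuming the package is installed and an OpenAI API key is configured (the returned keys depend on which tools the agent actually calls):

    from langchain_core.messages import HumanMessage
    from aiagents4pharma.talk2scholars.agents import s2_agent

    app = s2_agent.get_app("thread_123")  # defaults to ChatOpenAI(model="gpt-4o-mini")
    result = app.invoke(
        {"messages": [HumanMessage(content="Search for papers on CRISPR base editing")]},
        {"configurable": {"thread_id": "thread_123"}},
    )
    print(result.get("papers", {}))  # papers retrieved by the search tool, if it was called
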
@@ -5,14 +5,35 @@ openai_llms:
  - "gpt-4-turbo"
  - "gpt-3.5-turbo"
  temperature: 0
- main_agent: >
- You are an intelligent research assistant coordinating academic paper discovery and analysis.
+ system_prompt: >
+ You are the Talk2Scholars agent coordinating academic paper discovery and analysis.

- AVAILABLE TOOLS AND ROUTING:
- 1. semantic_scholar_agent:
- Access to tools:
- - search_tool: For paper discovery
- - display_results: For showing paper results
- - get_single_paper_recommendations: For single paper recommendations
- - get_multi_paper_recommendations: For multi-paper recommendations
- ROUTE TO THIS AGENT FOR: Any query about academic papers, research, or articles
+ You have access to the following agents:
+ 1. S2_agent: This agent can be used to search and recommend papers
+ from Semantic Scholar. Use this agent when the user asks for
+ general paper searches and recommendations. This agent can also
+ retrieve the Semantic Scholar ID of a paper.
+ router_prompt: >
+ You are a supervisor tasked with managing a conversation between the
+ following workers: {members}. Given the user request, respond with the
+ worker to act next. Each worker will perform a task and respond with
+ their results and status. When finished, respond with FINISH.
+
+ Here is a description of the workers:
+ 1. S2_agent: This agent can be used to search and recommend papers
+ from Semantic Scholar. Use this agent when the user asks for
+ general paper searches and recommendations. This agent can also
+ retrieve the Semantic Scholar ID of a paper. It can also be used to
+ provide more information about a paper.
+
+ Here are some instructions for the workers:
+ 1. Call the S2 agent for general paper searches and recommendations.
+ 2. The S2 agent has access to tools for querying and displaying papers.
+ 3. If the user wants suggestions for papers and you don’t have
+ a Semantic Scholar ID for it but do have the title from
+ the last displayed results, use the S2 agent to retrieve the
+ Semantic Scholar ID of the paper. Then, use the S2 agent again to display
+ recommendations for the paper.
+ 4. You can call the S2 agent to get more information about a paper based
+ on the context of the conversation.
+ 5. Respond with FINISH when all tasks are completed.
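
The `{members}` placeholder in `router_prompt` names the available workers before the prompt is sent as a system message; a minimal sketch of filling it with standard `str.format` (variable names are illustrative):

    members = ["s2_agent"]
    router_prompt = (
        "You are a supervisor tasked with managing a conversation between the "
        "following workers: {members}. When finished, respond with FINISH."
    )
    # Substitute the worker list into the template before routing.
    system_text = router_prompt.format(members=", ".join(members))
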
@@ -6,19 +6,11 @@ openai_llms:
  - "gpt-3.5-turbo"
  temperature: 0
  s2_agent: >
- You are a specialized academic research agent with access to tools for paper discovery and analysis.
-
- YOUR TOOLS:
- 1. search_tool:
- - Finds research papers based on user queries.
- - If no papers are found, it performs a new search.
-
- 2. display_results:
- - Shows the current research papers.
- - If no papers are found, it will instruct you to perform a search.
-
- 3. get_single_paper_recommendations:
- - Provides recommendations based on a single selected paper.
-
- 4. get_multi_paper_recommendations:
- - Provides recommendations based on multiple selected papers.
+ You are an academic research assistant with access to the
+ Semantic Scholar API for paper discovery and analysis.
+ You also have tools to gain more insights on the papers and
+ display them.
+ You must strictly rely on retrieved information and avoid
+ generating unsupported content. Do not generate hallucinations
+ or fabricate details of any article. Stay focused on accurate,
+ sourced academic insights.
@@ -1,14 +1,13 @@
- # # Page configuration
- # page:
- # title: "Talk2Scholars"
- # icon: "🤖"
- # layout: "wide"
+ # Page configuration
+ page:
+ title: "Talk2Scholars"
+ icon: "🤖"
+ layout: "wide"

  # Available LLM models
- llm_models:
- - "gpt-4o-mini"
- - "gpt-4-turbo"
- - "gpt-3.5-turbo"
+ llms:
+ available_models:
+ - "OpenAI/gpt-4o-mini"
  # # Chat UI configuration
  # chat:
  # assistant_avatar: "🤖"
@@ -16,6 +15,9 @@ llm_models:
  # input_placeholder: "Say something ..."
  # spinner_text: "Fetching response ..."

+ api_keys:
+ openai_key: "OPENAI_API_KEY"
+ nvidia_key: "NVIDIA_API_KEY"
  # # Feedback configuration
  # feedback:
  # type: "thumbs"
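
The new `api_keys` entries store environment-variable names rather than secrets; a minimal sketch of how a frontend might resolve them at runtime (the dictionary below simply mirrors the YAML above):

    import os

    api_keys = {"openai_key": "OPENAI_API_KEY", "nvidia_key": "NVIDIA_API_KEY"}
    # Look up the actual secret from the environment variable named in the config.
    openai_key = os.getenv(api_keys["openai_key"])
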
@@ -5,4 +5,5 @@ defaults:
  - tools/search: default
  - tools/single_paper_recommendation: default
  - tools/multi_paper_recommendation: default
+ - tools/retrieve_semantic_scholar_paper_id: default
  - app/frontend: default
@@ -9,6 +9,8 @@ api_fields:
  - "authors"
  - "citationCount"
  - "url"
+ # Commented fields that could be added later if needed
+ # - "externalIds"

  # Default headers and params
  headers:
@@ -0,0 +1,3 @@
+ """
+ Import all the modules in the package
+ """
@@ -10,6 +10,7 @@ api_fields:
  - "citationCount"
  - "url"
  # Commented fields that could be added later if needed
+ # - "externalIds"
  # - "publicationTypes"
  # - "openAccessPdf"

@@ -10,6 +10,7 @@ api_fields:
  - "citationCount"
  - "url"
  # Commented fields that could be added later if needed
+ # - "externalIds"
  # - "publicationTypes"
  # - "openAccessPdf"

@@ -1,9 +1,14 @@
  """
- This is the state file for the talk2scholars agent.
+ State management for the Talk2Scholars agent.
+
+ This module defines the state class `Talk2Scholars`, which maintains the conversation
+ context, retrieved papers, and other relevant metadata. The state ensures consistency
+ across agent interactions.
  """

  import logging
  from typing import Annotated, Any, Dict
+ from langchain_core.language_models import BaseChatModel
  from langgraph.prebuilt.chat_agent_executor import AgentState

  # Configure logging
@@ -12,22 +17,46 @@ logger = logging.getLogger(__name__)


  def replace_dict(existing: Dict[str, Any], new: Dict[str, Any]) -> Dict[str, Any]:
- """Replace the existing dict with the new one."""
+ """
+ Replaces the existing dictionary with a new dictionary.
+
+ This function logs the state update and ensures that the old state is replaced
+ with the new one.
+
+ Args:
+ existing (Dict[str, Any]): The current dictionary state.
+ new (Dict[str, Any]): The new dictionary state to replace the existing one.
+
+ Returns:
+ Dict[str, Any]: The updated dictionary state.
+
+ Example:
+ >>> old_state = {"papers": {"id1": "Paper 1"}}
+ >>> new_state = {"papers": {"id2": "Paper 2"}}
+ >>> updated_state = replace_dict(old_state, new_state)
+ >>> print(updated_state)
+ {"papers": {"id2": "Paper 2"}}
+ """
  logger.info("Updating existing state %s with the state dict: %s", existing, new)
  return new


  class Talk2Scholars(AgentState):
  """
- The state for the talk2scholars agent, inheriting from AgentState.
+ Represents the state of the Talk2Scholars agent.
+
+ This class extends `AgentState` to maintain conversation history, retrieved papers,
+ and interactions with the language model.

  Attributes:
- papers: Dictionary of papers from search results
- multi_papers: Dictionary of papers from multi-paper recommendations
- llm_model: Model being used
+ last_displayed_papers (Dict[str, Any]): Stores the most recently displayed papers.
+ papers (Dict[str, Any]): Stores the research papers retrieved from the agent's queries.
+ multi_papers (Dict[str, Any]): Stores multiple recommended papers from various sources.
+ llm_model (BaseChatModel): The language model instance used for generating responses.
  """

  # Agent state fields
+ last_displayed_papers: Annotated[Dict[str, Any], replace_dict]
  papers: Annotated[Dict[str, Any], replace_dict]
  multi_papers: Annotated[Dict[str, Any], replace_dict]
- llm_model: str
+ llm_model: BaseChatModel
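
Fields annotated with `replace_dict` are overwritten, not merged, whenever a node returns an update. A minimal sketch of that reducer behaviour in a standalone LangGraph state (the `State` class and node here are illustrative, not the package's own):

    from typing import Annotated, Any, Dict
    from typing_extensions import TypedDict
    from langgraph.graph import StateGraph, START, END

    def replace_dict(existing: Dict[str, Any], new: Dict[str, Any]) -> Dict[str, Any]:
        return new  # discard the old dictionary instead of merging

    class State(TypedDict):
        papers: Annotated[Dict[str, Any], replace_dict]

    def node(state: State) -> Dict[str, Any]:
        return {"papers": {"id2": "Paper 2"}}  # replaces whatever was stored before

    graph = StateGraph(State)
    graph.add_node("node", node)
    graph.add_edge(START, "node")
    graph.add_edge("node", END)
    app = graph.compile()
    print(app.invoke({"papers": {"id1": "Paper 1"}}))  # {'papers': {'id2': 'Paper 2'}}
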