aiagents4pharma 1.6.2__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
@@ -5,20 +5,23 @@ BasicoModel class for loading and simulating SBML models
  using the basico package.
  """

+ import logging
  from typing import Optional, Dict, Union
- from time import sleep
- from urllib.error import URLError
  from pydantic import Field, model_validator
  import pandas as pd
  import basico
  from .sys_bio_model import SysBioModel

+ # Initialize logger
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
  class BasicoModel(SysBioModel):
  """
  Model that loads and simulates SBML models using the basico package.
- Can load models from an SBML file or download them using a BioModels model_id.
+ Can load models from an SBML file or download them using a BioModels biomodel_id.
  """
- model_id: Optional[int] = Field(None, description="BioModels model ID to download and load")
+ biomodel_id: Optional[int] = Field(None, description="BioModels model ID to download and load")
  sbml_file_path: Optional[str] = Field(None, description="Path to an SBML file to load")
  simulation_results: Optional[str] = None
  name: Optional[str] = Field("", description="Name of the model")
@@ -28,27 +31,21 @@ class BasicoModel(SysBioModel):
  copasi_model: Optional[object] = None # Holds the loaded Copasi model

  @model_validator(mode="after")
- def check_model_id_or_sbml_file_path(self):
+ def check_biomodel_id_or_sbml_file_path(self):
  """
- Validate that either model_id or sbml_file_path is provided.
+ Validate that either biomodel_id or sbml_file_path is provided.
  """
- if not self.model_id and not self.sbml_file_path:
- raise ValueError("Either model_id or sbml_file_path must be provided.")
- if self.model_id:
- attempts = 0
- max_retries = 5
- while attempts < max_retries:
- try:
- self.copasi_model = basico.load_biomodel(self.model_id)
- break
- except URLError as e:
- attempts += 1
- sleep(10*attempts)
- if attempts >= max_retries:
- raise e
- self.description = basico.biomodels.get_model_info(self.model_id)["description"]
+ if not self.biomodel_id and not self.sbml_file_path:
+ logger.error("Either biomodel_id or sbml_file_path must be provided.")
+ raise ValueError("Either biomodel_id or sbml_file_path must be provided.")
+ if self.biomodel_id:
+ self.copasi_model = basico.load_biomodel(self.biomodel_id)
+ self.description = basico.biomodels.get_model_info(self.biomodel_id)["description"]
+ self.name = basico.model_info.get_model_name(model=self.copasi_model)
  elif self.sbml_file_path:
  self.copasi_model = basico.load_model(self.sbml_file_path)
+ self.description = basico.model_info.get_notes(model=self.copasi_model)
+ self.name = basico.model_info.get_model_name(model=self.copasi_model)
  return self

  def simulate(self,
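For orientation, a minimal usage sketch of the renamed field (not part of the diff; the BioModels ID below is illustrative, and loading by ID requires network access):

from aiagents4pharma.talk2biomodels.models.basico_model import BasicoModel

# Download a model by an (illustrative) BioModels ID; the validator now also
# fills in `name` and `description` via basico.
model = BasicoModel(biomodel_id=537)
# Or load a local SBML file instead:
# model = BasicoModel(sbml_file_path="path/to/model.xml")
# Omitting both logs an error and raises ValueError, per the validator above.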
@@ -92,10 +89,16 @@ class BasicoModel(SysBioModel):
  df_result = basico.run_time_course(model=self.copasi_model,
  intervals=interval,
  duration=duration)
+ # Replace curly braces in column headers with square brackets
+ # Because curly braces in the world of LLMS are used for
+ # structured output
  df_result.columns = df_result.columns.str.replace('{', '[', regex=False).\
  str.replace('}', ']', regex=False)
+ # Reset the index
  df_result.reset_index(inplace=True)
+ # Store the simulation results
  self.simulation_results = df_result
+ # Return copy of the simulation results
  return df_result.copy()

  def get_model_metadata(self) -> Dict[str, Union[str, int]]:
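The brace-to-bracket renaming above is plain pandas string replacement; a self-contained sketch with made-up column names:

import pandas as pd

df = pd.DataFrame({"Time": [0, 1], "{IL6}": [1.0, 0.8], "{STAT3}": [0.2, 0.5]})
# Curly braces would collide with prompt-template placeholders downstream,
# so they are swapped for square brackets.
df.columns = df.columns.str.replace('{', '[', regex=False).\
    str.replace('}', ']', regex=False)
print(df.columns.tolist())  # ['Time', '[IL6]', '[STAT3]']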
@@ -12,18 +12,18 @@ class SysBioModel(ABC, BaseModel):
  This class serves as a general structure for models, allowing
  different mathematical approaches to be implemented in subclasses.
  """
- model_id: Optional[int] = Field(None, description="BioModel ID of the model")
+ biomodel_id: Optional[int] = Field(None, description="BioModel ID of the model")
  sbml_file_path: Optional[str] = Field(None, description="Path to an SBML file")
  name: Optional[str] = Field(..., description="Name of the model")
  description: Optional[str] = Field("", description="Description of the model")

  @model_validator(mode="after")
- def check_model_id_or_sbml_file_path(self):
+ def check_biomodel_id_or_sbml_file_path(self):
  """
- Validate that either model_id or sbml_file_path is provided.
+ Validate that either biomodel_id or sbml_file_path is provided.
  """
- if not self.model_id and not self.sbml_file_path:
- raise ValueError("Either model_id or sbml_file_path must be provided.")
+ if not self.biomodel_id and not self.sbml_file_path:
+ raise ValueError("Either biomodel_id or sbml_file_path must be provided.")
  return self

  @abstractmethod
@@ -1,10 +1,8 @@
  '''
  This file is used to import all the modules in the package.
  '''
- # import everything from the module
- from . import ask_question
+ from . import search_models
  from . import simulate_model
+ from . import ask_question
  from . import custom_plotter
- from . import fetch_parameters
- from . import model_description
- from . import search_models
+ from . import get_modelinfo
@@ -4,34 +4,30 @@
  Tool for asking a question about the simulation results.
  """

- from typing import Type, Optional
- from dataclasses import dataclass
- import streamlit as st
+ import logging
+ from typing import Type, Annotated
+ import pandas as pd
  from pydantic import BaseModel, Field
  from langchain_core.tools.base import BaseTool
- from langchain_core.callbacks import CallbackManagerForToolRun
  from langchain.agents.agent_types import AgentType
  from langchain_experimental.agents import create_pandas_dataframe_agent
  from langchain_openai import ChatOpenAI
- from ..models.basico_model import BasicoModel
+ from langgraph.prebuilt import InjectedState

- @dataclass
- class ModelData:
- """
- Dataclass for storing the model data.
- """
- modelid: Optional[int] = None
- sbml_file_path: Optional[str] = None
- model_object: Optional[BasicoModel] = None
+ # Initialize logger
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)

  class AskQuestionInput(BaseModel):
  """
  Input schema for the AskQuestion tool.
  """
  question: str = Field(description="question about the simulation results")
+ state: Annotated[dict, InjectedState]

- # Note: It's important that every field has type hints. BaseTool is a
- # Pydantic class and not having type hints can lead to unexpected behavior.
+ # Note: It's important that every field has type hints.
+ # BaseTool is a Pydantic class and not having type hints
+ # can lead to unexpected behavior.
  class AskQuestionTool(BaseTool):
  """
  Tool for calculating the product of two numbers.
@@ -39,75 +35,38 @@ class AskQuestionTool(BaseTool):
  name: str = "ask_question"
  description: str = "A tool to ask question about the simulation results."
  args_schema: Type[BaseModel] = AskQuestionInput
- return_direct: bool = True
- st_session_key: str = None
- sys_bio_model: ModelData = ModelData()
+ return_direct: bool = False

  def _run(self,
  question: str,
- run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
+ state: Annotated[dict, InjectedState]) -> str:
  """
  Run the tool.

  Args:
  question (str): The question to ask about the simulation results.
+ state (dict): The state of the graph.
  run_manager (Optional[CallbackManagerForToolRun]): The CallbackManagerForToolRun object.

  Returns:
  str: The answer to the question.
  """
- st_session_key = self.st_session_key
- sys_bio_model = self.sys_bio_model
- # Check if sys_bio_model is provided in the input
- if sys_bio_model.modelid or sys_bio_model.sbml_file_path or sys_bio_model.model_object:
- if sys_bio_model.modelid is not None:
- model_object = BasicoModel(model_id=sys_bio_model.modelid)
- elif sys_bio_model.sbml_file_path is not None:
- model_object = BasicoModel(sbml_file_path=sys_bio_model.sbml_file_path)
- else:
- model_object = sys_bio_model.model_object
- else:
- # If the sys_bio_model is not provided in the input,
- # get it from the Streamlit session state
- if st_session_key:
- if st_session_key not in st.session_state:
- return f"Session key {st_session_key} not found in Streamlit session state."
- model_object = st.session_state[st_session_key]
- else:
- return "Please provide a valid model object or \
- Streamlit session key that contains the model object."
- # Update the object in the streamlit session state
- if st_session_key:
- st.session_state[st_session_key] = model_object
- if model_object.simulation_results is None:
- model_object.simulate()
- df = model_object.simulation_results
- # If there is a Streamlit session key,
- # display the simulation results
- if st_session_key:
- st.text(f"Simulation Results of the model {model_object.model_id}")
- st.dataframe(df, use_container_width = False, width = 650)
- # Check if run_manager's metadata has the key 'prompt_content'
+ logger.log(logging.INFO,
+ "Calling ask_question tool %s", question)
+ # Check if the simulation results are available
+ if 'dic_simulated_data' not in state:
+ return "Please run the simulation first before \
+ asking a question about the simulation results."
+ df = pd.DataFrame.from_dict(state['dic_simulated_data'])
  prompt_content = None
- if run_manager and 'prompt' in run_manager.metadata:
- prompt_content = run_manager.metadata['prompt']
+ # if run_manager and 'prompt' in run_manager.metadata:
+ # prompt_content = run_manager.metadata['prompt']
  # Create a pandas dataframe agent with OpenAI
- df_agent = create_pandas_dataframe_agent(ChatOpenAI(model="gpt-3.5-turbo"),
- allow_dangerous_code=True,
- agent_type=AgentType.OPENAI_FUNCTIONS,
- df=df,
- prefix=prompt_content)
+ df_agent = create_pandas_dataframe_agent(
+ ChatOpenAI(model=state['llm_model']),
+ allow_dangerous_code=True,
+ agent_type=AgentType.OPENAI_FUNCTIONS,
+ df=df,
+ prefix=prompt_content)
  llm_result = df_agent.invoke(question)
  return llm_result["output"]
-
- def get_metadata(self):
- """
- Get metadata for the tool.
-
- Returns:
- dict: The metadata for the tool.
- """
- return {
- "name": self.name,
- "description": self.description
- }
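A sketch (not from the package) of the LangGraph state the rewritten tool expects: simulation results live under 'dic_simulated_data' and the chat model name under 'llm_model'; all values below are made up.

import pandas as pd

state = {
    "llm_model": "gpt-4o-mini",  # hypothetical model name
    "dic_simulated_data": {
        "Time": [0.0, 1.0, 2.0],
        "[A]": [10.0, 8.5, 7.2],
        "[B]": [0.0, 1.5, 2.8],
    },
}
# The tool rebuilds a DataFrame from the injected state before handing it to
# the pandas-dataframe agent; a missing 'dic_simulated_data' key short-circuits
# with a "run the simulation first" message.
df = pd.DataFrame.from_dict(state["dic_simulated_data"])
print(df)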
@@ -5,75 +5,80 @@ Tool for plotting a custom figure.
  """

  import logging
- from typing import Type, List, TypedDict
+ from typing import Type, List, TypedDict, Annotated, Tuple, Union, Literal
  from pydantic import BaseModel, Field
- import streamlit as st
+ import pandas as pd
  from langchain_openai import ChatOpenAI
  from langchain_core.tools import BaseTool
- from langchain_core.prompts import ChatPromptTemplate
+ from langgraph.prebuilt import InjectedState

  # Initialize logger
  logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)

- class CustomHeader(TypedDict):
- """
- A list of headers extracted from the user question.
- """
- y: List[str]
-
  class CustomPlotterInput(BaseModel):
  """
  Input schema for the PlotImage tool.
  """
  question: str = Field(description="Description of the plot")
+ state: Annotated[dict, InjectedState]

- # Note: It's important that every field has type hints. BaseTool is a
- # Pydantic class and not having type hints can lead to unexpected behavior.
+ # Note: It's important that every field has type hints.
+ # BaseTool is a Pydantic class and not having type hints
+ # can lead to unexpected behavior.
  class CustomPlotterTool(BaseTool):
  """
  Tool for plotting a custom plot.
  """
  name: str = "custom_plotter"
- description: str = "A tool to plot or visualize the simulation results."
+ description: str = "A tool to plot a custom figure."
  args_schema: Type[BaseModel] = CustomPlotterInput
- st_session_key: str = None
+ response_format: str = "content_and_artifact"

- def _run(self, question: str) -> str:
+ def _run(self,
+ question: str,
+ state: Annotated[dict, InjectedState]
+ ) -> Tuple[str, Union[None, List[str]]]:
  """
  Run the tool.

  Args:
- question (str): The question to ask about the model description.
+ question (str): The question about the custom plot.
+ state (dict): The state of the graph.

  Returns:
  str: The answer to the question
  """
- # Check if sys_bio_model is provided
- model_object = st.session_state[self.st_session_key]
- if model_object is None:
- return "Please run the simulation first before plotting the figure."
- if model_object.simulation_results is None:
- return "Please run the simulation first before plotting the figure."
- df = model_object.simulation_results
- species_names = "\n".join(df.columns.tolist())
- llm = ChatOpenAI(model="gpt-4o")
- system = f"""
- Given the user question: {question},
- and the species: {species_names},
- which species are relevant to the user's question?
- """
+ logger.log(logging.INFO, "Calling custom_plotter tool %s", question)
+ # Check if the simulation results are available
+ # if 'dic_simulated_data' not in state:
+ # return "Please run the simulation first before plotting the figure.", None
+ df = pd.DataFrame.from_dict(state['dic_simulated_data'])
+ species_names = df.columns.tolist()
+ # Exclude the time column
+ species_names.remove('Time')
+ # In the following code, we extract the species
+ # from the user question. We use Literal to restrict
+ # the species names to the ones available in the
+ # simulation results.
+ class CustomHeader(TypedDict):
+ """
+ A list of species based on user question.
+ """
+ relevant_species: Union[None, List[Literal[*species_names]]] = Field(
+ description="List of species based on user question.")
+ # Create an instance of the LLM model
+ llm = ChatOpenAI(model=state['llm_model'], temperature=0)
  llm_with_structured_output = llm.with_structured_output(CustomHeader)
- system_prompt_structured_output = ChatPromptTemplate.from_template(system)
- chain = system_prompt_structured_output | llm_with_structured_output
- results = chain.invoke({"input": question})
- logger.info("Suggestions: %s", results)
+ results = llm_with_structured_output.invoke(question)
  extracted_species = []
- for species in results['y']:
- if species in df.columns.tolist():
+ # Extract the species from the results
+ # that are available in the simulation results
+ for species in results['relevant_species']:
+ if species in species_names:
  extracted_species.append(species)
  logger.info("Extracted species: %s", extracted_species)
- st.session_state.custom_simulation_results = extracted_species
  if len(extracted_species) == 0:
- return "No species found in the simulation results that matches the user prompt."
- return "Plotted the figure using the following species: " + str(extracted_species)
+ return "No species found in the simulation results that matches the user prompt.", None
+ content = f"Plotted custom figure with species: {', '.join(extracted_species)}"
+ return content, extracted_species
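The dynamic Literal trick above constrains the structured output to columns that actually exist in the results; a standalone sketch (assumes Python 3.11+ for star-unpacking inside Literal, species names are made up):

from typing import List, Literal, TypedDict, Union

species_names = ["[A]", "[B]", "[C]"]  # hypothetical columns, Time already removed

class CustomHeader(TypedDict):
    """Species the LLM may select, restricted to known column names."""
    relevant_species: Union[None, List[Literal[*species_names]]]

# Passed to llm.with_structured_output(CustomHeader), this schema only admits
# values from species_names, so hallucinated species are filtered out up front.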
@@ -0,0 +1,130 @@
+ #!/usr/bin/env python3
+
+ """
+ Tool for get model information.
+ """
+
+ import logging
+ from typing import Type, Optional, Annotated
+ from dataclasses import dataclass
+ import basico
+ from pydantic import BaseModel, Field
+ from langchain_core.tools import BaseTool
+ from langchain_core.messages import ToolMessage
+ from langchain_core.tools.base import InjectedToolCallId
+ from langgraph.prebuilt import InjectedState
+ from langgraph.types import Command
+ from .load_biomodel import ModelData, load_biomodel
+
+ # Initialize logger
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ @dataclass
+ class RequestedModelInfo:
+ """
+ Dataclass for storing the requested model information.
+ """
+ species: bool = Field(description="Get species from the model.")
+ parameters: bool = Field(description="Get parameters from the model.")
+ compartments: bool = Field(description="Get compartments from the model.")
+ units: bool = Field(description="Get units from the model.")
+ description: bool = Field(description="Get description from the model.")
+ name: bool = Field(description="Get name from the model.")
+
+ class GetModelInfoInput(BaseModel):
+ """
+ Input schema for the GetModelInfo tool.
+ """
+ requested_model_info: RequestedModelInfo = Field(description="requested model information")
+ sys_bio_model: ModelData = Field(description="model data")
+ tool_call_id: Annotated[str, InjectedToolCallId]
+ state: Annotated[dict, InjectedState]
+
+ # Note: It's important that every field has type hints. BaseTool is a
+ # Pydantic class and not having type hints can lead to unexpected behavior.
+ class GetModelInfoTool(BaseTool):
+ """
+ This tool ise used extract model information.
+ """
+ name: str = "get_parameters"
+ description: str = "A tool for extracting model information."
+ args_schema: Type[BaseModel] = GetModelInfoInput
+
+ def _run(self,
+ requested_model_info: RequestedModelInfo,
+ tool_call_id: Annotated[str, InjectedToolCallId],
+ state: Annotated[dict, InjectedState],
+ sys_bio_model: Optional[ModelData] = None,
+ ) -> Command:
+ """
+ Run the tool.
+
+ Args:
+ requested_model_info (RequestedModelInfo): The requested model information.
+ tool_call_id (str): The tool call ID. This is injected by the system.
+ state (dict): The state of the tool.
+ sys_bio_model (ModelData): The model data.
+
+ Returns:
+ Command: The updated state of the tool.
+ """
+ logger.log(logging.INFO,
+ "Calling get_modelinfo tool %s, %s",
+ sys_bio_model,
+ requested_model_info)
+ # print (state, 'state')
+ sbml_file_path = state['sbml_file_path'][-1] if len(state['sbml_file_path']) > 0 else None
+ model_obj = load_biomodel(sys_bio_model,
+ sbml_file_path=sbml_file_path)
+ dic_results = {}
+ # Extract species from the model
+ if requested_model_info.species:
+ df_species = basico.model_info.get_species(model=model_obj.copasi_model)
+ dic_results['Species'] = df_species.index.tolist()
+ dic_results['Species'] = ','.join(dic_results['Species'])
+
+ # Extract parameters from the model
+ if requested_model_info.parameters:
+ df_parameters = basico.model_info.get_parameters(model=model_obj.copasi_model)
+ dic_results['Parameters'] = df_parameters.index.tolist()
+ dic_results['Parameters'] = ','.join(dic_results['Parameters'])
+
+ # Extract compartments from the model
+ if requested_model_info.compartments:
+ df_compartments = basico.model_info.get_compartments(model=model_obj.copasi_model)
+ dic_results['Compartments'] = df_compartments.index.tolist()
+ dic_results['Compartments'] = ','.join(dic_results['Compartments'])
+
+ # Extract description from the model
+ if requested_model_info.description:
+ dic_results['Description'] = model_obj.description
+
+ # Extract description from the model
+ if requested_model_info.name:
+ dic_results['Name'] = model_obj.name
+
+ # Extract time unit from the model
+ if requested_model_info.units:
+ dic_results['Units'] = basico.model_info.get_model_units(model=model_obj.copasi_model)
+
+ # Prepare the dictionary of updated state for the model
+ dic_updated_state_for_model = {}
+ for key, value in {
+ "model_id": [sys_bio_model.biomodel_id],
+ "sbml_file_path": [sbml_file_path],
+ }.items():
+ if value:
+ dic_updated_state_for_model[key] = value
+
+ return Command(
+ update=dic_updated_state_for_model|{
+ # update the message history
+ "messages": [
+ ToolMessage(
+ content=dic_results,
+ tool_call_id=tool_call_id
+ )
+ ],
+ }
+ )
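A hypothetical sketch of the new tool's boolean switches (values and the example result names in the comment are invented, not from the package):

from aiagents4pharma.talk2biomodels.tools.get_modelinfo import RequestedModelInfo

# Ask only for species, parameters, and the model name; the tool assembles a
# dic_results dict of the rough shape {"Species": "A,B", "Parameters": "k1,k2",
# "Name": "..."} and returns it as ToolMessage content inside a Command update.
requested = RequestedModelInfo(species=True, parameters=True, compartments=False,
                               units=False, description=False, name=True)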
@@ -0,0 +1,29 @@
+ #!/usr/bin/env python3
+
+ """
+ Function for loading the BioModel.
+ """
+
+ from dataclasses import dataclass
+ from ..models.basico_model import BasicoModel
+
+ @dataclass
+ class ModelData:
+ """
+ Dataclass for storing the model data.
+ """
+ biomodel_id: int = None
+ # sbml_file_path: Optional[str] = None
+ use_uploaded_sbml_file: bool = False
+
+ def load_biomodel(sys_bio_model, sbml_file_path=None):
+ """
+ Load the BioModel.
+ """
+ model_object = None
+ if sys_bio_model.biomodel_id:
+ model_object = BasicoModel(biomodel_id=sys_bio_model.biomodel_id)
+ elif sbml_file_path:
+ model_object = BasicoModel(sbml_file_path=sbml_file_path)
+ return model_object
+ # return None
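Usage sketch for the new helper (illustrative values; downloading by ID needs network access to BioModels):

from aiagents4pharma.talk2biomodels.tools.load_biomodel import ModelData, load_biomodel

# Prefer the BioModels ID when present, otherwise fall back to an SBML file path.
model_object = load_biomodel(ModelData(biomodel_id=537))                 # downloads the model
# model_object = load_biomodel(ModelData(), sbml_file_path="model.xml")  # local file
# Returns None when neither source is available.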
@@ -4,21 +4,21 @@
  Tool for searching models based on search query.
  """

- from urllib.error import URLError
- from time import sleep
- from typing import Type
+ from typing import Type, Annotated
  from pydantic import BaseModel, Field
  from basico import biomodels
  from langchain_core.tools import BaseTool
  from langchain_core.output_parsers import StrOutputParser
  from langchain_core.prompts import ChatPromptTemplate
  from langchain_openai import ChatOpenAI
+ from langgraph.prebuilt import InjectedState

  class SearchModelsInput(BaseModel):
  """
  Input schema for the search models tool.
  """
  query: str = Field(description="Search models query", default=None)
+ state: Annotated[dict, InjectedState]

  # Note: It's important that every field has type hints. BaseTool is a
  # Pydantic class and not having type hints can lead to unexpected behavior.
@@ -31,7 +31,9 @@ class SearchModelsTool(BaseTool):
  args_schema: Type[BaseModel] = SearchModelsInput
  return_direct: bool = True

- def _run(self, query: str) -> str:
+ def _run(self,
+ query: str,
+ state: Annotated[dict, InjectedState]) -> dict:
  """
  Run the tool.

@@ -39,20 +41,10 @@ class SearchModelsTool(BaseTool):
  query (str): The search query.

  Returns:
- str: The answer to the question.
+ dict: The answer to the question in the form of a dictionary.
  """
- attempts = 0
- max_retries = 3
- while attempts < max_retries:
- try:
- search_results = biomodels.search_for_model(query)
- break
- except URLError as e:
- attempts += 1
- sleep(10)
- if attempts >= max_retries:
- raise e
- llm = ChatOpenAI(model="gpt-4o-mini")
+ search_results = biomodels.search_for_model(query)
+ llm = ChatOpenAI(model=state['llm_model'])
  # Check if run_manager's metadata has the key 'prompt_content'
  prompt_content = f'''
  Convert the input into a table.
@@ -80,15 +72,3 @@ class SearchModelsTool(BaseTool):
  parser = StrOutputParser()
  chain = prompt_template | llm | parser
  return chain.invoke({"input": search_results})
-
- def get_metadata(self):
- """
- Get metadata for the tool.
-
- Returns:
- dict: The metadata for the tool.
- """
- return {
- "name": self.name,
- "description": self.description
- }
@@ -4,22 +4,21 @@
  Tool for simulating a model.
  """

- from typing import Type, Union, List, Optional
+ import logging
  from dataclasses import dataclass
+ from typing import Type, Union, List, Annotated
  import basico
  from pydantic import BaseModel, Field
+ from langgraph.types import Command
+ from langgraph.prebuilt import InjectedState
  from langchain_core.tools import BaseTool
- import streamlit as st
- # import plotly.express as px
- from ..models.basico_model import BasicoModel
+ from langchain_core.messages import ToolMessage
+ from langchain_core.tools.base import InjectedToolCallId
+ from .load_biomodel import ModelData, load_biomodel

- @dataclass
- class ModelData:
- """
- Dataclass for storing the model data.
- """
- modelid: Optional[int] = None
- sbml_file_path: Optional[str] = None
+ # Initialize logger
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)

  @dataclass
  class TimeData:
@@ -34,17 +33,20 @@ class SpeciesData:
  """
  Dataclass for storing the species data.
  """
- species_name: List[str] = None
- species_concentration: List[Union[int, float]] = None
+ species_name: List[str] = Field(description="species name", default=None)
+ species_concentration: List[Union[int, float]] = Field(
+ description="initial species concentration",
+ default=None)

  @dataclass
  class TimeSpeciesNameConcentration:
  """
  Dataclass for storing the time, species name, and concentration data.
  """
- time: Union[int, float] = None
- species_name: str = None
- species_concentration: Union[int, float] = None
+ time: Union[int, float] = Field(description="time point where the event occurs")
+ species_name: str = Field(description="species name")
+ species_concentration: Union[int, float] = Field(
+ description="species concentration at the time point")

  @dataclass
  class RecurringData:
@@ -52,16 +54,43 @@ class RecurringData:
  Dataclass for storing the species and time data
  on recurring basis.
  """
- data: List[TimeSpeciesNameConcentration] = None
+ data: List[TimeSpeciesNameConcentration] = Field(
+ description="species and time data on recurring basis",
+ default=None)
+
+ @dataclass
+ class ArgumentData:
+ """
+ Dataclass for storing the argument data.
+ """
+ time_data: TimeData = Field(description="time data", default=None)
+ species_data: SpeciesData = Field(
+ description="species name and initial concentration data",
+ default=None)
+ recurring_data: RecurringData = Field(
+ description="species and time data on recurring basis",
+ default=None)
+
+ def add_rec_events(model_object, recurring_data):
+ """
+ Add recurring events to the model.
+ """
+ for row in recurring_data.data:
+ tp, sn, sc = row.time, row.species_name, row.species_concentration
+ basico.add_event(f'{sn}_{tp}',
+ f'Time > {tp}',
+ [[sn, str(sc)]],
+ model=model_object.copasi_model)

  class SimulateModelInput(BaseModel):
  """
  Input schema for the SimulateModel tool.
  """
- model_data: ModelData = Field(description="model data", default=None)
- time_data: TimeData = Field(description="time data", default=None)
- species_data: SpeciesData = Field(description="species data", default=None)
- recurring_data: RecurringData = Field(description="recurring data", default=None)
+ sys_bio_model: ModelData = Field(description="model data", default=None)
+ arg_data: ArgumentData = Field(description="time, species, and recurring data",
+ default=None)
+ tool_call_id: Annotated[str, InjectedToolCallId]
+ state: Annotated[dict, InjectedState]

  # Note: It's important that every field has type hints. BaseTool is a
  # Pydantic class and not having type hints can lead to unexpected behavior.
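A sketch of how a request might be packaged into the new ArgumentData container (all values invented; the duration/interval field names of TimeData are taken from their usage later in this diff, not from a shown definition):

from aiagents4pharma.talk2biomodels.tools.simulate_model import (
    ArgumentData, RecurringData, SpeciesData, TimeData,
    TimeSpeciesNameConcentration)

# Simulate for 200 time units at 20 intervals, start species "A" at 5.0,
# and reset it to 2.0 once Time exceeds 50 (handled by add_rec_events).
arg_data = ArgumentData(
    time_data=TimeData(duration=200.0, interval=20),
    species_data=SpeciesData(species_name=["A"], species_concentration=[5.0]),
    recurring_data=RecurringData(data=[
        TimeSpeciesNameConcentration(time=50, species_name="A",
                                     species_concentration=2.0)]),
)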
@@ -72,119 +101,78 @@ class SimulateModelTool(BaseTool):
  name: str = "simulate_model"
  description: str = "A tool to simulate a model."
  args_schema: Type[BaseModel] = SimulateModelInput
- st_session_key: str = None

  def _run(self,
- model_data: ModelData = None,
- time_data: TimeData = None,
- species_data: SpeciesData = None,
- recurring_data: RecurringData = None):
+ tool_call_id: Annotated[str, InjectedToolCallId],
+ state: Annotated[dict, InjectedState],
+ sys_bio_model: ModelData = None,
+ arg_data: ArgumentData = None
+ ) -> Command:
  """
  Run the tool.

  Args:
- model_data (Optional[ModelData]): The model data.
- time_data (Optional[TimeData]): The time data.
- species_data (Optional[SpeciesData]): The species data.
- recurring_data (Optional[RecurringData]): The recurring data.
+ tool_call_id (str): The tool call ID. This is injected by the system.
+ state (dict): The state of the tool.
+ sys_bio_model (ModelData): The model data.
+ arg_data (ArgumentData): The argument data.

  Returns:
  str: The result of the simulation.
  """
- # st_session_key = self.st_session_key
- # Retrieve the model ID, duration, and interval
- modelid = model_data.modelid if model_data is not None else None
- # duration = time_data.duration if time_data is not None else 100.0
- # interval = time_data.interval if time_data is not None else 10
+ logger.log(logging.INFO,
+ "Calling simulate_model tool %s, %s",
+ sys_bio_model,
+ arg_data)
+ sbml_file_path = state['sbml_file_path'][-1] if len(state['sbml_file_path']) > 0 else None
+ model_object = load_biomodel(sys_bio_model,
+ sbml_file_path=sbml_file_path)
  # Prepare the dictionary of species data
  # that will be passed to the simulate method
  # of the BasicoModel class
- # dic_species_data = None
- # if species_data is not None:
- # dic_species_data = dict(zip(species_data.species_name,
- # species_data.species_concentration))
- dic_species_data = dict(zip(species_data.species_name, \
- species_data.species_concentration)) \
- if species_data is not None else None
- # Retrieve the SBML file path from the Streamlit session state
- # otherwise retrieve it from the model_data object if the user
- # has provided it.
- sbml_file_path = model_data.sbml_file_path if model_data is not None else None
- if self.st_session_key:
- if self.st_session_key not in st.session_state:
- return f"Session key {self.st_session_key} not found in Streamlit session state."
- if 'sbml_file_path' in st.session_state:
- sbml_file_path = st.session_state.sbml_file_path
- # Check if both modelid and sbml_file_path are None
- if modelid is None and sbml_file_path is None:
- # Then load the model from the Streamlit session state
- # if the streamlit session exists
- if self.st_session_key:
- model_object = st.session_state[self.st_session_key]
- # If this model object is None, then return an error message
- if model_object is None:
- return "Please provide a BioModels ID or an SBML file path for simulation."
- # Retrieve the model ID from the model object
- modelid = model_object.model_id
- else:
- # Otherwise return an error message
- return "Please provide a BioModels ID or an SBML file path for simulation."
- elif modelid:
- # Create a BasicoModel object with the model ID
- # model_object = BasicoModel(model_id=modelid)
- model_object = BasicoModel(model_id=modelid)
- # Save the model object in the Streamlit session state
- st.session_state[self.st_session_key] = model_object
- elif sbml_file_path:
- # Create a BasicoModel object with the SBML file path
- model_object = BasicoModel(sbml_file_path=sbml_file_path)
- modelid = model_object.model_id
- # Save the model object in the Streamlit session state
- st.session_state[self.st_session_key] = model_object
- # Add recurring events (if any) to the model
- if recurring_data is not None:
- for row in recurring_data.data:
- tp, sn, sc = row.time, row.species_name, row.species_concentration
- basico.add_event(f'{sn}_{tp}',
- f'Time > {tp}',
- [[sn, str(sc)]],
- model=model_object.copasi_model)
- # print (f'Added event {sn}_{tp} at time {tp} \
- # for species {sn} with concentration {sc}')
+ duration = 100.0
+ interval = 10
+ dic_species_data = None
+ if arg_data:
+ # Prepare the dictionary of species data
+ if arg_data.species_data is not None:
+ dic_species_data = dict(zip(arg_data.species_data.species_name,
+ arg_data.species_data.species_concentration))
+ # Add recurring events (if any) to the model
+ if arg_data.recurring_data is not None:
+ add_rec_events(model_object, arg_data.recurring_data)
+ # Set the duration and interval
+ if arg_data.time_data is not None:
+ duration = arg_data.time_data.duration
+ interval = arg_data.time_data.interval
+
  # Simulate the model
  df = model_object.simulate(
  parameters=dic_species_data,
- duration=time_data.duration if time_data is not None else 100.0,
- interval=time_data.interval if time_data is not None else 10)
- # Convert the DataFrame to long format for plotting
- # and ignore the index column
- df = df.melt(id_vars='Time',
- var_name='Species',
- value_name='Concentration')
- # Plot the simulation results using Plotly
- # fig = px.line(df,
- # x='Time',
- # y='Concentration',
- # color='Species',
- # title=f"Concentration of Species over Time in the model {modelid}",
- # height=600,
- # width=800
- # )
- # Display the plot in Streamlit
- # st.plotly_chart(fig, use_container_width = False)
- # if modelid is None:
- # modelid = "internal"
- # return f"Simulation results for the model {modelid}."
- return "Simulation results for the model."
-
- def get_metadata(self):
- """
- Get metadata for the tool.
+ duration=duration,
+ interval=interval
+ )

- Returns:
- dict: The metadata for the tool.
- """
- return {
- "name": self.name,
- "description": self.description
- }
+ # Prepare the dictionary of updated state for the model
+ dic_updated_state_for_model = {}
+ for key, value in {
+ "model_id": [sys_bio_model.biomodel_id],
+ "sbml_file_path": [sbml_file_path],
+ }.items():
+ if value:
+ dic_updated_state_for_model[key] = value
+
+ # Return the updated state of the tool
+ return Command(
+ update=dic_updated_state_for_model|{
+ # update the state keys
+ "dic_simulated_data": df.to_dict(),
+ # update the message history
+ "messages": [
+ ToolMessage(
+ content="Simulation results are ready.",
+ tool_call_id=tool_call_id
+ )
+ ],
+ }
+ )
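For reference, a sketch of the update payload this Command carries back into the graph state (keys mirror the diff; values are made up, and the tool_call_id is a placeholder):

from langchain_core.messages import ToolMessage
from langgraph.types import Command

update = {
    "model_id": [537],                                            # illustrative BioModels ID
    "dic_simulated_data": {"Time": [0, 1], "[A]": [10.0, 9.1]},   # invented results
    "messages": [ToolMessage(content="Simulation results are ready.",
                             tool_call_id="call_0")],             # placeholder call ID
}
cmd = Command(update=update)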
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: aiagents4pharma
- Version: 1.6.2
+ Version: 1.7.0
  Summary: AI Agents for drug discovery, drug development, and other pharmaceutical R&D
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: MIT License
@@ -13,6 +13,7 @@ Requires-Dist: coverage==7.6.4
  Requires-Dist: einops==0.8.0
  Requires-Dist: gdown==5.2.0
  Requires-Dist: huggingface_hub==0.26.5
+ Requires-Dist: hydra-core==1.3.2
  Requires-Dist: joblib==1.4.2
  Requires-Dist: langchain==0.3.7
  Requires-Dist: langchain-community==0.3.5
@@ -43,7 +44,10 @@ Requires-Dist: mkdocs-include-markdown-plugin==7.1.2
  Requires-Dist: mkdocstrings==0.27.0
  Requires-Dist: streamlit-feedback

- [![TESTS](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests.yml/badge.svg?branch=feat%2Finitial-setup)](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests.yml)
+ [![Talk2BioModels](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2biomodels.yml/badge.svg)](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2biomodels.yml)
+ [![Talk2Cells](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2cells.yml/badge.svg)](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2cells.yml)
+ [![Talk2KnowledgeGraphs](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2knowledgegraphs.yml/badge.svg)](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2knowledgegraphs.yml)
+ [![Talk2Competitors](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2competitors.yml/badge.svg)](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2competitors.yml)

  <h1 align="center" style="border-bottom: none;">🤖 AIAgents4Pharma</h1>

@@ -1,15 +1,15 @@
  aiagents4pharma/__init__.py,sha256=ds5Ft1k2ww2m7N-yv5ZyymBw0LBuHVjwC9TuekQBL_c,151
  aiagents4pharma/talk2biomodels/__init__.py,sha256=MueXwbnuiQyiju7mW6NepFUiZJdodMzmUK3TkQT7iPk,99
  aiagents4pharma/talk2biomodels/models/__init__.py,sha256=5fTHHm3PVloYPNKXbgNlcPgv3-u28ZquxGydFYDfhJA,122
- aiagents4pharma/talk2biomodels/models/basico_model.py,sha256=UzTj6EUS7GiaNJogRUY5LkEYv-mUw2kO10v1NW89nPw,4597
- aiagents4pharma/talk2biomodels/models/sys_bio_model.py,sha256=xN-ZXCpIxNkEXuDIvi_AW6LpCyPqXReGyhLPyJIXNqs,1980
- aiagents4pharma/talk2biomodels/tools/__init__.py,sha256=qO3MYCJmXpoUlZYtSW3VxTXdt-yUNHyH-Ua578pdyhg,280
- aiagents4pharma/talk2biomodels/tools/ask_question.py,sha256=o9ae4s3wsDFr_pGBU1cSxKhJ7E2yjybIzG1Y4z6957Y,4534
- aiagents4pharma/talk2biomodels/tools/custom_plotter.py,sha256=CdgJjlHAkdyjnwPD6nHARsJXnx_CE0MWg5VOz4oBjY0,2910
- aiagents4pharma/talk2biomodels/tools/fetch_parameters.py,sha256=WdLGPdJi4DwGwYMoReByGE9O1Msv68jkVd02F8L2liM,2864
- aiagents4pharma/talk2biomodels/tools/model_description.py,sha256=lcVKVvh50wJ4BmB7xMnTZOtWjCmQUnkh6TQJsX-IjGw,5338
- aiagents4pharma/talk2biomodels/tools/search_models.py,sha256=cqsK_ApjrAyjEiXWsXTebDpuvEnA5nb6WYiUGayRkW4,3034
- aiagents4pharma/talk2biomodels/tools/simulate_model.py,sha256=MxlAy62SuonBbEbKmoUz0HcdfTWvk-x9WMSo17dBU9U,7552
+ aiagents4pharma/talk2biomodels/models/basico_model.py,sha256=js7ORLwbJPaIsko5oRToMMCh4l8LsN292OIvFzTfvRg,4946
+ aiagents4pharma/talk2biomodels/models/sys_bio_model.py,sha256=ylpPba2SA8kl68q3k1kJbiUdRYplPHykyslTQLDZ19I,1995
+ aiagents4pharma/talk2biomodels/tools/__init__.py,sha256=tkNUpUtZOe3hwbKQgp97e4gJ3p2d38G8WFHNO9A83uM,209
+ aiagents4pharma/talk2biomodels/tools/ask_question.py,sha256=UEdT46DIFZHlAOF4cNX4_s7VjHvbbiGpNmEY2-XW2iA,2655
+ aiagents4pharma/talk2biomodels/tools/custom_plotter.py,sha256=ESsdV_sz3wdynK9EbqZYhoY9r5zLto3NTZmUYvqS_TU,3282
+ aiagents4pharma/talk2biomodels/tools/get_modelinfo.py,sha256=czxjRa9jvjTow-POGU5wSV8_lwfknAezBnzCFA0gQ8E,5189
+ aiagents4pharma/talk2biomodels/tools/load_biomodel.py,sha256=QQOBCrDoEEWdSVCIYyBXL2WLSgEs3yB_fDpwALnsl-U,707
+ aiagents4pharma/talk2biomodels/tools/search_models.py,sha256=GzF313UgawtrA0fvYEh_AX2HKO9eMV77YH-R__Gw3iY,2630
+ aiagents4pharma/talk2biomodels/tools/simulate_model.py,sha256=Ax9nuHyINMIlqETujfYxqnzUsgLE-TUFV0EY5Qr7rPk,6249
  aiagents4pharma/talk2cells/__init__.py,sha256=zmOP5RAhabgKIQP-W4P4qKME2tG3fhAXM3MeO5_H8kE,120
  aiagents4pharma/talk2cells/agents/__init__.py,sha256=38nK2a_lEFRjO3qD6Fo9a3983ZCYat6hmJKWY61y2Mo,128
  aiagents4pharma/talk2cells/agents/scp_agent.py,sha256=gDMfhUNWHa_XWOqm1Ql6yLAdI_7bnIk5sRYn43H2sYk,3090
@@ -28,8 +28,8 @@ aiagents4pharma/talk2knowledgegraphs/utils/embeddings/__init__.py,sha256=xRb0x7S
  aiagents4pharma/talk2knowledgegraphs/utils/embeddings/embeddings.py,sha256=1nGznrAj-xT0xuSMBGz2dOujJ7M_IwSR84njxtxsy9A,2523
  aiagents4pharma/talk2knowledgegraphs/utils/embeddings/huggingface.py,sha256=2vi_elf6EgzfagFAO5QnL3a_aXZyN7B1EBziu44MTfM,3806
  aiagents4pharma/talk2knowledgegraphs/utils/embeddings/sentence_transformer.py,sha256=36iKlisOpMtGR5xfTAlSHXWvPqVC_Jbezod8kbBBMVg,2136
- aiagents4pharma-1.6.2.dist-info/LICENSE,sha256=IcIbyB1Hyk5ZDah03VNQvJkbNk2hkBCDqQ8qtnCvB4Q,1077
- aiagents4pharma-1.6.2.dist-info/METADATA,sha256=Lc-YDFCpxGDo0Y5kudger5uBuvmGP9l6edO6mtLM1BU,7242
- aiagents4pharma-1.6.2.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- aiagents4pharma-1.6.2.dist-info/top_level.txt,sha256=-AH8rMmrSnJtq7HaAObS78UU-cTCwvX660dSxeM7a0A,16
- aiagents4pharma-1.6.2.dist-info/RECORD,,
+ aiagents4pharma-1.7.0.dist-info/LICENSE,sha256=IcIbyB1Hyk5ZDah03VNQvJkbNk2hkBCDqQ8qtnCvB4Q,1077
+ aiagents4pharma-1.7.0.dist-info/METADATA,sha256=Ppwb-9bU-tkQ18AjrX6xmySzOrmcXIIszAL6j-ZpfCw,7988
+ aiagents4pharma-1.7.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ aiagents4pharma-1.7.0.dist-info/top_level.txt,sha256=-AH8rMmrSnJtq7HaAObS78UU-cTCwvX660dSxeM7a0A,16
+ aiagents4pharma-1.7.0.dist-info/RECORD,,
@@ -1,84 +0,0 @@
- #!/usr/bin/env python3
-
- """
- Tool for fetching species and parameters from the model.
- """
-
- from urllib.error import URLError
- from time import sleep
- from typing import Type
- import basico
- from pydantic import BaseModel, Field
- from langchain_core.tools import BaseTool
- import streamlit as st
-
- class FetchParametersInput(BaseModel):
- """
- Input schema for the ResetModel tool.
- """
- fetch_species: bool = Field(description="Fetch species from the model.")
- fetch_parameters: bool = Field(description="Fetch parameters from the model.")
-
- # Note: It's important that every field has type hints. BaseTool is a
- # Pydantic class and not having type hints can lead to unexpected behavior.
- class FetchParametersTool(BaseTool):
- """
- This tool fetches species and parameters from the model
- and returns them as a string in a dictionary.
- """
- name: str = "fetch_parameters"
- description: str = "A tool for fetching species and parameters from the model."
- args_schema: Type[BaseModel] = FetchParametersInput
- st_session_key: str = None
-
- def _run(self,
- fetch_species: bool,
- fetch_parameters: bool
- ) -> str:
- """
- Run the tool.
-
- Args:
- fetch_species (bool): Fetch species from the model.
- fetch_parameters (bool): Fetch parameters from the model.
-
- Returns:
- dict: The species and parameters from the model.
- """
- model_obj = st.session_state[self.st_session_key]
- # Extract species from the model
- species = []
- if fetch_species:
- df_species = self.get_species_and_parameters(model_obj, fetch_species=True)
- species = df_species.index.tolist()
- species = ','.join(species)
-
- # Extract parameters from the model
- parameters = []
- if fetch_parameters:
- df_parameters = self.get_species_and_parameters(model_obj, fetch_species=False)
- parameters = df_parameters.index.tolist()
- parameters = ','.join(parameters)
- return {'Species': species, 'Parameters': parameters}
-
- def get_species_and_parameters(self,
- model_obj,
- fetch_species: bool):
- """
- Get the species from the model.
- """
- attempts = 0
- max_retries = 3
- while attempts < max_retries:
- try:
- if fetch_species:
- df = basico.model_info.get_species(model=model_obj.copasi_model)
- else:
- df = basico.model_info.get_parameters(model=model_obj.copasi_model)
- break
- except URLError as e:
- attempts += 1
- sleep(10)
- if attempts >= max_retries:
- raise e
- return df
@@ -1,135 +0,0 @@
- #!/usr/bin/env python3
-
- """
- Tool for asking a question about the model description.
- """
-
- from typing import Type, Optional
- from dataclasses import dataclass
- from pydantic import BaseModel, Field, model_validator
- import streamlit as st
- from langchain_core.tools import BaseTool
- from langchain_core.output_parsers import StrOutputParser
- from langchain_core.callbacks import CallbackManagerForToolRun
- from langchain_core.prompts import ChatPromptTemplate
- from langchain_openai import ChatOpenAI
- from ..models.basico_model import BasicoModel
-
- @dataclass
- class ModelData:
- """
- Dataclass for storing the model data.
- """
- model_id: Optional[int] = None
- sbml_file_path: Optional[str] = None
- model_object: Optional[BasicoModel] = None
-
- # Check if model_object is an instance of BasicoModel
- # otherwise make it None. This is important because
- # sometimes the LLM may invoke the tool with an
- # inappropriate model_object.
- @model_validator(mode="before")
- @classmethod
- def check_model_object(cls, data):
- """
- Check if model_object is an instance of BasicoModel.
- """
- if 'model_object' in data:
- if not isinstance(data['model_object'], BasicoModel):
- data['model_object'] = None
- return data
-
- class ModelDescriptionInput(BaseModel):
- """
- Input schema for the ModelDescription tool.
- """
- question: str = Field(description="question about the model description")
- sys_bio_model: ModelData = Field(description="model data", default=None)
-
- # Note: It's important that every field has type hints. BaseTool is a
- # Pydantic class and not having type hints can lead to unexpected behavior.
- class ModelDescriptionTool(BaseTool):
- """
- Tool for returning the description of the specified BioModel.
- """
- name: str = "model_description"
- description: str = '''A tool to ask about the description of the model.'''
- args_schema: Type[BaseModel] = ModelDescriptionInput
- return_direct: bool = True
- st_session_key: str = None
-
- def _run(self,
- question: str,
- sys_bio_model: ModelData = ModelData(),
- run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
- """
- Run the tool.
-
- Args:
- question (str): The question to ask about the model description.
- run_manager (Optional[CallbackManagerForToolRun]): The CallbackManagerForToolRun object.
-
- Returns:
- str: The answer to the question.
- """
- st_session_key = self.st_session_key
- # Check if sys_bio_model is provided in the input schema
- if sys_bio_model.model_id or sys_bio_model.sbml_file_path \
- or sys_bio_model.model_object not in [None, "", {}]:
- if sys_bio_model.model_id:
- model_object = BasicoModel(model_id=sys_bio_model.model_id)
- elif sys_bio_model.sbml_file_path:
- model_object = BasicoModel(sbml_file_path=sys_bio_model.sbml_file_path)
- else:
- print (sys_bio_model.model_object, 'model_object')
- model_object = sys_bio_model.model_object
- if st_session_key:
- st.session_state[st_session_key] = model_object
- # Check if sys_bio_model is provided in the Streamlit session state
- elif st_session_key:
- if st_session_key not in st.session_state:
- return f"Session key {st_session_key} " \
- "not found in Streamlit session state."
- model_object = st.session_state[st_session_key]
- else:
- return "Please provide a valid model object or Streamlit "\
- "session key that contains the model object."
- # check if model_object is None
- if model_object is None:
- return "Please provide a BioModels ID or an SBML file path for the model."
- description = model_object.description
- if description in [None, ""]:
- return "No description found for the model."
- # Append the BioModel ID of the model to the description
- description = f"{description} (BioModel ID: {model_object.model_id})"
- llm = ChatOpenAI(model="gpt-3.5-turbo")
- # Check if run_manager's metadata has the key 'prompt_content'
- if run_manager and 'prompt' in run_manager.metadata:
- prompt_content = run_manager.metadata['prompt']
- else:
- prompt_content = '''
- Given the description of a System biology model:
- {description},
- answer the user question:
- {question}.
- '''
- prompt_template = ChatPromptTemplate.from_messages(
- [("system", prompt_content),
- ("user", "{description} {question}")]
- )
- parser = StrOutputParser()
- chain = prompt_template | llm | parser
- return chain.invoke({"description": description,
- "question": question})
-
- def get_metadata(self):
- """
- Get metadata for the tool.
-
- Returns:
- dict: The metadata for the tool.
- """
- return {
- "name": self.name,
- "description": self.description
- }