aiagents4pharma 1.21.0__py3-none-any.whl → 1.22.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. aiagents4pharma/talk2biomodels/configs/config.yaml +2 -1
  2. aiagents4pharma/talk2biomodels/configs/tools/__init__.py +1 -0
  3. aiagents4pharma/talk2biomodels/configs/tools/custom_plotter/__init__.py +3 -0
  4. aiagents4pharma/talk2biomodels/configs/tools/custom_plotter/default.yaml +8 -0
  5. aiagents4pharma/talk2biomodels/models/basico_model.py +2 -1
  6. aiagents4pharma/talk2biomodels/tests/test_getmodelinfo.py +31 -0
  7. aiagents4pharma/talk2biomodels/tests/test_integration.py +4 -4
  8. aiagents4pharma/talk2biomodels/tests/test_load_biomodel.py +32 -0
  9. aiagents4pharma/talk2biomodels/tests/test_search_models.py +13 -7
  10. aiagents4pharma/talk2biomodels/tools/custom_plotter.py +77 -33
  11. aiagents4pharma/talk2biomodels/tools/get_modelinfo.py +2 -0
  12. aiagents4pharma/talk2biomodels/tools/load_biomodel.py +15 -5
  13. aiagents4pharma/talk2biomodels/tools/parameter_scan.py +8 -2
  14. aiagents4pharma/talk2biomodels/tools/search_models.py +43 -38
  15. aiagents4pharma/talk2biomodels/tools/simulate_model.py +3 -1
  16. aiagents4pharma/talk2biomodels/tools/steady_state.py +3 -1
  17. aiagents4pharma/talk2biomodels/tools/utils.py +22 -0
  18. aiagents4pharma/talk2knowledgegraphs/configs/__init__.py +1 -0
  19. aiagents4pharma/talk2knowledgegraphs/configs/config.yaml +1 -0
  20. aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_nim_molmim.py +64 -0
  21. aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_pubchem.py +33 -0
  22. aiagents4pharma/talk2knowledgegraphs/tests/test_utils_pubchem_utils.py +16 -0
  23. aiagents4pharma/talk2knowledgegraphs/utils/__init__.py +1 -0
  24. aiagents4pharma/talk2knowledgegraphs/utils/embeddings/__init__.py +1 -0
  25. aiagents4pharma/talk2knowledgegraphs/utils/embeddings/nim_molmim.py +54 -0
  26. aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py +1 -0
  27. aiagents4pharma/talk2knowledgegraphs/utils/enrichments/pubchem_strings.py +49 -0
  28. aiagents4pharma/talk2knowledgegraphs/utils/pubchem_utils.py +42 -0
  29. {aiagents4pharma-1.21.0.dist-info → aiagents4pharma-1.22.1.dist-info}/METADATA +2 -1
  30. {aiagents4pharma-1.21.0.dist-info → aiagents4pharma-1.22.1.dist-info}/RECORD +33 -23
  31. {aiagents4pharma-1.21.0.dist-info → aiagents4pharma-1.22.1.dist-info}/LICENSE +0 -0
  32. {aiagents4pharma-1.21.0.dist-info → aiagents4pharma-1.22.1.dist-info}/WHEEL +0 -0
  33. {aiagents4pharma-1.21.0.dist-info → aiagents4pharma-1.22.1.dist-info}/top_level.txt +0 -0
aiagents4pharma/talk2biomodels/configs/config.yaml
@@ -2,4 +2,5 @@ defaults:
  - _self_
  - agents/t2b_agent: default
  - tools/ask_question: default
- - tools/get_annotation: default
+ - tools/get_annotation: default
+ - tools/custom_plotter: default
aiagents4pharma/talk2biomodels/configs/tools/__init__.py
@@ -3,3 +3,4 @@ Import all the modules in the package
  '''
  from . import ask_question
  from . import get_annotation
+ from . import custom_plotter
aiagents4pharma/talk2biomodels/configs/tools/custom_plotter/__init__.py
@@ -0,0 +1,3 @@
+ '''
+ Import all the modules in the package
+ '''
aiagents4pharma/talk2biomodels/configs/tools/custom_plotter/default.yaml
@@ -0,0 +1,8 @@
+ system_prompt_custom_header: >
+ You are custom plotter tool. You can extract species from the given
+ list of species names based on user question. If no species is relevant,
+ set the attribute `relevant_species` to None.
+ If the user asks for very specific species (for example, using the
+ keyword `only` or `exactly` in the question), set this attribute to
+ correspond strictly to the species available in the simulation results,
+ otherwise set it to None.
aiagents4pharma/talk2biomodels/models/basico_model.py
@@ -21,7 +21,8 @@ class BasicoModel(SysBioModel):
  Model that loads and simulates SBML models using the basico package.
  Can load models from an SBML file or download them using a BioModels biomodel_id.
  """
- biomodel_id: Optional[int] = Field(None, description="BioModels model ID to download and load")
+ biomodel_id: Optional[Union[int, str]] = Field(None,
+ description="BioModels model ID to download and load")
  sbml_file_path: Optional[str] = Field(None, description="Path to an SBML file to load")
  simulation_results: Optional[str] = None
  name: Optional[str] = Field("", description="Name of the model")
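Usage sketch (illustrative; not part of the packaged diff). With this change the biomodel_id field should accept either an integer BioModels ID or an accession string; a minimal example, assuming the import path listed in RECORD:

from aiagents4pharma.talk2biomodels.models.basico_model import BasicoModel

# Both identifier forms are now valid for the Pydantic field; instantiating
# the class is expected to fetch/load the corresponding BioModels entry.
model_a = BasicoModel(biomodel_id=537)
model_b = BasicoModel(biomodel_id="BIOMD0000000537")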
aiagents4pharma/talk2biomodels/tests/test_getmodelinfo.py
@@ -55,3 +55,34 @@ def test_model_with_no_species():
  test_condition = True
  break
  assert test_condition
+
+ def test_model_with_no_parameters():
+ '''
+ Test the get_modelinfo tool with a model that does not
+ return any parameters.
+
+ This should raise a tool error.
+ '''
+ unique_id = 12345
+ app = get_app(unique_id)
+ config = {"configurable": {"thread_id": unique_id}}
+ prompt = "Extract all parameters from model 10"
+ # Test the tool get_modelinfo
+ app.invoke(
+ {"messages": [HumanMessage(content=prompt)]},
+ config=config
+ )
+ current_state = app.get_state(config)
+ reversed_messages = current_state.values["messages"][::-1]
+ # Loop through the reversed messages until a
+ # ToolMessage is found.
+ test_condition = False
+ for msg in reversed_messages:
+ # Check if the message is a ToolMessage from the get_modelinfo tool
+ if isinstance(msg, ToolMessage) and msg.name == "get_modelinfo":
+ # Check if the message is an error message
+ if (msg.status == "error" and
+ "ValueError('Unable to extract parameters from the model.')" in msg.content):
+ test_condition = True
+ break
+ assert test_condition
aiagents4pharma/talk2biomodels/tests/test_integration.py
@@ -20,7 +20,7 @@ def test_integration():
  # ##########################################
  # ## Test simulate_model tool
  # ##########################################
- prompt = '''Simulate the model 537 for 100 hours and time intervals
+ prompt = '''Simulate the model BIOMD0000000537 for 100 hours and time intervals
  100 with an initial concentration of `DoseQ2W` set to 300 and `Dose`
  set to 0. Reset the concentration of `Ab{serum}` to 100 every 25 hours.'''
  # Test the tool get_modelinfo
@@ -51,12 +51,12 @@ def test_integration():
  assert '211' in assistant_msg

  ##########################################
- # Test custom_plotter tool when the
+ # Test the custom_plotter tool when the
  # simulation results are available but
  # the species is not available
  ##########################################
  prompt = """Call the custom_plotter tool to make a plot
- showing only species `TP53` and `Pyruvate`. Let me
+ showing only species 'Infected cases'. Let me
  know if these species were not found. Do not
  invoke any other tool."""
  # Update state
@@ -117,7 +117,7 @@ def test_integration():
  # These may contain additional visuals that
  # need to be displayed to the user.
  if msg.name == "custom_plotter":
- predicted_artifact = msg.artifact
+ predicted_artifact = msg.artifact['dic_data']
  break
  # Convert the artifact into a pandas dataframe
  # for easy comparison
aiagents4pharma/talk2biomodels/tests/test_load_biomodel.py
@@ -0,0 +1,32 @@
+ '''
+ Test cases for Talk2Biomodels.
+ '''
+
+ import pytest
+ from ..tools.load_biomodel import ModelData
+
+ def test_model_data_valid_biomodel_id():
+ '''
+ Test the ModelData class with valid
+ biomodel
+ '''
+ # Test with string biomodel_id starting with 'BIOMD'
+ model_data = ModelData(biomodel_id='BIOMD0000000537')
+ assert model_data.biomodel_id == 'BIOMD0000000537'
+
+ # Test with string biomodel_id starting with 'MODEL'
+ model_data = ModelData(biomodel_id='MODEL0000000537')
+ assert model_data.biomodel_id == 'MODEL0000000537'
+
+ def test_model_data_invalid_biomodel_id():
+ '''
+ Test the ModelData class with invalid
+ biomodel
+ '''
+ # Test with invalid string biomodel_id
+ with pytest.raises(ValueError):
+ ModelData(biomodel_id='12345')
+
+ # Test with float biomodel_id
+ with pytest.raises(ValueError):
+ ModelData(biomodel_id=123.45)
aiagents4pharma/talk2biomodels/tests/test_search_models.py
@@ -2,7 +2,7 @@
  Test cases for Talk2Biomodels search models tool.
  '''

- from langchain_core.messages import HumanMessage
+ from langchain_core.messages import HumanMessage, ToolMessage
  from langchain_nvidia_ai_endpoints import ChatNVIDIA
  from ..agents.t2b_agent import get_app

@@ -22,9 +22,15 @@ def test_search_models_tool():
  {"messages": [HumanMessage(content=prompt)]},
  config=config
  )
- assistant_msg = response["messages"][-1].content
- # Check if the assistant message is a string
- assert isinstance(assistant_msg, str)
- # Check if the assistant message contains the
- # biomodel id BIO0000000537
- assert "BIOMD0000000537" in assistant_msg
+ # Extract the assistant artifact which contains
+ # all the search results
+ found_model_537 = False
+ for msg in response["messages"]:
+ if isinstance(msg, ToolMessage) and msg.name == "search_models":
+ msg_artifact = msg.artifact
+ for model in msg_artifact["dic_data"]:
+ if model["id"] == "BIOMD0000000537":
+ found_model_537 = True
+ break
+ # Check if the model BIOMD0000000537 is found
+ assert found_model_537
aiagents4pharma/talk2biomodels/tools/custom_plotter.py
@@ -1,25 +1,81 @@
  #!/usr/bin/env python3

  """
- Tool for plotting a custom figure.
+ Tool for plotting a custom y-axis of a simulation plot.
  """

  import logging
  from typing import Type, Annotated, List, Tuple, Union, Literal
  from pydantic import BaseModel, Field
+ import hydra
  import pandas as pd
  from langchain_core.tools import BaseTool
+ from langchain_core.prompts import ChatPromptTemplate
  from langgraph.prebuilt import InjectedState
+ from .load_biomodel import ModelData, load_biomodel
+ from .utils import get_model_units

  # Initialize logger
  logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)

+ def extract_relevant_species(question, species_names, state):
+ """
+ Extract the relevant species from the user question.
+
+ Args:
+ question (str): The user question.
+ species_names (list): The species names available in the simulation results.
+ state (dict): The state of the graph.
+
+ Returns:
+ CustomHeader: The relevant species
+ """
+ # In the following code, we extract the species
+ # from the user question. We use Literal to restrict
+ # the species names to the ones available in the
+ # simulation results.
+ class CustomHeader(BaseModel):
+ """
+ A list of species based on user question.
+
+ This is a Pydantic model that restricts the species
+ names to the ones available in the simulation results.
+
+ If no species is relevant, set the attribute
+ `relevant_species` to None.
+ """
+ relevant_species: Union[None, List[Literal[*species_names]]] = Field(
+ description="This is a list of species based on the user question."
+ "It is restricted to the species available in the simulation results."
+ "If no species is relevant, set this attribute to None."
+ "If the user asks for very specific species (for example, using the"
+ "keyword `only` in the question), set this attribute to correspond "
+ "to the species available in the simulation results, otherwise set it to None."
+ )
+ # Load hydra configuration
+ with hydra.initialize(version_base=None, config_path="../configs"):
+ cfg = hydra.compose(config_name='config',
+ overrides=['tools/custom_plotter=default'])
+ cfg = cfg.tools.custom_plotter
+ # Get the system prompt
+ system_prompt = cfg.system_prompt_custom_header
+ # Create an instance of the LLM model
+ logging.log(logging.INFO, "LLM model: %s", state['llm_model'])
+ llm = state['llm_model']
+ llm_with_structured_output = llm.with_structured_output(CustomHeader)
+ prompt = ChatPromptTemplate.from_messages([("system", system_prompt),
+ ("human", "{input}")])
+ few_shot_structured_llm = prompt | llm_with_structured_output
+ return few_shot_structured_llm.invoke(question)
+
  class CustomPlotterInput(BaseModel):
  """
- Input schema for the PlotImage tool.
+ Input schema for the custom plotter tool.
  """
  question: str = Field(description="Description of the plot")
+ sys_bio_model: ModelData = Field(description="model data",
+ default=None)
  simulation_name: str = Field(description="Name assigned to the simulation")
  state: Annotated[dict, InjectedState]

@@ -31,15 +87,20 @@ class CustomPlotterInput(BaseModel):
  # can lead to unexpected behavior.
  class CustomPlotterTool(BaseTool):
  """
- Tool for making custom plots
+ Tool for custom plotting the y-axis of a plot.
  """
  name: str = "custom_plotter"
- description: str = "A tool to make custom plots of the simulation results"
+ description: str = '''A visualization tool designed to extract and display a subset
+ of the larger simulation plot generated by the simulate_model tool.
+ It allows users to specify particular species for the y-axis,
+ providing a more targeted view of key species without the clutter
+ of the full plot.'''
  args_schema: Type[BaseModel] = CustomPlotterInput
  response_format: str = "content_and_artifact"

  def _run(self,
  question: str,
+ sys_bio_model: ModelData,
  simulation_name: str,
  state: Annotated[dict, InjectedState]
  ) -> Tuple[str, Union[None, List[str]]]:
@@ -48,12 +109,17 @@ class CustomPlotterTool(BaseTool):

  Args:
  question (str): The question about the custom plot.
+ sys_bio_model (ModelData): The model data.
+ simulation_name (str): The name assigned to the simulation.
  state (dict): The state of the graph.

  Returns:
  str: The answer to the question
  """
- logger.log(logging.INFO, "Calling custom_plotter tool %s", question)
+ logger.log(logging.INFO, "Calling custom_plotter tool %s, %s", question, sys_bio_model)
+ # Load the model
+ sbml_file_path = state['sbml_file_path'][-1] if len(state['sbml_file_path']) > 0 else None
+ model_object = load_biomodel(sys_bio_model, sbml_file_path=sbml_file_path)
  dic_simulated_data = {}
  for data in state["dic_simulated_data"]:
  for key in data:
@@ -71,33 +137,9 @@ class CustomPlotterTool(BaseTool):
  # Exclude the time column
  species_names.remove('Time')
  logging.log(logging.INFO, "Species names: %s", species_names)
- # In the following code, we extract the species
- # from the user question. We use Literal to restrict
- # the species names to the ones available in the
- # simulation results.
- class CustomHeader(BaseModel):
- """
- A list of species based on user question.
-
- This is a Pydantic model that restricts the species
- names to the ones available in the simulation results.
-
- If no species is relevant, set the attribute
- `relevant_species` to None.
- """
- relevant_species: Union[None, List[Literal[*species_names]]] = Field(
- description="This is a list of species based on the user question."
- "It is restricted to the species available in the simulation results."
- "If no species is relevant, set this attribute to None."
- "If the user asks for very specific species (for example, using the"
- "keyword `only` in the question), set this attribute to correspond "
- "to the species available in the simulation results, otherwise set it to None."
- )
- # Create an instance of the LLM model
- logging.log(logging.INFO, "LLM model: %s", state['llm_model'])
- llm = state['llm_model']
- llm_with_structured_output = llm.with_structured_output(CustomHeader)
- results = llm_with_structured_output.invoke(question)
+ # Extract the relevant species from the user question
+ results = extract_relevant_species(question, species_names, state)
+ print (results)
  if results.relevant_species is None:
  raise ValueError("No species found in the simulation results \
  that matches the user prompt.")
@@ -110,4 +152,6 @@ class CustomPlotterTool(BaseTool):
  logging.info("Extracted species: %s", extracted_species)
  # Include the time column
  extracted_species.insert(0, 'Time')
- return f"Custom plot {simulation_name}", df[extracted_species].to_dict(orient='records')
+ return f"Custom plot {simulation_name}",{
+ 'dic_data': df[extracted_species].to_dict(orient='records')
+ }| get_model_units(model_object)
aiagents4pharma/talk2biomodels/tools/get_modelinfo.py
@@ -100,6 +100,8 @@ class GetModelInfoTool(BaseTool):
  # Extract parameters from the model
  if requested_model_info.parameters:
  df_parameters = basico.model_info.get_parameters(model=model_obj.copasi_model)
+ if df_parameters is None:
+ raise ValueError("Unable to extract parameters from the model.")
  # Convert index into a column
  df_parameters.reset_index(inplace=True)
  dic_results['Parameters'] = df_parameters[
aiagents4pharma/talk2biomodels/tools/load_biomodel.py
@@ -4,15 +4,25 @@
  Function for loading the BioModel.
  """

- from dataclasses import dataclass
+ from typing import Annotated, Any, Union
+ from pydantic import BaseModel, BeforeValidator
  from ..models.basico_model import BasicoModel

- @dataclass
- class ModelData:
+ def ensure_biomodel_id(value: Any) -> Any:
  """
- Dataclass for storing the model data.
+ Ensure that the biomodel_id is an integer or a string starting with 'BIOMD' or 'MODEL'.
  """
- biomodel_id: int = None
+ if isinstance(value, int):
+ return value
+ if isinstance(value, str) and (value.startswith("BIOMD") or value.startswith("MODEL")):
+ return value
+ raise ValueError("biomodel_id must be an integer or a string starting with 'BIOMD' or 'MODEL'.")
+
+ class ModelData(BaseModel):
+ """
+ Base model for the model data.
+ """
+ biomodel_id: Annotated[Union[int, str], BeforeValidator(ensure_biomodel_id)] = None
  # sbml_file_path: Optional[str] = None
  use_uploaded_sbml_file: bool = False

aiagents4pharma/talk2biomodels/tools/parameter_scan.py
@@ -17,6 +17,7 @@ from langchain_core.messages import ToolMessage
  from langchain_core.tools.base import InjectedToolCallId
  from .load_biomodel import ModelData, load_biomodel
  from .load_arguments import TimeData, SpeciesInitialData
+ from .utils import get_model_units

  # Initialize logger
  logging.basicConfig(level=logging.INFO)
@@ -119,7 +120,11 @@ def run_parameter_scan(model_object,
  """
  # Extract all parameter names from the model
  df_all_parameters = basico.model_info.get_parameters(model=model_object.copasi_model)
- all_parameters = df_all_parameters.index.tolist()
+ all_parameters = []
+ if df_all_parameters is not None:
+ # For example model 10 in the BioModels database
+ # has no parameters
+ all_parameters = df_all_parameters.index.tolist()

  # Extract all species name from the model
  df_all_species = basico.model_info.get_species(model=model_object.copasi_model)
@@ -280,7 +285,8 @@ class ParameterScanTool(BaseTool):
  "messages": [
  ToolMessage(
  content=f"Parameter scan results of {arg_data.experiment_name}",
- tool_call_id=tool_call_id
+ tool_call_id=tool_call_id,
+ artifact=get_model_units(model_object)
  )
  ],
  }
aiagents4pharma/talk2biomodels/tools/search_models.py
@@ -7,11 +7,12 @@ Tool for searching models based on search query.
  from typing import Type, Annotated
  import logging
  from pydantic import BaseModel, Field
+ import pandas as pd
  from basico import biomodels
+ from langgraph.types import Command
  from langchain_core.tools import BaseTool
- from langchain_core.output_parsers import StrOutputParser
- from langchain_core.prompts import ChatPromptTemplate
- from langgraph.prebuilt import InjectedState
+ from langchain_core.messages import ToolMessage
+ from langchain_core.tools.base import InjectedToolCallId

  # Initialize logger
  logging.basicConfig(level=logging.INFO)
@@ -22,7 +23,10 @@ class SearchModelsInput(BaseModel):
  Input schema for the search models tool.
  """
  query: str = Field(description="Search models query", default=None)
- state: Annotated[dict, InjectedState]
+ num_query: int = Field(description="Top number of models to search",
+ default=10,
+ le=100)
+ tool_call_id: Annotated[str, InjectedToolCallId]

  # Note: It's important that every field has type hints. BaseTool is a
  # Pydantic class and not having type hints can lead to unexpected behavior.
@@ -31,50 +35,51 @@ class SearchModelsTool(BaseTool):
  Tool for returning the search results based on the search query.
  """
  name: str = "search_models"
- description: str = "Search models in the BioMmodels database based on keywords."
+ description: str = "Search for only manually curated models in "
+ "the BioMmodels database based on keywords."
  args_schema: Type[BaseModel] = SearchModelsInput
  return_direct: bool = False

  def _run(self,
- query: str,
- state: Annotated[dict, InjectedState]) -> dict:
+ tool_call_id: Annotated[str, InjectedToolCallId],
+ query: str = None,
+ num_query: int = 10) -> dict:
  """
  Run the tool.

  Args:
  query (str): The search query.
+ num_query (int): The number of models to search.
+ tool_call_id (str): The tool call ID.

  Returns:
  dict: The answer to the question in the form of a dictionary.
  """
- logger.log(logging.INFO, "Searching models with the query and model: %s, %s",
- query, state['llm_model'])
- search_results = biomodels.search_for_model(query)
- llm = state['llm_model']
- # Check if run_manager's metadata has the key 'prompt_content'
- prompt_content = f'''
- Convert the input into a table.
-
- The table must contain the following columns:
- - #
- - BioModel ID
- - BioModel Name
- - Format
- - Submission Date
-
- Additional Guidelines:
- - The column # must contain the row number starting from 1.
- - Embed the url for each BioModel ID in the table
- in the first column in the markdown format.
- - The Submission Date must be in the format YYYY-MM-DD.
-
- Input:
- {input}.
- '''
- prompt_template = ChatPromptTemplate.from_messages(
- [("system", prompt_content),
- ("user", "{input}")]
- )
- parser = StrOutputParser()
- chain = prompt_template | llm | parser
- return chain.invoke({"input": search_results})
+ logger.log(logging.INFO, "Searching models with the query and number %s, %s",
+ query, num_query)
+ # Search for models based on the query
+ search_results = biomodels.search_for_model(query, num_results=num_query)
+ # Convert the search results to a pandas DataFrame
+ df = pd.DataFrame(search_results)
+ # Prepare a message to return
+ first_n = min(3, len(search_results))
+ content = f"Found {len(search_results)} manually curated models"
+ content += f" for the query: {query}."
+ # Pass the first 3 models to the LLM
+ # to avoid hallucinations
+ content += f" Here is the summary of the first {first_n} models:"
+ for i in range(first_n):
+ content += f"\nModel {i+1}: {search_results[i]['name']} (ID: {search_results[i]['id']})"
+ # Return the updated state of the tool
+ return Command(
+ update={
+ # update the message history
+ "messages": [
+ ToolMessage(
+ content=content,
+ tool_call_id=tool_call_id,
+ artifact={'dic_data': df.to_dict(orient='records')}
+ )
+ ],
+ }
+ )
aiagents4pharma/talk2biomodels/tools/simulate_model.py
@@ -14,6 +14,7 @@ from langchain_core.messages import ToolMessage
  from langchain_core.tools.base import InjectedToolCallId
  from .load_biomodel import ModelData, load_biomodel
  from .load_arguments import ArgumentData, add_rec_events
+ from .utils import get_model_units

  # Initialize logger
  logging.basicConfig(level=logging.INFO)
@@ -116,7 +117,8 @@ class SimulateModelTool(BaseTool):
  "messages": [
  ToolMessage(
  content=f"Simulation results of {arg_data.experiment_name}",
- tool_call_id=tool_call_id
+ tool_call_id=tool_call_id,
+ artifact=get_model_units(model_object)
  )
  ],
  }
aiagents4pharma/talk2biomodels/tools/steady_state.py
@@ -134,6 +134,7 @@ class SteadyStateTool(BaseTool):
  # Run the parameter scan
  df_steady_state = run_steady_state(model_object,
  dic_species_to_be_analyzed_before_experiment)
+ print (df_steady_state)
  # Prepare the dictionary of scanned data
  # that will be passed to the state of the graph
  dic_steady_state_data = {
@@ -160,7 +161,8 @@
  content=f"Steady state analysis of"
  f" {arg_data.experiment_name}"
  " was successful.",
- tool_call_id=tool_call_id
+ tool_call_id=tool_call_id,
+ artifact={'dic_data': df_steady_state.to_dict(orient='records')}
  )
  ],
  }
aiagents4pharma/talk2biomodels/tools/utils.py
@@ -0,0 +1,22 @@
+ #!/usr/bin/env python3
+
+ """
+ Utility functions for T2B tools.
+ """
+
+ import basico
+
+ def get_model_units(model_object):
+ """
+ Get the units of the model.
+
+ Args:
+ model_object: The model object.
+
+ Returns:
+ dict: The units of the model.
+ """
+ model_units = basico.model_info.get_model_units(model=model_object.copasi_model)
+ model_units_y = model_units['quantity_unit']
+ model_units_x = model_units['time_unit']
+ return {'y_axis_label': model_units_y, 'x_axis_label': model_units_x}
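Usage sketch (illustrative; not part of the packaged diff). get_model_units expects any object exposing a copasi_model attribute, such as a loaded BasicoModel:

from aiagents4pharma.talk2biomodels.models.basico_model import BasicoModel
from aiagents4pharma.talk2biomodels.tools.utils import get_model_units

# Load a model, then ask for its display units for plot labels.
model_object = BasicoModel(biomodel_id="BIOMD0000000537")
units = get_model_units(model_object)
# Expected shape: {'y_axis_label': <quantity unit>, 'x_axis_label': <time unit>}
print(units)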
aiagents4pharma/talk2knowledgegraphs/configs/__init__.py
@@ -5,3 +5,4 @@ Import all the modules in the package
  from . import agents
  from . import tools
  from . import app
+ from . import utils
aiagents4pharma/talk2knowledgegraphs/configs/config.yaml
@@ -4,4 +4,5 @@ defaults:
  - tools/subgraph_extraction: default
  - tools/subgraph_summarization: default
  - tools/graphrag_reasoning: default
+ - utils/pubchem_utils: default
  - app/frontend: default
aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_nim_molmim.py
@@ -0,0 +1,64 @@
+ #!/usr/bin/env python3
+
+ """
+ Test cases for utils/embeddings/nim_molmim.py
+ """
+
+ import unittest
+ from unittest.mock import patch, MagicMock
+ from ..utils.embeddings.nim_molmim import EmbeddingWithMOLMIM
+
+ class TestEmbeddingWithMOLMIM(unittest.TestCase):
+ """
+ Test cases for EmbeddingWithMOLMIM class.
+ """
+ def setUp(self):
+ self.base_url = "https://fake-nim-api.com/embeddings"
+ self.embeddings_model = EmbeddingWithMOLMIM(self.base_url)
+ self.test_texts = ["CCO", "CCC", "C=O"]
+ self.test_query = "CCO"
+ self.mock_response = {
+ "embeddings": [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]
+ }
+
+ @patch("requests.post")
+ def test_embed_documents(self, mock_post):
+ '''
+ Test the embed_documents method.
+ '''
+ # Mock the response from requests.post
+ mock_post.return_value = MagicMock()
+ mock_post.return_value.json.return_value = self.mock_response
+ embeddings = self.embeddings_model.embed_documents(self.test_texts)
+ # Assertions
+ self.assertEqual(embeddings, self.mock_response["embeddings"])
+ mock_post.assert_called_once_with(
+ self.base_url,
+ headers={
+ 'accept': 'application/json',
+ 'Content-Type': 'application/json'
+ },
+ data='{"sequences": ["CCO", "CCC", "C=O"]}',
+ timeout=60
+ )
+
+ @patch("requests.post")
+ def test_embed_query(self, mock_post):
+ '''
+ Test the embed_query method.
+ '''
+ # Mock the response from requests.post
+ mock_post.return_value = MagicMock()
+ mock_post.return_value.json.return_value = {"embeddings": [[0.1, 0.2, 0.3]]}
+ embedding = self.embeddings_model.embed_query(self.test_query)
+ # Assertions
+ self.assertEqual(embedding, [[0.1, 0.2, 0.3]])
+ mock_post.assert_called_once_with(
+ self.base_url,
+ headers={
+ 'accept': 'application/json',
+ 'Content-Type': 'application/json'
+ },
+ data='{"sequences": ["CCO"]}',
+ timeout=60
+ )
aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_pubchem.py
@@ -0,0 +1,33 @@
+ #!/usr/bin/env python3
+
+ """
+ Test cases for utils/enrichments/pubchem_strings.py
+ """
+
+ import pytest
+ from ..utils.enrichments.pubchem_strings import EnrichmentWithPubChem
+
+ # In this test, we will consider 2 examples:
+ # 1. PubChem ID: 5311000 (Alclometasone)
+ # 2. PubChem ID: 1X (Fake ID)
+ # The expected SMILES representation for the first PubChem ID is:
+ SMILES_FIRST = 'C[C@@H]1C[C@H]2[C@@H]3[C@@H](CC4=CC(=O)C=C[C@@]'
+ SMILES_FIRST += '4([C@H]3[C@H](C[C@@]2([C@]1(C(=O)CO)O)C)O)C)Cl'
+ # The expected SMILES representation for the second PubChem ID is None.
+
+ @pytest.fixture(name="enrich_obj")
+ def fixture_pubchem_config():
+ """Return a dictionary with the configuration for the PubChem enrichment."""
+ return EnrichmentWithPubChem()
+
+ def test_enrich_documents(enrich_obj):
+ """Test the enrich_documents method."""
+ pubchem_ids = ["5311000", "1X"]
+ enriched_strings = enrich_obj.enrich_documents(pubchem_ids)
+ assert enriched_strings == [SMILES_FIRST, None]
+
+ def test_enrich_documents_with_rag(enrich_obj):
+ """Test the enrich_documents_with_rag method."""
+ pubchem_ids = ["5311000", "1X"]
+ enriched_strings = enrich_obj.enrich_documents_with_rag(pubchem_ids, None)
+ assert enriched_strings == [SMILES_FIRST, None]
aiagents4pharma/talk2knowledgegraphs/tests/test_utils_pubchem_utils.py
@@ -0,0 +1,16 @@
+ """
+ Test cases for utils/pubchem_utils.py
+ """
+
+ from ..utils import pubchem_utils
+
+ def test_drugbank_id2pubchem_cid():
+ """
+ Test the drugbank_id2pubchem_cid method.
+
+ The DrugBank ID for Alclometasone is DB00240.
+ The PubChem CID for Alclometasone is 5311000.
+ """
+ drugbank_id = "DB00240"
+ pubchem_cid = pubchem_utils.drugbank_id2pubchem_cid(drugbank_id)
+ assert pubchem_cid == 5311000
aiagents4pharma/talk2knowledgegraphs/utils/__init__.py
@@ -5,3 +5,4 @@ from . import embeddings
  from . import enrichments
  from . import extractions
  from . import kg_utils
+ from . import pubchem_utils
aiagents4pharma/talk2knowledgegraphs/utils/embeddings/__init__.py
@@ -5,3 +5,4 @@ from . import embeddings
  from . import sentence_transformer
  from . import huggingface
  from . import ollama
+ from . import nim_molmim
aiagents4pharma/talk2knowledgegraphs/utils/embeddings/nim_molmim.py
@@ -0,0 +1,54 @@
+ """
+ Embedding class using MOLMIM model from NVIDIA NIM.
+ """
+
+ import json
+ from typing import List
+ import requests
+ from .embeddings import Embeddings
+
+ class EmbeddingWithMOLMIM(Embeddings):
+ """
+ Embedding class using MOLMIM model from NVIDIA NIM
+ """
+ def __init__(self, base_url: str):
+ """
+ Initialize the EmbeddingWithMOLMIM class.
+
+ Args:
+ base_url: The base URL for the NIM/MOLMIM model.
+ """
+ # Set base URL
+ self.base_url = base_url
+
+ def embed_documents(self, texts: List[str]) -> List[float]:
+ """
+ Generate embedding for a list of SMILES strings using MOLMIM model.
+
+ Args:
+ texts: The list of SMILES strings to be embedded.
+
+ Returns:
+ The list of embeddings for the given SMILES strings.
+ """
+ headers = {
+ 'accept': 'application/json',
+ 'Content-Type': 'application/json'
+ }
+ data = json.dumps({"sequences": texts})
+ response = requests.post(self.base_url, headers=headers, data=data, timeout=60)
+ embeddings = response.json()["embeddings"]
+ return embeddings
+
+ def embed_query(self, text: str) -> List[float]:
+ """
+ Generate embeddings for an input query using MOLMIM model.
+
+ Args:
+ text: A query to be embedded.
+ Returns:
+ The embeddings for the given query.
+ """
+ # Generate the embedding
+ embeddings = self.embed_documents([text])
+ return embeddings
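Usage sketch (illustrative; not part of the packaged diff). The class only needs the endpoint URL of a running MOLMIM NIM service; the URL below is a placeholder, mirroring the mocked one in the tests above:

from aiagents4pharma.talk2knowledgegraphs.utils.embeddings.nim_molmim import EmbeddingWithMOLMIM

# Placeholder endpoint; point this at your deployed MOLMIM NIM service.
embedder = EmbeddingWithMOLMIM("https://your-nim-host/embeddings")
vectors = embedder.embed_documents(["CCO", "CCC", "C=O"])  # one embedding per SMILES string
query_vector = embedder.embed_query("CCO")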
aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py
@@ -3,3 +3,4 @@ This package contains modules to use the enrichment model
  """
  from . import enrichments
  from . import ollama
+ from . import pubchem_strings
aiagents4pharma/talk2knowledgegraphs/utils/enrichments/pubchem_strings.py
@@ -0,0 +1,49 @@
+ #!/usr/bin/env python3
+
+ """
+ Enrichment class for enriching PubChem IDs with their STRINGS representation.
+ """
+
+ from typing import List
+ import pubchempy as pcp
+ from .enrichments import Enrichments
+
+ class EnrichmentWithPubChem(Enrichments):
+ """
+ Enrichment class using PubChem
+ """
+ def enrich_documents(self, texts: List[str]) -> List[str]:
+ """
+ Enrich a list of input PubChem IDs with their STRINGS representation.
+
+ Args:
+ texts: The list of pubchem IDs to be enriched.
+
+ Returns:
+ The list of enriched STRINGS
+ """
+
+ enriched_pubchem_ids = []
+ pubchem_cids = texts
+ for pubchem_cid in pubchem_cids:
+ try:
+ c = pcp.Compound.from_cid(pubchem_cid)
+ except pcp.BadRequestError:
+ enriched_pubchem_ids.append(None)
+ continue
+ enriched_pubchem_ids.append(c.isomeric_smiles)
+
+ return enriched_pubchem_ids
+
+ def enrich_documents_with_rag(self, texts, docs):
+ """
+ Enrich a list of input PubChem IDs with their STRINGS representation.
+
+ Args:
+ texts: The list of pubchem IDs to be enriched.
+ docs: None
+
+ Returns:
+ The list of enriched STRINGS
+ """
+ return self.enrich_documents(texts)
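Usage sketch (illustrative; not part of the packaged diff). enrich_documents maps PubChem CIDs to isomeric SMILES and returns None for CIDs that PubChem rejects, as the accompanying tests show:

from aiagents4pharma.talk2knowledgegraphs.utils.enrichments.pubchem_strings import EnrichmentWithPubChem

enricher = EnrichmentWithPubChem()
# '5311000' resolves to Alclometasone's SMILES; '1X' is invalid and yields None.
smiles_list = enricher.enrich_documents(["5311000", "1X"])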
aiagents4pharma/talk2knowledgegraphs/utils/pubchem_utils.py
@@ -0,0 +1,42 @@
+ #!/usr/bin/env python3
+
+ """
+ Enrichment class for enriching PubChem IDs with their STRINGS representation.
+ """
+
+ import logging
+ import requests
+ import hydra
+
+ # Initialize logger
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ def drugbank_id2pubchem_cid(drugbank_id):
+ """
+ Convert DrugBank ID to PubChem CID.
+
+ Args:
+ drugbank_id: The DrugBank ID of the drug.
+
+ Returns:
+ The PubChem CID of the drug.
+ """
+ logger.log(logging.INFO, "Load Hydra configuration for PubChem ID conversion.")
+ with hydra.initialize(version_base=None, config_path="../configs"):
+ cfg = hydra.compose(config_name='config',
+ overrides=['utils/pubchem_utils=default'])
+ cfg = cfg.utils.pubchem_utils
+ # Prepare the URL
+ pubchem_url_for_drug = cfg.drugbank_id_to_pubchem_cid_url + drugbank_id + '/JSON'
+ # Get the data
+ response = requests.get(pubchem_url_for_drug, timeout=60)
+ data = response.json()
+ # Extract the PubChem CID
+ cid = None
+ for substance in data.get("PC_Substances", []):
+ for compound in substance.get("compound", []):
+ if "id" in compound and "type" in compound["id"] and compound["id"]["type"] == 1:
+ cid = compound["id"].get("id", {}).get("cid")
+ break
+ return cid
{aiagents4pharma-1.21.0.dist-info → aiagents4pharma-1.22.1.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: aiagents4pharma
- Version: 1.21.0
+ Version: 1.22.1
  Summary: AI Agents for drug discovery, drug development, and other pharmaceutical R&D.
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: MIT License
@@ -30,6 +30,7 @@ Requires-Dist: ollama==0.4.6
  Requires-Dist: pandas==2.2.3
  Requires-Dist: pcst_fast==1.0.10
  Requires-Dist: plotly==5.24.1
+ Requires-Dist: pubchempy==1.0.4
  Requires-Dist: pydantic==2.9.2
  Requires-Dist: pylint==3.3.1
  Requires-Dist: pypdf==5.2.0
{aiagents4pharma-1.21.0.dist-info → aiagents4pharma-1.22.1.dist-info}/RECORD
@@ -7,17 +7,19 @@ aiagents4pharma/talk2biomodels/api/kegg.py,sha256=QzYDAfJ16E7tbHGxP8ZNWRizMkMRS_
  aiagents4pharma/talk2biomodels/api/ols.py,sha256=qq0Qy-gJDxanQW-HfCChDsTQsY1M41ua8hMlTnfuzrA,2202
  aiagents4pharma/talk2biomodels/api/uniprot.py,sha256=aPUAVBR7UYXDuuhDpKezAK2aTMzo-NxFYFq6C0W5u6U,1175
  aiagents4pharma/talk2biomodels/configs/__init__.py,sha256=safyFKhkd5Wlirl9dMZIHWDLTpY2oLw9wjIM7ZtLIHk,88
- aiagents4pharma/talk2biomodels/configs/config.yaml,sha256=X0CMsnx6hHNvV04wsENQSGXadx0aKIy6mziSopVUdZI,116
+ aiagents4pharma/talk2biomodels/configs/config.yaml,sha256=ysp8ONR1DVOazxk63S4VA7g4HP9q8xAJ3oMIdcICxSE,150
  aiagents4pharma/talk2biomodels/configs/agents/__init__.py,sha256=_ZoG8snICK2bidWtc2KOGs738LWg9_r66V9mOMnEb-E,71
  aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/__init__.py,sha256=-fAORvyFmG2iSvFOFDixmt9OTQRR58y89uhhu2EgbA8,46
  aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/default.yaml,sha256=pSViMKwKyMQDm8LzbfIaGdxph73iHYaXMiv5YOuxM7k,536
- aiagents4pharma/talk2biomodels/configs/tools/__init__.py,sha256=B08KWjj7bpizuTETGnnngrEVK4nzdWGREdoCCSw1Sm4,102
+ aiagents4pharma/talk2biomodels/configs/tools/__init__.py,sha256=AzOyyV24G1YQ4Tg4Y5IvntvWaqhoKkE04RIKSLqI_SA,131
  aiagents4pharma/talk2biomodels/configs/tools/ask_question/__init__.py,sha256=-fAORvyFmG2iSvFOFDixmt9OTQRR58y89uhhu2EgbA8,46
  aiagents4pharma/talk2biomodels/configs/tools/ask_question/default.yaml,sha256=7k49GkLbPy4v7w5-zfwkgBUPaH6R1IrRPCXvUiUiCKE,1300
+ aiagents4pharma/talk2biomodels/configs/tools/custom_plotter/__init__.py,sha256=-fAORvyFmG2iSvFOFDixmt9OTQRR58y89uhhu2EgbA8,46
+ aiagents4pharma/talk2biomodels/configs/tools/custom_plotter/default.yaml,sha256=14Aic9IDr1eOyeLo_YGMwZL40CYsgqOXix2j9ucB4EA,464
  aiagents4pharma/talk2biomodels/configs/tools/get_annotation/__init__.py,sha256=-fAORvyFmG2iSvFOFDixmt9OTQRR58y89uhhu2EgbA8,46
  aiagents4pharma/talk2biomodels/configs/tools/get_annotation/default.yaml,sha256=o5kqLJ5QGJsLMUhAqotudIMhxxNfPUVcDVH1tdRIutU,304
  aiagents4pharma/talk2biomodels/models/__init__.py,sha256=5fTHHm3PVloYPNKXbgNlcPgv3-u28ZquxGydFYDfhJA,122
- aiagents4pharma/talk2biomodels/models/basico_model.py,sha256=PH25FTOuUjsmw_UUxoRb-4kptOYpicEn4GqS0phS3nk,4807
+ aiagents4pharma/talk2biomodels/models/basico_model.py,sha256=0vHS-wPZ3kHTMf1KLrcMTgqz67tgK9sswffSQjAs6vw,4851
  aiagents4pharma/talk2biomodels/models/sys_bio_model.py,sha256=JeoiGQAvQABHnG0wKR2XBmmxqQdtgO6kxaLDUTUmr1s,2001
  aiagents4pharma/talk2biomodels/states/__init__.py,sha256=YLg1-N0D9qyRRLRqwqfLCLAqZYDtMVZTfI8Y0b_4tbA,139
  aiagents4pharma/talk2biomodels/states/state_talk2biomodels.py,sha256=S1UtXvocWR8Y9OVUp6pIDFnmaCcjbwmUbW8u79TuGcg,1508
@@ -26,26 +28,28 @@ aiagents4pharma/talk2biomodels/tests/test_api.py,sha256=7Kz2r5F5tjmn3F0LoM33oP-2
  aiagents4pharma/talk2biomodels/tests/test_ask_question.py,sha256=rdForKfj2zj2IXl6ntK9_I0AbgsCv8MXOZ2khBnaPms,1620
  aiagents4pharma/talk2biomodels/tests/test_basico_model.py,sha256=y82fpTJMPHwtXxlle1cGQ_2Bewwpxi0aJSVrVAYLhN0,2060
  aiagents4pharma/talk2biomodels/tests/test_get_annotation.py,sha256=GbobfjtCAOV0HddM4pb2o3c49Q05fKIM0Ubnf8BRxHM,8273
- aiagents4pharma/talk2biomodels/tests/test_getmodelinfo.py,sha256=Y1sFhoMF4mbvlag7D-dEvv6ytjmAqzMLPvSvaVEI_Qk,2045
- aiagents4pharma/talk2biomodels/tests/test_integration.py,sha256=XvQmnkIkAcgjmNwsW4FXiCwMMU7fpCpxfqhG2v2KyF4,5170
+ aiagents4pharma/talk2biomodels/tests/test_getmodelinfo.py,sha256=CIKeIm5FGTCyW-FR2nIo3Tr5AyAdxBelNngkdUd5-kk,3206
+ aiagents4pharma/talk2biomodels/tests/test_integration.py,sha256=9nYRqpJ_OtmUypuHm4URzoHhcpKEldCEwKxLzh8jcZU,5193
+ aiagents4pharma/talk2biomodels/tests/test_load_biomodel.py,sha256=Th5EVlfowwoM0tAu1R2oPISqW7SFduC-TYa3jIqIvC0,892
  aiagents4pharma/talk2biomodels/tests/test_param_scan.py,sha256=vRbnn4uVWFbfZbU4gVCjHi5WDCUrErut8ElzAPE5y84,2648
  aiagents4pharma/talk2biomodels/tests/test_query_article.py,sha256=lArISS111mQUZmjLY82PkRVPSTcN2h8KNF4gpTTvwL0,3185
- aiagents4pharma/talk2biomodels/tests/test_search_models.py,sha256=ttOzN78b06ixYF_SbSyrhQSnmCgOMlSoeG9Q1FeRAis,1028
+ aiagents4pharma/talk2biomodels/tests/test_search_models.py,sha256=543uNSIPVLrD1mwH0Ru5l1tlTZ452MIr4aIgCssMw3g,1270
  aiagents4pharma/talk2biomodels/tests/test_simulate_model.py,sha256=GjLE1DZpcKUAFSmoHD86vkfK0b5LJPM8a4WYyraazig,1487
  aiagents4pharma/talk2biomodels/tests/test_steady_state.py,sha256=2bzxj74vekazgLG7hiMALRiqP_4sVmue9cN4zCZ42T8,3556
  aiagents4pharma/talk2biomodels/tests/test_sys_bio_model.py,sha256=HSmBBViMi0jYf4gWX21IbppAfDzG0nr_S3KtKS9fZVQ,2165
  aiagents4pharma/talk2biomodels/tools/__init__.py,sha256=6H2HWv5Q4NZYEmw-Ti5KZnJlEqhaC2HXSDZa6kiSl-U,350
  aiagents4pharma/talk2biomodels/tools/ask_question.py,sha256=NZwKT7DHc4TW9e8LOkHaRG_nqUs_lEandvi89DTXilQ,4640
- aiagents4pharma/talk2biomodels/tools/custom_plotter.py,sha256=DsnQIKebchy6tgzLZnY7VLVesu3Es-OdqLfW61kIn3A,4762
+ aiagents4pharma/talk2biomodels/tools/custom_plotter.py,sha256=gzKUEz28Ioz_0L-FRF_EITDXZItix3j0PryyKQyHEEE,6875
  aiagents4pharma/talk2biomodels/tools/get_annotation.py,sha256=njxUmFuFwlzY3Doq-XlepGXJTMgnYfs88L4RkKSiptw,13438
- aiagents4pharma/talk2biomodels/tools/get_modelinfo.py,sha256=57dkXrBeRpyiaW3dYkoWIfr6zSsFHcWRhvUVNyLcvUs,6363
+ aiagents4pharma/talk2biomodels/tools/get_modelinfo.py,sha256=TPXH70CQ6cQ1UpTUPtc2MXKDAm5mfZX4lj-2CLRzv1Y,6482
  aiagents4pharma/talk2biomodels/tools/load_arguments.py,sha256=bffNIlBDTCSFYiZprA73yi8Jbb8z3Oh2decVNh1UnZc,4162
- aiagents4pharma/talk2biomodels/tools/load_biomodel.py,sha256=pyVzLQoMnuJYEwsjeOlqcUrbU1F1Z-pNlgkhFaoKpy0,689
- aiagents4pharma/talk2biomodels/tools/parameter_scan.py,sha256=aNh94LgBgVXBIczuNkbSsOZ9j54YVEdZWmZbZr7Nk8k,12465
+ aiagents4pharma/talk2biomodels/tools/load_biomodel.py,sha256=o-Z_ZqpHy2S-HvMfrB4aI3lrFuYIEG5u0z6uDKOJXak,1215
+ aiagents4pharma/talk2biomodels/tools/parameter_scan.py,sha256=-0OzZWoZxMImLNrQReT-PLEIu2sPa4zWg_Pd27EhMsE,12715
  aiagents4pharma/talk2biomodels/tools/query_article.py,sha256=6xfirRRMXN-wxqZxYYbKeEMXLMAHKl5IPShfpoOEBcc,2268
- aiagents4pharma/talk2biomodels/tools/search_models.py,sha256=LdvfCNeiO8fU6lszd7UUzk4NXP6ETuMsCRb2SpXcztw,2841
- aiagents4pharma/talk2biomodels/tools/simulate_model.py,sha256=qXs9lg9XgA7EaRiX3wBS8w_ug8tI-G3pzhcRg6dTRio,5060
- aiagents4pharma/talk2biomodels/tools/steady_state.py,sha256=j3ckuNlUtv7lT922MbN0JhT9H0JpWAdx2mLPwao6uu8,7123
+ aiagents4pharma/talk2biomodels/tools/search_models.py,sha256=uJ16fqTqL-SUgVQ1iXyaM8XA4uDwRtuot933XZ28Z3M,3169
+ aiagents4pharma/talk2biomodels/tools/simulate_model.py,sha256=KfQFDQuHF4cwEGqTPMDNmKSjQRRNSdJZsvqlj37W1A4,5159
+ aiagents4pharma/talk2biomodels/tools/steady_state.py,sha256=vGF7SH98mYWSbWAKxhYmzi96Gzw9VhzYMvXXPgAqBYk,7245
+ aiagents4pharma/talk2biomodels/tools/utils.py,sha256=iI5WlWcsrjE75zY2BoPAoVtS5NhE7vP6OyRnWil7OHY,520
  aiagents4pharma/talk2cells/__init__.py,sha256=zmOP5RAhabgKIQP-W4P4qKME2tG3fhAXM3MeO5_H8kE,120
  aiagents4pharma/talk2cells/agents/__init__.py,sha256=38nK2a_lEFRjO3qD6Fo9a3983ZCYat6hmJKWY61y2Mo,128
  aiagents4pharma/talk2cells/agents/scp_agent.py,sha256=gDMfhUNWHa_XWOqm1Ql6yLAdI_7bnIk5sRYn43H2sYk,3090
@@ -59,8 +63,8 @@ aiagents4pharma/talk2cells/tools/scp_agent/search_studies.py,sha256=MLe-twtFnOu-
  aiagents4pharma/talk2knowledgegraphs/__init__.py,sha256=Z0Eo7LTiKk0STsr8VI7wkCLq7PHrK1vYlH4I1hSNLiA,165
  aiagents4pharma/talk2knowledgegraphs/agents/__init__.py,sha256=iOAzuy_8A03tQDFtSBhC9dldUo62z5gfxcVtXAdLOJs,92
  aiagents4pharma/talk2knowledgegraphs/agents/t2kg_agent.py,sha256=j6MA1LB28mqpb6ZEmNLGcvDZvOnlGbJB9r7VXyEGask,3079
- aiagents4pharma/talk2knowledgegraphs/configs/__init__.py,sha256=Y49ucO22v9oe9EwFiXN6MU2wvyB3_ZBpmHwHbeh-ZVQ,106
- aiagents4pharma/talk2knowledgegraphs/configs/config.yaml,sha256=rwUIZ2t5j5hlFyre7VnV8zMsP0qpPTwvAFExgvQD6q0,196
+ aiagents4pharma/talk2knowledgegraphs/configs/__init__.py,sha256=4_DVdpahaJ55yPl0aZotlFA_MYWLFF2cubWyKtBVI_Q,126
+ aiagents4pharma/talk2knowledgegraphs/configs/config.yaml,sha256=bag4w3JCSqaojG37MTksy3ZehAPe3qoVzjIN2uh3nrc,229
  aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/__init__.py,sha256=-fAORvyFmG2iSvFOFDixmt9OTQRR58y89uhhu2EgbA8,46
  aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/default.yaml,sha256=ENCGROwYFpR6g4QD518h73sshdn3vPVpotBMk1QJcpU,4830
  aiagents4pharma/talk2knowledgegraphs/configs/app/__init__.py,sha256=fKfc3FR7g5KjY9b6jzrU6cwKTVVpkoVZQS3dvUowu34,69
@@ -91,26 +95,32 @@ aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_extraction.py,sha
  aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_summarization.py,sha256=oBqfspXXOxH04OQuPb8BCW0liIQTGKXtaPNSrPpQtFc,7597
  aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_embeddings.py,sha256=uYFoE_6zeU10_1mLLAHUr5c4S2XZMSc0Q_860o-KWEw,1517
  aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_huggingface.py,sha256=hzX84pheZdEsTtikF2KtBFiH44_xPjYXxLA6p4Ax1CY,1623
+ aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_nim_molmim.py,sha256=LwtTZ-M7lHGxvRrGBXbyIT8AkA3T2OpeKqtNq3RK7Ik,2164
  aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_ollama.py,sha256=jn-TrPwF0aR9kVoerwkbMZa3U6Hc6HjV6Zoau4qSH4g,1834
  aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_sentencetransformer.py,sha256=Qxo6WeIDRy8aLh1tNKw0kSlzmUj3MtTak63oW2YwB24,1327
  aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_enrichments.py,sha256=N6HRr4lWHXY7bTHe2uXJe4D_EG9WqZPibZne6qLl9_k,1447
  aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_ollama.py,sha256=JhY7axvVULLywDJ2ctA-gob5YPeaJYWsaMNjHT6L9CU,3021
+ aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_pubchem.py,sha256=bk27KElJxOvKJ2RTz4ftleExQPMyWWS755KKmlImzbk,1241
  aiagents4pharma/talk2knowledgegraphs/tests/test_utils_kg_utils.py,sha256=pal76wi7WgQWUNk56BrzfFV8jKpbDaHHdbwtgx_gXLI,2410
+ aiagents4pharma/talk2knowledgegraphs/tests/test_utils_pubchem_utils.py,sha256=C07YqUNYW7ofpKAzKh0lBovXKLvaiXFb3oJU6k1dvu4,411
  aiagents4pharma/talk2knowledgegraphs/tools/__init__.py,sha256=zpD4h7EYtyq0QNOqLd6bkxrPlPb2XN64ceI9ncgESrA,171
  aiagents4pharma/talk2knowledgegraphs/tools/graphrag_reasoning.py,sha256=OEuOFncDRdb7TQEGq4rkT5On-jI-R7Nt8K5EBzaND8w,5338
  aiagents4pharma/talk2knowledgegraphs/tools/load_arguments.py,sha256=zhmsRp-8vjB5rRekqTA07d3yb-42HWqng9dDMkvK6hM,623
  aiagents4pharma/talk2knowledgegraphs/tools/subgraph_extraction.py,sha256=te06QMFQfgJWrjaGrqpcOYeaV38jwm0KY_rXVSMHkeI,11468
  aiagents4pharma/talk2knowledgegraphs/tools/subgraph_summarization.py,sha256=mDSBOxopDfNhEJeU8fVI8b5lXTYrRzcc97aLbFgYSy4,4413
- aiagents4pharma/talk2knowledgegraphs/utils/__init__.py,sha256=Q9mzcSmkmhdnOn13fxGh1fNECYoUR5Y5CCuEJTIxwAI,167
+ aiagents4pharma/talk2knowledgegraphs/utils/__init__.py,sha256=cZqb3LZLmBnmyAtWFv2Z-4uJvQmx0M4zKsfiWrlM3Pk,195
  aiagents4pharma/talk2knowledgegraphs/utils/kg_utils.py,sha256=6vQnPkeOWae_8jePjhma3sJuMTngy0I0tqzdFt6OqKg,2507
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/__init__.py,sha256=4TGK0XIVkkfGOyrSVwFQ-Lp-rzH9CCl-fWcqkFJKRLc,174
+ aiagents4pharma/talk2knowledgegraphs/utils/pubchem_utils.py,sha256=IlrdGbRGD0IM7eMcpkOjuRjKNuH3lz_X8zN6RHwk61c,1340
+ aiagents4pharma/talk2knowledgegraphs/utils/embeddings/__init__.py,sha256=POSDrSdFAWsBCueOPD-Fok-ARdTywJU1ivwpT9EU1Kw,199
  aiagents4pharma/talk2knowledgegraphs/utils/embeddings/embeddings.py,sha256=1nGznrAj-xT0xuSMBGz2dOujJ7M_IwSR84njxtxsy9A,2523
  aiagents4pharma/talk2knowledgegraphs/utils/embeddings/huggingface.py,sha256=2vi_elf6EgzfagFAO5QnL3a_aXZyN7B1EBziu44MTfM,3806
+ aiagents4pharma/talk2knowledgegraphs/utils/embeddings/nim_molmim.py,sha256=XH6JNfmMS38UEU7UGJeeabHfRykharnQpQaqjO86OlQ,1537
  aiagents4pharma/talk2knowledgegraphs/utils/embeddings/ollama.py,sha256=8w0sjt3Ex5YJ_XvpKl9UbhdTiiaoMIarbPUxLBU-1Uw,2378
  aiagents4pharma/talk2knowledgegraphs/utils/embeddings/sentence_transformer.py,sha256=36iKlisOpMtGR5xfTAlSHXWvPqVC_Jbezod8kbBBMVg,2136
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py,sha256=tW426knki2DBIHcWyF_K04iMMdbpIn_e_TpPmTgz2dI,113
+ aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py,sha256=JKGavA-umsGX3ng17_UYAvDBdbg-W-mPn8Q6JfP7J9U,143
  aiagents4pharma/talk2knowledgegraphs/utils/enrichments/enrichments.py,sha256=Bx8x6zzk5614ApWB90N_iv4_Y_Uq0-KwUeBwYSdQMU4,924
  aiagents4pharma/talk2knowledgegraphs/utils/enrichments/ollama.py,sha256=8eoxR-VHo0G7ReQIwje7xEhE-SJlHdef7_wJRpnvFIc,4116
+ aiagents4pharma/talk2knowledgegraphs/utils/enrichments/pubchem_strings.py,sha256=qsVlDCGGDkUCv-R5_xFGhrtLS7P0CfagnM2qATwiOFM,1333
  aiagents4pharma/talk2knowledgegraphs/utils/extractions/__init__.py,sha256=7gwwtfzKhB8GuOBD47XRi0NprwEXkOzwNl5eeu-hDTI,86
  aiagents4pharma/talk2knowledgegraphs/utils/extractions/pcst.py,sha256=m5p0yoJb7I19ua5yeQfXPf7c4r6S1XPwttsrM7Qoy94,9336
  aiagents4pharma/talk2scholars/__init__.py,sha256=gphERyVKZHvOnMQsml7TIHlaIshHJ75R1J3FKExkfuY,120
@@ -152,8 +162,8 @@ aiagents4pharma/talk2scholars/tools/s2/query_results.py,sha256=EUfzRh5Qc_tMl5fDI
  aiagents4pharma/talk2scholars/tools/s2/retrieve_semantic_scholar_paper_id.py,sha256=Lg1L4HQCN2LaQEyWtLD73O67PMoXkPHi-Y8rCzHS0A4,2499
  aiagents4pharma/talk2scholars/tools/s2/search.py,sha256=mnBQWDuQ50UVw6B-bRuL8Ek1av-pEtdgzVMxpEA2BpI,4296
  aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py,sha256=xgnUj9W9JkeTvB2VJBJUAnia789GGNGqdqgJ_G16v2s,5120
- aiagents4pharma-1.21.0.dist-info/LICENSE,sha256=IcIbyB1Hyk5ZDah03VNQvJkbNk2hkBCDqQ8qtnCvB4Q,1077
- aiagents4pharma-1.21.0.dist-info/METADATA,sha256=YsjDHw3yfqfPClv0N3j35AObxHBhStDojFUUslyd_1Q,7757
- aiagents4pharma-1.21.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- aiagents4pharma-1.21.0.dist-info/top_level.txt,sha256=-AH8rMmrSnJtq7HaAObS78UU-cTCwvX660dSxeM7a0A,16
- aiagents4pharma-1.21.0.dist-info/RECORD,,
+ aiagents4pharma-1.22.1.dist-info/LICENSE,sha256=IcIbyB1Hyk5ZDah03VNQvJkbNk2hkBCDqQ8qtnCvB4Q,1077
+ aiagents4pharma-1.22.1.dist-info/METADATA,sha256=5mWYXohM-eUXLQ8w_3ZpIEWE4jmr3YBKEJWj9bzbgxg,7789
+ aiagents4pharma-1.22.1.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ aiagents4pharma-1.22.1.dist-info/top_level.txt,sha256=-AH8rMmrSnJtq7HaAObS78UU-cTCwvX660dSxeM7a0A,16
+ aiagents4pharma-1.22.1.dist-info/RECORD,,