aiagents4pharma 1.8.0__py3-none-any.whl → 1.15.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. aiagents4pharma/__init__.py +9 -5
  2. aiagents4pharma/configs/__init__.py +5 -0
  3. aiagents4pharma/configs/config.yaml +4 -0
  4. aiagents4pharma/configs/talk2biomodels/__init__.py +6 -0
  5. aiagents4pharma/configs/talk2biomodels/agents/__init__.py +5 -0
  6. aiagents4pharma/configs/talk2biomodels/agents/t2b_agent/__init__.py +3 -0
  7. aiagents4pharma/configs/talk2biomodels/agents/t2b_agent/default.yaml +14 -0
  8. aiagents4pharma/configs/talk2biomodels/tools/__init__.py +4 -0
  9. aiagents4pharma/configs/talk2biomodels/tools/ask_question/__init__.py +3 -0
  10. aiagents4pharma/talk2biomodels/__init__.py +3 -0
  11. aiagents4pharma/talk2biomodels/agents/__init__.py +5 -0
  12. aiagents4pharma/talk2biomodels/agents/t2b_agent.py +96 -0
  13. aiagents4pharma/talk2biomodels/api/__init__.py +6 -0
  14. aiagents4pharma/talk2biomodels/api/kegg.py +83 -0
  15. aiagents4pharma/talk2biomodels/api/ols.py +72 -0
  16. aiagents4pharma/talk2biomodels/api/uniprot.py +35 -0
  17. aiagents4pharma/talk2biomodels/models/basico_model.py +29 -32
  18. aiagents4pharma/talk2biomodels/models/sys_bio_model.py +9 -6
  19. aiagents4pharma/talk2biomodels/states/__init__.py +5 -0
  20. aiagents4pharma/talk2biomodels/states/state_talk2biomodels.py +41 -0
  21. aiagents4pharma/talk2biomodels/tests/__init__.py +3 -0
  22. aiagents4pharma/talk2biomodels/tests/test_api.py +57 -0
  23. aiagents4pharma/talk2biomodels/tests/test_ask_question.py +44 -0
  24. aiagents4pharma/talk2biomodels/tests/test_basico_model.py +54 -0
  25. aiagents4pharma/talk2biomodels/tests/test_get_annotation.py +171 -0
  26. aiagents4pharma/talk2biomodels/tests/test_getmodelinfo.py +26 -0
  27. aiagents4pharma/talk2biomodels/tests/test_integration.py +126 -0
  28. aiagents4pharma/talk2biomodels/tests/test_param_scan.py +68 -0
  29. aiagents4pharma/talk2biomodels/tests/test_query_article.py +76 -0
  30. aiagents4pharma/talk2biomodels/tests/test_search_models.py +28 -0
  31. aiagents4pharma/talk2biomodels/tests/test_simulate_model.py +39 -0
  32. aiagents4pharma/talk2biomodels/tests/test_steady_state.py +90 -0
  33. aiagents4pharma/talk2biomodels/tests/test_sys_bio_model.py +63 -0
  34. aiagents4pharma/talk2biomodels/tools/__init__.py +5 -0
  35. aiagents4pharma/talk2biomodels/tools/ask_question.py +61 -18
  36. aiagents4pharma/talk2biomodels/tools/custom_plotter.py +20 -14
  37. aiagents4pharma/talk2biomodels/tools/get_annotation.py +304 -0
  38. aiagents4pharma/talk2biomodels/tools/get_modelinfo.py +11 -9
  39. aiagents4pharma/talk2biomodels/tools/load_arguments.py +114 -0
  40. aiagents4pharma/talk2biomodels/tools/load_biomodel.py +0 -1
  41. aiagents4pharma/talk2biomodels/tools/parameter_scan.py +287 -0
  42. aiagents4pharma/talk2biomodels/tools/query_article.py +59 -0
  43. aiagents4pharma/talk2biomodels/tools/simulate_model.py +35 -90
  44. aiagents4pharma/talk2biomodels/tools/steady_state.py +167 -0
  45. aiagents4pharma/talk2cells/tests/scp_agent/test_scp_agent.py +23 -0
  46. aiagents4pharma/talk2cells/tools/scp_agent/__init__.py +6 -0
  47. aiagents4pharma/talk2cells/tools/scp_agent/display_studies.py +25 -0
  48. aiagents4pharma/talk2cells/tools/scp_agent/search_studies.py +79 -0
  49. aiagents4pharma/talk2competitors/__init__.py +5 -0
  50. aiagents4pharma/talk2competitors/agents/__init__.py +6 -0
  51. aiagents4pharma/talk2competitors/agents/main_agent.py +130 -0
  52. aiagents4pharma/talk2competitors/agents/s2_agent.py +75 -0
  53. aiagents4pharma/talk2competitors/config/__init__.py +5 -0
  54. aiagents4pharma/talk2competitors/config/config.py +110 -0
  55. aiagents4pharma/talk2competitors/state/__init__.py +5 -0
  56. aiagents4pharma/talk2competitors/state/state_talk2competitors.py +32 -0
  57. aiagents4pharma/talk2competitors/tests/__init__.py +3 -0
  58. aiagents4pharma/talk2competitors/tests/test_langgraph.py +274 -0
  59. aiagents4pharma/talk2competitors/tools/__init__.py +7 -0
  60. aiagents4pharma/talk2competitors/tools/s2/__init__.py +8 -0
  61. aiagents4pharma/talk2competitors/tools/s2/display_results.py +25 -0
  62. aiagents4pharma/talk2competitors/tools/s2/multi_paper_rec.py +132 -0
  63. aiagents4pharma/talk2competitors/tools/s2/search.py +119 -0
  64. aiagents4pharma/talk2competitors/tools/s2/single_paper_rec.py +141 -0
  65. aiagents4pharma/talk2knowledgegraphs/__init__.py +2 -1
  66. aiagents4pharma/talk2knowledgegraphs/tests/__init__.py +0 -0
  67. aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_biobridge_primekg.py +242 -0
  68. aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_dataset.py +29 -0
  69. aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_primekg.py +73 -0
  70. aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_starkqa_primekg.py +116 -0
  71. aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_embeddings.py +47 -0
  72. aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_huggingface.py +45 -0
  73. aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_sentencetransformer.py +40 -0
  74. aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_enrichments.py +39 -0
  75. aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_ollama.py +117 -0
  76. aiagents4pharma/talk2knowledgegraphs/utils/__init__.py +5 -0
  77. aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py +5 -0
  78. aiagents4pharma/talk2knowledgegraphs/utils/enrichments/enrichments.py +36 -0
  79. aiagents4pharma/talk2knowledgegraphs/utils/enrichments/ollama.py +123 -0
  80. {aiagents4pharma-1.8.0.dist-info → aiagents4pharma-1.15.0.dist-info}/METADATA +44 -25
  81. aiagents4pharma-1.15.0.dist-info/RECORD +102 -0
  82. aiagents4pharma-1.8.0.dist-info/RECORD +0 -35
  83. {aiagents4pharma-1.8.0.dist-info → aiagents4pharma-1.15.0.dist-info}/LICENSE +0 -0
  84. {aiagents4pharma-1.8.0.dist-info → aiagents4pharma-1.15.0.dist-info}/WHEEL +0 -0
  85. {aiagents4pharma-1.8.0.dist-info → aiagents4pharma-1.15.0.dist-info}/top_level.txt +0 -0
aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_huggingface.py
@@ -0,0 +1,45 @@
+ """
+ Test cases for utils/embeddings/huggingface.py
+ """
+
+ import pytest
+ from ..utils.embeddings.huggingface import EmbeddingWithHuggingFace
+
+ @pytest.fixture(name="embedding_model")
+ def embedding_model_fixture():
+     """Return the configuration object for the HuggingFace embedding model and model object"""
+     return EmbeddingWithHuggingFace(
+         model_name="NeuML/pubmedbert-base-embeddings",
+         model_cache_dir="../../cache",
+         truncation=True,
+     )
+
+ def test_embedding_with_huggingface_embed_documents(embedding_model):
+     """Test embedding documents using the EmbeddingWithHuggingFace class."""
+     # Perform embedding
+     texts = ["Adalimumab", "Infliximab", "Vedolizumab"]
+     result = embedding_model.embed_documents(texts)
+     # Check the result
+     assert len(result) == 3
+     assert len(result[0]) == 768
+
+ def test_embedding_with_huggingface_embed_query(embedding_model):
+     """Test embedding a query using the EmbeddingWithHuggingFace class."""
+     # Perform embedding
+     text = "Adalimumab"
+     result = embedding_model.embed_query(text)
+     # Check the result
+     assert len(result) == 768
+
+
+ def test_embedding_with_huggingface_failed():
+     """Test embedding documents using the EmbeddingWithHuggingFace class."""
+     # Check if the model is available on HuggingFace Hub
+     model_name = "aiagents4pharma/embeddings"
+     err_msg = f"Model {model_name} is not available on HuggingFace Hub."
+     with pytest.raises(ValueError, match=err_msg):
+         EmbeddingWithHuggingFace(
+             model_name=model_name,
+             model_cache_dir="../../cache",
+             truncation=True,
+         )
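For orientation, here is a minimal usage sketch of the new `EmbeddingWithHuggingFace` class, inferred from the test file above; the constructor arguments, method names, and 768-dimensional output are assumptions carried over from those tests rather than documented API guarantees.

```python
# Hypothetical usage sketch based on the tests above (not part of the diff).
from aiagents4pharma.talk2knowledgegraphs.utils.embeddings.huggingface import (
    EmbeddingWithHuggingFace,
)

# Arguments mirror the fixture in test_utils_embeddings_huggingface.py.
embedder = EmbeddingWithHuggingFace(
    model_name="NeuML/pubmedbert-base-embeddings",
    model_cache_dir="./cache",
    truncation=True,
)

# embed_documents returns one vector per input text (768 dimensions per the tests).
vectors = embedder.embed_documents(["Adalimumab", "Infliximab"])
print(len(vectors), len(vectors[0]))  # expected: 2 768

# embed_query returns a single 768-dimensional vector.
query_vector = embedder.embed_query("Adalimumab")
print(len(query_vector))  # expected: 768
```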
aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_sentencetransformer.py
@@ -0,0 +1,40 @@
+ """
+ Test cases for utils/embeddings/sentence_transformer.py
+ """
+
+ import pytest
+ import numpy as np
+ from ..utils.embeddings.sentence_transformer import EmbeddingWithSentenceTransformer
+
+ @pytest.fixture(name="embedding_model")
+ def embedding_model_fixture():
+     """
+     Fixture for creating an instance of EmbeddingWithSentenceTransformer.
+     """
+     model_name = "sentence-transformers/all-MiniLM-L6-v1"  # Small model for testing
+     return EmbeddingWithSentenceTransformer(model_name=model_name)
+
+ def test_embed_documents(embedding_model):
+     """
+     Test the embed_documents method of EmbeddingWithSentenceTransformer class.
+     """
+     # Perform embedding
+     texts = ["This is a test sentence.", "Another test sentence."]
+     embeddings = embedding_model.embed_documents(texts)
+     # Check the result
+     assert len(embeddings) == len(texts)
+     assert len(embeddings[0]) > 0
+     assert len(embeddings[0]) == 384
+     assert embeddings.dtype == np.float32
+
+ def test_embed_query(embedding_model):
+     """
+     Test the embed_query method of EmbeddingWithSentenceTransformer class.
+     """
+     # Perform embedding
+     text = "This is a test query."
+     embedding = embedding_model.embed_query(text)
+     # Check the result
+     assert len(embedding) > 0
+     assert len(embedding) == 384
+     assert embedding.dtype == np.float32
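Similarly, a short sketch of `EmbeddingWithSentenceTransformer` as exercised by the tests above; the 384-dimensional `float32` output applies to the small MiniLM model used there and is an assumption taken from those assertions.

```python
# Hypothetical usage sketch based on the tests above (not part of the diff).
import numpy as np

from aiagents4pharma.talk2knowledgegraphs.utils.embeddings.sentence_transformer import (
    EmbeddingWithSentenceTransformer,
)

# Small model, as used in the test fixture.
embedder = EmbeddingWithSentenceTransformer(
    model_name="sentence-transformers/all-MiniLM-L6-v1"
)

embedding = embedder.embed_query("This is a test query.")
assert len(embedding) == 384          # dimensionality of the MiniLM model
assert embedding.dtype == np.float32  # embeddings come back as float32 arrays
```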
aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_enrichments.py
@@ -0,0 +1,39 @@
+ """
+ Test cases for utils/enrichments/enrichments.py
+ """
+
+ from ..utils.enrichments.enrichments import Enrichments
+
+ class TestEnrichments(Enrichments):
+     """Test implementation of the Enrichments interface for testing purposes."""
+
+     def enrich_documents(self, texts: list[str]) -> list[list[float]]:
+         return [
+             f"Additional text description of {text} as the input." for text in texts
+         ]
+
+     def enrich_documents_with_rag(self, texts, docs):
+         # Currently we don't have a RAG model to test this method.
+         # Thus, we will just call the enrich_documents method instead.
+         return self.enrich_documents(texts)
+
+ def test_enrich_documents():
+     """Test enriching documents using the Enrichments interface."""
+     enrichments = TestEnrichments()
+     texts = ["text1", "text2"]
+     result = enrichments.enrich_documents(texts)
+     assert result == [
+         "Additional text description of text1 as the input.",
+         "Additional text description of text2 as the input.",
+     ]
+
+ def test_enrich_documents_with_rag():
+     """Test enriching documents with RAG using the Enrichments interface."""
+     enrichments = TestEnrichments()
+     texts = ["text1", "text2"]
+     docs = ["doc1", "doc2"]
+     result = enrichments.enrich_documents_with_rag(texts, docs)
+     assert result == [
+         "Additional text description of text1 as the input.",
+         "Additional text description of text2 as the input.",
+     ]
aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_ollama.py
@@ -0,0 +1,117 @@
+ """
+ Test cases for utils/enrichments/ollama.py
+ """
+
+ import pytest
+ import ollama
+ from ..utils.enrichments.ollama import EnrichmentWithOllama
+
+ @pytest.fixture(name="ollama_config")
+ def fixture_ollama_config():
+     """Return a dictionary with Ollama configuration."""
+     return {
+         "model_name": "smollm2:360m",
+         "prompt_enrichment": """
+             Given the input as a list of strings, please return the list of addditional information of
+             each input terms using your prior knowledge.
+
+             Example:
+             Input: ['acetaminophen', 'aspirin']
+             Ouput: ['acetaminophen is a medication used to treat pain and fever',
+             'aspirin is a medication used to treat pain, fever, and inflammation']
+
+             Do not include any pretext as the output, only the list of strings enriched.
+
+             Input: {input}
+         """,
+         "temperature": 0.0,
+         "streaming": False,
+     }
+
+ def test_no_model_ollama(ollama_config):
+     """Test the case when the Ollama model is not available."""
+     cfg = ollama_config
+     cfg_model = "smollm2:135m"  # Choose a small model
+
+     # Delete the Ollama model
+     try:
+         ollama.delete(cfg_model)
+     except ollama.ResponseError:
+         pass
+
+     # Check if the model is available
+     with pytest.raises(
+         ValueError, match=f"Error: Pulled {cfg_model} model and restarted Ollama server."
+     ):
+         EnrichmentWithOllama(
+             model_name=cfg_model,
+             prompt_enrichment=cfg["prompt_enrichment"],
+             temperature=cfg["temperature"],
+             streaming=cfg["streaming"],
+         )
+     ollama.delete(cfg_model)
+
+ def test_enrich_nodes_ollama(ollama_config):
+     """Test the Ollama textual enrichment class for node enrichment."""
+     # Prepare enrichment model
+     cfg = ollama_config
+     enr_model = EnrichmentWithOllama(
+         model_name=cfg["model_name"],
+         prompt_enrichment=cfg["prompt_enrichment"],
+         temperature=cfg["temperature"],
+         streaming=cfg["streaming"],
+     )
+
+     # Perform enrichment for nodes
+     nodes = ["Adalimumab", "Infliximab"]
+     enriched_nodes = enr_model.enrich_documents(nodes)
+     # Check the enriched nodes
+     assert len(enriched_nodes) == 2
+     assert all(
+         enriched_nodes[i] != nodes[i] for i in range(len(nodes))
+     )
+
+
+ def test_enrich_relations_ollama(ollama_config):
+     """Test the Ollama textual enrichment class for relation enrichment."""
+     # Prepare enrichment model
+     cfg = ollama_config
+     enr_model = EnrichmentWithOllama(
+         model_name=cfg["model_name"],
+         prompt_enrichment=cfg["prompt_enrichment"],
+         temperature=cfg["temperature"],
+         streaming=cfg["streaming"],
+     )
+     # Perform enrichment for relations
+     relations = [
+         "IL23R-gene causation disease-inflammatory bowel diseases",
+         "NOD2-gene causation disease-inflammatory bowel diseases",
+     ]
+     enriched_relations = enr_model.enrich_documents(relations)
+     # Check the enriched relations
+     assert len(enriched_relations) == 2
+     assert all(
+         enriched_relations[i] != relations[i]
+         for i in range(len(relations))
+     )
+
+
+ def test_enrich_ollama_rag(ollama_config):
+     """Test the Ollama textual enrichment class for enrichment with RAG (not implemented)."""
+     # Prepare enrichment model
+     cfg = ollama_config
+     enr_model = EnrichmentWithOllama(
+         model_name=cfg["model_name"],
+         prompt_enrichment=cfg["prompt_enrichment"],
+         temperature=cfg["temperature"],
+         streaming=cfg["streaming"],
+     )
+     # Perform enrichment for nodes
+     nodes = ["Adalimumab", "Infliximab"]
+     docs = [r"\path\to\doc1", r"\path\to\doc2"]
+     enriched_nodes = enr_model.enrich_documents_with_rag(nodes, docs)
+     # Check the enriched nodes
+     assert len(enriched_nodes) == 2
+     assert all(
+         enriched_nodes[i] != nodes[i] for i in range(len(nodes))
+     )
aiagents4pharma/talk2knowledgegraphs/utils/__init__.py
@@ -0,0 +1,5 @@
+ '''
+ This file is used to import utlities.
+ '''
+ from . import enrichments
+ from . import embeddings
aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py
@@ -0,0 +1,5 @@
+ """
+ This package contains modules to use the enrichment model
+ """
+ from . import enrichments
+ from . import ollama
aiagents4pharma/talk2knowledgegraphs/utils/enrichments/enrichments.py
@@ -0,0 +1,36 @@
+ """
+ Enrichments interface
+ """
+
+ from abc import ABC, abstractmethod
+
+ class Enrichments(ABC):
+     """Interface for enrichment models.
+
+     This is an interface meant for implementing text enrichment models.
+
+     Enrichment models are used to enrich node or relation features in a given knowledge graph.
+     """
+
+     @abstractmethod
+     def enrich_documents(self, texts: list[str]) -> list[list[str]]:
+         """Enrich documents.
+
+         Args:
+             texts: List of documents to enrich.
+
+         Returns:
+             List of enriched documents.
+         """
+
+     @abstractmethod
+     def enrich_documents_with_rag(self, texts: list[str], docs: list[str]) -> list[str]:
+         """Enrich documents with RAG.
+
+         Args:
+             texts: List of documents to enrich.
+             docs: List of reference documents to enrich the input texts.
+
+         Returns:
+             List of enriched documents with RAG.
+         """
aiagents4pharma/talk2knowledgegraphs/utils/enrichments/ollama.py
@@ -0,0 +1,123 @@
+ #!/usr/bin/env python3
+
+ """
+ Enrichment class using Ollama model based on LangChain Enrichment class.
+ """
+
+ import time
+ from typing import List
+ import subprocess
+ import ast
+ import ollama
+ from langchain_ollama import ChatOllama
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.output_parsers import StrOutputParser
+ from .enrichments import Enrichments
+
+ class EnrichmentWithOllama(Enrichments):
+     """
+     Enrichment class using Ollama model based on the Enrichment abstract class.
+     """
+     def __init__(
+         self,
+         model_name: str,
+         prompt_enrichment: str,
+         temperature: float,
+         streaming: bool,
+     ):
+         """
+         Initialize the EnrichmentWithOllama class.
+
+         Args:
+             model_name: The name of the Ollama model to be used.
+             prompt_enrichment: The prompt enrichment template.
+             temperature: The temperature for the Ollama model.
+             streaming: The streaming flag for the Ollama model.
+         """
+         # Setup the Ollama server
+         self.__setup(model_name)
+
+         # Set parameters
+         self.model_name = model_name
+         self.prompt_enrichment = prompt_enrichment
+         self.temperature = temperature
+         self.streaming = streaming
+
+         # Prepare prompt template
+         self.prompt_template = ChatPromptTemplate.from_messages(
+             [
+                 ("system", self.prompt_enrichment),
+                 ("human", "{input}"),
+             ]
+         )
+
+         # Prepare model
+         self.model = ChatOllama(
+             model=self.model_name,
+             temperature=self.temperature,
+             streaming=self.streaming,
+         )
+
+     def __setup(self, model_name: str) -> None:
+         """
+         Check if the Ollama model is available and run the Ollama server if needed.
+
+         Args:
+             model_name: The name of the Ollama model to be used.
+         """
+         try:
+             models_list = ollama.list()["models"]
+             if model_name not in [m['model'].replace(":latest", "") for m in models_list]:
+                 ollama.pull(model_name)
+                 time.sleep(30)
+                 raise ValueError(f"Pulled {model_name} model")
+         except Exception as e:
+             with subprocess.Popen(
+                 "ollama serve", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+             ):
+                 time.sleep(10)
+                 raise ValueError(f"Error: {e} and restarted Ollama server.") from e
+
+     def enrich_documents(self, texts: List[str]) -> List[str]:
+         """
+         Enrich a list of input texts with additional textual features using OLLAMA model.
+         Important: Make sure the input is a list of texts based on the defined prompt template
+         with 'input' as the variable name.
+
+         Args:
+             texts: The list of texts to be enriched.
+
+         Returns:
+             The list of enriched texts.
+         """
+
+         # Perform enrichment
+         chain = self.prompt_template | self.model | StrOutputParser()
+
+         # Generate the enriched node
+         # Important: Make sure the input is a list of texts based on the defined prompt template
+         # with 'input' as the variable name
+         enriched_texts = chain.invoke({"input": "[" + ", ".join(texts) + "]"})
+
+         # Convert the enriched nodes to a list of dictionary
+         enriched_texts = ast.literal_eval(enriched_texts.replace("```", ""))
+
+         # Final check for the enriched texts
+         assert len(enriched_texts) == len(texts)
+
+         return enriched_texts
+
+     def enrich_documents_with_rag(self, texts, docs):
+         """
+         Enrich a list of input texts with additional textual features using OLLAMA model with RAG.
+         As of now, we don't have a RAG model to test this method yet.
+         Thus, we will just call the enrich_documents method instead.
+
+         Args:
+             texts: The list of texts to be enriched.
+             docs: The list of reference documents to enrich the input texts.
+
+         Returns:
+             The list of enriched texts
+         """
+         return self.enrich_documents(texts)
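To tie the new enrichment pieces together, a hedged usage sketch of `EnrichmentWithOllama` as configured in the test fixture earlier in this diff; it assumes a running local Ollama installation and the `smollm2:360m` model, and the prompt template is abbreviated here for brevity.

```python
# Hypothetical usage sketch based on the tests and implementation above (not part of the diff).
from aiagents4pharma.talk2knowledgegraphs.utils.enrichments.ollama import (
    EnrichmentWithOllama,
)

# Abbreviated prompt; the packaged tests use a longer template with a worked example.
# The template must contain {input}, which the class fills with the list of terms.
PROMPT = (
    "Given the input as a list of strings, return a list with additional "
    "information about each term. Return only the enriched list.\n\nInput: {input}"
)

# The constructor checks the local Ollama server and pulls the model if it is missing.
enricher = EnrichmentWithOllama(
    model_name="smollm2:360m",
    prompt_enrichment=PROMPT,
    temperature=0.0,
    streaming=False,
)

# enrich_documents returns one enriched string per input term.
enriched = enricher.enrich_documents(["Adalimumab", "Infliximab"])
print(enriched)
```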
{aiagents4pharma-1.8.0.dist-info → aiagents4pharma-1.15.0.dist-info}/METADATA
@@ -1,11 +1,11 @@
  Metadata-Version: 2.2
  Name: aiagents4pharma
- Version: 1.8.0
+ Version: 1.15.0
  Summary: AI Agents for drug discovery, drug development, and other pharmaceutical R&D
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: MIT License
  Classifier: Operating System :: OS Independent
- Requires-Python: >=3.10
+ Requires-Python: >=3.12
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: copasi_basico==0.78
@@ -17,16 +17,19 @@ Requires-Dist: hydra-core==1.3.2
  Requires-Dist: joblib==1.4.2
  Requires-Dist: langchain==0.3.7
  Requires-Dist: langchain-community==0.3.5
- Requires-Dist: langchain-core==0.3.15
+ Requires-Dist: langchain-core==0.3.31
  Requires-Dist: langchain-experimental==0.3.3
  Requires-Dist: langchain-openai==0.2.5
- Requires-Dist: langgraph==0.2.62
+ Requires-Dist: langchain_ollama==0.2.2
+ Requires-Dist: langgraph==0.2.66
  Requires-Dist: matplotlib==3.9.2
  Requires-Dist: openai==1.59.4
+ Requires-Dist: ollama==0.4.6
  Requires-Dist: pandas==2.2.3
  Requires-Dist: plotly==5.24.1
  Requires-Dist: pydantic==2.9.2
  Requires-Dist: pylint==3.3.1
+ Requires-Dist: pypdf==5.2.0
  Requires-Dist: pytest==8.3.3
  Requires-Dist: pytest-asyncio==0.25.2
  Requires-Dist: streamlit==1.39.0
@@ -48,6 +51,9 @@ Requires-Dist: streamlit-feedback
  [![Talk2Cells](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2cells.yml/badge.svg)](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2cells.yml)
  [![Talk2KnowledgeGraphs](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2knowledgegraphs.yml/badge.svg)](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2knowledgegraphs.yml)
  [![Talk2Competitors](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2competitors.yml/badge.svg)](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2competitors.yml)
+ ![GitHub Release](https://img.shields.io/github/v/release/VirtualPatientEngine/AIAgents4Pharma)
+ ![Python Version from PEP 621 TOML](https://img.shields.io/python/required-version-toml?tomlFilePath=https%3A%2F%2Fraw.githubusercontent.com%2FVirtualPatientEngine%2FAIAgents4Pharma%2Frefs%2Fheads%2Fmain%2Fpyproject.toml)
+

  <h1 align="center" style="border-bottom: none;">🤖 AIAgents4Pharma</h1>

@@ -56,9 +62,9 @@ Welcome to **AIAgents4Pharma** – an open-source project by [Team VPE](https://
  Our toolkit currently consists of three intelligent agents, each designed to simplify and enhance access to specialized data in biology:

  - **Talk2BioModels**: Engage directly with mathematical models in systems biology.
- - **Talk2Cells** *(Work in progress)*: Query and analyze sequencing data with ease.
- - **Talk2KnowledgeGraphs** *(Work in progress)*: Access and explore complex biological knowledge graphs for insightful data connections.
- - **Talk2Competitors** *(Coming soon)*: Get recommendations for articles related to your choice. Download, query, and write/retrieve them to your reference manager (currently supporting Zotero).
+ - **Talk2Cells** _(Work in progress)_: Query and analyze sequencing data with ease.
+ - **Talk2KnowledgeGraphs** _(Work in progress)_: Access and explore complex biological knowledge graphs for insightful data connections.
+ - **Talk2Competitors** _(Coming soon)_: Get recommendations for articles related to your choice. Download, query, and write/retrieve them to your reference manager (currently supporting Zotero).

  ---

@@ -71,68 +77,77 @@ Our toolkit currently consists of three intelligent agents, each designed to sim
  - Forward simulation of both internal and open-source models (BioModels).
  - Adjust parameters within the model to simulate different conditions.
  - Query simulation results.
+ - Extract model information such as species, parameters, units and description.

- ### 2. Talk2Cells *(Work in Progress)*
+ ### 2. Talk2Cells _(Work in Progress)_

  **Talk2Cells** is being developed to provide direct access to and analysis of sequencing data, such as RNA-Seq or DNA-Seq, using natural language.

- ### 3. Talk2KnowledgeGraphs *(Work in Progress)*
+ ### 3. Talk2KnowledgeGraphs _(Work in Progress)_

  **Talk2KnowledgeGraphs** is an agent designed to enable interaction with biological knowledge graphs (KGs). KGs integrate vast amounts of structured biological data into a format that highlights relationships between entities, such as proteins, genes, and diseases.

- ### 4. Talk2KnowledgeGraphs *(Coming soon)*
+ ### 4. Talk2Competitors _(Coming soon)_

  ## Getting Started

- ### Prerequisites
-
- - **Python 3.10+**
- - **Git**
- - Required libraries specified in `requirements.txt`
+ ![Python Version from PEP 621 TOML](https://img.shields.io/python/required-version-toml?tomlFilePath=https%3A%2F%2Fraw.githubusercontent.com%2FVirtualPatientEngine%2FAIAgents4Pharma%2Frefs%2Fheads%2Fmain%2Fpyproject.toml)

  ### Installation
+
  #### Option 1: PyPI
- ```bash
- pip install aiagents4pharma
- ```
+
+ ```bash
+ pip install aiagents4pharma
+ ```

  Check out the tutorials on each agent for detailed instrcutions.

  #### Option 2: git
+
  1. **Clone the repository:**
+
  ```bash
  git clone https://github.com/VirtualPatientEngine/AIAgents4Pharma
  cd AIAgents4Pharma
  ```

  2. **Install dependencies:**
+
  ```bash
  pip install .
  ```

  3. **Initialize OPENAI_API_KEY**
+
  ```bash
- export OPENAI_API_KEY = ....
+ export OPENAI_API_KEY=....
  ```

  4. **[Optional] Set up login credentials**
+
  ```bash
  vi .streamlit/secrets.toml
  ```
+
  and enter
+
  ```
  password='XXX'
  ```
- Please note that the passowrd will be same for all the users.
+
+ Please note that the passoword will be same for all the users.

  5. **[Optional] Initialize LANGSMITH_API_KEY**
+
  ```bash
  export LANGCHAIN_TRACING_V2=true
  export LANGCHAIN_API_KEY=<your-api-key>
  ```
- Please note that this will create a new tracing project in your Langsmith
- account with the name `<user_name>@<uuid>`, where `user_name` is the name
- you provided in the previous step. If you skip the previous step, it will
+
+ Please note that this will create a new tracing project in your Langsmith
+ account with the name `<user_name>@<uuid>`, where `user_name` is the name
+ you provided in the previous step. If you skip the previous step, it will
  default to `default`. <uuid> will be the 128 bit unique ID created for the
  session.

@@ -164,6 +179,7 @@ We welcome contributions to AIAgents4Pharma! Here’s how you can help:
  5. **Open a pull request**

  ### Current Needs
+
  - **Beta testers** for Talk2BioModels.
  - **Developers** with experience in natural language processing, bioinformatics, or knowledge graphs for contributions to AIAgents4Pharma.

@@ -174,19 +190,22 @@ Check out our [CONTRIBUTING.md](CONTRIBUTING.md) for more information.
  ## Roadmap

  ### Completed
+
  - **Talk2BioModels**: Initial release with core capabilities for interacting with systems biology models.

  ### Planned
+
  - **User Interface**: Interactive web UI for all agents.
  - **Talk2Cells**: Integration of sequencing data analysis tools.
  - **Talk2KnowledgeGraphs**: Interface for biological knowledge graph interaction.
- - **Talk2Competitors**
+ - **Talk2Competitors**: Interface for exploring articles

- We’re excited to bring AIAgents4Pharma to the bioinformatics and pharmaceutical research community. Together, let’s make data-driven biological research more accessible and insightful.
+ We’re excited to bring AIAgents4Pharma to the bioinformatics and pharmaceutical research community. Together, let’s make data-driven biological research more accessible and insightful.

  **Get Started** with AIAgents4Pharma today and transform the way you interact with biological data.

  ---

  ## Feedback
+
  Questions/Bug reports/Feature requests/Comments/Suggestions? We welcome all. Please use the `Isssues` tab 😀