aiagents4pharma 1.30.4__py3-none-any.whl → 1.32.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiagents4pharma/talk2scholars/agents/main_agent.py +4 -3
- aiagents4pharma/talk2scholars/agents/paper_download_agent.py +3 -4
- aiagents4pharma/talk2scholars/agents/pdf_agent.py +6 -7
- aiagents4pharma/talk2scholars/agents/s2_agent.py +23 -20
- aiagents4pharma/talk2scholars/agents/zotero_agent.py +11 -11
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +19 -19
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +20 -15
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/zotero_agent/default.yaml +27 -6
- aiagents4pharma/talk2scholars/state/state_talk2scholars.py +7 -7
- aiagents4pharma/talk2scholars/tests/test_main_agent.py +16 -16
- aiagents4pharma/talk2scholars/tests/test_paper_download_agent.py +17 -24
- aiagents4pharma/talk2scholars/tests/test_paper_download_tools.py +152 -135
- aiagents4pharma/talk2scholars/tests/test_pdf_agent.py +9 -16
- aiagents4pharma/talk2scholars/tests/test_question_and_answer_tool.py +790 -218
- aiagents4pharma/talk2scholars/tests/test_s2_agent.py +9 -9
- aiagents4pharma/talk2scholars/tests/test_s2_display.py +8 -8
- aiagents4pharma/talk2scholars/tests/test_s2_query.py +8 -8
- aiagents4pharma/talk2scholars/tests/test_zotero_agent.py +12 -12
- aiagents4pharma/talk2scholars/tests/test_zotero_path.py +11 -12
- aiagents4pharma/talk2scholars/tests/test_zotero_read.py +400 -22
- aiagents4pharma/talk2scholars/tools/paper_download/__init__.py +0 -6
- aiagents4pharma/talk2scholars/tools/paper_download/download_arxiv_input.py +89 -31
- aiagents4pharma/talk2scholars/tools/pdf/question_and_answer.py +540 -156
- aiagents4pharma/talk2scholars/tools/s2/__init__.py +4 -4
- aiagents4pharma/talk2scholars/tools/s2/{display_results.py → display_dataframe.py} +19 -21
- aiagents4pharma/talk2scholars/tools/s2/query_dataframe.py +71 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/read_helper.py +213 -35
- aiagents4pharma/talk2scholars/tools/zotero/zotero_read.py +3 -3
- aiagents4pharma-1.32.0.dist-info/METADATA +364 -0
- {aiagents4pharma-1.30.4.dist-info → aiagents4pharma-1.32.0.dist-info}/RECORD +33 -35
- {aiagents4pharma-1.30.4.dist-info → aiagents4pharma-1.32.0.dist-info}/WHEEL +1 -1
- aiagents4pharma/talk2scholars/tools/paper_download/abstract_downloader.py +0 -45
- aiagents4pharma/talk2scholars/tools/paper_download/arxiv_downloader.py +0 -115
- aiagents4pharma/talk2scholars/tools/s2/query_results.py +0 -61
- aiagents4pharma-1.30.4.dist-info/METADATA +0 -334
- {aiagents4pharma-1.30.4.dist-info → aiagents4pharma-1.32.0.dist-info}/licenses/LICENSE +0 -0
- {aiagents4pharma-1.30.4.dist-info → aiagents4pharma-1.32.0.dist-info}/top_level.txt +0 -0
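Two changes in this list affect import paths: `display_results.py` is renamed to `display_dataframe.py`, and `query_dataframe.py` is added, both under `tools/s2/`. Below is a small, hedged sketch for checking an environment after the upgrade; the module paths are taken from the file list above, and nothing is assumed about what the modules export.

```python
# Illustrative post-upgrade check: confirm the installed version and that the
# renamed/new s2 tool modules named in the file list above resolve.
import importlib
from importlib.metadata import version

print(version("aiagents4pharma"))  # expected: 1.32.0 after the upgrade

for mod in (
    "aiagents4pharma.talk2scholars.tools.s2.display_dataframe",  # renamed from display_results
    "aiagents4pharma.talk2scholars.tools.s2.query_dataframe",    # new in this release
):
    importlib.import_module(mod)
    print("importable:", mod)
```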
aiagents4pharma/talk2scholars/tools/s2/query_results.py (file removed)
@@ -1,61 +0,0 @@
-#!/usr/bin/env python3
-
-"""
-This tool is used to display the table of studies.
-"""
-
-import logging
-from typing import Annotated
-import pandas as pd
-from langchain_experimental.agents import create_pandas_dataframe_agent
-from langchain_core.tools import tool
-from langgraph.prebuilt import InjectedState
-
-# Configure logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-
-class NoPapersFoundError(Exception):
-    """Exception raised when no papers are found in the state."""
-
-
-@tool("query_results", parse_docstring=True)
-def query_results(question: str, state: Annotated[dict, InjectedState]) -> str:
-    """
-    Query the last displayed papers from the state. If no papers are found,
-    raises an exception.
-
-    Use this also to get the last displayed papers from the state,
-    and then use the papers to get recommendations for a single paper or
-    multiple papers.
-
-    Args:
-        question (str): The question to ask the agent.
-        state (dict): The state of the agent containing the papers.
-
-    Returns:
-        str: A message with the last displayed papers.
-    """
-    logger.info("Querying last displayed papers with question: %s", question)
-    llm_model = state.get("llm_model")
-    if not state.get("last_displayed_papers"):
-        logger.info("No papers displayed so far, raising NoPapersFoundError")
-        raise NoPapersFoundError(
-            "No papers found. A search needs to be performed first."
-        )
-    context_key = state.get("last_displayed_papers", "pdf_data")
-    dic_papers = state.get(context_key)
-    df_papers = pd.DataFrame.from_dict(dic_papers, orient="index")
-    df_agent = create_pandas_dataframe_agent(
-        llm_model,
-        allow_dangerous_code=True,
-        agent_type="tool-calling",
-        df=df_papers,
-        max_iterations=5,
-        include_df_in_prompt=True,
-        number_of_head_rows=df_papers.shape[0],
-        verbose=True,
-    )
-    llm_result = df_agent.invoke(question, stream_mode=None)
-    return llm_result["output"]
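The removed tool above reads three things from the injected LangGraph state: an `llm_model`, a `last_displayed_papers` entry naming the key under which the most recently shown papers are stored, and the papers dictionary itself. The following is a minimal sketch of that state shape and of calling the tool's underlying function directly; the sample papers, the key name `"papers"`, and the `ChatOpenAI` model are illustrative assumptions, not values taken from the package.

```python
# Illustrative sketch only: drive the (now removed) query_results tool by hand.
# Assumes the query_results definition shown above is in scope and that an
# OpenAI API key is configured for the example ChatOpenAI model.
from langchain_openai import ChatOpenAI

state = {
    "llm_model": ChatOpenAI(model="gpt-4o-mini", temperature=0),
    # Name of the state key holding the papers that were last displayed.
    "last_displayed_papers": "papers",
    "papers": {
        "arXiv:2501.00001": {"Title": "Example paper A", "Year": 2025, "Citations": 3},
        "arXiv:2501.00002": {"Title": "Example paper B", "Year": 2024, "Citations": 17},
    },
}

# @tool wraps the function in a StructuredTool; .func exposes the original
# callable, so the InjectedState argument can be supplied explicitly here.
answer = query_results.func("Which paper has the most citations?", state)
print(answer)
```

In normal operation the talk2scholars graph injects this state automatically via `InjectedState`; the direct call above only illustrates the expected state layout.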
aiagents4pharma-1.30.4.dist-info/METADATA (file removed; replaced by aiagents4pharma-1.32.0.dist-info/METADATA)
@@ -1,334 +0,0 @@
-Metadata-Version: 2.4
-Name: aiagents4pharma
-Version: 1.30.4
-Summary: AI Agents for drug discovery, drug development, and other pharmaceutical R&D.
-Classifier: Programming Language :: Python :: 3
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Requires-Python: >=3.12
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: copasi_basico==0.78
-Requires-Dist: coverage==7.6.4
-Requires-Dist: einops==0.8.0
-Requires-Dist: gdown==5.2.0
-Requires-Dist: gravis==0.1.0
-Requires-Dist: huggingface_hub==0.26.5
-Requires-Dist: hydra-core==1.3.2
-Requires-Dist: joblib==1.4.2
-Requires-Dist: langchain==0.3.7
-Requires-Dist: langchain-community==0.3.5
-Requires-Dist: langchain-core==0.3.40
-Requires-Dist: langchain-experimental==0.3.3
-Requires-Dist: langchain-nvidia-ai-endpoints==0.3.9
-Requires-Dist: langchain-openai==0.2.5
-Requires-Dist: langchain_ollama==0.2.3
-Requires-Dist: langgraph_supervisor==0.0.9
-Requires-Dist: matplotlib==3.9.2
-Requires-Dist: openai==1.59.4
-Requires-Dist: ollama==0.4.7
-Requires-Dist: pandas==2.2.3
-Requires-Dist: pcst_fast==1.0.10
-Requires-Dist: plotly==5.24.1
-Requires-Dist: pubchempy==1.0.4
-Requires-Dist: pydantic==2.9.2
-Requires-Dist: pylint==3.3.1
-Requires-Dist: pypdf==5.2.0
-Requires-Dist: pytest==8.3.3
-Requires-Dist: pytest-asyncio==0.25.2
-Requires-Dist: pyzotero==1.6.9
-Requires-Dist: streamlit==1.39.0
-Requires-Dist: sentence_transformers==3.3.1
-Requires-Dist: tabulate==0.9.0
-Requires-Dist: torch==2.2.2
-Requires-Dist: torch_geometric==2.6.1
-Requires-Dist: transformers==4.48.0
-Requires-Dist: mkdocs==1.6.1
-Requires-Dist: mkdocs-jupyter==0.25.1
-Requires-Dist: mkdocs-material==9.5.47
-Requires-Dist: mkdocstrings-python==1.12.2
-Requires-Dist: mkdocs-include-markdown-plugin==7.1.2
-Requires-Dist: mkdocstrings==0.27.0
-Requires-Dist: streamlit-feedback
-Requires-Dist: anndata==0.11.3
-Requires-Dist: h5py==3.13.0
-Requires-Dist: igraph==0.11.8
-Requires-Dist: ipykernel==6.29.5
-Requires-Dist: ipython==8.32.0
-Requires-Dist: nbformat==5.10.4
-Requires-Dist: scipy==1.15.2
-Requires-Dist: tqdm==4.67.1
-Requires-Dist: umap-learn==0.5.7
-Requires-Dist: plotly-express==0.4.1
-Requires-Dist: seaborn==0.13.2
-Requires-Dist: scanpy==1.11.0
-Dynamic: license-file
-
-[Talk2BioModels tests](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2biomodels.yml)
-[Talk2Cells tests](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2cells.yml)
-[Talk2KnowledgeGraphs tests](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2knowledgegraphs.yml)
-[Talk2Scholars tests](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2scholars.yml)
-[Talk2AIAgents4Pharma tests](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2aiagents4pharma.yml)
-
-
-
-
-## Introduction
-
-Welcome to **AIAgents4Pharma** – an open-source project by [Team VPE](https://github.com/VirtualPatientEngine) that brings together AI-driven tools to help researchers and pharma interact seamlessly with complex biological data.
-
-Our toolkit currently consists of the following agents:
-
-- **Talk2BioModels** _(v1 released; v2 in progress)_: Engage directly with mathematical models in systems biology.
-- **Talk2KnowledgeGraphs** _(v1 in progress)_: Access and explore complex biological knowledge graphs for insightful data connections.
-- **Talk2Scholars** _(v1 in progress)_: Get recommendations for articles related to your choice. Download, query, and write/retrieve them to your reference manager (currently supporting Zotero).
-- **Talk2Cells** _(v1 in progress)_: Query and analyze sequencing data with ease.
-- **Talk2AIAgents4Pharma** _(v1 in progress)_: Converse with all the agents above (currently supports T2B and T2KG).
-
-
-
-## Getting Started
-
-
-
-### Installation
-
-_Please use version 1.26.2 or later for better support with NVIDIA NIM models._
-
-#### Option 1: PyPI
-
-
-
-```bash
-pip install aiagents4pharma
-```
-
-Check out the tutorials on each agent for detailed instructions.
-
-#### Option 2: Docker Hub
-
-_We now have `Talk2AIAgents4Pharma`, `Talk2Biomodels`, and `Talk2Scholars` available on Docker Hub._
-
-##### **Run Talk2AIAgents4Pharma and Talk2KnowledgeGraphs**
-
-Talk2AIAgents4Pharma and Talk2KnowledgeGraphs require Ollama for embedding models, so Docker Compose is used to run both containers in the same network.
-
-###### **Setup Environment Variables**
-
-1. Choose the app you want to use:
-
-   ```sh
-   # Navigate to the correct directory before setting up environment variables.
-   # Use one of the following commands based on the agent you want to use:
-   cd AIAgents4Pharma/aiagents4pharma/talk2aiagents4pharma
-   cd AIAgents4Pharma/aiagents4pharma/talk2knowledgegraphs
-   ```
-
-2. Copy the `.env.example` file and rename it to `.env`:
-   ```sh
-   cp .env.example .env
-   ```
-3. Open the `.env` file and add your API keys:
-
-   ```plaintext
-   OPENAI_API_KEY=your_openai_api_key
-   NVIDIA_API_KEY=your_nvidia_api_key
-   OLLAMA_HOST=http://ollama:11434
-   LANGCHAIN_TRACING_V2=true
-   LANGCHAIN_API_KEY=your_langchain_api_key_here
-   # Notes:
-   # The API endpoint for Ollama is already set in .env.example.
-   # Both API keys (OPENAI_API_KEY and NVIDIA_API_KEY) are required for Talk2AIAgents4Pharma.
-   # If using Talk2KnowledgeGraphs separately, only the OPENAI_API_KEY is needed.
-   # The Langsmith API key for tracing is optional for both; set it in .env.example if required.
-   ```
-
-4. Save the file.
-
-To start the containers, run the following command:
-
-```sh
-docker compose --profile cpu up # for CPU mode
-docker compose --profile nvidia up # for GPU mode
-docker compose --profile amd up # for AMD mode
-```
-
-This will:
-
-- Pull the latest images if they are not already available.
-- Start the selected agent (Talk2AIAgents4Pharma or Talk2KnowledgeGraphs) and the Ollama container in the same network.
-- Ensure Ollama is running before Talk2AIAgents4Pharma or Talk2KnowledgeGraphs is launched.
-
-To access the web app, open your browser and go to:
-
-```
-http://localhost:8501
-```
-
-To stop the containers, run:
-
-```sh
-docker compose down
-```
-
-##### **Run Talk2Biomodels and Talk2Scholars**
-
-1. **Run the containers**
-
-   ```bash
-   docker run -d \
-     --name talk2biomodels \
-     -e OPENAI_API_KEY=<your_openai_api_key> \
-     -e NVIDIA_API_KEY=<your_nvidia_api_key> \
-     -p 8501:8501 \
-     virtualpatientengine/talk2biomodels
-   ```
-
-   ```bash
-   docker run -d \
-     --name talk2scholars \
-     -e OPENAI_API_KEY=<your_openai_api_key> \
-     -e ZOTERO_API_KEY=<your_zotero_api_key> \
-     -e ZOTERO_USER_ID=<your_zotero_user_id> \
-     -p 8501:8501 \
-     virtualpatientengine/talk2scholars
-   ```
-
-2. **Access the Web App**
-   Open your browser and go to:
-
-   ```
-   http://localhost:8501
-   ```
-
-_You can create a free account at NVIDIA and apply for their
-free credits [here](https://build.nvidia.com/explore/discover)._
-
-#### **Notes**
-
-- Ensure you **replace `<your_openai_api_key>`, `<your_nvidia_api_key>`, `<your_zotero_api_key>`, and `<your_zotero_user_id>`** with your actual credentials.
-- Both applications use **port `8501`**, so run them on different host ports if needed:
-  ```bash
-  docker run -d -e OPENAI_API_KEY=<your_openai_api_key> -p 8501:8501 virtualpatientengine/talk2scholars
-  ```
-  Then, access it via `http://localhost:8501`.
-
-#### Option 3: git
-
-
-
-1. **Clone the repository:**
-   ```bash
-   git clone https://github.com/VirtualPatientEngine/AIAgents4Pharma
-   cd AIAgents4Pharma
-   ```
-2. **Install dependencies:**
-
-   ```bash
-   pip install -r requirements.txt
-   ```
-
-   ⚠️ The current version of T2KG requires the Ollama library to be installed.
-
-   Ollama can be downloaded and installed from the following link: [https://ollama.com/download](https://ollama.com/download)
-
-   Alternatively, use the following commands in a terminal to install the library and pull the necessary model:
-
-   - Ubuntu:
-     ```
-     curl -fsSL https://ollama.com/install.sh | sh
-     ollama pull nomic-embed-text
-     ```
-   - Windows:
-     ```
-     curl -L https://ollama.com/download/ollama-windows-amd64.zip -o ollama-windows-amd64.zip
-     tar -xzf .\ollama-windows-amd64.zip
-     start ollama serve
-     ollama pull nomic-embed-text
-     ```
-   - macOS:
-     ```
-     brew install ollama
-     ollama pull nomic-embed-text
-     ```
-   A list of pulled Ollama models can be checked using the following command:
-
-   ```
-   ollama list
-   ```
-
-   ⚠️ The `pcst_fast 1.0.10` library requires `Microsoft Visual C++ 14.0` or greater to be installed.
-   You can download `Microsoft C++ Build Tools` from [here](https://visualstudio.microsoft.com/visual-cpp-build-tools/).
-
-3. **Initialize OPENAI_API_KEY and NVIDIA_API_KEY**
-
-   ```bash
-   export OPENAI_API_KEY=....
-   export NVIDIA_API_KEY=....
-   ```
-
-   _You can create a free account at NVIDIA and apply for their
-   free credits [here](https://build.nvidia.com/explore/discover)._
-
-4. **Initialize ZOTERO_API_KEY and ZOTERO_USER_ID**
-
-   ```bash
-   export ZOTERO_API_KEY=....
-   export ZOTERO_USER_ID=....
-   ```
-
-   _Please note that the ZOTERO keys are required only if you want to launch Talk2Scholars. For all the other agents, please ignore this step._
-
-5. **[Optional] Initialize LANGSMITH_API_KEY**
-
-   ```bash
-   export LANGCHAIN_TRACING_V2=true
-   export LANGCHAIN_API_KEY=<your-api-key>
-   ```
-
-   _Please note that this will create a new tracing project in your Langsmith
-   account with the name `T2X-xxxx`, where `X` can be `AA4P` (Main Agent),
-   `B` (Biomodels), `S` (Scholars), `KG` (KnowledgeGraphs), or `C` (Cells).
-   If you skip the previous step, it will default to the name `default`.
-   `xxxx` will be the 4-digit ID created for the session._
-
-6. **Launch the app:**
-   ```bash
-   streamlit run app/frontend/streamlit_app_<agent>.py
-   ```
-   _Replace `<agent>` with the name of the agent you want to launch._
-
-For detailed instructions on each agent, please refer to their respective modules.
-
----
-
-## Contributing
-
-We welcome contributions to AIAgents4Pharma! Here’s how you can help:
-
-1. **Fork the repository**
-2. **Create a new branch** for your feature (`git checkout -b feat/feature-name`)
-3. **Commit your changes** (`git commit -m 'feat: Add new feature'`)
-4. **Push to the branch** (`git push origin feat/feature-name`)
-5. **Open a pull request** and reach out to any one of us below via Discussions:
-
-_Note: We welcome all contributions, not just programming-related ones. Feel free to open bug reports, suggest new features, or participate as a beta tester. Your support is greatly appreciated!_
-
-- **Talk2Biomodels/Talk2Cells**: [@gurdeep330](https://github.com/gurdeep330) [@lilijap](https://github.com/lilijap) [@dmccloskey](https://github.com/dmccloskey)
-- **Talk2KnowledgeGraphs**: [@awmulyadi](https://github.com/awmulyadi) [@dmccloskey](https://github.com/dmccloskey)
-- **Talk2Scholars**: [@ansh-info](https://github.com/ansh-info) [@gurdeep330](https://github.com/gurdeep330) [@dmccloskey](https://github.com/dmccloskey)
-
-### Current Needs
-
-- **Beta testers** for Talk2BioModels and Talk2Scholars.
-- **Developers** with experience in Python and Bioinformatics and/or knowledge graphs for contributions to AIAgents4Pharma.
-
-Feel free to reach out to us via Discussions.
-
-Check out our [CONTRIBUTING.md](CONTRIBUTING.md) for more information.
-
----
-
-## Feedback
-
-Questions/Bug reports/Feature requests/Comments/Suggestions? We welcome all. Please use `Issues` or `Discussions` 😀
{aiagents4pharma-1.30.4.dist-info → aiagents4pharma-1.32.0.dist-info}/licenses/LICENSE: file without changes
{aiagents4pharma-1.30.4.dist-info → aiagents4pharma-1.32.0.dist-info}/top_level.txt: file without changes