tooluniverse 0.2.0__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tooluniverse/__init__.py +340 -4
- tooluniverse/admetai_tool.py +84 -0
- tooluniverse/agentic_tool.py +563 -0
- tooluniverse/alphafold_tool.py +96 -0
- tooluniverse/base_tool.py +129 -6
- tooluniverse/boltz_tool.py +207 -0
- tooluniverse/chem_tool.py +192 -0
- tooluniverse/compose_scripts/__init__.py +1 -0
- tooluniverse/compose_scripts/biomarker_discovery.py +293 -0
- tooluniverse/compose_scripts/comprehensive_drug_discovery.py +186 -0
- tooluniverse/compose_scripts/drug_safety_analyzer.py +89 -0
- tooluniverse/compose_scripts/literature_tool.py +34 -0
- tooluniverse/compose_scripts/output_summarizer.py +279 -0
- tooluniverse/compose_scripts/tool_description_optimizer.py +681 -0
- tooluniverse/compose_scripts/tool_discover.py +705 -0
- tooluniverse/compose_scripts/tool_graph_composer.py +448 -0
- tooluniverse/compose_tool.py +371 -0
- tooluniverse/ctg_tool.py +1002 -0
- tooluniverse/custom_tool.py +81 -0
- tooluniverse/dailymed_tool.py +108 -0
- tooluniverse/data/admetai_tools.json +155 -0
- tooluniverse/data/agentic_tools.json +1156 -0
- tooluniverse/data/alphafold_tools.json +87 -0
- tooluniverse/data/boltz_tools.json +9 -0
- tooluniverse/data/chembl_tools.json +16 -0
- tooluniverse/data/clait_tools.json +108 -0
- tooluniverse/data/clinicaltrials_gov_tools.json +326 -0
- tooluniverse/data/compose_tools.json +202 -0
- tooluniverse/data/dailymed_tools.json +70 -0
- tooluniverse/data/dataset_tools.json +646 -0
- tooluniverse/data/disease_target_score_tools.json +712 -0
- tooluniverse/data/efo_tools.json +17 -0
- tooluniverse/data/embedding_tools.json +319 -0
- tooluniverse/data/enrichr_tools.json +31 -0
- tooluniverse/data/europe_pmc_tools.json +22 -0
- tooluniverse/data/expert_feedback_tools.json +10 -0
- tooluniverse/data/fda_drug_adverse_event_tools.json +491 -0
- tooluniverse/data/fda_drug_labeling_tools.json +1 -1
- tooluniverse/data/fda_drugs_with_brand_generic_names_for_tool.py +76929 -148860
- tooluniverse/data/finder_tools.json +209 -0
- tooluniverse/data/gene_ontology_tools.json +113 -0
- tooluniverse/data/gwas_tools.json +1082 -0
- tooluniverse/data/hpa_tools.json +333 -0
- tooluniverse/data/humanbase_tools.json +47 -0
- tooluniverse/data/idmap_tools.json +74 -0
- tooluniverse/data/mcp_client_tools_example.json +113 -0
- tooluniverse/data/mcpautoloadertool_defaults.json +28 -0
- tooluniverse/data/medlineplus_tools.json +141 -0
- tooluniverse/data/monarch_tools.json +1 -1
- tooluniverse/data/openalex_tools.json +36 -0
- tooluniverse/data/opentarget_tools.json +1 -1
- tooluniverse/data/output_summarization_tools.json +101 -0
- tooluniverse/data/packages/bioinformatics_core_tools.json +1756 -0
- tooluniverse/data/packages/categorized_tools.txt +206 -0
- tooluniverse/data/packages/cheminformatics_tools.json +347 -0
- tooluniverse/data/packages/earth_sciences_tools.json +74 -0
- tooluniverse/data/packages/genomics_tools.json +776 -0
- tooluniverse/data/packages/image_processing_tools.json +38 -0
- tooluniverse/data/packages/machine_learning_tools.json +789 -0
- tooluniverse/data/packages/neuroscience_tools.json +62 -0
- tooluniverse/data/packages/original_tools.txt +0 -0
- tooluniverse/data/packages/physics_astronomy_tools.json +62 -0
- tooluniverse/data/packages/scientific_computing_tools.json +560 -0
- tooluniverse/data/packages/single_cell_tools.json +453 -0
- tooluniverse/data/packages/software_tools.json +4954 -0
- tooluniverse/data/packages/structural_biology_tools.json +396 -0
- tooluniverse/data/packages/visualization_tools.json +399 -0
- tooluniverse/data/pubchem_tools.json +215 -0
- tooluniverse/data/pubtator_tools.json +68 -0
- tooluniverse/data/rcsb_pdb_tools.json +1332 -0
- tooluniverse/data/reactome_tools.json +19 -0
- tooluniverse/data/semantic_scholar_tools.json +26 -0
- tooluniverse/data/special_tools.json +2 -25
- tooluniverse/data/tool_composition_tools.json +88 -0
- tooluniverse/data/toolfinderkeyword_defaults.json +34 -0
- tooluniverse/data/txagent_client_tools.json +9 -0
- tooluniverse/data/uniprot_tools.json +211 -0
- tooluniverse/data/url_fetch_tools.json +94 -0
- tooluniverse/data/uspto_downloader_tools.json +9 -0
- tooluniverse/data/uspto_tools.json +811 -0
- tooluniverse/data/xml_tools.json +3275 -0
- tooluniverse/dataset_tool.py +296 -0
- tooluniverse/default_config.py +165 -0
- tooluniverse/efo_tool.py +42 -0
- tooluniverse/embedding_database.py +630 -0
- tooluniverse/embedding_sync.py +396 -0
- tooluniverse/enrichr_tool.py +266 -0
- tooluniverse/europe_pmc_tool.py +52 -0
- tooluniverse/execute_function.py +1775 -95
- tooluniverse/extended_hooks.py +444 -0
- tooluniverse/gene_ontology_tool.py +194 -0
- tooluniverse/graphql_tool.py +158 -36
- tooluniverse/gwas_tool.py +358 -0
- tooluniverse/hpa_tool.py +1645 -0
- tooluniverse/humanbase_tool.py +389 -0
- tooluniverse/logging_config.py +254 -0
- tooluniverse/mcp_client_tool.py +764 -0
- tooluniverse/mcp_integration.py +413 -0
- tooluniverse/mcp_tool_registry.py +925 -0
- tooluniverse/medlineplus_tool.py +337 -0
- tooluniverse/openalex_tool.py +228 -0
- tooluniverse/openfda_adv_tool.py +283 -0
- tooluniverse/openfda_tool.py +393 -160
- tooluniverse/output_hook.py +1122 -0
- tooluniverse/package_tool.py +195 -0
- tooluniverse/pubchem_tool.py +158 -0
- tooluniverse/pubtator_tool.py +168 -0
- tooluniverse/rcsb_pdb_tool.py +38 -0
- tooluniverse/reactome_tool.py +108 -0
- tooluniverse/remote/boltz/boltz_mcp_server.py +50 -0
- tooluniverse/remote/depmap_24q2/depmap_24q2_mcp_tool.py +442 -0
- tooluniverse/remote/expert_feedback/human_expert_mcp_tools.py +2013 -0
- tooluniverse/remote/expert_feedback/simple_test.py +23 -0
- tooluniverse/remote/expert_feedback/start_web_interface.py +188 -0
- tooluniverse/remote/expert_feedback/web_only_interface.py +0 -0
- tooluniverse/remote/expert_feedback_mcp/human_expert_mcp_server.py +1611 -0
- tooluniverse/remote/expert_feedback_mcp/simple_test.py +34 -0
- tooluniverse/remote/expert_feedback_mcp/start_web_interface.py +91 -0
- tooluniverse/remote/immune_compass/compass_tool.py +327 -0
- tooluniverse/remote/pinnacle/pinnacle_tool.py +328 -0
- tooluniverse/remote/transcriptformer/transcriptformer_tool.py +586 -0
- tooluniverse/remote/uspto_downloader/uspto_downloader_mcp_server.py +61 -0
- tooluniverse/remote/uspto_downloader/uspto_downloader_tool.py +120 -0
- tooluniverse/remote_tool.py +99 -0
- tooluniverse/restful_tool.py +53 -30
- tooluniverse/scripts/generate_tool_graph.py +408 -0
- tooluniverse/scripts/visualize_tool_graph.py +829 -0
- tooluniverse/semantic_scholar_tool.py +62 -0
- tooluniverse/smcp.py +2452 -0
- tooluniverse/smcp_server.py +975 -0
- tooluniverse/test/mcp_server_test.py +0 -0
- tooluniverse/test/test_admetai_tool.py +370 -0
- tooluniverse/test/test_agentic_tool.py +129 -0
- tooluniverse/test/test_alphafold_tool.py +71 -0
- tooluniverse/test/test_chem_tool.py +37 -0
- tooluniverse/test/test_compose_lieraturereview.py +63 -0
- tooluniverse/test/test_compose_tool.py +448 -0
- tooluniverse/test/test_dailymed.py +69 -0
- tooluniverse/test/test_dataset_tool.py +200 -0
- tooluniverse/test/test_disease_target_score.py +56 -0
- tooluniverse/test/test_drugbank_filter_examples.py +179 -0
- tooluniverse/test/test_efo.py +31 -0
- tooluniverse/test/test_enrichr_tool.py +21 -0
- tooluniverse/test/test_europe_pmc_tool.py +20 -0
- tooluniverse/test/test_fda_adv.py +95 -0
- tooluniverse/test/test_fda_drug_labeling.py +91 -0
- tooluniverse/test/test_gene_ontology_tools.py +66 -0
- tooluniverse/test/test_gwas_tool.py +139 -0
- tooluniverse/test/test_hpa.py +625 -0
- tooluniverse/test/test_humanbase_tool.py +20 -0
- tooluniverse/test/test_idmap_tools.py +61 -0
- tooluniverse/test/test_mcp_server.py +211 -0
- tooluniverse/test/test_mcp_tool.py +247 -0
- tooluniverse/test/test_medlineplus.py +220 -0
- tooluniverse/test/test_openalex_tool.py +32 -0
- tooluniverse/test/test_opentargets.py +28 -0
- tooluniverse/test/test_pubchem_tool.py +116 -0
- tooluniverse/test/test_pubtator_tool.py +37 -0
- tooluniverse/test/test_rcsb_pdb_tool.py +86 -0
- tooluniverse/test/test_reactome.py +54 -0
- tooluniverse/test/test_semantic_scholar_tool.py +24 -0
- tooluniverse/test/test_software_tools.py +147 -0
- tooluniverse/test/test_tool_description_optimizer.py +49 -0
- tooluniverse/test/test_tool_finder.py +26 -0
- tooluniverse/test/test_tool_finder_llm.py +252 -0
- tooluniverse/test/test_tools_find.py +195 -0
- tooluniverse/test/test_uniprot_tools.py +74 -0
- tooluniverse/test/test_uspto_tool.py +72 -0
- tooluniverse/test/test_xml_tool.py +113 -0
- tooluniverse/tool_finder_embedding.py +267 -0
- tooluniverse/tool_finder_keyword.py +693 -0
- tooluniverse/tool_finder_llm.py +699 -0
- tooluniverse/tool_graph_web_ui.py +955 -0
- tooluniverse/tool_registry.py +416 -0
- tooluniverse/uniprot_tool.py +155 -0
- tooluniverse/url_tool.py +253 -0
- tooluniverse/uspto_tool.py +240 -0
- tooluniverse/utils.py +369 -41
- tooluniverse/xml_tool.py +369 -0
- tooluniverse-1.0.0.dist-info/METADATA +377 -0
- tooluniverse-1.0.0.dist-info/RECORD +186 -0
- tooluniverse-1.0.0.dist-info/entry_points.txt +9 -0
- tooluniverse/generate_mcp_tools.py +0 -113
- tooluniverse/mcp_server.py +0 -3340
- tooluniverse-0.2.0.dist-info/METADATA +0 -139
- tooluniverse-0.2.0.dist-info/RECORD +0 -21
- tooluniverse-0.2.0.dist-info/entry_points.txt +0 -4
- {tooluniverse-0.2.0.dist-info → tooluniverse-1.0.0.dist-info}/WHEEL +0 -0
- {tooluniverse-0.2.0.dist-info → tooluniverse-1.0.0.dist-info}/licenses/LICENSE +0 -0
- {tooluniverse-0.2.0.dist-info → tooluniverse-1.0.0.dist-info}/top_level.txt +0 -0
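Version 1.0.0 appears to replace the old server entry points (tooluniverse/mcp_server.py and generate_mcp_tools.py, both removed) with the SMCP stack (tooluniverse/smcp.py, smcp_server.py) and adds many config-driven tool types. Among them is PackageTool, implemented in tooluniverse/package_tool.py and configured by the JSON files under tooluniverse/data/packages/. The hunk below, whose 789 added lines match tooluniverse/data/packages/machine_learning_tools.json in the listing above, shows what these PackageTool definitions look like. As a minimal sketch of how such an entry is consumed (this assumes the ToolUniverse loader API from the project README; the argument values are illustrative):

    from tooluniverse import ToolUniverse

    # Load all registered tool configs, including the PackageTool
    # entries from data/packages/*.json.
    tu = ToolUniverse()
    tu.load_tools()

    # Call one of the tools defined in the hunk below; include_examples
    # is the boolean parameter declared in its JSON schema.
    result = tu.run({
        "name": "get_scikit_learn_info",
        "arguments": {"include_examples": True},
    })
    print(result)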
@@ -0,0 +1,789 @@
+[
+  {
+    "type": "PackageTool",
+    "name": "get_scikit_learn_info",
+    "description": "Get comprehensive information about scikit-learn – simple and efficient tools for predictive data analysis",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "include_examples": {
+          "type": "boolean",
+          "description": "Whether to include usage examples and quick start guide",
+          "default": true
+        }
+      }
+    },
+    "package_name": "scikit-learn",
+    "local_info": {
+      "name": "scikit-learn",
+      "description": "Simple and efficient tools for predictive data analysis. Built on NumPy, SciPy, and matplotlib. Provides a range of supervised and unsupervised learning algorithms.",
+      "category": "Machine Learning",
+      "import_name": "sklearn",
+      "popularity": 90,
+      "keywords": [
+        "machine learning",
+        "classification",
+        "regression",
+        "clustering",
+        "dimensionality reduction"
+      ],
+      "documentation": "https://scikit-learn.org/stable/",
+      "repository": "https://github.com/scikit-learn/scikit-learn",
+      "installation": {
+        "pip": "pip install scikit-learn",
+        "conda": "conda install scikit-learn"
+      },
+      "usage_example": "from sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\n\n# Load dataset\niris = datasets.load_iris()\nX, y = iris.data, iris.target\n\n# Split data\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\n\n# Train model\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\n\n# Make predictions\ny_pred = clf.predict(X_test)\nprint(f'Accuracy: {accuracy_score(y_test, y_pred)}')",
+      "quick_start": [
+        "Install: pip install scikit-learn",
+        "Import: from sklearn import datasets, model_selection",
+        "Load data: X, y = datasets.load_iris(return_X_y=True)",
+        "Split: train_test_split(X, y, test_size=0.3)",
+        "Train: model.fit(X_train, y_train); predict: model.predict(X_test)"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_pytorch_info",
+    "description": "Get comprehensive information about PyTorch – an open source machine learning framework",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "include_examples": {
+          "type": "boolean",
+          "description": "Whether to include usage examples and quick start guide",
+          "default": true
+        }
+      }
+    },
+    "package_name": "torch",
+    "local_info": {
+      "name": "PyTorch",
+      "description": "An open source machine learning framework that accelerates the path from research prototyping to production deployment. Provides tensor computation with strong GPU acceleration and deep neural networks.",
+      "category": "Deep Learning",
+      "import_name": "torch",
+      "popularity": 88,
+      "keywords": [
+        "deep learning",
+        "neural networks",
+        "tensors",
+        "GPU",
+        "autograd"
+      ],
+      "documentation": "https://pytorch.org/docs/stable/",
+      "repository": "https://github.com/pytorch/pytorch",
+      "installation": {
+        "pip": "pip install torch torchvision torchaudio",
+        "conda": "conda install pytorch torchvision torchaudio -c pytorch"
+      },
+      "usage_example": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n# Create a simple neural network\nclass SimpleNet(nn.Module):\n    def __init__(self):\n        super(SimpleNet, self).__init__()\n        self.fc1 = nn.Linear(784, 128)\n        self.fc2 = nn.Linear(128, 10)\n        self.relu = nn.ReLU()\n\n    def forward(self, x):\n        x = self.relu(self.fc1(x))\n        x = self.fc2(x)\n        return x\n\n# Create model instance\nmodel = SimpleNet()\nprint(model)",
+      "quick_start": [
+        "Install: pip install torch torchvision",
+        "Import: import torch, import torch.nn as nn",
+        "Create tensors: torch.randn(2, 3)",
+        "Define model: class MyModel(nn.Module)",
+        "Train: optimizer.zero_grad(); loss.backward(); optimizer.step()"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_torch_geometric_info",
+    "description": "Get comprehensive information about PyTorch Geometric – a high-performance library for graph neural networks widely used in molecular and materials science.",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "include_examples": {
+          "type": "boolean",
+          "description": "Whether to include usage examples and quick start guide",
+          "default": true
+        }
+      }
+    },
+    "package_name": "torch_geometric",
+    "local_info": {
+      "name": "PyTorch Geometric",
+      "description": "A geometric deep-learning extension library for PyTorch that offers efficient GNN layers, data loaders and utilities – ideal for molecules, crystals and knowledge-graphs.",
+      "category": "AI for Science / Graph Neural Networks",
+      "import_name": "torch_geometric",
+      "popularity": 78,
+      "keywords": [
+        "graph neural networks",
+        "geometric deep learning",
+        "molecular data",
+        "PyTorch",
+        "GNNs"
+      ],
+      "documentation": "https://pytorch-geometric.readthedocs.io/",
+      "repository": "https://github.com/pyg-team/pytorch_geometric",
+      "installation": {
+        "pip": "pip install torch-geometric",
+        "conda": "conda install pyg -c pyg -c conda-forge"
+      },
+      "usage_example": "from torch_geometric.datasets import QM9\nfrom torch_geometric.nn import GCNConv, global_mean_pool\nimport torch\n\ndataset = QM9(root='data/QM9')\n\nclass GCN(torch.nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.conv1 = GCNConv(dataset.num_node_features, 128)\n        self.conv2 = GCNConv(128, 64)\n        self.lin = torch.nn.Linear(64, 1)\n    def forward(self, x, edge_index, batch):\n        x = torch.relu(self.conv1(x, edge_index))\n        x = torch.relu(self.conv2(x, edge_index))\n        x = global_mean_pool(x, batch)\n        return self.lin(x)\n\nmodel = GCN()",
+      "quick_start": [
+        "Install: pip install torch-geometric",
+        "Load data: from torch_geometric.datasets import TUDataset",
+        "Create GNN: from torch_geometric.nn import GCNConv",
+        "Prepare graph data (e.g., QM9)",
+        "Define a GNN model and loss",
+        "Train and evaluate with PyTorch"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_hyperopt_info",
+    "description": "Get comprehensive information about Hyperopt – distributed hyperparameter optimization",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "include_examples": {
+          "type": "boolean",
+          "description": "Whether to include usage examples and quick start guide",
+          "default": true
+        }
+      }
+    },
+    "package_name": "hyperopt",
+    "local_info": {
+      "name": "Hyperopt",
+      "description": "Python library for hyperparameter optimization using algorithms like random search, Tree of Parzen Estimators (TPE), and adaptive TPE. Supports distributed optimization across multiple cores/machines.",
+      "category": "Machine Learning Optimization",
+      "import_name": "hyperopt",
+      "popularity": 85,
+      "keywords": [
+        "hyperparameter optimization",
+        "Bayesian optimization",
+        "TPE",
+        "distributed computing",
+        "model tuning"
+      ],
+      "documentation": "http://hyperopt.github.io/hyperopt/",
+      "repository": "https://github.com/hyperopt/hyperopt",
+      "installation": {
+        "pip": "pip install hyperopt",
+        "conda": "conda install -c conda-forge hyperopt"
+      },
+      "usage_example": "from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_val_score\n\n# Define search space\nspace = {\n    'n_estimators': hp.choice('n_estimators', [10, 50, 100, 200]),\n    'max_depth': hp.choice('max_depth', [3, 5, 10, None])\n}\n\n# Objective function\ndef objective(params):\n    clf = RandomForestClassifier(**params)\n    score = cross_val_score(clf, X, y, cv=3).mean()\n    return {'loss': -score, 'status': STATUS_OK}\n\n# Optimize\ntrials = Trials()\nbest = fmin(objective, space, algo=tpe.suggest, max_evals=100, trials=trials)",
+      "quick_start": [
+        "Install: pip install hyperopt",
+        "Import: from hyperopt import fmin, tpe, hp",
+        "Define space: space = {'param': hp.choice('param', [1, 2, 3])}",
+        "Define objective: def objective(params): return loss",
+        "Optimize: best = fmin(objective, space, algo=tpe.suggest)"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_umap_learn_info",
+    "description": "Get comprehensive information about UMAP-learn – dimensionality reduction technique",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "include_examples": {
+          "type": "boolean",
+          "description": "Whether to include usage examples and quick start guide",
+          "default": true
+        }
+      }
+    },
+    "package_name": "umap-learn",
+    "local_info": {
+      "name": "UMAP",
+      "description": "Uniform Manifold Approximation and Projection for dimensionality reduction. Preserves local and global structure while being computationally efficient for large datasets.",
+      "category": "Dimensionality Reduction",
+      "import_name": "umap",
+      "popularity": 88,
+      "keywords": [
+        "dimensionality reduction",
+        "manifold learning",
+        "visualization",
+        "clustering",
+        "single-cell"
+      ],
+      "documentation": "https://umap-learn.readthedocs.io/",
+      "repository": "https://github.com/lmcinnes/umap",
+      "installation": {
+        "pip": "pip install umap-learn",
+        "conda": "conda install -c conda-forge umap-learn"
+      },
+      "usage_example": "import umap\nfrom sklearn.datasets import load_iris\nimport matplotlib.pyplot as plt\n\n# Load data\niris = load_iris()\nX, y = iris.data, iris.target\n\n# Apply UMAP\nreducer = umap.UMAP(n_neighbors=15, min_dist=0.1, n_components=2)\nembedding = reducer.fit_transform(X)\n\n# Plot results\nplt.scatter(embedding[:, 0], embedding[:, 1], c=y, cmap='viridis')\nplt.title('UMAP projection of Iris dataset')\nplt.show()",
+      "quick_start": [
+        "Install: pip install umap-learn",
+        "Import: import umap",
+        "Create: reducer = umap.UMAP(n_components=2)",
+        "Fit: embedding = reducer.fit_transform(data)",
+        "Plot: Use matplotlib/seaborn for visualization"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_statsmodels_info",
+    "description": "Get comprehensive information about statsmodels – statistical modeling and econometrics",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "include_examples": {
+          "type": "boolean",
+          "description": "Whether to include usage examples and quick start guide",
+          "default": true
+        }
+      }
+    },
+    "package_name": "statsmodels",
+    "local_info": {
+      "name": "statsmodels",
+      "description": "Statistical modeling library providing classes and functions for statistical estimation, statistical tests, statistical data exploration, and regression analysis.",
+      "category": "Statistical Analysis",
+      "import_name": "statsmodels",
+      "popularity": 85,
+      "keywords": [
+        "statistical modeling",
+        "regression",
+        "time series",
+        "econometrics",
+        "hypothesis testing"
+      ],
+      "documentation": "https://www.statsmodels.org/stable/",
+      "repository": "https://github.com/statsmodels/statsmodels",
+      "installation": {
+        "pip": "pip install statsmodels",
+        "conda": "conda install -c conda-forge statsmodels"
+      },
+      "usage_example": "import statsmodels.api as sm\nimport numpy as np\nimport pandas as pd\n\n# Generate sample data\nnp.random.seed(42)\nX = np.random.randn(100, 2)\ny = X[:, 0] + 2*X[:, 1] + np.random.randn(100)\n\n# Add constant for intercept\nX = sm.add_constant(X)\n\n# Fit OLS regression\nmodel = sm.OLS(y, X)\nresults = model.fit()\nprint(results.summary())",
+      "quick_start": [
+        "Install: pip install statsmodels",
+        "Import: import statsmodels.api as sm",
+        "Prepare: X = sm.add_constant(X) # for intercept",
+        "Fit: model = sm.OLS(y, X); results = model.fit()",
+        "Analyze: results.summary(), results.params"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_schnetpack_info",
+    "description": "Get comprehensive information about SchNetPack – a deep-learning toolbox for molecules and materials built on PyTorch.",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "include_examples": {
+          "type": "boolean",
+          "description": "Whether to include usage examples and a quick-start guide",
+          "default": true
+        }
+      }
+    },
+    "package_name": "schnetpack",
+    "local_info": {
+      "name": "SchNetPack",
+      "description": "A modular deep-learning framework implementing SchNet and related architectures for quantum-chemical and atomistic property prediction.",
+      "category": "AI for Science / Atomistic ML",
+      "import_name": "schnetpack",
+      "popularity": 60,
+      "keywords": [
+        "atomistic ML",
+        "quantum chemistry",
+        "materials",
+        "SchNet",
+        "PyTorch"
+      ],
+      "documentation": "https://schnetpack.readthedocs.io/",
+      "repository": "https://github.com/atomistic-machine-learning/schnetpack",
+      "installation": {
+        "pip": "pip install schnetpack",
+        "conda": "conda install -c conda-forge schnetpack"
+      },
+      "usage_example": "import schnetpack as spk\nimport torch\n\ndataset = spk.data.AtomsData('qm9.db')\ntrain, val, test = spk.train.train_test_split(data=dataset, split=[8,1,1])\nrepresentation = spk.representation.SchNet(n_atom_basis=128)\nprediction = spk.output_modules.Atomwise(property='energy', mean=dataset._property_mean('energy'))\nmodel = spk.AtomisticModel(representation, prediction)\ntrainer = spk.train.Trainer('run', model, loss_fn=torch.nn.MSELoss(),\n    train_loader=train, validation_loader=val)\ntrainer.train(device=torch.device('cpu'))",
+      "quick_start": [
+        "1. Install SchNetPack: pip install schnetpack",
+        "2. Load or create an AtomsData database",
+        "3. Build a SchNet representation layer",
+        "4. Add an output module (e.g., Atomwise)",
+        "5. Train with spk.train.Trainer"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_harmony_pytorch_info",
+    "description": "Get comprehensive information about harmony-pytorch – single-cell data integration",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "info_type": {
+          "type": "string",
+          "enum": [
+            "overview",
+            "installation",
+            "usage",
+            "documentation"
+          ],
+          "description": "Type of information to retrieve about harmony-pytorch"
+        }
+      },
+      "required": [
+        "info_type"
+      ]
+    },
+    "package_name": "harmony-pytorch",
+    "local_info": {
+      "name": "harmony-pytorch",
+      "description": "PyTorch implementation of Harmony algorithm for single-cell data integration. Removes batch effects and integrates datasets from different experimental conditions, technologies, or laboratories.",
+      "category": "Single-Cell Integration",
+      "import_name": "harmony",
+      "popularity": 68,
+      "keywords": [
+        "batch correction",
+        "data integration",
+        "single-cell",
+        "harmony",
+        "batch effects"
+      ],
+      "documentation": "https://github.com/lilab-bcb/harmony-pytorch",
+      "repository": "https://github.com/lilab-bcb/harmony-pytorch",
+      "installation": {
+        "pip": "pip install harmony-pytorch",
+        "conda": "conda install -c conda-forge harmony-pytorch"
+      },
+      "usage_example": "import numpy as np\nimport pandas as pd\nfrom harmony import harmonize\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.datasets import make_blobs\n\n# Create sample data with batch effects\nnp.random.seed(42)\n\n# Generate two batches of data\nbatch1_data, _ = make_blobs(n_samples=200, centers=3, n_features=50,\n    cluster_std=1.0, center_box=(0.0, 5.0))\nbatch2_data, _ = make_blobs(n_samples=200, centers=3, n_features=50,\n    cluster_std=1.0, center_box=(3.0, 8.0))  # Shifted\n\n# Combine data\nX = np.vstack([batch1_data, batch2_data])\nbatch_labels = ['Batch1'] * 200 + ['Batch2'] * 200\n\nprint(f'Combined data shape: {X.shape}')\nprint(f'Batch distribution: {pd.Series(batch_labels).value_counts().to_dict()}')\n\n# Apply PCA for visualization\npca = PCA(n_components=50)\nX_pca = pca.fit_transform(X)\n\nprint(f'PCA data shape: {X_pca.shape}')\nprint(f'Explained variance ratio (first 5 PCs): {pca.explained_variance_ratio_[:5]}')\n\n# Apply Harmony for batch correction\nprint('\\nApplying Harmony batch correction...')\n\n# Create batch array (required format)\nbatch_array = np.array([0 if b == 'Batch1' else 1 for b in batch_labels])\n\n# Run Harmony\nX_harmony = harmonize(\n    X_pca,  # PCA coordinates\n    batch_array,  # Batch assignments\n    batch_key='batch',\n    max_iter_harmony=20,\n    random_state=42\n)\n\nprint(f'Harmony corrected data shape: {X_harmony.shape}')\n\n# Visualize results\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))\n\n# Plot before Harmony\ncolors = ['red' if b == 'Batch1' else 'blue' for b in batch_labels]\nax1.scatter(X_pca[:, 0], X_pca[:, 1], c=colors, alpha=0.6)\nax1.set_title('Before Harmony Correction')\nax1.set_xlabel('PC1')\nax1.set_ylabel('PC2')\nax1.legend(['Batch1', 'Batch2'])\n\n# Plot after Harmony\nax2.scatter(X_harmony[:, 0], X_harmony[:, 1], c=colors, alpha=0.6)\nax2.set_title('After Harmony Correction')\nax2.set_xlabel('Harmony PC1')\nax2.set_ylabel('Harmony PC2')\nax2.legend(['Batch1', 'Batch2'])\n\nplt.tight_layout()\nplt.show()\n\nprint('\\nHarmony integration complete!')\nprint('Use the corrected coordinates for downstream analysis')",
+      "quick_start": [
+        "Install: pip install harmony-pytorch",
+        "Import: from harmony import harmonize",
+        "Prepare data: PCA coordinates + batch labels",
+        "Run Harmony: harmonize(X_pca, batch_array)",
+        "Use corrected coordinates for clustering/UMAP",
+        "Integrate with scanpy workflows"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_python_libsbml_info",
+    "description": "Get comprehensive information about python-libsbml – SBML (Systems Biology Markup Language) support",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "info_type": {
+          "type": "string",
+          "enum": [
+            "overview",
+            "installation",
+            "usage",
+            "documentation"
+          ],
+          "description": "Type of information to retrieve about python-libsbml"
+        }
+      },
+      "required": [
+        "info_type"
+      ]
+    },
+    "package_name": "python-libsbml",
+    "local_info": {
+      "name": "python-libsbml",
+      "description": "Python bindings for libSBML, the Systems Biology Markup Language library. Enables reading, writing, and manipulation of SBML files for systems biology and metabolic modeling applications.",
+      "category": "Systems Biology / SBML",
+      "import_name": "libsbml",
+      "popularity": 68,
+      "keywords": [
+        "SBML",
+        "systems biology",
+        "metabolic models",
+        "biochemical networks",
+        "model exchange"
+      ],
+      "documentation": "http://sbml.org/Software/libSBML/docs/python-api/",
+      "repository": "https://github.com/sbmlteam/python-libsbml",
+      "installation": {
+        "pip": "pip install python-libsbml",
+        "conda": "conda install -c conda-forge python-libsbml"
+      },
+      "usage_example": "import libsbml\nimport sys\n\n# Create a new SBML document\ndocument = libsbml.SBMLDocument(3, 1) # SBML Level 3, Version 1\nmodel = document.createModel()\nmodel.setId('example_model')\nmodel.setName('Example Metabolic Model')\n\nprint(f'Created model: {model.getName()}')\nprint(f'SBML Level: {document.getLevel()}, Version: {document.getVersion()}')\n\n# Create compartment\ncompartment = model.createCompartment()\ncompartment.setId('cytoplasm')\ncompartment.setName('Cytoplasm')\ncompartment.setConstant(True)\ncompartment.setSize(1.0)\n\n# Create species (metabolites)\nglucose = model.createSpecies()\nglucose.setId('glucose')\nglucose.setName('Glucose')\nglucose.setCompartment('cytoplasm')\nglucose.setInitialConcentration(10.0)\nglucose.setConstant(False)\n\ng6p = model.createSpecies()\ng6p.setId('g6p')\ng6p.setName('Glucose-6-phosphate')\ng6p.setCompartment('cytoplasm')\ng6p.setInitialConcentration(0.0)\ng6p.setConstant(False)\n\n# Create reaction\nreaction = model.createReaction()\nreaction.setId('hexokinase')\nreaction.setName('Hexokinase')\nreaction.setReversible(False)\n\n# Add reactants and products\nreactant = reaction.createReactant()\nreactant.setSpecies('glucose')\nreactant.setStoichiometry(1.0)\n\nproduct = reaction.createProduct()\nproduct.setSpecies('g6p')\nproduct.setStoichiometry(1.0)\n\nprint(f'Model components:')\nprint(f' Compartments: {model.getNumCompartments()}')\nprint(f' Species: {model.getNumSpecies()}')\nprint(f' Reactions: {model.getNumReactions()}')\n\n# Validate model\nerrors = document.checkConsistency()\nprint(f'\\nValidation errors: {errors}')\n\n# Write to file\nwriter = libsbml.SBMLWriter()\nsuccess = writer.writeSBMLToFile(document, 'example_model.xml')\nif success:\n    print('Model saved to example_model.xml')\nelse:\n    print('Failed to write model')",
+      "quick_start": [
+        "Install: pip install python-libsbml",
+        "Create document: libsbml.SBMLDocument(level, version)",
+        "Create model: document.createModel()",
+        "Add compartments: model.createCompartment()",
+        "Add species: model.createSpecies()",
+        "Add reactions: model.createReaction()"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_pymzml_info",
+    "description": "Get comprehensive information about pymzML – mzML file parser for mass spectrometry",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "info_type": {
+          "type": "string",
+          "enum": [
+            "overview",
+            "installation",
+            "usage",
+            "documentation"
+          ],
+          "description": "Type of information to retrieve about pymzML"
+        }
+      },
+      "required": [
+        "info_type"
+      ]
+    },
+    "package_name": "pymzml",
+    "local_info": {
+      "name": "pymzML",
+      "description": "Python interface for mzML mass spectrometry files. Provides efficient parsing of mzML format with support for random access, spectrum extraction, and metadata handling for proteomics and metabolomics applications.",
+      "category": "Mass Spectrometry I/O",
+      "import_name": "pymzml",
+      "popularity": 70,
+      "keywords": [
+        "mzML",
+        "mass spectrometry",
+        "proteomics",
+        "metabolomics",
+        "file parsing"
+      ],
+      "documentation": "https://pymzml.readthedocs.io/",
+      "repository": "https://github.com/pymzml/pymzML",
+      "installation": {
+        "pip": "pip install pymzml",
+        "conda": "conda install -c conda-forge pymzml"
+      },
+      "usage_example": "import pymzml\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Open mzML file\nmsrun = pymzml.run.Reader('sample.mzML')\n\n# Basic file information\nprint(f'MS run info: {msrun.info}')\n\n# Iterate through spectra\nms1_spectra = []\nms2_spectra = []\n\nfor spectrum in msrun:\n    if spectrum.ms_level == 1:\n        ms1_spectra.append(spectrum)\n    elif spectrum.ms_level == 2:\n        ms2_spectra.append(spectrum)\n\nprint(f'Found {len(ms1_spectra)} MS1 and {len(ms2_spectra)} MS2 spectra')\n\n# Access spectrum data\nif ms1_spectra:\n    first_spectrum = ms1_spectra[0]\n    print(f'Spectrum ID: {first_spectrum.ID}')\n    print(f'Retention time: {first_spectrum.scan_time_in_minutes():.2f} min')\n    print(f'Number of peaks: {len(first_spectrum.peaks(\"centroided\"))}')\n\n    # Get m/z and intensity arrays\n    mz_array, intensity_array = first_spectrum.peaks(\"centroided\")\n\n    # Plot spectrum\n    plt.figure(figsize=(12, 6))\n    plt.plot(mz_array, intensity_array)\n    plt.xlabel('m/z')\n    plt.ylabel('Intensity')\n    plt.title(f'MS1 Spectrum (RT: {first_spectrum.scan_time_in_minutes():.2f} min)')\n    plt.show()\n\n    # Find peaks above threshold\n    threshold = np.max(intensity_array) * 0.05\n    high_peaks = [(mz, intensity) for mz, intensity in zip(mz_array, intensity_array)\n                  if intensity > threshold]\n    print(f'Peaks above 5% threshold: {len(high_peaks)}')",
+      "quick_start": [
+        "Install: pip install pymzml",
+        "Open file: msrun = pymzml.run.Reader('file.mzML')",
+        "Iterate spectra: for spectrum in msrun",
+        "Get peaks: spectrum.peaks('centroided')",
+        "Access metadata: spectrum.scan_time_in_minutes()",
+        "Filter by MS level: spectrum.ms_level"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_faiss_info",
+    "description": "Get comprehensive information about Faiss – efficient similarity search and clustering",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "include_examples": {
+          "type": "boolean",
+          "description": "Whether to include usage examples and quick start guide",
+          "default": true
+        }
+      }
+    },
+    "package_name": "faiss-cpu",
+    "local_info": {
+      "name": "Faiss",
+      "description": "Library for efficient similarity search and clustering of dense vectors. Provides GPU-accelerated implementations of nearest neighbor search algorithms for large-scale applications.",
+      "category": "Vector Search",
+      "import_name": "faiss",
+      "popularity": 92,
+      "keywords": [
+        "similarity search",
+        "nearest neighbors",
+        "clustering",
+        "vector search",
+        "embeddings"
+      ],
+      "documentation": "https://faiss.ai/",
+      "repository": "https://github.com/facebookresearch/faiss",
+      "installation": {
+        "pip": "pip install faiss-cpu",
+        "conda": "conda install -c conda-forge faiss-cpu"
+      },
+      "usage_example": "import faiss\nimport numpy as np\n\n# Generate random data\nnp.random.seed(42)\nd = 64 # dimension\nnb = 100000 # database size\nnq = 10000 # number of queries\nxb = np.random.random((nb, d)).astype('float32')\nxq = np.random.random((nq, d)).astype('float32')\n\n# Build index\nindex = faiss.IndexFlatL2(d)\nindex.add(xb)\n\n# Search\nk = 4 # number of nearest neighbors\nD, I = index.search(xq, k)\nprint(f'Found {len(I)} results')\nprint(f'Distances shape: {D.shape}')",
+      "quick_start": [
+        "1. Install Faiss: pip install faiss-cpu",
+        "2. Import: import faiss",
+        "3. Create index: index = faiss.IndexFlatL2(dimension)",
+        "4. Add vectors: index.add(vectors)",
+        "5. Search: distances, indices = index.search(query, k)"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_hmmlearn_info",
+    "description": "Get comprehensive information about hmmlearn – Hidden Markov Models in Python",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "info_type": {
+          "type": "string",
+          "enum": [
+            "overview",
+            "installation",
+            "usage",
+            "models",
+            "documentation"
+          ],
+          "description": "Type of information to retrieve about hmmlearn"
+        }
+      },
+      "required": [
+        "info_type"
+      ]
+    },
+    "package_name": "hmmlearn",
+    "local_info": {
+      "name": "hmmlearn",
+      "description": "Hidden Markov Models in Python with scikit-learn compatible API. Provides implementations of Gaussian HMM, Multinomial HMM, and other variants for sequence modeling tasks.",
+      "category": "Machine Learning / Sequence Modeling",
+      "import_name": "hmmlearn",
+      "popularity": 70,
+      "keywords": [
+        "hidden markov models",
+        "sequence modeling",
+        "time series",
+        "state estimation",
+        "Baum-Welch"
+      ],
+      "documentation": "https://hmmlearn.readthedocs.io/",
+      "repository": "https://github.com/hmmlearn/hmmlearn",
+      "installation": {
+        "pip": "pip install hmmlearn",
+        "conda": "conda install -c conda-forge hmmlearn"
+      },
+      "usage_example": "from hmmlearn import hmm\nimport numpy as np\n\n# Create sample data\nnp.random.seed(42)\nlengths = [10, 20, 15]\nX = np.concatenate([\n    np.random.normal(0, 1, (10, 2)),\n    np.random.normal(3, 1, (20, 2)),\n    np.random.normal(-2, 1, (15, 2))\n])\n\n# Fit Gaussian HMM\nmodel = hmm.GaussianHMM(n_components=3, covariance_type='full')\nmodel.fit(X, lengths)\n\nprint(f'Converged: {model.monitor_.converged}')\nprint(f'Log likelihood: {model.score(X, lengths):.2f}')\n\n# Predict hidden states\nstates = model.predict(X, lengths)\nprint(f'Predicted states: {states[:10]}')\n\n# Decode most likely state sequence\nlogprob, path = model.decode(X, lengths)\nprint(f'Most likely path: {path[:10]}')",
+      "quick_start": [
+        "Install: pip install hmmlearn",
+        "Import: from hmmlearn import hmm",
+        "Create model: model = hmm.GaussianHMM(n_components=3)",
+        "Fit data: model.fit(X, lengths)",
+        "Predict states: model.predict(X)",
+        "Decode sequence: model.decode(X)"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_cobrapy_info",
+    "description": "Get comprehensive information about COBRApy – constraint-based metabolic modeling",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "info_type": {
+          "type": "string",
+          "enum": [
+            "overview",
+            "installation",
+            "usage",
+            "documentation"
+          ],
+          "description": "Type of information to retrieve about COBRApy"
+        }
+      },
+      "required": [
+        "info_type"
+      ]
+    },
+    "package_name": "cobra",
+    "local_info": {
+      "name": "COBRApy",
+      "description": "Constraint-Based Reconstruction and Analysis (COBRA) methods for metabolic modeling. Enables flux balance analysis, gene essentiality prediction, and metabolic network optimization.",
+      "category": "Systems Biology / Metabolic Modeling",
+      "import_name": "cobra",
+      "popularity": 85,
+      "keywords": [
+        "metabolism",
+        "flux balance analysis",
+        "systems biology",
+        "metabolic networks",
+        "SBML"
+      ],
+      "documentation": "https://cobrapy.readthedocs.io/",
+      "repository": "https://github.com/opencobra/cobrapy",
+      "installation": {
+        "pip": "pip install cobra",
+        "conda": "conda install -c conda-forge cobra"
+      },
+      "usage_example": "import cobra\nimport cobra.test\nfrom cobra.flux_analysis import flux_variability_analysis\nimport pandas as pd\n\n# Load example E. coli model\nprint('Loading E. coli metabolic model...')\nmodel = cobra.test.create_test_model('textbook')\n\nprint(f'Model: {model.id}')\nprint(f'Reactions: {len(model.reactions)}')\nprint(f'Metabolites: {len(model.metabolites)}')\nprint(f'Genes: {len(model.genes)}')\n\n# Perform flux balance analysis\nprint('\\nPerforming flux balance analysis...')\nsolution = model.optimize()\n\nprint(f'Optimization status: {solution.status}')\nprint(f'Objective value: {solution.objective_value:.3f}')\nprint(f'Growth rate: {solution.fluxes[\"BIOMASS_Ecoli_core\"]:.3f} h⁻¹')\n\n# Show top fluxes\nprint('\\nTop active fluxes:')\nactive_fluxes = solution.fluxes[abs(solution.fluxes) > 0.01].sort_values(key=abs, ascending=False)\nfor rxn_id, flux in active_fluxes.head(10).items():\n    rxn = model.reactions.get_by_id(rxn_id)\n    print(f'{rxn_id:15s}: {flux:8.3f} {rxn.name}')\n\n# Gene essentiality analysis\nprint('\\nAnalyzing gene essentiality...')\ngene_results = []\nfor gene in list(model.genes)[:10]:  # Test first 10 genes\n    with model:\n        gene.knock_out()\n        ko_solution = model.optimize()\n        growth_rate = ko_solution.objective_value if ko_solution.status == 'optimal' else 0\n        essential = growth_rate < 0.01\n        gene_results.append({\n            'gene': gene.id,\n            'growth_rate': growth_rate,\n            'essential': essential\n        })\n\nessential_genes = [g for g in gene_results if g['essential']]\nprint(f'Essential genes found: {len(essential_genes)}/{len(gene_results)}')\nfor gene in essential_genes:\n    print(f' {gene[\"gene\"]}: growth rate {gene[\"growth_rate\"]:.3f}')\n\n# Flux variability analysis\nprint('\\nPerforming flux variability analysis...')\nfva_result = flux_variability_analysis(model, model.reactions[:5])  # First 5 reactions\nprint(fva_result)\n\n# Medium composition analysis\nprint(f'\\nCurrent medium ({len(model.medium)} components):')\nfor metabolite, flux in model.medium.items():\n    met = model.metabolites.get_by_id(metabolite)\n    print(f' {metabolite:15s}: {flux:8.3f} ({met.name})')\n\n# Test different carbon sources\nprint('\\nTesting different carbon sources...')\ncarbon_sources = ['EX_glc__D_e', 'EX_fru_e', 'EX_xyl__D_e']\nfor carbon in carbon_sources:\n    if carbon in [r.id for r in model.exchanges]:\n        with model:\n            # Close all carbon sources\n            for ex in model.exchanges:\n                if 'EX_' in ex.id and any(c in ex.id for c in ['glc', 'fru', 'xyl', 'ac']):\n                    ex.lower_bound = 0\n            # Open specific carbon source\n            model.reactions.get_by_id(carbon).lower_bound = -10\n            solution = model.optimize()\n            growth = solution.objective_value if solution.status == 'optimal' else 0\n            carbon_name = carbon.replace('EX_', '').replace('_e', '')\n            print(f' {carbon_name:10s}: growth rate {growth:.3f}')\n\nprint('\\nCOBRApy enables:')\nprint('- Constraint-based metabolic modeling')\nprint('- Flux balance analysis (FBA)')\nprint('- Gene essentiality prediction')\nprint('- Flux variability analysis')\nprint('- SBML model import/export')\nprint('- Integration with experimental data')",
+      "quick_start": [
+        "Install: pip install cobra",
+        "Load model: model = cobra.test.create_test_model()",
+        "Optimize: solution = model.optimize()",
+        "Gene knockout: gene.knock_out()",
+        "FVA: flux_variability_analysis(model, reactions)",
+        "Save/load: cobra.io.read_sbml_model(file)"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_deepxde_info",
+    "description": "Get comprehensive information about DeepXDE – a library for physics-informed neural networks (PINNs) solving PDEs and inverse problems.",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "include_examples": {
+          "type": "boolean",
+          "description": "Whether to include usage examples and a quick-start guide",
+          "default": true
+        }
+      }
+    },
+    "package_name": "deepxde",
+    "local_info": {
+      "name": "DeepXDE",
+      "description": "A flexible TensorFlow/PyTorch-based framework for building physics-informed neural networks to solve differential equations and scientific inverse problems.",
+      "category": "AI for Science / Physics-Informed NNs",
+      "import_name": "deepxde",
+      "popularity": 65,
+      "keywords": [
+        "PINN",
+        "differential equations",
+        "scientific ML",
+        "inverse problems"
+      ],
+      "documentation": "https://deepxde.readthedocs.io/",
+      "repository": "https://github.com/lululxvi/deepxde",
+      "installation": {
+        "pip": "pip install deepxde",
+        "conda": "conda install -c conda-forge deepxde"
+      },
+      "usage_example": "import deepxde as dde\nimport numpy as np\n\ndef pde(x, y):\n    y_xx = dde.grad.hessian(y, x)\n    return y_xx + np.pi**2 * np.sin(np.pi * x)\n\ndomain = dde.geometry.Interval(0, 1)\nbc = dde.icbc.DirichletBC(domain, lambda x: 0, lambda _, on_boundary: on_boundary)\n\ndata = dde.data.PDE(domain, pde, [bc], num_domain=20, num_boundary=10)\nnet = dde.nn.FNN([1, 32, 32, 32, 1], 'tanh', 'Glorot normal')\nmodel = dde.Model(data, net)\nmodel.compile('adam', lr=1e-3)\nmodel.train(epochs=5000)",
+      "quick_start": [
+        "1. Install DeepXDE: pip install deepxde",
+        "2. Define geometry and PDE function",
+        "3. Add boundary/initial conditions",
+        "4. Build a data object: dde.data.PDE(...)",
+        "5. Train the PINN with model.train()"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_deeppurpose_info",
+    "description": "Get comprehensive information about DeepPurpose – deep learning toolkit for drug discovery",
+    "parameter": {
+      "type": "object",
+      "properties": {
+        "info_type": {
+          "type": "string",
+          "enum": [
+            "overview",
+            "installation",
+            "usage",
+            "models",
+            "documentation"
+          ],
+          "description": "Type of information to retrieve about DeepPurpose"
+        }
+      },
+      "required": [
+        "info_type"
+      ]
+    },
+    "package_name": "deeppurpose",
+    "local_info": {
+      "name": "DeepPurpose",
+      "description": "Deep learning toolkit for drug discovery and drug-target interaction prediction. Provides unified framework for various molecular representations and deep learning architectures for drug discovery tasks.",
+      "category": "Drug Discovery / AI",
+      "import_name": "DeepPurpose",
+      "popularity": 75,
+      "keywords": [
+        "drug discovery",
+        "drug-target interaction",
+        "molecular representation",
+        "pharmaceutical AI"
+      ],
+      "documentation": "https://deeppurpose.readthedocs.io/",
+      "repository": "https://github.com/kexinhuang12345/DeepPurpose",
+      "installation": {
+        "pip": "pip install DeepPurpose",
+        "conda": "conda install -c conda-forge deeppurpose"
+      },
+      "usage_example": "from DeepPurpose import utils, DTI\nimport pandas as pd\n\n# Load sample drug-target interaction data\nX_drug, X_target, y = utils.load_process_DAVIS(\n    path='./data', binary=False, convert_to_log=True\n)\n\n# Create train/validation/test splits\ntrain, val, test = utils.data_process(\n    X_drug, X_target, y,\n    drug_encoding='Morgan',\n    target_encoding='CNN',\n    split_method='random',\n    frac=[0.7, 0.1, 0.2]\n)\n\n# Initialize and train model\nconfig = utils.generate_config(\n    drug_encoding='Morgan',\n    target_encoding='CNN',\n    cls_hidden_dims=[1024, 1024, 512]\n)\nmodel = DTI.model_initialize(**config)\nmodel.train(train, val, test)\n\n# Make predictions\npredictions = model.predict(test)",
+      "quick_start": [
+        "Install: pip install DeepPurpose",
+        "Load data: utils.load_process_DAVIS()",
+        "Choose encodings: drug_encoding='Morgan', target_encoding='CNN'",
+        "Process data: utils.data_process()",
+        "Initialize model: DTI.model_initialize()",
+        "Train and predict: model.train(), model.predict()"
+      ]
+    }
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_xgboost_info",
+    "description": "Get information about the xgboost package. Optimized gradient boosting framework",
+    "package_name": "xgboost",
+    "parameter": {
+      "type": "object",
+      "properties": {},
+      "required": []
+    },
+    "required": []
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_lightgbm_info",
+    "description": "Get information about the lightgbm package. Fast gradient boosting framework",
+    "package_name": "lightgbm",
+    "parameter": {
+      "type": "object",
+      "properties": {},
+      "required": []
+    },
+    "required": []
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_catboost_info",
+    "description": "Get information about the catboost package. High-performance gradient boosting library",
+    "package_name": "catboost",
+    "parameter": {
+      "type": "object",
+      "properties": {},
+      "required": []
+    },
+    "required": []
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_optuna_info",
+    "description": "Get information about the optuna package. Hyperparameter optimization framework",
+    "package_name": "optuna",
+    "parameter": {
+      "type": "object",
+      "properties": {},
+      "required": []
+    },
+    "required": []
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_skopt_info",
+    "description": "Get information about the skopt package. Scikit-Optimize: sequential model-based optimization",
+    "package_name": "skopt",
+    "parameter": {
+      "type": "object",
+      "properties": {},
+      "required": []
+    },
+    "required": []
+  },
+  {
+    "type": "PackageTool",
+    "name": "get_imbalanced_learn_info",
+    "description": "Get information about the imbalanced-learn package. Python toolbox for imbalanced dataset learning",
+    "package_name": "imbalanced-learn",
+    "parameter": {
+      "type": "object",
+      "properties": {},
+      "required": []
+    },
+    "required": []
+  }
+]
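Each entry's "parameter" block above is plain JSON Schema, so a client can validate arguments before a tool is dispatched. A short sketch using the third-party jsonschema package (the file path assumes the wheel layout from the listing above, and the argument dicts are illustrative):

    import json
    from jsonschema import ValidationError, validate  # pip install jsonschema

    # Index the PackageTool entries from the hunk above by tool name.
    path = "tooluniverse/data/packages/machine_learning_tools.json"
    with open(path) as f:
        tools = {entry["name"]: entry for entry in json.load(f)}

    schema = tools["get_harmony_pytorch_info"]["parameter"]
    validate({"info_type": "usage"}, schema)  # passes: "usage" is in the enum

    try:
        validate({"info_type": "changelog"}, schema)  # "changelog" is not in the enum
    except ValidationError as err:
        print(err.message)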