aiagents4pharma 0.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiagents4pharma/__init__.py +11 -0
- aiagents4pharma/talk2aiagents4pharma/.dockerignore +13 -0
- aiagents4pharma/talk2aiagents4pharma/Dockerfile +133 -0
- aiagents4pharma/talk2aiagents4pharma/README.md +1 -0
- aiagents4pharma/talk2aiagents4pharma/__init__.py +5 -0
- aiagents4pharma/talk2aiagents4pharma/agents/__init__.py +6 -0
- aiagents4pharma/talk2aiagents4pharma/agents/main_agent.py +70 -0
- aiagents4pharma/talk2aiagents4pharma/configs/__init__.py +5 -0
- aiagents4pharma/talk2aiagents4pharma/configs/agents/__init__.py +5 -0
- aiagents4pharma/talk2aiagents4pharma/configs/agents/main_agent/default.yaml +29 -0
- aiagents4pharma/talk2aiagents4pharma/configs/app/__init__.py +0 -0
- aiagents4pharma/talk2aiagents4pharma/configs/app/frontend/__init__.py +0 -0
- aiagents4pharma/talk2aiagents4pharma/configs/app/frontend/default.yaml +102 -0
- aiagents4pharma/talk2aiagents4pharma/configs/config.yaml +4 -0
- aiagents4pharma/talk2aiagents4pharma/docker-compose/cpu/.env.example +23 -0
- aiagents4pharma/talk2aiagents4pharma/docker-compose/cpu/docker-compose.yml +93 -0
- aiagents4pharma/talk2aiagents4pharma/docker-compose/gpu/.env.example +23 -0
- aiagents4pharma/talk2aiagents4pharma/docker-compose/gpu/docker-compose.yml +108 -0
- aiagents4pharma/talk2aiagents4pharma/install.md +154 -0
- aiagents4pharma/talk2aiagents4pharma/states/__init__.py +5 -0
- aiagents4pharma/talk2aiagents4pharma/states/state_talk2aiagents4pharma.py +18 -0
- aiagents4pharma/talk2aiagents4pharma/tests/__init__.py +3 -0
- aiagents4pharma/talk2aiagents4pharma/tests/test_main_agent.py +312 -0
- aiagents4pharma/talk2biomodels/.dockerignore +13 -0
- aiagents4pharma/talk2biomodels/Dockerfile +104 -0
- aiagents4pharma/talk2biomodels/README.md +1 -0
- aiagents4pharma/talk2biomodels/__init__.py +5 -0
- aiagents4pharma/talk2biomodels/agents/__init__.py +6 -0
- aiagents4pharma/talk2biomodels/agents/t2b_agent.py +104 -0
- aiagents4pharma/talk2biomodels/api/__init__.py +5 -0
- aiagents4pharma/talk2biomodels/api/ols.py +75 -0
- aiagents4pharma/talk2biomodels/api/uniprot.py +36 -0
- aiagents4pharma/talk2biomodels/configs/__init__.py +5 -0
- aiagents4pharma/talk2biomodels/configs/agents/__init__.py +5 -0
- aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/__init__.py +3 -0
- aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/default.yaml +14 -0
- aiagents4pharma/talk2biomodels/configs/app/__init__.py +0 -0
- aiagents4pharma/talk2biomodels/configs/app/frontend/__init__.py +0 -0
- aiagents4pharma/talk2biomodels/configs/app/frontend/default.yaml +72 -0
- aiagents4pharma/talk2biomodels/configs/config.yaml +7 -0
- aiagents4pharma/talk2biomodels/configs/tools/__init__.py +5 -0
- aiagents4pharma/talk2biomodels/configs/tools/ask_question/__init__.py +3 -0
- aiagents4pharma/talk2biomodels/configs/tools/ask_question/default.yaml +30 -0
- aiagents4pharma/talk2biomodels/configs/tools/custom_plotter/__init__.py +3 -0
- aiagents4pharma/talk2biomodels/configs/tools/custom_plotter/default.yaml +8 -0
- aiagents4pharma/talk2biomodels/configs/tools/get_annotation/__init__.py +3 -0
- aiagents4pharma/talk2biomodels/configs/tools/get_annotation/default.yaml +8 -0
- aiagents4pharma/talk2biomodels/install.md +63 -0
- aiagents4pharma/talk2biomodels/models/__init__.py +5 -0
- aiagents4pharma/talk2biomodels/models/basico_model.py +125 -0
- aiagents4pharma/talk2biomodels/models/sys_bio_model.py +60 -0
- aiagents4pharma/talk2biomodels/states/__init__.py +6 -0
- aiagents4pharma/talk2biomodels/states/state_talk2biomodels.py +49 -0
- aiagents4pharma/talk2biomodels/tests/BIOMD0000000449_url.xml +1585 -0
- aiagents4pharma/talk2biomodels/tests/__init__.py +3 -0
- aiagents4pharma/talk2biomodels/tests/article_on_model_537.pdf +0 -0
- aiagents4pharma/talk2biomodels/tests/test_api.py +31 -0
- aiagents4pharma/talk2biomodels/tests/test_ask_question.py +42 -0
- aiagents4pharma/talk2biomodels/tests/test_basico_model.py +67 -0
- aiagents4pharma/talk2biomodels/tests/test_get_annotation.py +190 -0
- aiagents4pharma/talk2biomodels/tests/test_getmodelinfo.py +92 -0
- aiagents4pharma/talk2biomodels/tests/test_integration.py +116 -0
- aiagents4pharma/talk2biomodels/tests/test_load_biomodel.py +35 -0
- aiagents4pharma/talk2biomodels/tests/test_param_scan.py +71 -0
- aiagents4pharma/talk2biomodels/tests/test_query_article.py +184 -0
- aiagents4pharma/talk2biomodels/tests/test_save_model.py +47 -0
- aiagents4pharma/talk2biomodels/tests/test_search_models.py +35 -0
- aiagents4pharma/talk2biomodels/tests/test_simulate_model.py +44 -0
- aiagents4pharma/talk2biomodels/tests/test_steady_state.py +86 -0
- aiagents4pharma/talk2biomodels/tests/test_sys_bio_model.py +67 -0
- aiagents4pharma/talk2biomodels/tools/__init__.py +17 -0
- aiagents4pharma/talk2biomodels/tools/ask_question.py +125 -0
- aiagents4pharma/talk2biomodels/tools/custom_plotter.py +165 -0
- aiagents4pharma/talk2biomodels/tools/get_annotation.py +342 -0
- aiagents4pharma/talk2biomodels/tools/get_modelinfo.py +159 -0
- aiagents4pharma/talk2biomodels/tools/load_arguments.py +134 -0
- aiagents4pharma/talk2biomodels/tools/load_biomodel.py +44 -0
- aiagents4pharma/talk2biomodels/tools/parameter_scan.py +310 -0
- aiagents4pharma/talk2biomodels/tools/query_article.py +64 -0
- aiagents4pharma/talk2biomodels/tools/save_model.py +98 -0
- aiagents4pharma/talk2biomodels/tools/search_models.py +96 -0
- aiagents4pharma/talk2biomodels/tools/simulate_model.py +137 -0
- aiagents4pharma/talk2biomodels/tools/steady_state.py +187 -0
- aiagents4pharma/talk2biomodels/tools/utils.py +23 -0
- aiagents4pharma/talk2cells/README.md +1 -0
- aiagents4pharma/talk2cells/__init__.py +5 -0
- aiagents4pharma/talk2cells/agents/__init__.py +6 -0
- aiagents4pharma/talk2cells/agents/scp_agent.py +87 -0
- aiagents4pharma/talk2cells/states/__init__.py +6 -0
- aiagents4pharma/talk2cells/states/state_talk2cells.py +15 -0
- aiagents4pharma/talk2cells/tests/scp_agent/test_scp_agent.py +22 -0
- aiagents4pharma/talk2cells/tools/__init__.py +6 -0
- aiagents4pharma/talk2cells/tools/scp_agent/__init__.py +6 -0
- aiagents4pharma/talk2cells/tools/scp_agent/display_studies.py +27 -0
- aiagents4pharma/talk2cells/tools/scp_agent/search_studies.py +78 -0
- aiagents4pharma/talk2knowledgegraphs/.dockerignore +13 -0
- aiagents4pharma/talk2knowledgegraphs/Dockerfile +131 -0
- aiagents4pharma/talk2knowledgegraphs/README.md +1 -0
- aiagents4pharma/talk2knowledgegraphs/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/agents/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/agents/t2kg_agent.py +99 -0
- aiagents4pharma/talk2knowledgegraphs/configs/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/__init__.py +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/default.yaml +62 -0
- aiagents4pharma/talk2knowledgegraphs/configs/app/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/configs/app/frontend/__init__.py +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/app/frontend/default.yaml +79 -0
- aiagents4pharma/talk2knowledgegraphs/configs/config.yaml +13 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/graphrag_reasoning/__init__.py +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/graphrag_reasoning/default.yaml +24 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/multimodal_subgraph_extraction/__init__.py +0 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/multimodal_subgraph_extraction/default.yaml +33 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_extraction/__init__.py +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_extraction/default.yaml +43 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_summarization/__init__.py +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_summarization/default.yaml +9 -0
- aiagents4pharma/talk2knowledgegraphs/configs/utils/database/milvus/__init__.py +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/utils/database/milvus/default.yaml +61 -0
- aiagents4pharma/talk2knowledgegraphs/configs/utils/enrichments/ols_terms/default.yaml +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/utils/enrichments/reactome_pathways/default.yaml +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/utils/enrichments/uniprot_proteins/default.yaml +6 -0
- aiagents4pharma/talk2knowledgegraphs/configs/utils/pubchem_utils/default.yaml +5 -0
- aiagents4pharma/talk2knowledgegraphs/datasets/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/datasets/biobridge_primekg.py +607 -0
- aiagents4pharma/talk2knowledgegraphs/datasets/dataset.py +25 -0
- aiagents4pharma/talk2knowledgegraphs/datasets/primekg.py +212 -0
- aiagents4pharma/talk2knowledgegraphs/datasets/starkqa_primekg.py +210 -0
- aiagents4pharma/talk2knowledgegraphs/docker-compose/cpu/.env.example +23 -0
- aiagents4pharma/talk2knowledgegraphs/docker-compose/cpu/docker-compose.yml +93 -0
- aiagents4pharma/talk2knowledgegraphs/docker-compose/gpu/.env.example +23 -0
- aiagents4pharma/talk2knowledgegraphs/docker-compose/gpu/docker-compose.yml +108 -0
- aiagents4pharma/talk2knowledgegraphs/entrypoint.sh +180 -0
- aiagents4pharma/talk2knowledgegraphs/install.md +165 -0
- aiagents4pharma/talk2knowledgegraphs/milvus_data_dump.py +886 -0
- aiagents4pharma/talk2knowledgegraphs/states/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/states/state_talk2knowledgegraphs.py +40 -0
- aiagents4pharma/talk2knowledgegraphs/tests/__init__.py +0 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_agents_t2kg_agent.py +318 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_biobridge_primekg.py +248 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_dataset.py +33 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_primekg.py +86 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_starkqa_primekg.py +125 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_graphrag_reasoning.py +257 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_milvus_multimodal_subgraph_extraction.py +1444 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_multimodal_subgraph_extraction.py +159 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_extraction.py +152 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_summarization.py +201 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_database_milvus_connection_manager.py +812 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_embeddings.py +51 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_huggingface.py +49 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_nim_molmim.py +59 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_ollama.py +63 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_sentencetransformer.py +47 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_enrichments.py +40 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_ollama.py +94 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_ols.py +70 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_pubchem.py +45 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_reactome.py +44 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_uniprot.py +48 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_extractions_milvus_multimodal_pcst.py +759 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_kg_utils.py +78 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_pubchem_utils.py +123 -0
- aiagents4pharma/talk2knowledgegraphs/tools/__init__.py +11 -0
- aiagents4pharma/talk2knowledgegraphs/tools/graphrag_reasoning.py +138 -0
- aiagents4pharma/talk2knowledgegraphs/tools/load_arguments.py +22 -0
- aiagents4pharma/talk2knowledgegraphs/tools/milvus_multimodal_subgraph_extraction.py +965 -0
- aiagents4pharma/talk2knowledgegraphs/tools/multimodal_subgraph_extraction.py +374 -0
- aiagents4pharma/talk2knowledgegraphs/tools/subgraph_extraction.py +291 -0
- aiagents4pharma/talk2knowledgegraphs/tools/subgraph_summarization.py +123 -0
- aiagents4pharma/talk2knowledgegraphs/utils/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/utils/database/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/utils/database/milvus_connection_manager.py +586 -0
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/embeddings.py +81 -0
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/huggingface.py +111 -0
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/nim_molmim.py +54 -0
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/ollama.py +87 -0
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/sentence_transformer.py +73 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py +12 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/enrichments.py +37 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/ollama.py +129 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/ols_terms.py +89 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/pubchem_strings.py +78 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/reactome_pathways.py +71 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/uniprot_proteins.py +98 -0
- aiagents4pharma/talk2knowledgegraphs/utils/extractions/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/utils/extractions/milvus_multimodal_pcst.py +762 -0
- aiagents4pharma/talk2knowledgegraphs/utils/extractions/multimodal_pcst.py +298 -0
- aiagents4pharma/talk2knowledgegraphs/utils/extractions/pcst.py +229 -0
- aiagents4pharma/talk2knowledgegraphs/utils/kg_utils.py +67 -0
- aiagents4pharma/talk2knowledgegraphs/utils/pubchem_utils.py +104 -0
- aiagents4pharma/talk2scholars/.dockerignore +13 -0
- aiagents4pharma/talk2scholars/Dockerfile +104 -0
- aiagents4pharma/talk2scholars/README.md +1 -0
- aiagents4pharma/talk2scholars/__init__.py +7 -0
- aiagents4pharma/talk2scholars/agents/__init__.py +13 -0
- aiagents4pharma/talk2scholars/agents/main_agent.py +89 -0
- aiagents4pharma/talk2scholars/agents/paper_download_agent.py +96 -0
- aiagents4pharma/talk2scholars/agents/pdf_agent.py +101 -0
- aiagents4pharma/talk2scholars/agents/s2_agent.py +135 -0
- aiagents4pharma/talk2scholars/agents/zotero_agent.py +127 -0
- aiagents4pharma/talk2scholars/configs/__init__.py +7 -0
- aiagents4pharma/talk2scholars/configs/agents/__init__.py +7 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/__init__.py +7 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +52 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/paper_download_agent/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/paper_download_agent/default.yaml +19 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/pdf_agent/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/pdf_agent/default.yaml +19 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +44 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/zotero_agent/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/zotero_agent/default.yaml +19 -0
- aiagents4pharma/talk2scholars/configs/app/__init__.py +7 -0
- aiagents4pharma/talk2scholars/configs/app/frontend/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml +72 -0
- aiagents4pharma/talk2scholars/configs/config.yaml +16 -0
- aiagents4pharma/talk2scholars/configs/tools/__init__.py +21 -0
- aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml +26 -0
- aiagents4pharma/talk2scholars/configs/tools/paper_download/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/paper_download/default.yaml +124 -0
- aiagents4pharma/talk2scholars/configs/tools/question_and_answer/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/question_and_answer/default.yaml +62 -0
- aiagents4pharma/talk2scholars/configs/tools/retrieve_semantic_scholar_paper_id/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/retrieve_semantic_scholar_paper_id/default.yaml +12 -0
- aiagents4pharma/talk2scholars/configs/tools/search/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/search/default.yaml +26 -0
- aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml +26 -0
- aiagents4pharma/talk2scholars/configs/tools/zotero_read/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/zotero_read/default.yaml +57 -0
- aiagents4pharma/talk2scholars/configs/tools/zotero_write/__inti__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/zotero_write/default.yaml +55 -0
- aiagents4pharma/talk2scholars/docker-compose/cpu/.env.example +21 -0
- aiagents4pharma/talk2scholars/docker-compose/cpu/docker-compose.yml +90 -0
- aiagents4pharma/talk2scholars/docker-compose/gpu/.env.example +21 -0
- aiagents4pharma/talk2scholars/docker-compose/gpu/docker-compose.yml +105 -0
- aiagents4pharma/talk2scholars/install.md +122 -0
- aiagents4pharma/talk2scholars/state/__init__.py +7 -0
- aiagents4pharma/talk2scholars/state/state_talk2scholars.py +98 -0
- aiagents4pharma/talk2scholars/tests/__init__.py +3 -0
- aiagents4pharma/talk2scholars/tests/test_agents_main_agent.py +256 -0
- aiagents4pharma/talk2scholars/tests/test_agents_paper_agents_download_agent.py +139 -0
- aiagents4pharma/talk2scholars/tests/test_agents_pdf_agent.py +114 -0
- aiagents4pharma/talk2scholars/tests/test_agents_s2_agent.py +198 -0
- aiagents4pharma/talk2scholars/tests/test_agents_zotero_agent.py +160 -0
- aiagents4pharma/talk2scholars/tests/test_s2_tools_display_dataframe.py +91 -0
- aiagents4pharma/talk2scholars/tests/test_s2_tools_query_dataframe.py +191 -0
- aiagents4pharma/talk2scholars/tests/test_states_state.py +38 -0
- aiagents4pharma/talk2scholars/tests/test_tools_paper_downloader.py +507 -0
- aiagents4pharma/talk2scholars/tests/test_tools_question_and_answer_tool.py +105 -0
- aiagents4pharma/talk2scholars/tests/test_tools_s2_multi.py +307 -0
- aiagents4pharma/talk2scholars/tests/test_tools_s2_retrieve.py +67 -0
- aiagents4pharma/talk2scholars/tests/test_tools_s2_search.py +286 -0
- aiagents4pharma/talk2scholars/tests/test_tools_s2_single.py +298 -0
- aiagents4pharma/talk2scholars/tests/test_utils_arxiv_downloader.py +469 -0
- aiagents4pharma/talk2scholars/tests/test_utils_base_paper_downloader.py +598 -0
- aiagents4pharma/talk2scholars/tests/test_utils_biorxiv_downloader.py +669 -0
- aiagents4pharma/talk2scholars/tests/test_utils_medrxiv_downloader.py +500 -0
- aiagents4pharma/talk2scholars/tests/test_utils_nvidia_nim_reranker.py +117 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_answer_formatter.py +67 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_batch_processor.py +92 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_collection_manager.py +173 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_document_processor.py +68 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_generate_answer.py +72 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_gpu_detection.py +129 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_paper_loader.py +116 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_rag_pipeline.py +88 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_retrieve_chunks.py +190 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_singleton_manager.py +159 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_vector_normalization.py +121 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_vector_store.py +406 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pubmed_downloader.py +1007 -0
- aiagents4pharma/talk2scholars/tests/test_utils_read_helper_utils.py +106 -0
- aiagents4pharma/talk2scholars/tests/test_utils_s2_utils_ext_ids.py +403 -0
- aiagents4pharma/talk2scholars/tests/test_utils_tool_helper_utils.py +85 -0
- aiagents4pharma/talk2scholars/tests/test_utils_zotero_human_in_the_loop.py +266 -0
- aiagents4pharma/talk2scholars/tests/test_utils_zotero_path.py +496 -0
- aiagents4pharma/talk2scholars/tests/test_utils_zotero_pdf_downloader_utils.py +46 -0
- aiagents4pharma/talk2scholars/tests/test_utils_zotero_read.py +743 -0
- aiagents4pharma/talk2scholars/tests/test_utils_zotero_write.py +151 -0
- aiagents4pharma/talk2scholars/tools/__init__.py +9 -0
- aiagents4pharma/talk2scholars/tools/paper_download/__init__.py +12 -0
- aiagents4pharma/talk2scholars/tools/paper_download/paper_downloader.py +442 -0
- aiagents4pharma/talk2scholars/tools/paper_download/utils/__init__.py +22 -0
- aiagents4pharma/talk2scholars/tools/paper_download/utils/arxiv_downloader.py +207 -0
- aiagents4pharma/talk2scholars/tools/paper_download/utils/base_paper_downloader.py +336 -0
- aiagents4pharma/talk2scholars/tools/paper_download/utils/biorxiv_downloader.py +313 -0
- aiagents4pharma/talk2scholars/tools/paper_download/utils/medrxiv_downloader.py +196 -0
- aiagents4pharma/talk2scholars/tools/paper_download/utils/pubmed_downloader.py +323 -0
- aiagents4pharma/talk2scholars/tools/pdf/__init__.py +7 -0
- aiagents4pharma/talk2scholars/tools/pdf/question_and_answer.py +170 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/__init__.py +37 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/answer_formatter.py +62 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/batch_processor.py +198 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/collection_manager.py +172 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/document_processor.py +76 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/generate_answer.py +97 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/get_vectorstore.py +59 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/gpu_detection.py +150 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/nvidia_nim_reranker.py +97 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/paper_loader.py +123 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/rag_pipeline.py +113 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/retrieve_chunks.py +197 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/singleton_manager.py +140 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/tool_helper.py +86 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/vector_normalization.py +150 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/vector_store.py +327 -0
- aiagents4pharma/talk2scholars/tools/s2/__init__.py +21 -0
- aiagents4pharma/talk2scholars/tools/s2/display_dataframe.py +110 -0
- aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py +111 -0
- aiagents4pharma/talk2scholars/tools/s2/query_dataframe.py +233 -0
- aiagents4pharma/talk2scholars/tools/s2/retrieve_semantic_scholar_paper_id.py +128 -0
- aiagents4pharma/talk2scholars/tools/s2/search.py +101 -0
- aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py +102 -0
- aiagents4pharma/talk2scholars/tools/s2/utils/__init__.py +5 -0
- aiagents4pharma/talk2scholars/tools/s2/utils/multi_helper.py +223 -0
- aiagents4pharma/talk2scholars/tools/s2/utils/search_helper.py +205 -0
- aiagents4pharma/talk2scholars/tools/s2/utils/single_helper.py +216 -0
- aiagents4pharma/talk2scholars/tools/zotero/__init__.py +7 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/__init__.py +7 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/read_helper.py +270 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/review_helper.py +74 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/write_helper.py +194 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/zotero_path.py +180 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/zotero_pdf_downloader.py +133 -0
- aiagents4pharma/talk2scholars/tools/zotero/zotero_read.py +105 -0
- aiagents4pharma/talk2scholars/tools/zotero/zotero_review.py +162 -0
- aiagents4pharma/talk2scholars/tools/zotero/zotero_write.py +91 -0
- aiagents4pharma-0.0.0.dist-info/METADATA +335 -0
- aiagents4pharma-0.0.0.dist-info/RECORD +336 -0
- aiagents4pharma-0.0.0.dist-info/WHEEL +4 -0
- aiagents4pharma-0.0.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,886 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# pylint: skip-file
|
|
3
|
+
"""
|
|
4
|
+
Dynamic Cross-Platform PrimeKG Multimodal Data Loader for Milvus Database.
|
|
5
|
+
Automatically detects system capabilities and chooses appropriate libraries and configurations.
|
|
6
|
+
|
|
7
|
+
Supports:
|
|
8
|
+
- Windows, Linux, macOS
|
|
9
|
+
- CPU-only mode (pandas/numpy)
|
|
10
|
+
- NVIDIA GPU mode (cudf/cupy)
|
|
11
|
+
- Dynamic index selection based on hardware
|
|
12
|
+
- Automatic dependency installation
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import glob
|
|
16
|
+
import logging
|
|
17
|
+
import os
|
|
18
|
+
import platform
|
|
19
|
+
import subprocess
|
|
20
|
+
import sys
|
|
21
|
+
from typing import Any, Union
|
|
22
|
+
|
|
23
|
+
# Configure logging
|
|
24
|
+
logging.basicConfig(level=logging.INFO, format="[DATA LOADER] %(message)s")
|
|
25
|
+
logger = logging.getLogger(__name__)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class SystemDetector:
    """Detect system capabilities and choose appropriate libraries.

    Probes the OS, CPU architecture, and NVIDIA GPU availability at
    construction time, and exposes helpers to compute and install the
    matching dependency set (RAPIDS GPU wheels vs. CPU-only packages).
    """

    def __init__(self):
        # Lower-cased so comparisons elsewhere are case-stable.
        self.os_type = platform.system().lower()  # 'windows', 'linux', 'darwin'
        self.architecture = platform.machine().lower()  # 'x86_64', 'arm64', etc.
        self.has_nvidia_gpu = self._detect_nvidia_gpu()
        # CUDA is not available on macOS, so never enable GPU mode there.
        self.use_gpu = self.has_nvidia_gpu and self.os_type != "darwin"

        logger.info("System Detection Results:")
        logger.info(" OS: %s", self.os_type)
        logger.info(" Architecture: %s", self.architecture)
        logger.info(" NVIDIA GPU detected: %s", self.has_nvidia_gpu)
        logger.info(" Will use GPU acceleration: %s", self.use_gpu)

    def _detect_nvidia_gpu(self) -> bool:
        """Return True if ``nvidia-smi`` runs successfully (NVIDIA GPU present)."""
        try:
            result = subprocess.run(["nvidia-smi"], capture_output=True, text=True, timeout=10)
            return result.returncode == 0
        except (
            subprocess.TimeoutExpired,
            FileNotFoundError,
            subprocess.SubprocessError,
        ):
            # nvidia-smi missing or unusable -> assume no GPU.
            return False

    def get_required_packages(self) -> list[str]:
        """Get list of packages to install based on system capabilities - matches original logic.

        Returns:
            A list of ``pip install ...`` command strings, chosen for GPU
            (Linux + CUDA only; RAPIDS wheels are Linux-only) or CPU mode.
        """
        if self.use_gpu and self.os_type == "linux":
            # Exact package list from original script for GPU mode.
            return [
                "pip install --extra-index-url=https://pypi.nvidia.com cudf-cu12",
                "pip install --extra-index-url=https://pypi.nvidia.com dask-cudf-cu12",
                "pip install pymilvus",
                "pip install numpy",
                "pip install pandas",
                "pip install tqdm",
            ]
        # CPU-only packages.
        return [
            "pip install pymilvus",
            "pip install numpy",
            "pip install pandas",
            "pip install tqdm",
            "pip install pyarrow",
        ]

    def install_packages(self):
        """Install required packages, falling back to CPU mode if GPU wheels fail.

        Runs each ``pip install`` command in a subprocess. If a GPU package
        (cudf / dask-cudf) fails to install, GPU mode is disabled and the
        whole installation is retried with the CPU-only package list.

        Raises:
            subprocess.CalledProcessError: if a non-GPU package fails to install.
            subprocess.TimeoutExpired: if any installation exceeds 5 minutes.
        """
        packages = self.get_required_packages()

        logger.info(
            "Installing packages for %s system%s",
            self.os_type,
            " with GPU support" if self.use_gpu else "",
        )

        for package_cmd in packages:
            logger.info("Running: %s", package_cmd)
            try:
                # check=True raises CalledProcessError on any non-zero exit,
                # so there is no need to inspect result.returncode afterwards
                # (the original post-run returncode check was unreachable).
                subprocess.run(
                    package_cmd.split(),
                    capture_output=True,
                    text=True,
                    check=True,
                    timeout=300,
                )
                logger.info("Successfully installed: %s", package_cmd.split()[-1])
            except subprocess.CalledProcessError:
                logger.error("Failed to install package: %s", package_cmd.split()[-1])
                # "cudf" also matches "dask-cudf-cu12", covering both GPU wheels.
                if "cudf" in package_cmd:
                    logger.warning("GPU package installation failed, falling back to CPU mode")
                    self.use_gpu = False
                    return self.install_packages()  # Retry with CPU packages
                raise
            except subprocess.TimeoutExpired:
                logger.error("Installation timeout for package: %s", package_cmd)
                raise
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
class DynamicDataLoader:
|
|
124
|
+
"""Dynamic data loader that adapts to system capabilities."""
|
|
125
|
+
|
|
126
|
+
def __init__(self, config: dict[str, Any]):
|
|
127
|
+
"""Initialize with system detection and dynamic library loading."""
|
|
128
|
+
self.config = config
|
|
129
|
+
self.detector = SystemDetector()
|
|
130
|
+
|
|
131
|
+
# Install packages if needed
|
|
132
|
+
if config.get("auto_install_packages", True):
|
|
133
|
+
self.detector.install_packages()
|
|
134
|
+
|
|
135
|
+
# Import libraries based on system capabilities
|
|
136
|
+
self._import_libraries()
|
|
137
|
+
|
|
138
|
+
# Configuration - exact original parameters
|
|
139
|
+
self.milvus_host = config.get("milvus_host", "localhost")
|
|
140
|
+
self.milvus_port = config.get("milvus_port", "19530")
|
|
141
|
+
self.milvus_user = config.get("milvus_user", "root")
|
|
142
|
+
self.milvus_password = config.get("milvus_password", "Milvus")
|
|
143
|
+
self.milvus_database = config.get("milvus_database", "t2kg_primekg")
|
|
144
|
+
self.data_dir = config.get("data_dir", "./data")
|
|
145
|
+
self.batch_size = config.get("batch_size", 500)
|
|
146
|
+
self.chunk_size = config.get("chunk_size", 5) # Original chunk_size parameter
|
|
147
|
+
|
|
148
|
+
# Dynamic settings based on hardware
|
|
149
|
+
self.use_gpu = self.detector.use_gpu
|
|
150
|
+
self.normalize_vectors = self.use_gpu # Only normalize for GPU (original logic)
|
|
151
|
+
self.vector_index_type = "GPU_CAGRA" if self.use_gpu else "HNSW"
|
|
152
|
+
self.metric_type = "IP" if self.use_gpu else "COSINE"
|
|
153
|
+
self.vector_index_params = self._build_vector_index_params()
|
|
154
|
+
|
|
155
|
+
logger.info("Loader Configuration:")
|
|
156
|
+
logger.info(" Using GPU acceleration: %s", self.use_gpu)
|
|
157
|
+
logger.info(" Vector normalization: %s", self.normalize_vectors)
|
|
158
|
+
logger.info(" Vector index type: %s", self.vector_index_type)
|
|
159
|
+
logger.info(" Metric type: %s", self.metric_type)
|
|
160
|
+
logger.info(" Data directory: %s", self.data_dir)
|
|
161
|
+
logger.info(" Batch size: %s", self.batch_size)
|
|
162
|
+
logger.info(" Chunk size: %s", self.chunk_size)
|
|
163
|
+
|
|
164
|
+
def _import_libraries(self):
    """Dynamically import libraries - matches original script's import logic.

    Unconditionally imports pandas, numpy, tqdm and the needed pymilvus
    entry points and stores them on the instance so other methods can
    dispatch without re-importing. GPU libraries (cudf, cupy) are imported
    only when the detector reports GPU support; on a failed GPU import the
    process either exits (default) or falls back to CPU mode when the
    FORCE_CPU environment variable is "true".
    """
    # Always import base libraries
    import numpy as np
    import pandas as pd
    from pymilvus import (
        Collection,
        CollectionSchema,
        DataType,
        FieldSchema,
        connections,
        db,
        utility,
    )
    from tqdm import tqdm

    self.pd = pd
    self.np = np
    self.tqdm = tqdm
    # Keep the pymilvus entry points in one dict so call sites read uniformly.
    self.pymilvus_modules = {
        "db": db,
        "connections": connections,
        "FieldSchema": FieldSchema,
        "CollectionSchema": CollectionSchema,
        "DataType": DataType,
        "Collection": Collection,
        "utility": utility,
    }

    # Conditionally import GPU libraries - matches original error handling
    if self.detector.use_gpu:
        try:
            import cudf  # pyright: ignore
            import cupy as cp  # pyright: ignore

            self.cudf = cudf
            self.cp = cp
            logger.info("Successfully imported GPU libraries (cudf, cupy)")
        except ImportError:
            logger.error(
                "[DATA LOADER] cudf or cupy not found. "
                "Please ensure they are installed correctly."
            )
            logger.error("Import error occurred - GPU libraries not available")
            # Match original script's exit behavior for critical GPU import failure
            if not os.getenv("FORCE_CPU", "false").lower() == "true":
                logger.error(
                    "GPU libraries required but not available. "
                    "Set FORCE_CPU=true to use CPU mode."
                )
                sys.exit(1)
            else:
                logger.warning("Falling back to CPU mode due to FORCE_CPU=true")
                # Downgrade both the detector and the loader to CPU mode so
                # later dispatch (read/concat/normalize) uses pandas/numpy.
                self.detector.use_gpu = False
                self.use_gpu = False
|
|
219
|
+
|
|
220
|
+
def _read_dataframe(
    self, file_path: str, columns: list[str] | None = None
) -> Union["pd.DataFrame", "cudf.DataFrame"]:  # type: ignore[reportUndefinedVariable] # noqa: F821
    """Load one parquet file with the backend matching the active mode.

    Reads via cudf in GPU mode and via pandas otherwise; the optional
    column subset is forwarded to the reader unchanged.
    """
    backend = self.cudf if self.use_gpu else self.pd
    return backend.read_parquet(file_path, columns=columns)
|
|
228
|
+
|
|
229
|
+
def _concat_dataframes(
|
|
230
|
+
self, df_list: list, ignore_index: bool = True
|
|
231
|
+
) -> Union["pd.DataFrame", "cudf.DataFrame"]: # type: ignore[reportUndefinedVariable] # noqa: F821
|
|
232
|
+
"""Concatenate dataframes using appropriate library."""
|
|
233
|
+
if self.use_gpu:
|
|
234
|
+
return self.cudf.concat(df_list, ignore_index=ignore_index)
|
|
235
|
+
else:
|
|
236
|
+
return self.pd.concat(df_list, ignore_index=ignore_index)
|
|
237
|
+
|
|
238
|
+
def _normalize_matrix(self, matrix, axis: int = 1):
|
|
239
|
+
"""Normalize matrix using appropriate library."""
|
|
240
|
+
if not self.normalize_vectors:
|
|
241
|
+
return matrix
|
|
242
|
+
|
|
243
|
+
if self.use_gpu:
|
|
244
|
+
# Use cupy for GPU
|
|
245
|
+
matrix_cp = self.cp.asarray(matrix).astype(self.cp.float32)
|
|
246
|
+
norms = self.cp.linalg.norm(matrix_cp, axis=axis, keepdims=True)
|
|
247
|
+
return matrix_cp / norms
|
|
248
|
+
else:
|
|
249
|
+
# Use numpy for CPU (but we don't normalize for CPU/COSINE)
|
|
250
|
+
return matrix
|
|
251
|
+
|
|
252
|
+
def _extract_embeddings(self, df, column_name: str):
|
|
253
|
+
"""Extract embeddings and convert to appropriate format."""
|
|
254
|
+
if self.use_gpu:
|
|
255
|
+
# cuDF list extraction
|
|
256
|
+
emb_data = self.cp.asarray(df[column_name].list.leaves).astype(self.cp.float32)
|
|
257
|
+
return emb_data.reshape(df.shape[0], -1)
|
|
258
|
+
else:
|
|
259
|
+
# pandas extraction
|
|
260
|
+
emb_list = []
|
|
261
|
+
for emb in df[column_name]:
|
|
262
|
+
if isinstance(emb, list):
|
|
263
|
+
emb_list.append(emb)
|
|
264
|
+
else:
|
|
265
|
+
emb_list.append(emb.tolist() if hasattr(emb, "tolist") else emb)
|
|
266
|
+
return self.np.array(emb_list, dtype=self.np.float32)
|
|
267
|
+
|
|
268
|
+
def _to_list(self, data):
|
|
269
|
+
"""Convert data to list format for Milvus insertion."""
|
|
270
|
+
if self.use_gpu:
|
|
271
|
+
# For cuDF data, use to_arrow().to_pylist()
|
|
272
|
+
if hasattr(data, "to_arrow"):
|
|
273
|
+
return data.to_arrow().to_pylist()
|
|
274
|
+
elif hasattr(data, "tolist"):
|
|
275
|
+
# Fallback for cupy arrays
|
|
276
|
+
return data.tolist()
|
|
277
|
+
else:
|
|
278
|
+
return list(data)
|
|
279
|
+
else:
|
|
280
|
+
# For pandas/numpy data
|
|
281
|
+
if hasattr(data, "tolist"):
|
|
282
|
+
return data.tolist()
|
|
283
|
+
elif hasattr(data, "to_arrow"):
|
|
284
|
+
return data.to_arrow().to_pylist()
|
|
285
|
+
else:
|
|
286
|
+
return list(data)
|
|
287
|
+
|
|
288
|
+
def _build_vector_index_params(self) -> dict[str, Any]:
|
|
289
|
+
"""Return index params tuned for the selected backend."""
|
|
290
|
+
base_params: dict[str, Any] = {
|
|
291
|
+
"index_type": self.vector_index_type,
|
|
292
|
+
"metric_type": self.metric_type,
|
|
293
|
+
}
|
|
294
|
+
|
|
295
|
+
if self.vector_index_type == "GPU_CAGRA":
|
|
296
|
+
base_params["params"] = {
|
|
297
|
+
"graph_degree": int(os.getenv("CAGRA_GRAPH_DEGREE", "32")),
|
|
298
|
+
"intermediate_graph_degree": int(
|
|
299
|
+
os.getenv("CAGRA_INTERMEDIATE_GRAPH_DEGREE", "40")
|
|
300
|
+
),
|
|
301
|
+
"search_width": int(os.getenv("CAGRA_SEARCH_WIDTH", "64")),
|
|
302
|
+
}
|
|
303
|
+
elif self.vector_index_type == "HNSW":
|
|
304
|
+
base_params["params"] = {
|
|
305
|
+
"M": int(os.getenv("HNSW_M", "16")),
|
|
306
|
+
"efConstruction": int(os.getenv("HNSW_EF_CONSTRUCTION", "200")),
|
|
307
|
+
}
|
|
308
|
+
|
|
309
|
+
return base_params
|
|
310
|
+
|
|
311
|
+
def connect_to_milvus(self):
    """Open the default Milvus connection and select the target database.

    Connects with the configured host/port/credentials, creates the
    configured database if it does not exist yet, then switches the
    session so subsequent collection operations target it.
    """
    logger.info("Connecting to Milvus at %s:%s", self.milvus_host, self.milvus_port)

    connections_mod = self.pymilvus_modules["connections"]
    db_mod = self.pymilvus_modules["db"]

    connections_mod.connect(
        alias="default",
        host=self.milvus_host,
        port=self.milvus_port,
        user=self.milvus_user,
        password=self.milvus_password,
    )

    # First run against a fresh server: the database must be created.
    if self.milvus_database not in db_mod.list_database():
        logger.info("Creating database: %s", self.milvus_database)
        db_mod.create_database(self.milvus_database)

    # Switch to the desired database
    db_mod.using_database(self.milvus_database)
    logger.info("Using database: %s", self.milvus_database)
|
|
331
|
+
|
|
332
|
+
def load_graph_data(self):
    """Load the node/edge parquet files from the data directory.

    Returns a nested dict ``graph[element][stage]`` for elements
    ("nodes", "edges") and stages ("enrichment", "embedding"). Edge
    embeddings are kept as a *list* of per-chunk dataframes (reading
    ``self.chunk_size`` files at a time) because of their size; every
    other element/stage combination is concatenated into one dataframe.

    Raises:
        FileNotFoundError: if the configured data directory is missing.
    """
    logger.info("Loading graph data from: %s", self.data_dir)

    if not os.path.exists(self.data_dir):
        raise FileNotFoundError(f"Data directory not found: {self.data_dir}")

    graph = {}
    for element in ["nodes", "edges"]:
        graph[element] = {}
        for stage in ["enrichment", "embedding"]:
            logger.info("Processing %s %s", element, stage)

            pattern = os.path.join(self.data_dir, element, stage, "*.parquet.gzip")
            file_list = glob.glob(pattern)
            logger.info("Found %d files for %s %s", len(file_list), element, stage)

            if not file_list:
                logger.warning("No files found for %s %s", element, stage)
                continue

            if element == "edges" and stage == "embedding":
                # Edge embeddings are too large for a single frame: keep a
                # list of per-chunk frames, only the two needed columns.
                chunks = []
                step = self.chunk_size
                for start in range(0, len(file_list), step):
                    frames = [
                        self._read_dataframe(path, columns=["triplet_index", "edge_emb"])
                        for path in file_list[start : start + step]
                    ]
                    chunks.append(self._concat_dataframes(frames, ignore_index=True))
                graph[element][stage] = chunks
            else:
                # All other combinations fit in one concatenated frame.
                frames = [self._read_dataframe(path) for path in file_list]
                graph[element][stage] = self._concat_dataframes(frames, ignore_index=True)

    logger.info("Graph data loaded successfully")
    return graph
|
|
374
|
+
|
|
375
|
+
def _get_embedding_dimension(self, df, column_name: str) -> int:
|
|
376
|
+
"""Get embedding dimension using original script's exact logic."""
|
|
377
|
+
first_emb = df.iloc[0][column_name]
|
|
378
|
+
if self.use_gpu:
|
|
379
|
+
# cuDF format - matches original:
|
|
380
|
+
# len(nodes_df.iloc[0]['desc_emb'].to_arrow().to_pylist()[0])
|
|
381
|
+
return len(first_emb.to_arrow().to_pylist()[0])
|
|
382
|
+
else:
|
|
383
|
+
# pandas format
|
|
384
|
+
if isinstance(first_emb, list):
|
|
385
|
+
return len(first_emb)
|
|
386
|
+
else:
|
|
387
|
+
return len(first_emb.tolist() if hasattr(first_emb, "tolist") else first_emb)
|
|
388
|
+
|
|
389
|
+
def create_nodes_collection(self, nodes_df):
    """Create and populate the main nodes collection.

    Builds (or reuses) the ``<database>_nodes`` collection with scalar
    fields plus a ``desc_emb`` float vector, creates STL_SORT/INVERTED
    scalar indexes and the backend-specific vector index, then inserts
    the dataframe contents in batches of ``self.batch_size``.

    Args:
        nodes_df: dataframe with node_index, node_id, node_name,
            node_type, desc and desc_emb columns (pandas or cuDF).
    """
    logger.info("Creating main nodes collection...")
    node_coll_name = f"{self.milvus_database}_nodes"

    # Get embedding dimension
    emb_dim = self._get_embedding_dimension(nodes_df, "desc_emb")

    # Field order here fixes the column order expected by insert() below.
    node_fields = [
        self.pymilvus_modules["FieldSchema"](
            name="node_index",
            dtype=self.pymilvus_modules["DataType"].INT64,
            is_primary=True,
        ),
        self.pymilvus_modules["FieldSchema"](
            name="node_id",
            dtype=self.pymilvus_modules["DataType"].VARCHAR,
            max_length=1024,
        ),
        self.pymilvus_modules["FieldSchema"](
            name="node_name",
            dtype=self.pymilvus_modules["DataType"].VARCHAR,
            max_length=1024,
            enable_analyzer=True,
            enable_match=True,
        ),
        self.pymilvus_modules["FieldSchema"](
            name="node_type",
            dtype=self.pymilvus_modules["DataType"].VARCHAR,
            max_length=1024,
            enable_analyzer=True,
            enable_match=True,
        ),
        self.pymilvus_modules["FieldSchema"](
            name="desc",
            dtype=self.pymilvus_modules["DataType"].VARCHAR,
            max_length=40960,
            enable_analyzer=True,
            enable_match=True,
        ),
        self.pymilvus_modules["FieldSchema"](
            name="desc_emb",
            dtype=self.pymilvus_modules["DataType"].FLOAT_VECTOR,
            dim=emb_dim,
        ),
    ]

    schema = self.pymilvus_modules["CollectionSchema"](
        fields=node_fields, description=f"Schema for collection {node_coll_name}"
    )

    # Create collection if it doesn't exist
    if not self.pymilvus_modules["utility"].has_collection(node_coll_name):
        collection = self.pymilvus_modules["Collection"](name=node_coll_name, schema=schema)
    else:
        collection = self.pymilvus_modules["Collection"](name=node_coll_name)

    # Create indexes with dynamic parameters
    collection.create_index(
        field_name="node_index",
        index_params={"index_type": "STL_SORT"},
        index_name="node_index_index",
    )
    collection.create_index(
        field_name="node_name",
        index_params={"index_type": "INVERTED"},
        index_name="node_name_index",
    )
    collection.create_index(
        field_name="node_type",
        index_params={"index_type": "INVERTED"},
        index_name="node_type_index",
    )
    collection.create_index(
        field_name="desc",
        index_params={"index_type": "INVERTED"},
        index_name="desc_index",
    )
    collection.create_index(
        field_name="desc_emb",
        # .copy() so a pymilvus mutation can't leak into later calls.
        index_params=self.vector_index_params.copy(),
        index_name="desc_emb_index",
    )

    # Prepare and insert data
    desc_emb_data = self._extract_embeddings(nodes_df, "desc_emb")
    desc_emb_normalized = self._normalize_matrix(desc_emb_data, axis=1)

    data = [
        self._to_list(nodes_df["node_index"]),
        self._to_list(nodes_df["node_id"]),
        self._to_list(nodes_df["node_name"]),
        self._to_list(nodes_df["node_type"]),
        self._to_list(nodes_df["desc"]),
        self._to_list(desc_emb_normalized),
    ]

    # Insert data in batches
    total = len(data[0])
    for i in self.tqdm(range(0, total, self.batch_size), desc="Inserting nodes"):
        batch = [col[i : i + self.batch_size] for col in data]
        collection.insert(batch)

    collection.flush()
    logger.info("Nodes collection created with %d entities", collection.num_entities)
|
|
494
|
+
|
|
495
|
+
def create_node_type_collections(self, nodes_df):
    """Create separate collections for each node type.

    Groups the node dataframe by ``node_type`` and, for each group,
    builds (or reuses) a ``<database>_nodes_<type>`` collection that
    additionally carries ``feat``/``feat_emb`` fields, creates the same
    index set as the main collection plus a ``feat_emb`` vector index,
    and inserts the group's rows in batches of ``self.batch_size``.

    Args:
        nodes_df: dataframe with node_index, node_id, node_name,
            node_type, desc, desc_emb, feat and feat_emb columns.
    """
    logger.info("Creating node type-specific collections...")

    for node_type, nodes_df_ in self.tqdm(
        nodes_df.groupby("node_type"), desc="Processing node types"
    ):
        # Slashes in type names would break collection naming.
        node_coll_name = f"{self.milvus_database}_nodes_{node_type.replace('/', '_')}"

        # Get embedding dimensions
        desc_dim = self._get_embedding_dimension(nodes_df_, "desc_emb")
        feat_dim = self._get_embedding_dimension(nodes_df_, "feat_emb")

        # Field order here fixes the column order expected by insert() below.
        node_fields = [
            self.pymilvus_modules["FieldSchema"](
                name="node_index",
                dtype=self.pymilvus_modules["DataType"].INT64,
                is_primary=True,
                auto_id=False,
            ),
            self.pymilvus_modules["FieldSchema"](
                name="node_id",
                dtype=self.pymilvus_modules["DataType"].VARCHAR,
                max_length=1024,
            ),
            self.pymilvus_modules["FieldSchema"](
                name="node_name",
                dtype=self.pymilvus_modules["DataType"].VARCHAR,
                max_length=1024,
                enable_analyzer=True,
                enable_match=True,
            ),
            self.pymilvus_modules["FieldSchema"](
                name="node_type",
                dtype=self.pymilvus_modules["DataType"].VARCHAR,
                max_length=1024,
                enable_analyzer=True,
                enable_match=True,
            ),
            self.pymilvus_modules["FieldSchema"](
                name="desc",
                dtype=self.pymilvus_modules["DataType"].VARCHAR,
                max_length=40960,
                enable_analyzer=True,
                enable_match=True,
            ),
            self.pymilvus_modules["FieldSchema"](
                name="desc_emb",
                dtype=self.pymilvus_modules["DataType"].FLOAT_VECTOR,
                dim=desc_dim,
            ),
            self.pymilvus_modules["FieldSchema"](
                name="feat",
                dtype=self.pymilvus_modules["DataType"].VARCHAR,
                max_length=40960,
                enable_analyzer=True,
                enable_match=True,
            ),
            self.pymilvus_modules["FieldSchema"](
                name="feat_emb",
                dtype=self.pymilvus_modules["DataType"].FLOAT_VECTOR,
                dim=feat_dim,
            ),
        ]

        schema = self.pymilvus_modules["CollectionSchema"](
            fields=node_fields,
            description=f"schema for collection {node_coll_name}",
        )

        if not self.pymilvus_modules["utility"].has_collection(node_coll_name):
            collection = self.pymilvus_modules["Collection"](name=node_coll_name, schema=schema)
        else:
            collection = self.pymilvus_modules["Collection"](name=node_coll_name)

        # Create indexes with dynamic parameters
        collection.create_index(
            field_name="node_index",
            index_params={"index_type": "STL_SORT"},
            index_name="node_index_index",
        )
        collection.create_index(
            field_name="node_name",
            index_params={"index_type": "INVERTED"},
            index_name="node_name_index",
        )
        collection.create_index(
            field_name="node_type",
            index_params={"index_type": "INVERTED"},
            index_name="node_type_index",
        )
        collection.create_index(
            field_name="desc",
            index_params={"index_type": "INVERTED"},
            index_name="desc_index",
        )
        collection.create_index(
            field_name="desc_emb",
            index_params=self.vector_index_params.copy(),
            index_name="desc_emb_index",
        )
        collection.create_index(
            field_name="feat_emb",
            index_params=self.vector_index_params.copy(),
            index_name="feat_emb_index",
        )

        # Prepare data
        desc_emb_data = self._extract_embeddings(nodes_df_, "desc_emb")
        feat_emb_data = self._extract_embeddings(nodes_df_, "feat_emb")

        desc_emb_normalized = self._normalize_matrix(desc_emb_data, axis=1)
        feat_emb_normalized = self._normalize_matrix(feat_emb_data, axis=1)

        data = [
            self._to_list(nodes_df_["node_index"]),
            self._to_list(nodes_df_["node_id"]),
            self._to_list(nodes_df_["node_name"]),
            self._to_list(nodes_df_["node_type"]),
            self._to_list(nodes_df_["desc"]),
            self._to_list(desc_emb_normalized),
            self._to_list(nodes_df_["feat"]),
            self._to_list(feat_emb_normalized),
        ]

        # Insert data in batches
        total_rows = len(data[0])
        for i in range(0, total_rows, self.batch_size):
            batch = [col[i : i + self.batch_size] for col in data]
            collection.insert(batch)

        collection.flush()
        logger.info(
            "Collection %s created with %d entities",
            node_coll_name,
            collection.num_entities,
        )
|
|
632
|
+
|
|
633
|
+
def create_edges_collection(self, edges_enrichment_df, edges_embedding_df: list):
    """Create and populate the edges collection - exact original logic.

    Builds (or reuses) the ``<database>_edges`` collection, creates its
    scalar and vector indexes, then walks the list of embedding chunk
    dataframes: each chunk is inner-joined with the enrichment frame on
    ``triplet_index`` and inserted in batches of ``self.batch_size``.

    Args:
        edges_enrichment_df: single dataframe of edge metadata
            (triplet_index, head/tail ids and indexes, edge_type_str,
            display_relation, feat).
        edges_embedding_df: list of chunk dataframes, each with
            triplet_index and edge_emb columns (see load_graph_data).
    """
    logger.info("Creating edges collection...")

    edge_coll_name = f"{self.milvus_database}_edges"

    # Get embedding dimension from first chunk - exact original logic
    if self.use_gpu:
        emb_dim = len(edges_embedding_df[0].loc[0, "edge_emb"])  # Original cudf access
    else:
        first_edge_emb = edges_embedding_df[0].iloc[0]["edge_emb"]
        emb_dim = (
            len(first_edge_emb)
            if isinstance(first_edge_emb, list)
            else len(first_edge_emb.tolist())
        )

    # Field order here fixes the column order expected by insert() below.
    edge_fields = [
        self.pymilvus_modules["FieldSchema"](
            name="triplet_index",
            dtype=self.pymilvus_modules["DataType"].INT64,
            is_primary=True,
            auto_id=False,
        ),
        self.pymilvus_modules["FieldSchema"](
            name="head_id",
            dtype=self.pymilvus_modules["DataType"].VARCHAR,
            max_length=1024,
        ),
        self.pymilvus_modules["FieldSchema"](
            name="head_index", dtype=self.pymilvus_modules["DataType"].INT64
        ),
        self.pymilvus_modules["FieldSchema"](
            name="tail_id",
            dtype=self.pymilvus_modules["DataType"].VARCHAR,
            max_length=1024,
        ),
        self.pymilvus_modules["FieldSchema"](
            name="tail_index", dtype=self.pymilvus_modules["DataType"].INT64
        ),
        self.pymilvus_modules["FieldSchema"](
            name="edge_type",
            dtype=self.pymilvus_modules["DataType"].VARCHAR,
            max_length=1024,
        ),
        self.pymilvus_modules["FieldSchema"](
            name="display_relation",
            dtype=self.pymilvus_modules["DataType"].VARCHAR,
            max_length=1024,
        ),
        self.pymilvus_modules["FieldSchema"](
            name="feat",
            dtype=self.pymilvus_modules["DataType"].VARCHAR,
            max_length=40960,
        ),
        self.pymilvus_modules["FieldSchema"](
            name="feat_emb",
            dtype=self.pymilvus_modules["DataType"].FLOAT_VECTOR,
            dim=emb_dim,
        ),
    ]

    edge_schema = self.pymilvus_modules["CollectionSchema"](
        fields=edge_fields, description="Schema for edges collection"
    )

    if not self.pymilvus_modules["utility"].has_collection(edge_coll_name):
        collection = self.pymilvus_modules["Collection"](
            name=edge_coll_name, schema=edge_schema
        )
    else:
        collection = self.pymilvus_modules["Collection"](name=edge_coll_name)

    # Create indexes with dynamic parameters
    collection.create_index(
        field_name="triplet_index",
        index_params={"index_type": "STL_SORT"},
        index_name="triplet_index_index",
    )
    collection.create_index(
        field_name="head_index",
        index_params={"index_type": "STL_SORT"},
        index_name="head_index_index",
    )
    collection.create_index(
        field_name="tail_index",
        index_params={"index_type": "STL_SORT"},
        index_name="tail_index_index",
    )
    collection.create_index(
        field_name="feat_emb",
        index_params=self.vector_index_params.copy(),
        index_name="feat_emb_index",
    )

    # Iterate over chunked edges embedding df - exact original logic
    for edges_df in self.tqdm(edges_embedding_df, desc="Processing edge chunks"):
        # Merge enrichment with embedding
        merged_edges_df = edges_enrichment_df.merge(
            edges_df[["triplet_index", "edge_emb"]], on="triplet_index", how="inner"
        )

        # Prepare embeddings - exact original logic for GPU
        if self.use_gpu:
            edge_emb_cp = (
                self.cp.asarray(merged_edges_df["edge_emb"].list.leaves)
                .astype(self.cp.float32)
                .reshape(merged_edges_df.shape[0], -1)
            )
            edge_emb_norm = self._normalize_matrix(edge_emb_cp, axis=1)
        else:
            edge_emb_data = self._extract_embeddings(merged_edges_df, "edge_emb")
            edge_emb_norm = self._normalize_matrix(edge_emb_data, axis=1)

        data = [
            self._to_list(merged_edges_df["triplet_index"]),
            self._to_list(merged_edges_df["head_id"]),
            self._to_list(merged_edges_df["head_index"]),
            self._to_list(merged_edges_df["tail_id"]),
            self._to_list(merged_edges_df["tail_index"]),
            self._to_list(merged_edges_df["edge_type_str"]),  # Original field name
            self._to_list(merged_edges_df["display_relation"]),
            self._to_list(merged_edges_df["feat"]),
            self._to_list(edge_emb_norm),
        ]

        # Insert data in batches
        total = len(data[0])
        for i in self.tqdm(range(0, total, self.batch_size), desc="Inserting edges"):
            batch_data = [d[i : i + self.batch_size] for d in data]
            collection.insert(batch_data)

    collection.flush()
    logger.info("Edges collection created with %d entities", collection.num_entities)
|
|
767
|
+
|
|
768
|
+
def run(self):
    """End-to-end load: connect, read parquet data, build all collections.

    Any exception is logged with its traceback and re-raised so the
    caller (main) can decide the exit behavior.
    """
    try:
        logger.info("Starting Dynamic Milvus data loading process...")
        logger.info("System: %s %s", self.detector.os_type, self.detector.architecture)
        logger.info("GPU acceleration: %s", self.use_gpu)

        # Connect to Milvus
        self.connect_to_milvus()

        # Load graph data
        graph = self.load_graph_data()

        # Prepare data
        logger.info("Data Preparation started...")
        # Get nodes enrichment and embedding dataframes
        nodes_enrichment_df = graph["nodes"]["enrichment"]
        nodes_embedding_df = graph["nodes"]["embedding"]

        # Get edges enrichment and embedding dataframes
        edges_enrichment_df = graph["edges"]["enrichment"]
        edges_embedding_df = graph["edges"]["embedding"]  # List of dataframes

        # Attach both embedding columns to the enriched node table.
        merged_nodes_df = nodes_enrichment_df.merge(
            nodes_embedding_df[["node_id", "desc_emb", "feat_emb"]],
            on="node_id",
            how="left",
        )

        # Create collections and load data
        self.create_nodes_collection(merged_nodes_df)
        self.create_node_type_collections(merged_nodes_df)
        self.create_edges_collection(edges_enrichment_df, edges_embedding_df)

        # List all collections for verification
        logger.info("Data loading completed successfully!")
        logger.info("Created collections:")
        for coll_name in self.pymilvus_modules["utility"].list_collections():
            coll_handle = self.pymilvus_modules["Collection"](name=coll_name)
            logger.info(" %s: %d entities", coll_name, coll_handle.num_entities)

    except Exception:
        logger.exception("Error occurred during data loading")
        raise
|
|
813
|
+
|
|
814
|
+
|
|
815
|
+
# Keys containing any of these markers are masked when logging the config.
_SENSITIVE_MARKERS = ("password", "user", "token", "key", "secret")


def _build_config(default_data_dir: str) -> dict:
    """Assemble the loader configuration from environment variables.

    Mirrors the original script's defaults exactly; *default_data_dir*
    is used when DATA_DIR is not set.
    """
    return {
        "milvus_host": os.getenv("MILVUS_HOST", "localhost"),
        "milvus_port": os.getenv("MILVUS_PORT", "19530"),
        "milvus_user": os.getenv("MILVUS_USER", "root"),
        "milvus_password": os.getenv("MILVUS_PASSWORD", "Milvus"),
        "milvus_database": os.getenv("MILVUS_DATABASE", "t2kg_primekg"),
        "data_dir": os.getenv("DATA_DIR", default_data_dir),
        "batch_size": int(os.getenv("BATCH_SIZE", "500")),
        "chunk_size": int(os.getenv("CHUNK_SIZE", "5")),
        "auto_install_packages": os.getenv("AUTO_INSTALL_PACKAGES", "true").lower() == "true",
    }


def _log_startup_info(config: dict, force_cpu: bool, script_dir: str, default_data_dir: str) -> None:
    """Log the configuration (masking credential-like values) and environment."""
    logger.info("=== Dynamic Milvus Data Loader ===")
    logger.info("Configuration:")
    for key, value in config.items():
        # Don't log sensitive information
        if any(marker in key.lower() for marker in _SENSITIVE_MARKERS):
            logger.info(" %s: %s", key, "*" * min(8, len(str(value))))
        else:
            logger.info(" %s: %s", key, value)

    # Additional environment info
    logger.info("Environment:")
    logger.info(" Python version: %s", sys.version)
    logger.info(" Platform: %s", platform.platform())
    logger.info(" Force CPU mode: %s", force_cpu)
    logger.info(" Script directory: %s", script_dir)
    logger.info(" Default data directory: %s", default_data_dir)


def main():
    """Main function to run the dynamic data loader.

    Builds the configuration from environment variables, optionally
    forces CPU mode (FORCE_CPU=true), logs the startup state, then runs
    the DynamicDataLoader. Exits with status 1 on interruption or any
    fatal error.
    """
    # Resolve the fallback data path relative to this script's location
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_dir = os.path.join(script_dir, "tests/files/biobridge_multimodal/")

    config = _build_config(default_data_dir)

    # Override detection for testing/forcing specific modes
    force_cpu = os.getenv("FORCE_CPU", "false").lower() == "true"
    if force_cpu:
        logger.info("FORCE_CPU environment variable set - forcing CPU mode")

    _log_startup_info(config, force_cpu, script_dir, default_data_dir)

    try:
        # Create and run dynamic data loader
        loader = DynamicDataLoader(config)

        # Override GPU detection if forced: CPU mode uses HNSW/COSINE and
        # skips vector normalization.
        if force_cpu:
            loader.detector.use_gpu = False
            loader.use_gpu = False
            loader.normalize_vectors = False
            loader.vector_index_type = "HNSW"
            loader.metric_type = "COSINE"
            logger.info("Forced CPU mode - updated loader settings")

        # Run the data loading process
        loader.run()

        logger.info("=== Data Loading Completed Successfully ===")

    except KeyboardInterrupt:
        logger.info("Data loading interrupted by user")
        sys.exit(1)
    except Exception:
        logger.exception("Fatal error occurred during data loading")
        sys.exit(1)
|
|
883
|
+
|
|
884
|
+
|
|
885
|
+
# Script entry point: run the loader only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
|