evalvault 1.67.0__tar.gz → 1.68.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (858) hide show
  1. {evalvault-1.67.0 → evalvault-1.68.0}/.dockerignore +2 -1
  2. evalvault-1.68.0/.env.offline.example +58 -0
  3. {evalvault-1.67.0 → evalvault-1.68.0}/PKG-INFO +1 -1
  4. {evalvault-1.67.0 → evalvault-1.68.0}/data/rag/user_guide_bm25.json +18 -3
  5. evalvault-1.68.0/docker-compose.offline.yml +118 -0
  6. {evalvault-1.67.0 → evalvault-1.68.0}/docs/INDEX.md +1 -0
  7. evalvault-1.68.0/docs/guides/OFFLINE_DOCKER.md +158 -0
  8. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/USER_GUIDE.md +6 -0
  9. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/03_data_flow.md +14 -0
  10. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/12_operations.md +9 -0
  11. evalvault-1.68.0/frontend/Dockerfile +20 -0
  12. evalvault-1.68.0/frontend/nginx.conf +20 -0
  13. {evalvault-1.67.0 → evalvault-1.68.0}/pyproject.toml +1 -1
  14. evalvault-1.68.0/scripts/offline/export_images.sh +26 -0
  15. evalvault-1.68.0/scripts/offline/import_images.sh +16 -0
  16. evalvault-1.68.0/scripts/offline/smoke_test.sh +16 -0
  17. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/dataset/base.py +40 -0
  18. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/dataset/csv_loader.py +16 -0
  19. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/dataset/excel_loader.py +16 -0
  20. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/config/settings.py +15 -4
  21. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/stage.py +22 -6
  22. {evalvault-1.67.0 → evalvault-1.68.0}/tests/integration/test_cli_integration.py +3 -3
  23. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_data_loaders.py +64 -0
  24. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_mlflow_tracker.py +2 -1
  25. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_settings.py +47 -0
  26. evalvault-1.68.0/tests/unit/test_stage_event_schema.py +20 -0
  27. {evalvault-1.67.0 → evalvault-1.68.0}/uv.lock +1 -1
  28. {evalvault-1.67.0 → evalvault-1.68.0}/.env.example +0 -0
  29. {evalvault-1.67.0 → evalvault-1.68.0}/.github/workflows/ci.yml +0 -0
  30. {evalvault-1.67.0 → evalvault-1.68.0}/.github/workflows/release.yml +0 -0
  31. {evalvault-1.67.0 → evalvault-1.68.0}/.github/workflows/stale.yml +0 -0
  32. {evalvault-1.67.0 → evalvault-1.68.0}/.gitignore +0 -0
  33. {evalvault-1.67.0 → evalvault-1.68.0}/.pre-commit-config.yaml +0 -0
  34. {evalvault-1.67.0 → evalvault-1.68.0}/.python-version +0 -0
  35. {evalvault-1.67.0 → evalvault-1.68.0}/AGENTS.md +0 -0
  36. {evalvault-1.67.0 → evalvault-1.68.0}/CHANGELOG.md +0 -0
  37. {evalvault-1.67.0 → evalvault-1.68.0}/CLAUDE.md +0 -0
  38. {evalvault-1.67.0 → evalvault-1.68.0}/CODE_OF_CONDUCT.md +0 -0
  39. {evalvault-1.67.0 → evalvault-1.68.0}/CONTRIBUTING.md +0 -0
  40. {evalvault-1.67.0 → evalvault-1.68.0}/Dockerfile +0 -0
  41. {evalvault-1.67.0 → evalvault-1.68.0}/LICENSE.md +0 -0
  42. {evalvault-1.67.0 → evalvault-1.68.0}/README.en.md +0 -0
  43. {evalvault-1.67.0 → evalvault-1.68.0}/README.md +0 -0
  44. {evalvault-1.67.0 → evalvault-1.68.0}/SECURITY.md +0 -0
  45. {evalvault-1.67.0 → evalvault-1.68.0}/agent/README.md +0 -0
  46. {evalvault-1.67.0 → evalvault-1.68.0}/agent/agent.py +0 -0
  47. {evalvault-1.67.0 → evalvault-1.68.0}/agent/client.py +0 -0
  48. {evalvault-1.67.0 → evalvault-1.68.0}/agent/config.py +0 -0
  49. {evalvault-1.67.0 → evalvault-1.68.0}/agent/main.py +0 -0
  50. {evalvault-1.67.0 → evalvault-1.68.0}/agent/memory/README.md +0 -0
  51. {evalvault-1.67.0 → evalvault-1.68.0}/agent/memory/shared/decisions.md +0 -0
  52. {evalvault-1.67.0 → evalvault-1.68.0}/agent/memory/shared/dependencies.md +0 -0
  53. {evalvault-1.67.0 → evalvault-1.68.0}/agent/memory/templates/coordinator_guide.md +0 -0
  54. {evalvault-1.67.0 → evalvault-1.68.0}/agent/memory/templates/work_log_template.md +0 -0
  55. {evalvault-1.67.0 → evalvault-1.68.0}/agent/memory_integration.py +0 -0
  56. {evalvault-1.67.0 → evalvault-1.68.0}/agent/progress.py +0 -0
  57. {evalvault-1.67.0 → evalvault-1.68.0}/agent/prompts/app_spec.txt +0 -0
  58. {evalvault-1.67.0 → evalvault-1.68.0}/agent/prompts/baseline.txt +0 -0
  59. {evalvault-1.67.0 → evalvault-1.68.0}/agent/prompts/coding_prompt.md +0 -0
  60. {evalvault-1.67.0 → evalvault-1.68.0}/agent/prompts/existing_project_prompt.md +0 -0
  61. {evalvault-1.67.0 → evalvault-1.68.0}/agent/prompts/improvement/architecture_prompt.md +0 -0
  62. {evalvault-1.67.0 → evalvault-1.68.0}/agent/prompts/improvement/base_prompt.md +0 -0
  63. {evalvault-1.67.0 → evalvault-1.68.0}/agent/prompts/improvement/coordinator_prompt.md +0 -0
  64. {evalvault-1.67.0 → evalvault-1.68.0}/agent/prompts/improvement/observability_prompt.md +0 -0
  65. {evalvault-1.67.0 → evalvault-1.68.0}/agent/prompts/initializer_prompt.md +0 -0
  66. {evalvault-1.67.0 → evalvault-1.68.0}/agent/prompts/prompt_manifest.json +0 -0
  67. {evalvault-1.67.0 → evalvault-1.68.0}/agent/prompts/system.txt +0 -0
  68. {evalvault-1.67.0 → evalvault-1.68.0}/agent/prompts.py +0 -0
  69. {evalvault-1.67.0 → evalvault-1.68.0}/agent/requirements.txt +0 -0
  70. {evalvault-1.67.0 → evalvault-1.68.0}/agent/security.py +0 -0
  71. {evalvault-1.67.0 → evalvault-1.68.0}/config/domains/insurance/memory.yaml +0 -0
  72. {evalvault-1.67.0 → evalvault-1.68.0}/config/domains/insurance/terms_dictionary_en.json +0 -0
  73. {evalvault-1.67.0 → evalvault-1.68.0}/config/domains/insurance/terms_dictionary_ko.json +0 -0
  74. {evalvault-1.67.0 → evalvault-1.68.0}/config/methods.yaml +0 -0
  75. {evalvault-1.67.0 → evalvault-1.68.0}/config/models.yaml +0 -0
  76. {evalvault-1.67.0 → evalvault-1.68.0}/config/ragas_prompts_override.yaml +0 -0
  77. {evalvault-1.67.0 → evalvault-1.68.0}/config/regressions/default.json +0 -0
  78. {evalvault-1.67.0 → evalvault-1.68.0}/config/regressions/ux.json +0 -0
  79. {evalvault-1.67.0 → evalvault-1.68.0}/config/stage_metric_playbook.yaml +0 -0
  80. {evalvault-1.67.0 → evalvault-1.68.0}/config/stage_metric_thresholds.json +0 -0
  81. {evalvault-1.67.0 → evalvault-1.68.0}/data/datasets/dummy_test_dataset.json +0 -0
  82. {evalvault-1.67.0 → evalvault-1.68.0}/data/datasets/insurance_qa_korean.csv +0 -0
  83. {evalvault-1.67.0 → evalvault-1.68.0}/data/datasets/insurance_qa_korean.json +0 -0
  84. {evalvault-1.67.0 → evalvault-1.68.0}/data/datasets/insurance_qa_korean_2.json +0 -0
  85. {evalvault-1.67.0 → evalvault-1.68.0}/data/datasets/insurance_qa_korean_3.json +0 -0
  86. {evalvault-1.67.0 → evalvault-1.68.0}/data/datasets/ragas_ko90_en10.json +0 -0
  87. {evalvault-1.67.0 → evalvault-1.68.0}/data/datasets/sample.json +0 -0
  88. {evalvault-1.67.0 → evalvault-1.68.0}/data/datasets/visualization_20q_cluster_map.csv +0 -0
  89. {evalvault-1.67.0 → evalvault-1.68.0}/data/datasets/visualization_20q_korean.json +0 -0
  90. {evalvault-1.67.0 → evalvault-1.68.0}/data/datasets/visualization_2q_cluster_map.csv +0 -0
  91. {evalvault-1.67.0 → evalvault-1.68.0}/data/datasets/visualization_2q_korean.json +0 -0
  92. {evalvault-1.67.0 → evalvault-1.68.0}/data/kg/knowledge_graph.json +0 -0
  93. {evalvault-1.67.0 → evalvault-1.68.0}/data/raw/The Complete Guide to Mastering Suno Advanced Strategies for Professional Music Generation.md +0 -0
  94. {evalvault-1.67.0 → evalvault-1.68.0}/data/raw/edge_cases.json +0 -0
  95. {evalvault-1.67.0 → evalvault-1.68.0}/data/raw/run_mode_full_domain_memory.json +0 -0
  96. {evalvault-1.67.0 → evalvault-1.68.0}/data/raw/sample_rag_knowledge.txt +0 -0
  97. {evalvault-1.67.0 → evalvault-1.68.0}/dataset_templates/dataset_template.csv +0 -0
  98. {evalvault-1.67.0 → evalvault-1.68.0}/dataset_templates/dataset_template.json +0 -0
  99. {evalvault-1.67.0 → evalvault-1.68.0}/dataset_templates/dataset_template.xlsx +0 -0
  100. {evalvault-1.67.0 → evalvault-1.68.0}/dataset_templates/method_input_template.json +0 -0
  101. {evalvault-1.67.0 → evalvault-1.68.0}/docker-compose.langfuse.yml +0 -0
  102. {evalvault-1.67.0 → evalvault-1.68.0}/docker-compose.phoenix.yaml +0 -0
  103. {evalvault-1.67.0 → evalvault-1.68.0}/docker-compose.yml +0 -0
  104. {evalvault-1.67.0 → evalvault-1.68.0}/docs/README.ko.md +0 -0
  105. {evalvault-1.67.0 → evalvault-1.68.0}/docs/ROADMAP.md +0 -0
  106. {evalvault-1.67.0 → evalvault-1.68.0}/docs/STATUS.md +0 -0
  107. {evalvault-1.67.0 → evalvault-1.68.0}/docs/api/adapters/inbound.md +0 -0
  108. {evalvault-1.67.0 → evalvault-1.68.0}/docs/api/adapters/outbound.md +0 -0
  109. {evalvault-1.67.0 → evalvault-1.68.0}/docs/api/config.md +0 -0
  110. {evalvault-1.67.0 → evalvault-1.68.0}/docs/api/domain/entities.md +0 -0
  111. {evalvault-1.67.0 → evalvault-1.68.0}/docs/api/domain/metrics.md +0 -0
  112. {evalvault-1.67.0 → evalvault-1.68.0}/docs/api/domain/services.md +0 -0
  113. {evalvault-1.67.0 → evalvault-1.68.0}/docs/api/ports/inbound.md +0 -0
  114. {evalvault-1.67.0 → evalvault-1.68.0}/docs/api/ports/outbound.md +0 -0
  115. {evalvault-1.67.0 → evalvault-1.68.0}/docs/architecture/open-rag-trace-collector.md +0 -0
  116. {evalvault-1.67.0 → evalvault-1.68.0}/docs/architecture/open-rag-trace-spec.md +0 -0
  117. {evalvault-1.67.0 → evalvault-1.68.0}/docs/getting-started/INSTALLATION.md +0 -0
  118. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/AGENTS_SYSTEM_GUIDE.md +0 -0
  119. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/CHAINLIT_INTEGRATION_PLAN.md +0 -0
  120. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/CLI_MCP_PLAN.md +0 -0
  121. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/CLI_PARALLEL_FEATURES_SPEC.md +0 -0
  122. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/DEV_GUIDE.md +0 -0
  123. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/DOCS_REFRESH_PLAN.md +0 -0
  124. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/EVALVAULT_DIAGNOSTIC_PLAYBOOK.md +0 -0
  125. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/EVALVAULT_RUN_EXCEL_SHEETS.md +0 -0
  126. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/EVALVAULT_WORK_PLAN.md +0 -0
  127. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/EXTERNAL_TRACE_API_SPEC.md +0 -0
  128. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/Extension_2.md +0 -0
  129. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/Extension_Data_Difficulty_Profiling_Custom_Judge_Model.md +0 -0
  130. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/INSURANCE_SUMMARY_METRICS_PLAN.md +0 -0
  131. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/LENA_MVP_IMPLEMENTATION_PLAN.md +0 -0
  132. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/LENA_RAGAS_CALIBRATION_DEV_PLAN.md +0 -0
  133. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/OPEN_RAG_TRACE_INTERNAL_ADAPTER.md +0 -0
  134. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/OPEN_RAG_TRACE_SAMPLES.md +0 -0
  135. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/PARALLEL_WORK_APPROVAL_RULES.md +0 -0
  136. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/PRD_LENA.md +0 -0
  137. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/PROJECT_STATUS_AND_PLAN.md +0 -0
  138. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/RAGAS_HUMAN_FEEDBACK_CALIBRATION_GUIDE.md +0 -0
  139. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/RAG_CLI_WORKFLOW_TEMPLATES.md +0 -0
  140. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/RAG_NOISE_REDUCTION_GUIDE.md +0 -0
  141. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/RAG_PERFORMANCE_IMPLEMENTATION_LOG.md +0 -0
  142. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/RAG_PERFORMANCE_IMPROVEMENT_PROPOSAL.md +0 -0
  143. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/RELEASE_CHECKLIST.md +0 -0
  144. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/WEBUI_CLI_ROLLOUT_PLAN.md +0 -0
  145. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/cli_process.md +0 -0
  146. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/prompt_suggestions_design.md +0 -0
  147. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/rag_human_feedback_calibration_implementation_plan.md +0 -0
  148. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/refactoring_strategy.md +0 -0
  149. {evalvault-1.67.0 → evalvault-1.68.0}/docs/guides/repeat_query.md +0 -0
  150. {evalvault-1.67.0 → evalvault-1.68.0}/docs/mapping/component-to-whitepaper.yaml +0 -0
  151. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/00_frontmatter.md +0 -0
  152. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/01_overview.md +0 -0
  153. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/02_architecture.md +0 -0
  154. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/04_components.md +0 -0
  155. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/05_expert_lenses.md +0 -0
  156. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/06_implementation.md +0 -0
  157. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/07_advanced.md +0 -0
  158. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/08_customization.md +0 -0
  159. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/09_quality.md +0 -0
  160. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/10_performance.md +0 -0
  161. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/11_security.md +0 -0
  162. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/13_standards.md +0 -0
  163. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/14_roadmap.md +0 -0
  164. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/INDEX.md +0 -0
  165. {evalvault-1.67.0 → evalvault-1.68.0}/docs/new_whitepaper/STYLE_GUIDE.md +0 -0
  166. {evalvault-1.67.0 → evalvault-1.68.0}/docs/refactor/REFAC_000_master_plan.md +0 -0
  167. {evalvault-1.67.0 → evalvault-1.68.0}/docs/refactor/REFAC_010_agent_playbook.md +0 -0
  168. {evalvault-1.67.0 → evalvault-1.68.0}/docs/refactor/REFAC_020_logging_policy.md +0 -0
  169. {evalvault-1.67.0 → evalvault-1.68.0}/docs/refactor/REFAC_030_phase0_responsibility_map.md +0 -0
  170. {evalvault-1.67.0 → evalvault-1.68.0}/docs/refactor/REFAC_040_wbs_parallel_plan.md +0 -0
  171. {evalvault-1.67.0 → evalvault-1.68.0}/docs/refactor/logs/phase-0-baseline.md +0 -0
  172. {evalvault-1.67.0 → evalvault-1.68.0}/docs/refactor/logs/phase-1-evaluator.md +0 -0
  173. {evalvault-1.67.0 → evalvault-1.68.0}/docs/refactor/logs/phase-2-cli-run.md +0 -0
  174. {evalvault-1.67.0 → evalvault-1.68.0}/docs/refactor/logs/phase-3-analysis.md +0 -0
  175. {evalvault-1.67.0 → evalvault-1.68.0}/docs/security_audit_worklog.md +0 -0
  176. {evalvault-1.67.0 → evalvault-1.68.0}/docs/stylesheets/extra.css +0 -0
  177. {evalvault-1.67.0 → evalvault-1.68.0}/docs/templates/dataset_template.csv +0 -0
  178. {evalvault-1.67.0 → evalvault-1.68.0}/docs/templates/dataset_template.json +0 -0
  179. {evalvault-1.67.0 → evalvault-1.68.0}/docs/templates/dataset_template.xlsx +0 -0
  180. {evalvault-1.67.0 → evalvault-1.68.0}/docs/templates/eval_report_templates.md +0 -0
  181. {evalvault-1.67.0 → evalvault-1.68.0}/docs/templates/kg_template.json +0 -0
  182. {evalvault-1.67.0 → evalvault-1.68.0}/docs/templates/otel_openinference_trace_example.json +0 -0
  183. {evalvault-1.67.0 → evalvault-1.68.0}/docs/templates/ragas_dataset_example_ko90_en10.json +0 -0
  184. {evalvault-1.67.0 → evalvault-1.68.0}/docs/templates/retriever_docs_template.json +0 -0
  185. {evalvault-1.67.0 → evalvault-1.68.0}/docs/tools/generate-whitepaper.py +0 -0
  186. {evalvault-1.67.0 → evalvault-1.68.0}/docs/web_ui_analysis_migration_plan.md +0 -0
  187. {evalvault-1.67.0 → evalvault-1.68.0}/dummy_test_dataset.json +0 -0
  188. {evalvault-1.67.0 → evalvault-1.68.0}/examples/README.md +0 -0
  189. {evalvault-1.67.0 → evalvault-1.68.0}/examples/benchmarks/README.md +0 -0
  190. {evalvault-1.67.0 → evalvault-1.68.0}/examples/benchmarks/korean_rag/faithfulness_test.json +0 -0
  191. {evalvault-1.67.0 → evalvault-1.68.0}/examples/benchmarks/korean_rag/insurance_qa_100.json +0 -0
  192. {evalvault-1.67.0 → evalvault-1.68.0}/examples/benchmarks/korean_rag/keyword_extraction_test.json +0 -0
  193. {evalvault-1.67.0 → evalvault-1.68.0}/examples/benchmarks/korean_rag/retrieval_test.json +0 -0
  194. {evalvault-1.67.0 → evalvault-1.68.0}/examples/benchmarks/output/comparison.json +0 -0
  195. {evalvault-1.67.0 → evalvault-1.68.0}/examples/benchmarks/output/full_results.json +0 -0
  196. {evalvault-1.67.0 → evalvault-1.68.0}/examples/benchmarks/output/leaderboard.json +0 -0
  197. {evalvault-1.67.0 → evalvault-1.68.0}/examples/benchmarks/output/results_mteb.json +0 -0
  198. {evalvault-1.67.0 → evalvault-1.68.0}/examples/benchmarks/output/retrieval_result.json +0 -0
  199. {evalvault-1.67.0 → evalvault-1.68.0}/examples/benchmarks/run_korean_benchmark.py +0 -0
  200. {evalvault-1.67.0 → evalvault-1.68.0}/examples/kg_generator_demo.py +0 -0
  201. {evalvault-1.67.0 → evalvault-1.68.0}/examples/method_plugin_template/README.md +0 -0
  202. {evalvault-1.67.0 → evalvault-1.68.0}/examples/method_plugin_template/pyproject.toml +0 -0
  203. {evalvault-1.67.0 → evalvault-1.68.0}/examples/method_plugin_template/src/method_plugin_template/__init__.py +0 -0
  204. {evalvault-1.67.0 → evalvault-1.68.0}/examples/method_plugin_template/src/method_plugin_template/methods.py +0 -0
  205. {evalvault-1.67.0 → evalvault-1.68.0}/examples/stage_events.jsonl +0 -0
  206. {evalvault-1.67.0 → evalvault-1.68.0}/examples/usecase/comprehensive_workflow_test.py +0 -0
  207. {evalvault-1.67.0 → evalvault-1.68.0}/examples/usecase/insurance_eval_dataset.json +0 -0
  208. {evalvault-1.67.0 → evalvault-1.68.0}/examples/usecase/output/comprehensive_report.html +0 -0
  209. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/.env.example +0 -0
  210. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/.gitignore +0 -0
  211. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/README.md +0 -0
  212. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/e2e/analysis-compare.spec.ts +0 -0
  213. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/e2e/analysis-lab.spec.ts +0 -0
  214. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/e2e/compare-runs.spec.ts +0 -0
  215. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/e2e/dashboard.spec.ts +0 -0
  216. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/e2e/domain-memory.spec.ts +0 -0
  217. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/e2e/evaluation-studio.spec.ts +0 -0
  218. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/e2e/knowledge-base.spec.ts +0 -0
  219. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/e2e/mocks/intents.json +0 -0
  220. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/e2e/mocks/run_details.json +0 -0
  221. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/e2e/mocks/runs.json +0 -0
  222. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/e2e/run-details.spec.ts +0 -0
  223. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/eslint.config.js +0 -0
  224. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/index.html +0 -0
  225. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/package-lock.json +0 -0
  226. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/package.json +0 -0
  227. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/playwright.config.ts +0 -0
  228. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/public/vite.svg +0 -0
  229. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/App.css +0 -0
  230. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/App.tsx +0 -0
  231. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/assets/react.svg +0 -0
  232. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/AnalysisNodeOutputs.tsx +0 -0
  233. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/InsightSpacePanel.tsx +0 -0
  234. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/Layout.tsx +0 -0
  235. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/MarkdownContent.tsx +0 -0
  236. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/PrioritySummaryPanel.tsx +0 -0
  237. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/SpaceLegend.tsx +0 -0
  238. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/SpacePlot2D.tsx +0 -0
  239. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/SpacePlot3D.tsx +0 -0
  240. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/StatusBadge.tsx +0 -0
  241. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/VirtualizedText.tsx +0 -0
  242. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/ai-elements/Conversation.tsx +0 -0
  243. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/ai-elements/Message.tsx +0 -0
  244. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/ai-elements/PromptInput.tsx +0 -0
  245. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/ai-elements/Response.tsx +0 -0
  246. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/components/ai-elements/index.ts +0 -0
  247. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/config/ui.ts +0 -0
  248. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/config.ts +0 -0
  249. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/hooks/useInsightSpace.ts +0 -0
  250. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/index.css +0 -0
  251. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/main.tsx +0 -0
  252. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/AnalysisCompareView.tsx +0 -0
  253. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/AnalysisLab.tsx +0 -0
  254. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/AnalysisResultView.tsx +0 -0
  255. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/Chat.tsx +0 -0
  256. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/CompareRuns.tsx +0 -0
  257. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/ComprehensiveAnalysis.tsx +0 -0
  258. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/CustomerReport.tsx +0 -0
  259. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/Dashboard.tsx +0 -0
  260. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/DomainMemory.tsx +0 -0
  261. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/EvaluationStudio.tsx +0 -0
  262. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/KnowledgeBase.tsx +0 -0
  263. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/RunDetails.tsx +0 -0
  264. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/Settings.tsx +0 -0
  265. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/Visualization.tsx +0 -0
  266. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/pages/VisualizationHome.tsx +0 -0
  267. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/services/api.ts +0 -0
  268. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/types/plotly.d.ts +0 -0
  269. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/utils/format.ts +0 -0
  270. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/utils/phoenix.ts +0 -0
  271. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/utils/runAnalytics.ts +0 -0
  272. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/utils/score.ts +0 -0
  273. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/src/utils/summaryMetrics.ts +0 -0
  274. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/tailwind.config.js +0 -0
  275. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/tsconfig.app.json +0 -0
  276. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/tsconfig.json +0 -0
  277. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/tsconfig.node.json +0 -0
  278. {evalvault-1.67.0 → evalvault-1.68.0}/frontend/vite.config.ts +0 -0
  279. {evalvault-1.67.0 → evalvault-1.68.0}/mkdocs.yml +0 -0
  280. {evalvault-1.67.0 → evalvault-1.68.0}/package-lock.json +0 -0
  281. {evalvault-1.67.0 → evalvault-1.68.0}/prompts/system_override.txt +0 -0
  282. {evalvault-1.67.0 → evalvault-1.68.0}/reports/.gitkeep +0 -0
  283. {evalvault-1.67.0 → evalvault-1.68.0}/reports/README.md +0 -0
  284. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_0aa9fab0_f1287e90/final_output.json +0 -0
  285. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_0aa9fab0_f1287e90/index.json +0 -0
  286. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_0aa9fab0_f1287e90/load_runs.json +0 -0
  287. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_0aa9fab0_f1287e90/report.json +0 -0
  288. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_0aa9fab0_f1287e90/run_change_detection.json +0 -0
  289. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_0aa9fab0_f1287e90/run_metric_comparison.json +0 -0
  290. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_8f825b22_4516d358/final_output.json +0 -0
  291. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_8f825b22_4516d358/index.json +0 -0
  292. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_8f825b22_4516d358/load_runs.json +0 -0
  293. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_8f825b22_4516d358/report.json +0 -0
  294. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_8f825b22_4516d358/run_change_detection.json +0 -0
  295. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_8f825b22_4516d358/run_metric_comparison.json +0 -0
  296. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_f1287e90_8f825b22/final_output.json +0 -0
  297. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_f1287e90_8f825b22/index.json +0 -0
  298. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_f1287e90_8f825b22/load_runs.json +0 -0
  299. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_f1287e90_8f825b22/report.json +0 -0
  300. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_f1287e90_8f825b22/run_change_detection.json +0 -0
  301. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/artifacts/comparison_f1287e90_8f825b22/run_metric_comparison.json +0 -0
  302. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/comparison_0aa9fab0_9fbf4776.json +0 -0
  303. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/comparison_0aa9fab0_9fbf4776.md +0 -0
  304. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/comparison_0aa9fab0_f1287e90.json +0 -0
  305. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/comparison_0aa9fab0_f1287e90.md +0 -0
  306. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/comparison_8f825b22_4516d358.json +0 -0
  307. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/comparison_8f825b22_4516d358.md +0 -0
  308. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/comparison_9fbf4776_a491fa0e.json +0 -0
  309. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/comparison_9fbf4776_a491fa0e.md +0 -0
  310. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/comparison_f1287e90_8f825b22.json +0 -0
  311. {evalvault-1.67.0 → evalvault-1.68.0}/reports/comparison/comparison_f1287e90_8f825b22.md +0 -0
  312. {evalvault-1.67.0 → evalvault-1.68.0}/reports/debug_report_r1_smoke.md +0 -0
  313. {evalvault-1.67.0 → evalvault-1.68.0}/reports/debug_report_r2_graphrag.md +0 -0
  314. {evalvault-1.67.0 → evalvault-1.68.0}/reports/debug_report_r2_graphrag_openai.md +0 -0
  315. {evalvault-1.67.0 → evalvault-1.68.0}/reports/debug_report_r3_bm25.md +0 -0
  316. {evalvault-1.67.0 → evalvault-1.68.0}/reports/debug_report_r3_bm25_langfuse3.md +0 -0
  317. {evalvault-1.67.0 → evalvault-1.68.0}/reports/debug_report_r3_dense_faiss.md +0 -0
  318. {evalvault-1.67.0 → evalvault-1.68.0}/reports/improvement_1d91a667-4288-4742-be3a-a8f5310c5140.md +0 -0
  319. {evalvault-1.67.0 → evalvault-1.68.0}/reports/r2_graphrag_openai_stage_events.jsonl +0 -0
  320. {evalvault-1.67.0 → evalvault-1.68.0}/reports/r2_graphrag_openai_stage_report.txt +0 -0
  321. {evalvault-1.67.0 → evalvault-1.68.0}/reports/r2_graphrag_stage_events.jsonl +0 -0
  322. {evalvault-1.67.0 → evalvault-1.68.0}/reports/r2_graphrag_stage_report.txt +0 -0
  323. {evalvault-1.67.0 → evalvault-1.68.0}/reports/r3_bm25_langfuse2_stage_events.jsonl +0 -0
  324. {evalvault-1.67.0 → evalvault-1.68.0}/reports/r3_bm25_langfuse3_stage_events.jsonl +0 -0
  325. {evalvault-1.67.0 → evalvault-1.68.0}/reports/r3_bm25_langfuse_stage_events.jsonl +0 -0
  326. {evalvault-1.67.0 → evalvault-1.68.0}/reports/r3_bm25_phoenix_stage_events.jsonl +0 -0
  327. {evalvault-1.67.0 → evalvault-1.68.0}/reports/r3_bm25_stage_events.jsonl +0 -0
  328. {evalvault-1.67.0 → evalvault-1.68.0}/reports/r3_bm25_stage_report.txt +0 -0
  329. {evalvault-1.67.0 → evalvault-1.68.0}/reports/r3_dense_faiss_stage_events.jsonl +0 -0
  330. {evalvault-1.67.0 → evalvault-1.68.0}/reports/r3_dense_faiss_stage_report.txt +0 -0
  331. {evalvault-1.67.0 → evalvault-1.68.0}/reports/retrieval_benchmark_smoke_precision.csv +0 -0
  332. {evalvault-1.67.0 → evalvault-1.68.0}/reports/retrieval_benchmark_smoke_precision_graphrag.csv +0 -0
  333. {evalvault-1.67.0 → evalvault-1.68.0}/reports/retrieval_benchmark_smoke_precision_multi.csv +0 -0
  334. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/benchmark/download_kmmlu.py +0 -0
  335. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/dev/open_rag_trace_demo.py +0 -0
  336. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/dev/open_rag_trace_integration_template.py +0 -0
  337. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/dev/otel-collector-config.yaml +0 -0
  338. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/dev/start_web_ui_with_phoenix.sh +0 -0
  339. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/dev/validate_open_rag_trace.py +0 -0
  340. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/dev_seed_pipeline_results.py +0 -0
  341. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/docs/__init__.py +0 -0
  342. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/docs/analyzer/__init__.py +0 -0
  343. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/docs/analyzer/ast_scanner.py +0 -0
  344. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/docs/analyzer/confidence_scorer.py +0 -0
  345. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/docs/analyzer/graph_builder.py +0 -0
  346. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/docs/analyzer/side_effect_detector.py +0 -0
  347. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/docs/generate_api_docs.py +0 -0
  348. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/docs/models/__init__.py +0 -0
  349. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/docs/models/schema.py +0 -0
  350. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/docs/renderer/__init__.py +0 -0
  351. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/docs/renderer/html_generator.py +0 -0
  352. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/ops/phoenix_watch.py +0 -0
  353. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/perf/backfill_langfuse_trace_url.py +0 -0
  354. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/perf/r3_dense_smoke.py +0 -0
  355. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/perf/r3_evalvault_run_dataset.json +0 -0
  356. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/perf/r3_retriever_docs.json +0 -0
  357. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/perf/r3_smoke_real.jsonl +0 -0
  358. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/perf/r3_stage_events_sample.jsonl +0 -0
  359. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/pipeline_template_inspect.py +0 -0
  360. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/reports/generate_release_notes.py +0 -0
  361. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/run_with_timeout.py +0 -0
  362. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/test_full_evaluation.py +0 -0
  363. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/tests/run_regressions.py +0 -0
  364. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/tests/run_retriever_stage_report_smoke.sh +0 -0
  365. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/validate_tutorials.py +0 -0
  366. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/verify_ragas_compliance.py +0 -0
  367. {evalvault-1.67.0 → evalvault-1.68.0}/scripts/verify_workflows.py +0 -0
  368. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/__init__.py +0 -0
  369. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/__init__.py +0 -0
  370. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/__init__.py +0 -0
  371. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/api/__init__.py +0 -0
  372. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/api/adapter.py +0 -0
  373. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/api/main.py +0 -0
  374. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/api/routers/__init__.py +0 -0
  375. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/api/routers/benchmark.py +0 -0
  376. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/api/routers/chat.py +0 -0
  377. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/api/routers/config.py +0 -0
  378. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/api/routers/domain.py +0 -0
  379. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/api/routers/knowledge.py +0 -0
  380. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/api/routers/mcp.py +0 -0
  381. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/api/routers/pipeline.py +0 -0
  382. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/api/routers/runs.py +0 -0
  383. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/__init__.py +0 -0
  384. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/app.py +0 -0
  385. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/__init__.py +0 -0
  386. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/agent.py +0 -0
  387. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/analyze.py +0 -0
  388. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/api.py +0 -0
  389. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/artifacts.py +0 -0
  390. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/benchmark.py +0 -0
  391. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/calibrate.py +0 -0
  392. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/calibrate_judge.py +0 -0
  393. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/compare.py +0 -0
  394. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/config.py +0 -0
  395. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/debug.py +0 -0
  396. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/domain.py +0 -0
  397. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/experiment.py +0 -0
  398. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/gate.py +0 -0
  399. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/generate.py +0 -0
  400. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/history.py +0 -0
  401. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/init.py +0 -0
  402. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/kg.py +0 -0
  403. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/langfuse.py +0 -0
  404. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/method.py +0 -0
  405. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/ops.py +0 -0
  406. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/phoenix.py +0 -0
  407. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/pipeline.py +0 -0
  408. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/profile_difficulty.py +0 -0
  409. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/prompts.py +0 -0
  410. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/regress.py +0 -0
  411. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/run.py +0 -0
  412. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/run_helpers.py +0 -0
  413. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/commands/stage.py +0 -0
  414. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/utils/__init__.py +0 -0
  415. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/utils/analysis_io.py +0 -0
  416. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/utils/console.py +0 -0
  417. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/utils/errors.py +0 -0
  418. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/utils/formatters.py +0 -0
  419. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/utils/options.py +0 -0
  420. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/utils/presets.py +0 -0
  421. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/utils/progress.py +0 -0
  422. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/cli/utils/validators.py +0 -0
  423. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/mcp/__init__.py +0 -0
  424. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/mcp/schemas.py +0 -0
  425. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/inbound/mcp/tools.py +0 -0
  426. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/__init__.py +0 -0
  427. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/__init__.py +0 -0
  428. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/analysis_report_module.py +0 -0
  429. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/base_module.py +0 -0
  430. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/bm25_searcher_module.py +0 -0
  431. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/causal_adapter.py +0 -0
  432. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/causal_analyzer_module.py +0 -0
  433. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/common.py +0 -0
  434. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/comparison_pipeline_adapter.py +0 -0
  435. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/comparison_report_module.py +0 -0
  436. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/data_loader_module.py +0 -0
  437. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/detailed_report_module.py +0 -0
  438. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/diagnostic_playbook_module.py +0 -0
  439. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/embedding_analyzer_module.py +0 -0
  440. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/embedding_distribution_module.py +0 -0
  441. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/embedding_searcher_module.py +0 -0
  442. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/hybrid_rrf_module.py +0 -0
  443. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/hybrid_weighted_module.py +0 -0
  444. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/hypothesis_generator_module.py +0 -0
  445. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/llm_report_module.py +0 -0
  446. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/low_performer_extractor_module.py +0 -0
  447. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/model_analyzer_module.py +0 -0
  448. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/morpheme_analyzer_module.py +0 -0
  449. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/morpheme_quality_checker_module.py +0 -0
  450. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/network_analyzer_module.py +0 -0
  451. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/nlp_adapter.py +0 -0
  452. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/nlp_analyzer_module.py +0 -0
  453. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/pattern_detector_module.py +0 -0
  454. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/pipeline_factory.py +0 -0
  455. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/pipeline_helpers.py +0 -0
  456. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/priority_summary_module.py +0 -0
  457. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/ragas_evaluator_module.py +0 -0
  458. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/retrieval_analyzer_module.py +0 -0
  459. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/retrieval_benchmark_module.py +0 -0
  460. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/retrieval_quality_checker_module.py +0 -0
  461. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/root_cause_analyzer_module.py +0 -0
  462. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/run_analyzer_module.py +0 -0
  463. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/run_change_detector_module.py +0 -0
  464. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/run_comparator_module.py +0 -0
  465. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/run_loader_module.py +0 -0
  466. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/run_metric_comparator_module.py +0 -0
  467. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/search_comparator_module.py +0 -0
  468. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/statistical_adapter.py +0 -0
  469. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/statistical_analyzer_module.py +0 -0
  470. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/statistical_comparator_module.py +0 -0
  471. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/summary_report_module.py +0 -0
  472. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/time_series_analyzer_module.py +0 -0
  473. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/timeseries_advanced_module.py +0 -0
  474. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/trend_detector_module.py +0 -0
  475. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/analysis/verification_report_module.py +0 -0
  476. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/artifact_fs.py +0 -0
  477. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/benchmark/__init__.py +0 -0
  478. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/benchmark/lm_eval_adapter.py +0 -0
  479. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/cache/__init__.py +0 -0
  480. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/cache/hybrid_cache.py +0 -0
  481. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/cache/memory_cache.py +0 -0
  482. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/dataset/__init__.py +0 -0
  483. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/dataset/json_loader.py +0 -0
  484. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/dataset/loader_factory.py +0 -0
  485. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/dataset/method_input_loader.py +0 -0
  486. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/dataset/streaming_loader.py +0 -0
  487. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/dataset/templates.py +0 -0
  488. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/dataset/thresholds.py +0 -0
  489. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/debug/__init__.py +0 -0
  490. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/debug/report_renderer.py +0 -0
  491. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/documents/__init__.py +0 -0
  492. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/documents/ocr/__init__.py +0 -0
  493. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/documents/ocr/paddleocr_backend.py +0 -0
  494. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/documents/pdf_extractor.py +0 -0
  495. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/documents/versioned_loader.py +0 -0
  496. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/domain_memory/__init__.py +0 -0
  497. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/domain_memory/domain_memory_schema.sql +0 -0
  498. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/domain_memory/sqlite_adapter.py +0 -0
  499. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/filesystem/__init__.py +0 -0
  500. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/filesystem/difficulty_profile_writer.py +0 -0
  501. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/filesystem/ops_snapshot_writer.py +0 -0
  502. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/improvement/__init__.py +0 -0
  503. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/improvement/insight_generator.py +0 -0
  504. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/improvement/pattern_detector.py +0 -0
  505. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/improvement/playbook_loader.py +0 -0
  506. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/improvement/stage_metric_playbook_loader.py +0 -0
  507. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/judge_calibration_adapter.py +0 -0
  508. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/judge_calibration_reporter.py +0 -0
  509. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/kg/__init__.py +0 -0
  510. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/kg/graph_rag_retriever.py +0 -0
  511. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/kg/networkx_adapter.py +0 -0
  512. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/kg/parallel_kg_builder.py +0 -0
  513. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/kg/query_strategies.py +0 -0
  514. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/llm/__init__.py +0 -0
  515. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/llm/anthropic_adapter.py +0 -0
  516. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/llm/azure_adapter.py +0 -0
  517. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/llm/base.py +0 -0
  518. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/llm/factory.py +0 -0
  519. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/llm/instructor_factory.py +0 -0
  520. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/llm/llm_relation_augmenter.py +0 -0
  521. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/llm/ollama_adapter.py +0 -0
  522. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/llm/openai_adapter.py +0 -0
  523. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/llm/token_aware_chat.py +0 -0
  524. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/llm/vllm_adapter.py +0 -0
  525. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/methods/__init__.py +0 -0
  526. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/methods/baseline_oracle.py +0 -0
  527. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/methods/external_command.py +0 -0
  528. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/methods/registry.py +0 -0
  529. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/nlp/__init__.py +0 -0
  530. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/nlp/korean/__init__.py +0 -0
  531. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/nlp/korean/bm25_retriever.py +0 -0
  532. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/nlp/korean/dense_retriever.py +0 -0
  533. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/nlp/korean/document_chunker.py +0 -0
  534. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/nlp/korean/hybrid_retriever.py +0 -0
  535. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/nlp/korean/kiwi_tokenizer.py +0 -0
  536. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/nlp/korean/korean_evaluation.py +0 -0
  537. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/nlp/korean/korean_stopwords.py +0 -0
  538. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/nlp/korean/toolkit.py +0 -0
  539. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/nlp/korean/toolkit_factory.py +0 -0
  540. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/phoenix/sync_service.py +0 -0
  541. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/report/__init__.py +0 -0
  542. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/report/dashboard_generator.py +0 -0
  543. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/report/llm_report_generator.py +0 -0
  544. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/report/markdown_adapter.py +0 -0
  545. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/storage/__init__.py +0 -0
  546. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/storage/base_sql.py +0 -0
  547. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/storage/benchmark_storage_adapter.py +0 -0
  548. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/storage/postgres_adapter.py +0 -0
  549. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/storage/postgres_schema.sql +0 -0
  550. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/storage/schema.sql +0 -0
  551. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/storage/sqlite_adapter.py +0 -0
  552. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/tracer/__init__.py +0 -0
  553. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/tracer/open_rag_log_handler.py +0 -0
  554. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/tracer/open_rag_trace_adapter.py +0 -0
  555. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/tracer/open_rag_trace_decorators.py +0 -0
  556. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/tracer/open_rag_trace_helpers.py +0 -0
  557. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/tracer/phoenix_tracer_adapter.py +0 -0
  558. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/tracker/__init__.py +0 -0
  559. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/tracker/langfuse_adapter.py +0 -0
  560. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/tracker/log_sanitizer.py +0 -0
  561. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/tracker/mlflow_adapter.py +0 -0
  562. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/adapters/outbound/tracker/phoenix_adapter.py +0 -0
  563. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/config/__init__.py +0 -0
  564. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/config/agent_types.py +0 -0
  565. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/config/domain_config.py +0 -0
  566. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/config/instrumentation.py +0 -0
  567. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/config/langfuse_support.py +0 -0
  568. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/config/model_config.py +0 -0
  569. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/config/phoenix_support.py +0 -0
  570. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/config/playbooks/improvement_playbook.yaml +0 -0
  571. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/config/secret_manager.py +0 -0
  572. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/debug_ragas.py +0 -0
  573. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/debug_ragas_real.py +0 -0
  574. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/__init__.py +0 -0
  575. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/__init__.py +0 -0
  576. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/analysis.py +0 -0
  577. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/analysis_pipeline.py +0 -0
  578. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/benchmark.py +0 -0
  579. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/benchmark_run.py +0 -0
  580. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/dataset.py +0 -0
  581. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/debug.py +0 -0
  582. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/experiment.py +0 -0
  583. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/feedback.py +0 -0
  584. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/improvement.py +0 -0
  585. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/judge_calibration.py +0 -0
  586. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/kg.py +0 -0
  587. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/memory.py +0 -0
  588. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/method.py +0 -0
  589. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/prompt.py +0 -0
  590. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/prompt_suggestion.py +0 -0
  591. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/rag_trace.py +0 -0
  592. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/entities/result.py +0 -0
  593. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/__init__.py +0 -0
  594. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/analysis_registry.py +0 -0
  595. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/confidence.py +0 -0
  596. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/contextual_relevancy.py +0 -0
  597. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/entity_preservation.py +0 -0
  598. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/insurance.py +0 -0
  599. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/no_answer.py +0 -0
  600. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/registry.py +0 -0
  601. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/retrieval_rank.py +0 -0
  602. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/summary_accuracy.py +0 -0
  603. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/summary_needs_followup.py +0 -0
  604. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/summary_non_definitive.py +0 -0
  605. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/summary_risk_coverage.py +0 -0
  606. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/terms_dictionary.json +0 -0
  607. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/metrics/text_match.py +0 -0
  608. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/__init__.py +0 -0
  609. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/analysis_service.py +0 -0
  610. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/artifact_lint_service.py +0 -0
  611. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/async_batch_executor.py +0 -0
  612. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/batch_executor.py +0 -0
  613. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/benchmark_report_service.py +0 -0
  614. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/benchmark_runner.py +0 -0
  615. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/benchmark_service.py +0 -0
  616. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/cache_metrics.py +0 -0
  617. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/cluster_map_builder.py +0 -0
  618. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/custom_metric_snapshot.py +0 -0
  619. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/dataset_preprocessor.py +0 -0
  620. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/debug_report_service.py +0 -0
  621. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/difficulty_profile_reporter.py +0 -0
  622. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/difficulty_profiling_service.py +0 -0
  623. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/document_chunker.py +0 -0
  624. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/document_versioning.py +0 -0
  625. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/domain_learning_hook.py +0 -0
  626. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/embedding_overlay.py +0 -0
  627. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/entity_extractor.py +0 -0
  628. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/evaluator.py +0 -0
  629. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/experiment_comparator.py +0 -0
  630. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/experiment_manager.py +0 -0
  631. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/experiment_reporter.py +0 -0
  632. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/experiment_repository.py +0 -0
  633. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/experiment_statistics.py +0 -0
  634. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/holdout_splitter.py +0 -0
  635. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/improvement_guide_service.py +0 -0
  636. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/intent_classifier.py +0 -0
  637. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/judge_calibration_service.py +0 -0
  638. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/kg_generator.py +0 -0
  639. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/memory_aware_evaluator.py +0 -0
  640. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/memory_based_analysis.py +0 -0
  641. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/method_runner.py +0 -0
  642. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/ops_snapshot_service.py +0 -0
  643. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/pipeline_orchestrator.py +0 -0
  644. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/pipeline_template_registry.py +0 -0
  645. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/prompt_candidate_service.py +0 -0
  646. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/prompt_manifest.py +0 -0
  647. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/prompt_registry.py +0 -0
  648. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/prompt_scoring_service.py +0 -0
  649. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/prompt_status.py +0 -0
  650. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/prompt_suggestion_reporter.py +0 -0
  651. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/ragas_prompt_overrides.py +0 -0
  652. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/regression_gate_service.py +0 -0
  653. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/retrieval_metrics.py +0 -0
  654. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/retriever_context.py +0 -0
  655. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/run_comparison_service.py +0 -0
  656. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/satisfaction_calibration_service.py +0 -0
  657. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/stage_event_builder.py +0 -0
  658. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/stage_metric_guide_service.py +0 -0
  659. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/stage_metric_service.py +0 -0
  660. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/stage_summary_service.py +0 -0
  661. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/synthetic_qa_generator.py +0 -0
  662. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/testset_generator.py +0 -0
  663. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/threshold_profiles.py +0 -0
  664. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/unified_report_service.py +0 -0
  665. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/domain/services/visual_space_service.py +0 -0
  666. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/mkdocs_helpers.py +0 -0
  667. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/__init__.py +0 -0
  668. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/inbound/__init__.py +0 -0
  669. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/inbound/analysis_pipeline_port.py +0 -0
  670. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/inbound/evaluator_port.py +0 -0
  671. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/inbound/learning_hook_port.py +0 -0
  672. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/inbound/web_port.py +0 -0
  673. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/__init__.py +0 -0
  674. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/analysis_cache_port.py +0 -0
  675. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/analysis_module_port.py +0 -0
  676. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/analysis_port.py +0 -0
  677. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/artifact_fs_port.py +0 -0
  678. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/benchmark_port.py +0 -0
  679. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/causal_analysis_port.py +0 -0
  680. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/comparison_pipeline_port.py +0 -0
  681. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/dataset_port.py +0 -0
  682. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/difficulty_profile_port.py +0 -0
  683. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/domain_memory_port.py +0 -0
  684. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/embedding_port.py +0 -0
  685. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/improvement_port.py +0 -0
  686. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/intent_classifier_port.py +0 -0
  687. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/judge_calibration_port.py +0 -0
  688. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/korean_nlp_port.py +0 -0
  689. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/llm_factory_port.py +0 -0
  690. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/llm_port.py +0 -0
  691. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/method_port.py +0 -0
  692. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/nlp_analysis_port.py +0 -0
  693. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/ops_snapshot_port.py +0 -0
  694. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/relation_augmenter_port.py +0 -0
  695. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/report_port.py +0 -0
  696. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/stage_storage_port.py +0 -0
  697. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/storage_port.py +0 -0
  698. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/tracer_port.py +0 -0
  699. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/ports/outbound/tracker_port.py +0 -0
  700. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/reports/__init__.py +0 -0
  701. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/reports/release_notes.py +0 -0
  702. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/scripts/__init__.py +0 -0
  703. {evalvault-1.67.0 → evalvault-1.68.0}/src/evalvault/scripts/regression_runner.py +0 -0
  704. {evalvault-1.67.0 → evalvault-1.68.0}/tests/__init__.py +0 -0
  705. {evalvault-1.67.0 → evalvault-1.68.0}/tests/conftest.py +0 -0
  706. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/README.md +0 -0
  707. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/benchmark/retrieval_ground_truth_min.json +0 -0
  708. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/benchmark/retrieval_ground_truth_multi.json +0 -0
  709. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/auto_insurance_qa_korean_full.json +0 -0
  710. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/callcenter_summary_5cases.json +0 -0
  711. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/comprehensive_dataset.json +0 -0
  712. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/edge_cases.json +0 -0
  713. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/edge_cases.xlsx +0 -0
  714. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/evaluation_test_sample.json +0 -0
  715. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/graphrag_retriever_docs.json +0 -0
  716. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/graphrag_smoke.json +0 -0
  717. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/insurance_document.txt +0 -0
  718. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/insurance_qa_english.csv +0 -0
  719. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/insurance_qa_english.json +0 -0
  720. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/insurance_qa_english.xlsx +0 -0
  721. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/insurance_qa_korean.csv +0 -0
  722. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/insurance_qa_korean.json +0 -0
  723. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/insurance_qa_korean.xlsx +0 -0
  724. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/insurance_qa_korean_versioned_pdf.json +0 -0
  725. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/run_mode_full_domain_memory.json +0 -0
  726. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/run_mode_simple.json +0 -0
  727. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/e2e/summary_eval_minimal.json +0 -0
  728. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/kg/minimal_graph.json +0 -0
  729. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/sample_dataset.csv +0 -0
  730. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/sample_dataset.json +0 -0
  731. {evalvault-1.67.0 → evalvault-1.68.0}/tests/fixtures/sample_dataset.xlsx +0 -0
  732. {evalvault-1.67.0 → evalvault-1.68.0}/tests/integration/__init__.py +0 -0
  733. {evalvault-1.67.0 → evalvault-1.68.0}/tests/integration/benchmark/test_benchmark_service_integration.py +0 -0
  734. {evalvault-1.67.0 → evalvault-1.68.0}/tests/integration/conftest.py +0 -0
  735. {evalvault-1.67.0 → evalvault-1.68.0}/tests/integration/test_data_flow.py +0 -0
  736. {evalvault-1.67.0 → evalvault-1.68.0}/tests/integration/test_e2e_scenarios.py +0 -0
  737. {evalvault-1.67.0 → evalvault-1.68.0}/tests/integration/test_evaluation_flow.py +0 -0
  738. {evalvault-1.67.0 → evalvault-1.68.0}/tests/integration/test_full_workflow.py +0 -0
  739. {evalvault-1.67.0 → evalvault-1.68.0}/tests/integration/test_langfuse_flow.py +0 -0
  740. {evalvault-1.67.0 → evalvault-1.68.0}/tests/integration/test_phoenix_flow.py +0 -0
  741. {evalvault-1.67.0 → evalvault-1.68.0}/tests/integration/test_pipeline_api_contracts.py +0 -0
  742. {evalvault-1.67.0 → evalvault-1.68.0}/tests/integration/test_storage_flow.py +0 -0
  743. {evalvault-1.67.0 → evalvault-1.68.0}/tests/integration/test_summary_eval_fixture.py +0 -0
  744. {evalvault-1.67.0 → evalvault-1.68.0}/tests/optional_deps.py +0 -0
  745. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/__init__.py +0 -0
  746. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/adapters/inbound/mcp/test_execute_tools.py +0 -0
  747. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/adapters/inbound/mcp/test_read_tools.py +0 -0
  748. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/adapters/outbound/documents/test_pdf_extractor.py +0 -0
  749. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/adapters/outbound/documents/test_versioned_loader.py +0 -0
  750. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/adapters/outbound/improvement/__init__.py +0 -0
  751. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/adapters/outbound/improvement/test_insight_generator.py +0 -0
  752. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/adapters/outbound/improvement/test_pattern_detector.py +0 -0
  753. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/adapters/outbound/improvement/test_playbook_loader.py +0 -0
  754. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/adapters/outbound/improvement/test_stage_metric_playbook_loader.py +0 -0
  755. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/adapters/outbound/kg/test_graph_rag_retriever.py +0 -0
  756. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/adapters/outbound/kg/test_parallel_kg_builder.py +0 -0
  757. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/adapters/outbound/storage/test_benchmark_storage_adapter.py +0 -0
  758. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/config/test_phoenix_support.py +0 -0
  759. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/conftest.py +0 -0
  760. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/metrics/test_analysis_metric_registry.py +0 -0
  761. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/metrics/test_confidence.py +0 -0
  762. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/metrics/test_contextual_relevancy.py +0 -0
  763. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/metrics/test_entity_preservation.py +0 -0
  764. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/metrics/test_metric_registry.py +0 -0
  765. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/metrics/test_no_answer.py +0 -0
  766. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/metrics/test_retrieval_rank.py +0 -0
  767. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/metrics/test_text_match.py +0 -0
  768. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_cache_metrics.py +0 -0
  769. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_claim_level.py +0 -0
  770. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_dataset_preprocessor.py +0 -0
  771. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_document_versioning.py +0 -0
  772. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_evaluator_comprehensive.py +0 -0
  773. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_holdout_splitter.py +0 -0
  774. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_improvement_guide_service.py +0 -0
  775. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_judge_calibration_service.py +0 -0
  776. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_ops_snapshot_service.py +0 -0
  777. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_regression_gate_service.py +0 -0
  778. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_retrieval_metrics.py +0 -0
  779. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_retriever_context.py +0 -0
  780. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_stage_event_builder.py +0 -0
  781. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_stage_metric_guide_service.py +0 -0
  782. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/services/test_synthetic_qa_generator.py +0 -0
  783. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/test_embedding_overlay.py +0 -0
  784. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/test_prompt_manifest.py +0 -0
  785. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/domain/test_prompt_status.py +0 -0
  786. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/reports/test_release_notes.py +0 -0
  787. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/scripts/test_regression_runner.py +0 -0
  788. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_agent_types.py +0 -0
  789. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_analysis_entities.py +0 -0
  790. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_analysis_modules.py +0 -0
  791. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_analysis_pipeline.py +0 -0
  792. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_analysis_service.py +0 -0
  793. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_anthropic_adapter.py +0 -0
  794. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_artifact_lint_service.py +0 -0
  795. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_async_batch_executor.py +0 -0
  796. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_azure_adapter.py +0 -0
  797. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_benchmark_helpers.py +0 -0
  798. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_benchmark_runner.py +0 -0
  799. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_causal_adapter.py +0 -0
  800. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_cli.py +0 -0
  801. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_cli_artifacts.py +0 -0
  802. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_cli_calibrate_judge.py +0 -0
  803. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_cli_domain.py +0 -0
  804. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_cli_init.py +0 -0
  805. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_cli_ops.py +0 -0
  806. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_cli_progress.py +0 -0
  807. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_cli_utils.py +0 -0
  808. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_difficulty_profiling_service.py +0 -0
  809. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_domain_config.py +0 -0
  810. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_domain_memory.py +0 -0
  811. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_entities.py +0 -0
  812. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_entities_kg.py +0 -0
  813. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_entity_extractor.py +0 -0
  814. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_evaluator.py +0 -0
  815. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_experiment.py +0 -0
  816. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_hybrid_cache.py +0 -0
  817. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_instrumentation.py +0 -0
  818. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_insurance_metric.py +0 -0
  819. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_intent_classifier.py +0 -0
  820. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_kg_generator.py +0 -0
  821. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_kg_networkx.py +0 -0
  822. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_kiwi_tokenizer.py +0 -0
  823. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_kiwi_warning_suppression.py +0 -0
  824. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_korean_dense.py +0 -0
  825. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_korean_evaluation.py +0 -0
  826. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_korean_retrieval.py +0 -0
  827. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_langfuse_tracker.py +0 -0
  828. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_llm_relation_augmenter.py +0 -0
  829. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_lm_eval_adapter.py +0 -0
  830. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_markdown_report.py +0 -0
  831. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_memory_cache.py +0 -0
  832. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_memory_services.py +0 -0
  833. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_method_plugins.py +0 -0
  834. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_model_config.py +0 -0
  835. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_nlp_adapter.py +0 -0
  836. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_nlp_entities.py +0 -0
  837. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_ollama_adapter.py +0 -0
  838. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_openai_adapter.py +0 -0
  839. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_phoenix_adapter.py +0 -0
  840. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_pipeline_orchestrator.py +0 -0
  841. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_ports.py +0 -0
  842. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_postgres_storage.py +0 -0
  843. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_prompt_candidate_service.py +0 -0
  844. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_rag_trace_entities.py +0 -0
  845. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_regress_cli.py +0 -0
  846. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_run_comparison_service.py +0 -0
  847. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_run_memory_helpers.py +0 -0
  848. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_run_mode_fixtures.py +0 -0
  849. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_sqlite_storage.py +0 -0
  850. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_stage_cli.py +0 -0
  851. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_stage_metric_service.py +0 -0
  852. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_stage_storage.py +0 -0
  853. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_stage_summary_service.py +0 -0
  854. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_statistical_adapter.py +0 -0
  855. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_streaming_loader.py +0 -0
  856. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_summary_eval_fixture.py +0 -0
  857. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_testset_generator.py +0 -0
  858. {evalvault-1.67.0 → evalvault-1.68.0}/tests/unit/test_web_adapter.py +0 -0
@@ -34,7 +34,8 @@ htmlcov
34
34
  docs/_build
35
35
 
36
36
  # Local data (mounted as volume)
37
- data/
37
+ # Note: data/ is included for offline packaging when needed.
38
+ !data/**
38
39
  reports/
39
40
 
40
41
  # Environment files (use docker env instead)
@@ -0,0 +1,58 @@
1
+ # EvalVault Offline Environment Template
2
+ # Copy to .env.offline and fill required values.
3
+ #
4
+ # Usage:
5
+ # cp .env.offline.example .env.offline
6
+ # # Edit .env.offline with your air-gapped network settings
7
+ # docker compose -f docker-compose.offline.yml --env-file .env.offline up -d
8
+
9
+ # ================================================
10
+ # Profile
11
+ # ================================================
12
+ EVALVAULT_PROFILE=dev
13
+
14
+ # ================================================
15
+ # PostgreSQL (core stack)
16
+ # ================================================
17
+ POSTGRES_USER=evalvault
18
+ POSTGRES_PASSWORD=evalvault
19
+ POSTGRES_DB=evalvault
20
+
21
+ # ================================================
22
+ # Storage (SQLite paths for local file-based storage)
23
+ # ================================================
24
+ EVALVAULT_DB_PATH=data/db/evalvault.db
25
+ EVALVAULT_MEMORY_DB_PATH=data/db/evalvault_memory.db
26
+
27
+ # ================================================
28
+ # API / CORS
29
+ # ================================================
30
+ CORS_ORIGINS=http://localhost:5173,http://127.0.0.1:5173
31
+
32
+ # API_AUTH_TOKENS=
33
+ # KNOWLEDGE_READ_TOKENS=
34
+ # KNOWLEDGE_WRITE_TOKENS=
35
+
36
+ # ================================================
37
+ # External LLM servers (air-gapped network)
38
+ # ================================================
39
+ # IMPORTANT: Model weights are NOT shipped with EvalVault.
40
+ # You must provide the URL to your air-gapped LLM server.
41
+
42
+ # Ollama (if using dev/prod profile)
43
+ # OLLAMA_BASE_URL=
44
+ OLLAMA_TIMEOUT=120
45
+ # OLLAMA_TOOL_MODELS=
46
+
47
+ # vLLM (if using vllm profile)
48
+ # VLLM_BASE_URL=
49
+ # VLLM_API_KEY=
50
+ # VLLM_MODEL=
51
+ # VLLM_EMBEDDING_MODEL=
52
+ # VLLM_EMBEDDING_BASE_URL=
53
+
54
+ # ================================================
55
+ # Faithfulness fallback (optional)
56
+ # ================================================
57
+ # FAITHFULNESS_FALLBACK_PROVIDER=
58
+ # FAITHFULNESS_FALLBACK_MODEL=
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: evalvault
3
- Version: 1.67.0
3
+ Version: 1.68.0
4
4
  Summary: RAG evaluation system using Ragas with Phoenix/Langfuse tracing
5
5
  Project-URL: Homepage, https://github.com/ntts9990/EvalVault
6
6
  Project-URL: Documentation, https://github.com/ntts9990/EvalVault#readme
@@ -1,15 +1,15 @@
1
1
  {
2
2
  "version": 1,
3
3
  "source": "docs/guides/USER_GUIDE.md",
4
- "source_hash": "3371b632290e3f17d7b0b9642586e037269de9f4583e50a165c67b894699e34c",
4
+ "source_hash": "1e7bc84b5056c973203130e11423e01feeb9bb779a32d4a4df8c024df7075015",
5
5
  "chunk_limit": 80,
6
- "created_at": "2026-01-20T08:02:45.258030+00:00",
6
+ "created_at": "2026-01-26T21:32:23.599657+00:00",
7
7
  "documents": [
8
8
  "# EvalVault 사용자 가이드\n\n> RAG 시스템 품질 평가 · 분석 · 추적을 위한 종합 워크플로 가이드\n\n이 문서는 README에서 다룬 간단한 소개를 넘어, 설치부터 Phoenix 연동·Domain Memory·자동화까지 모든 기능을 심층적으로 설명합니다.\n\n---\n\n## 목차\n\n1. [핵심 워크플로](#핵심-워크플로) - 평가 → 자동 분석 → 보고서/아티팩트 저장 → 비교 2. [시작하기](#시작하기) 3. [환경 구성](#환경-구성) 4. [CLI 명령어 참조](#cli-명령어-참조) 5. [Web UI](#web-ui) 6. [분석 워크플로](#분석-워크플로) 7. [Domain Memory 활용](#domain-memory-활용) 8. [관측성 & Phoenix](#관측성--phoenix) - [Open RAG Trace 표준 연동](#open-rag-trace-표준-연동) 9. [프롬프트 관리](#프롬프트-관리) 10. [성능 튜닝](#성능-튜닝) 11. [메서드 플러그인](#메서드-플러그인) 12. [문제 해결](#문제-해결) 13. [참고 자료](#참고-자료)\n\n---\n\n## 핵심 워크플로\n\nEvalVault의 가장 큰 장점은 **평가 → 자동 분석 → 보고서/아티팩트 저장 → 비교**가 하나의 `run_id`로 끊김 없이 이어져서, 재현성과 개선 루프가 매우 빠르다는 점입니다. 점수만 보는 게 아니라 통계·NLP·원인 분석까지 묶어서 바로 \"왜 좋아졌는지/나빠졌는지\"로 이어지는 게 핵심입니다.\n\n### 초간단 실행 (CLI)\n\n```bash uv run evalvault run --mode simple tests/fixtures/e2e/insurance_qa_korean.json \\ --metrics faithfulness,answer_relevancy \\ --profile dev \\ --db data/db/evalvault.db \\ --auto-analyze ```\n\n### 결과 확인 경로\n\n평가 실행 후 자동 분석이 완료되면 다음 파일들이 생성됩니다:\n\n- **요약 JSON**: `reports/analysis/analysis_<RUN_ID>.json` - **Markdown 보고서**: `reports/analysis/analysis_<RUN_ID>.md` - **아티팩트 인덱스**: `reports/analysis/artifacts/analysis_<RUN_ID>/index.json` - **노드별 결과**: `reports/analysis/artifacts/analysis_<RUN_ID>/<node_id>.json`\n\n요약 JSON에는 `artifacts.dir`와 `artifacts.index`가 포함되어 있어 경로 조회가 쉽습니다.\n\n### A/B 비교\n\n두 실행 결과를 비교하려면:\n\n```bash uv run evalvault analyze-compare <RUN_A> <RUN_B> --db data/db/evalvault.db ```\n\n결과는 `reports/comparison/comparison_<RUN_A>_<RUN_B>.md`에 저장됩니다.\n\n### Web UI 연동\n\nCLI와 Web UI가 동일한 DB를 사용하면 Web UI에서 바로 결과를 확인할 수 있습니다:\n\n```bash # Terminal 1: API 서버 uv run evalvault serve-api --reload\n\n# Terminal 2: React 프론트엔드 cd frontend npm install npm run dev ```\n\n동일한 DB(`data/db/evalvault.db`)를 사용하면 Web UI에서 바로 이어서 볼 수 있습니다.\n\n---\n\n## 시작하기\n\n### 시스템 요구 사항\n\n| 항목 | 권장 버전 | 비고 | |------|-----------|------| | Python | 3.12.x | `uv`가 자동 설치 (macOS/Linux/Windows 지원) | | 
uv | 최신 | [설치 가이드](https://docs.astral.sh/uv/getting-started/installation/) | | Docker (선택) | 최신 | Langfuse/Phoenix 로컬 배포 시 | | Ollama (선택) | 최신 | 폐쇄망/로컬 모델 사용 시 |\n\n### 설치 옵션\n\n#### PyPI ```bash uv pip install evalvault ```\n\n#### 소스 (권장) ```bash git clone https://github.com/ntts9990/EvalVault.git cd EvalVault uv sync --extra dev # 전체 기능 포함 (dev 도구 + 모든 extras) # 경량 설치 예시: uv sync --extra analysis ```\n\nPhoenix 트레이싱은 `dev`에 포함되어 있습니다. 경량 설치라면 `--extra phoenix`를 추가하세요.\n\n**Extras 설명**:\n\n| Extra | 패키지 | 목적 | |-------|--------|------| | `analysis` | scikit-learn | 통계/NLP 분석 모듈 | | `korean` | kiwipiepy, rank-bm25, sentence-transformers | 한국어 토크나이저 및 검색 | | `postgres` | psycopg | PostgreSQL 저장소 지원 | | `mlflow` | mlflow | MLflow 트래커 통합 | | `phoenix` | arize-phoenix + OpenTelemetry exporters | Phoenix 트레이싱, 데이터셋/실험 동기화 | | `anthropic` | anthropic | Anthropic LLM 어댑터 | | `perf` | faiss-cpu, ijson | 대용량 데이터셋 성능 도우미 | | `docs` | mkdocs, mkdocs-material, mkdocstrings | 문서 빌드 |\n\n`.python-version`이 Python 3.12를 고정하므로 추가 설치가 필요 없습니다.\n\n---\n\n## 환경 구성\n\n### 프로젝트 초기화 (init)\n\n빠르게 시작하려면 초기화 명령으로 기본 파일을 생성합니다.\n\n```bash uv run evalvault init ```\n\n- `.env` 템플릿과 `sample_dataset.json`을 생성합니다. - `dataset_templates/`에 JSON/CSV/XLSX 빈 템플릿을 생성합니다. - `--output-dir`로 생성 위치를 바꿀 수 있습니다. - `--skip-env`/`--skip-sample`/`--skip-templates`로 단계별 생성을 끌 수 있습니다.\n\n### .env 작성\n\n`cp .env.example .env` 후 아래 값을 채웁니다.\n\n```bash # 공통 EVALVAULT_PROFILE=dev # config/models.yaml에 정의된 프로필 EVALVAULT_DB_PATH=data/db/evalvault.db # SQLite 저장 경로 (API/CLI 공통) EVALVAULT_MEMORY_DB_PATH=data/db/evalvault_memory.db # 도메인 메모리 DB 경로 OPENAI_API_KEY=sk-...\n\n# Langfuse (선택) LANGFUSE_PUBLIC_KEY=pk-lf-... LANGFUSE_SECRET_KEY=sk-lf-... 
LANGFUSE_HOST=http://localhost:3000\n\n# Phoenix/OpenTelemetry (선택) PHOENIX_ENABLED=true PHOENIX_ENDPOINT=http://localhost:6006/v1/traces PHOENIX_SAMPLE_RATE=1.0\n\n# React 프론트엔드에서 API 호출 시 (선택) CORS_ORIGINS=http://localhost:5173,http://127.0.0.1:5173\n\n# vLLM(OpenAI-compatible) 사용 예 EVALVAULT_PROFILE=vllm VLLM_BASE_URL=http://localhost:8001/v1 VLLM_MODEL=gpt-oss-120b VLLM_EMBEDDING_MODEL=qwen3-embedding:0.6b # 선택: VLLM_EMBEDDING_BASE_URL=http://localhost:8002/v1 ```\n\nOpenAI를 쓰지 않는다면 `OPENAI_API_KEY`는 비워둬도 됩니다.\n\n### 초간단 시작 (Ollama 3줄)\n\n```bash cp .env.example .env ollama pull gemma3:1b uv run evalvault run tests/fixtures/e2e/insurance_qa_korean.json \\ --metrics faithfulness \\ --db data/db/evalvault.db \\ --profile dev ```\n\nTip: `answer_relevancy` 등 임베딩 메트릭을 쓰려면 `qwen3-embedding:0.6b`도 내려받으세요.\n\n### 초간단 시작 (vLLM 3줄)\n\n```bash cp .env.example .env printf \"\\nEVALVAULT_PROFILE=vllm\\nVLLM_BASE_URL=http://localhost:8001/v1\\nVLLM_MODEL=gpt-oss-120b\\n\" >> .env uv run evalvault run tests/fixtures/e2e/insurance_qa_korean.json \\ --metrics faithfulness \\ --db data/db/evalvault.db ```\n\nTip: 임베딩 메트릭은 `VLLM_EMBEDDING_MODEL`과 `/v1/embeddings` 엔드포인트가 필요합니다.\n\nOllama를 사용할 경우 `OLLAMA_BASE_URL`, `OLLAMA_TIMEOUT`을 추가하고, 평가 전에 `ollama pull`로 모델을 내려받습니다. Tool/function calling 지원 모델을 쓰려면 `.env`에 `OLLAMA_TOOL_MODELS`를 콤마로 지정합니다. 지원 여부는 `ollama show <model>` 출력의 `Capabilities`에 `tools`가 있는지 확인합니다.\n\n> 참고: vLLM이 임베딩 엔드포인트(`/v1/embeddings`)를 제공하지 않으면 임베딩 기반 메트릭은 실패할 수 있습니다. 
> 이 경우 `faithfulness`, `answer_relevancy` 등 LLM 기반 메트릭만 선택하거나 별도의 임베딩 서버를 지정하세요.\n\n### 임베딩 엔드포인트 체크리스트\n\n- 임베딩이 필요한 메트릭: `answer_relevancy`, `semantic_similarity` - Ollama: `ollama pull qwen3-embedding:0.6b` 후 `ollama list`로 확인 - vLLM: `/v1/embeddings` 응답 확인 - 임베딩 서버가 분리돼 있으면 `VLLM_EMBEDDING_BASE_URL`을 설정\n\n예시: ```bash curl -s http://localhost:8001/v1/embeddings \\ -H \"Authorization: Bearer local\" \\ -H \"Content-Type: application/json\" \\ -d '{\"model\":\"qwen3-embedding:0.6b\",\"input\":\"ping\"}' ```\n\n### Ollama 모델 추가\n\nOllama는 **로컬에 내려받은 모델만** 목록에 노출됩니다. 다음 순서로 추가하세요.",
9
9
  "1. **모델 내려받기** ```bash ollama pull gpt-oss:120b ollama pull gpt-oss-safeguard:120b ``` 2. **목록 확인** ```bash ollama list ``` 3. **EvalVault에서 선택** - Web UI: `Provider = ollama` 선택 후 모델 카드에서 선택 - CLI: `config/models.yaml`의 프로필 모델을 변경하거나 `--profile`로 지정 4. **Tool 지원 모델 등록** - `ollama show <model>`로 `Capabilities: tools` 확인 - 지원 모델은 `.env`의 `OLLAMA_TOOL_MODELS`에 콤마로 추가\n\n미리 받아두면 좋은 모델: `gpt-oss:120b`, `gpt-oss-safeguard:120b`, `gpt-oss-safeguard:20b`.\n\n### 모델 프로필 관리\n\n`config/models.yaml`은 프로필별 LLM/임베딩 구성을 정의합니다.\n\n```yaml profiles: dev: llm: provider: ollama model: gemma3:1b embedding: provider: ollama model: qwen3-embedding:0.6b openai: llm: provider: openai model: gpt-5-mini embedding: provider: openai model: text-embedding-3-small vllm: llm: provider: vllm model: gpt-oss-120b embedding: provider: vllm model: qwen3-embedding:0.6b ```\n\n사용법: - 환경 변수 `EVALVAULT_PROFILE` 설정 - 또는 CLI `--profile <name>` / `-p <name>` (예: dev, openai, vllm)\n\n### 데이터셋 준비\n\n### 요약 메트릭 실무 매뉴얼 (개발 현황 + 평가 방법 + 해석)\n\n이 섹션은 **요약 메트릭 개발 현황**과 **실무 평가 매뉴얼**을 함께 제공합니다.\n\n#### 1) 개발 현황 요약 (현재 코드 기준) - **RAGAS 요약 메트릭** - `summary_score` (RAGAS SummaryScore) - `summary_faithfulness` (RAGAS Faithfulness alias) - **커스텀 요약 메트릭(보험 도메인 확장)** - `entity_preservation` (규칙 기반) - `summary_accuracy` (규칙 기반) - `summary_risk_coverage` (규칙 + metadata.summary_tags) - `summary_non_definitive` (규칙 기반) - `summary_needs_followup` (규칙 + metadata.summary_tags) - **스냅샷/추적** - 커스텀 메트릭은 `custom_metric_snapshot`에 규칙/입력/출력/해시가 저장됩니다. - Excel `CustomMetrics` 시트와 Langfuse/Phoenix/MLflow artifact에도 기록됩니다. 
- **확장 계획** - 보험 요약 메트릭 확장 설계: `docs/guides/INSURANCE_SUMMARY_METRICS_PLAN.md` - 규칙 + LLM 보정 하이브리드 설계 포함 (경계 사례 보정)\n\n#### 2) 평가 준비(데이터셋) - 필수 컬럼 - `contexts`: 상담 대화 원문/스크립트 - `answer`: 모델 요약 - `ground_truth`: 현업 요약(선택이지만 있으면 해석이 훨씬 명확) - 선택 컬럼 - `metadata.summary_tags`: 리스크 태그 목록 (`exclusion`, `deductible`, `limit`, `needs_followup` 등) - `metadata.summary_intent`: `agent_notes` (내부용 요약 표시) - 권장 포맷 - JSON 데이터셋을 우선 권장 (metadata 지원)\n\n#### 3) 실행 방법 (CLI) ```bash # 기본 요약 메트릭 uv run evalvault run data.json \\ --metrics summary_score,summary_faithfulness,entity_preservation \\ --profile <profile> \\ --db data/db/evalvault.db\n\n# 보험 요약 커스텀 메트릭 포함 uv run evalvault run data.json \\ --metrics summary_accuracy,summary_risk_coverage,summary_non_definitive,summary_needs_followup \\ --threshold-profile summary \\ --profile <profile> \\ --db data/db/evalvault.db ```\n\n#### 3-1) 실행 방법 (Web UI) - Evaluation Studio에서 데이터셋 업로드 후, 요약 메트릭을 선택해 실행합니다. - 결과는 Runs/Reports 화면에서 요약 카드와 메트릭 상세로 확인할 수 있습니다.\n\n#### 3-2) 결과/아티팩트 위치 - DB 저장: `data/db/evalvault.db` (또는 `EVALVAULT_DB_PATH`) - Excel: `data/db/evalvault_run_<RUN_ID>.xlsx` - 시트 구성: `Summary`, `MetricScores`, `MetricsSummary`, `CustomMetrics` - 자동 분석 결과(옵션): - JSON: `reports/analysis/analysis_<RUN_ID>.json` - Markdown: `reports/analysis/analysis_<RUN_ID>.md` - 아티팩트: `reports/analysis/artifacts/analysis_<RUN_ID>/` - 비교 분석 결과: - JSON: `reports/comparison/comparison_<RUN_A>_<RUN_B>.json` - Markdown: `reports/comparison/comparison_<RUN_A>_<RUN_B>.md`\n\n#### 4) 점수 해석 방법 (실무 기준) - `summary_faithfulness`: **근거성** - 낮으면 우선 `contexts` 품질(누락/불일치)을 점검합니다. - `summary_score`: **핵심 보존/간결성** - 동의어/재서술이 많은 요약은 낮게 나올 수 있습니다. - `entity_preservation`: **핵심 엔티티 보존** - 숫자/금액/기간/조건이 누락되면 급격히 낮아집니다. - `summary_accuracy`: **요약의 엔티티 정확성** - 컨텍스트 밖 수치/조건 추가에 민감합니다. - `summary_risk_coverage`: **리스크 항목 누락** - `metadata.summary_tags`가 없으면 1.0으로 처리되므로 태깅 정책이 필수입니다. - `summary_non_definitive`: **단정 표현 억제** - 단 1회 매칭에도 0점이므로 문장 스타일 변화에 민감합니다. 
- `summary_needs_followup`: **추가 확인 문구** - `needs_followup` 태그가 있는 경우에만 요구됩니다.\n\n#### 5) 운영 해석 팁 - 단일 지표보다 **조합 해석**을 권장합니다. - 예: `summary_faithfulness` + `entity_preservation` + `summary_risk_coverage` - `contexts` 품질이 낮으면 대부분의 요약 지표가 왜곡됩니다. - 태그 기반 지표는 **태그 누락률**을 함께 모니터링해야 합니다.\n\n#### 6) 더 풍부한 해석을 위한 기능 활용 - **자동 분석/리포트**: `--auto-analyze`로 요약 보고서 + 아티팩트 생성 - **A/B 비교 분석**: `evalvault analyze-compare`로 요약 메트릭 개선 여부 통계 비교 - **LLM 인사이트**: `evalvault analyze --enable-llm`로 실패 원인/개선 제안 생성 - **프롬프트 변경 추적**: `prompts diff` + `prompt manifest`로 변경 영향 분석 - **Phoenix/Langfuse**: trace 기반으로 요약 실패 케이스를 세부 분석 - **repeat_query 적용**: 비추론 요약 프롬프트에 반복을 적용해 정확도 개선 가능 - 계획 문서: `docs/guides/repeat_query.md`\n\n### 요약 메트릭 해석 가이드 (summary_score, summary_faithfulness, entity_preservation)\n\n### 커스텀 메트릭 스냅샷 (평가 방식/과정/결과 기록)\n\nEvalVault는 커스텀 메트릭의 평가 방식/과정을 실행 메타데이터에 기록합니다. - 저장 위치: `run.tracker_metadata.custom_metric_snapshot` - Excel: `CustomMetrics` 시트 - 추적 도구: Langfuse/Phoenix/MLflow artifact에 `custom_metric_snapshot` 포함\n\n**기록 항목(요약)** - `evaluation_method`: 규칙 기반/문자열 매칭/랭킹 기반 등 - `inputs`: `answer`, `contexts`, `ground_truth`, `question` 등 - `output`: 점수 범위 또는 판정 규칙 - `evaluation_process`: 핵심 평가 절차 요약 - `rules`: 키워드/정규식/가중치 등 - `implementation_path` / `implementation_hash`: 구현 파일 경로/해시",
10
10
  "**메트릭 목적** - `summary_faithfulness`: 요약의 주장들이 컨텍스트에 근거하는지 평가합니다. 환각/왜곡 리스크를 직접 측정합니다. - `summary_score`: 컨텍스트 대비 요약의 핵심 정보 보존/간결성 균형을 평가합니다. 정답 요약 단일 기준의 편향을 줄입니다. - `entity_preservation`: 보험 요약에서 금액·기간·조건·면책 등 핵심 엔티티가 요약에 유지되는지 평가합니다.\n\n**보험 도메인 특화 근거** - 보험 약관에서 치명적인 요소(면책, 자기부담, 한도, 조건 등)를 키워드로 직접 반영하고, 금액/기간/비율 같은 핵심 엔티티를 보존하도록 설계했습니다. - 범용 규칙(숫자/기간/금액)과 보험 특화 키워드를 함께 사용하므로, 현재 상태는 “보험 리스크 중심의 약한 도메인 특화”로 보는 것이 정확합니다.\n\n**해석 주의사항** - 세 메트릭 모두 `contexts` 품질에 크게 의존합니다. 컨텍스트가 부정확/과도하면 점수가 낮아질 수 있습니다. - `summary_score`는 키프레이즈 기반이므로, 표현이 달라지면 점수가 낮게 나올 수 있습니다. - `entity_preservation`은 보험 도메인에 맞춘 엔티티 위주라, 다른 도메인에서는 해석에 주의가 필요합니다.\n\n**메트릭별 예시(간단)** - `summary_score` (RAGAS, 키프레이즈 기반) - 컨텍스트: “3월 청구서 7만2천원, 데이터 10GB 추가(2만2천원), 4월 5GB 옵션(1만1천원)” - 요약 A: “3월 청구서 7만2천원은 10GB 추가 사용 때문이며 4월부터 5GB 옵션(1만1천원)으로 변경됨” → 금액/수치 키프레이즈 유지 → 점수 ↑ - 요약 B: “지난달 청구액이 늘었고 다음 달부터 요금 옵션을 낮춤” → 수치/금액 누락 → 점수 ↓ - `summary_faithfulness` (RAGAS, 근거성) - 컨텍스트: “5GB 옵션이면 1만1천원 추가” - 요약 A: “5GB 옵션은 1만1천원 추가” → 근거 일치 → 점수 ↑ - 요약 B: “5GB 옵션은 2만2천원 추가” → 근거 불일치 → 점수 ↓ - `entity_preservation` (커스텀, 규칙 기반) - 컨텍스트 엔티티: {10GB, 7만2천원, 2만2천원, 1만1천원} - 요약 A: “10GB, 7만2천원, 1만1천원” 포함 → 보존률 3/4 - 요약 B: 수치 미포함 → 보존률 0/4\n\n### 메트릭 신뢰도/타당도 및 리스크/완화 가이드\n\n이 절은 **메트릭 점수의 신뢰도(재현성/안정성)와 타당도(무엇을 실제로 측정하는지)** 관점에서, 실무에서 자주 발생하는 오해/리스크와 완화책을 정리합니다.\n\n#### RAGAS 메트릭(LLM/임베딩 기반): 신뢰도/타당도\n\n- 공통 특징: LLM/임베딩 호출을 포함하므로 **동일 입력이라도 점수 변동(분산)**이 발생할 수 있습니다. 또한 대부분이 **\"사용자 만족\"이 아니라 \"근거/관련성/정답과의 유사\"라는 대리 신호**를 측정합니다. - `faithfulness`, `summary_faithfulness`: 컨텍스트 근거(groundedness)에 초점을 둔 지표입니다. 컨텍스트가 부족/불일치하면 모델이 사실을 알고 있어도 낮게 나올 수 있고, 반대로 컨텍스트가 장황하거나 유사 문장이 섞이면 과대평가될 수 있습니다. - `answer_relevancy`: 질문-답변 관련성(대체로 임베딩 유사도 + LLM 보조)에 초점을 둡니다. 질문이 모호하거나 답변이 장황하면 관련성 점수 해석이 왜곡될 수 있습니다. - `context_precision`, `context_recall`: `ground_truth`(레퍼런스)가 있을 때만 의미가 커집니다. 레퍼런스 품질이 낮거나 누락되면 지표 자체가 retrieval 성능이 아니라 **레퍼런스 품질/정의의 노이즈**를 반영할 수 있습니다. 
- `factual_correctness`: `ground_truth` 대비 사실 일치성을 보려는 지표지만, claim 분해/검증 단계가 포함되어 비용이 크고(LLM 다중 호출), 데이터/언어/프롬프트에 따라 분산이 커질 수 있습니다. - `semantic_similarity`: 임베딩 유사도 기반으로, 문장 표현이 바뀌어도 높은 점수가 나올 수 있습니다. 따라서 \"정답과 의미가 비슷\"은 반영하지만, **누락/추가/조건(면책/한도) 같은 위험 요소의 부재**를 직접 보장하지는 않습니다.\n\n#### 요약 메트릭(요약 품질 신호): 신뢰도/타당도",
11
11
  "- `summary_score`(RAGAS): 컨텍스트 대비 핵심 정보 보존/간결성 균형의 **대리 신호**입니다. 요약이 동의어/재서술을 많이 쓰면 키프레이즈 기반 구성상 낮아질 수 있으며, \"좋은 요약\"의 도메인별 정의(보험/콜센터/정책)가 다르면 동일 점수라도 의미가 달라집니다. - `summary_faithfulness`(RAGAS): 요약의 주장들이 컨텍스트에 근거하는지(환각/왜곡 리스크)를 평가합니다. 그러나 LLM 기반 판정이므로, **경계 사례(암시/추론/조건부 문장)**에서 변동이 생길 수 있습니다. - `entity_preservation`(커스텀, 규칙 기반): 컨텍스트에 등장하는 숫자/기간/금액/키워드가 요약에 유지되는지를 봅니다. 규칙 기반이어서 안정적이지만, 엔티티 정의(정규식/키워드 목록)에 없는 표현은 놓칠 수 있고, \"엔티티는 유지됐지만 조건이 바뀐\" 왜곡을 완전히 잡아내지는 못합니다.\n\n#### 요약 커스텀 메트릭(규칙/메타데이터 기반): 신뢰도/타당도\n\n- `summary_accuracy`(규칙 기반): 요약(answer)에서 추출한 엔티티(숫자/기간/금액/키워드)가 컨텍스트에 존재하는지 비율로 계산합니다. 따라서 \"요약이 컨텍스트 밖의 수치/조건을 추가했는가\"에 민감하지만, **동일 의미의 다른 표현/표기**(예: 단위, 띄어쓰기, 약어)는 놓칠 수 있습니다. - `summary_risk_coverage`(규칙+메타데이터): `metadata.summary_tags`에 정의된 기대 리스크 태그(exclusion/limit/...)가 요약에 반영되는지 확인합니다. - `metadata.summary_tags`가 비어 있으면 점수가 `1.0`으로 처리되므로, 태그가 없는 데이터셋에서는 \"리스크 커버\"를 검증했다고 보기 어렵습니다. - `summary_non_definitive`(규칙 기반): \"무조건/반드시/100%\" 같은 단정 표현이 있으면 `0.0`, 없으면 `1.0`입니다. 간단하고 안정적이지만 **단 1회 매칭으로 0점**이므로, 문장 스타일/표현 습관에 매우 민감합니다. - `summary_needs_followup`(규칙+메타데이터): `metadata.summary_tags`에 `needs_followup`가 있는 경우에만 follow-up 문구를 요구합니다. - 태그가 없는데 follow-up 문구가 들어가면 `0.0`이 될 수 있어, 데이터셋 태깅 정책과 함께 해석해야 합니다.\n\n#### QA 커스텀 메트릭(규칙/사전/문자열 기반): 신뢰도/타당도\n\n- `insurance_term_accuracy`(사전 기반): 보험 용어 사전(정규형/변형)을 기준으로 답변 용어가 컨텍스트에 근거하는지 확인합니다. 사전에 없는 신조어/상품명은 놓칠 수 있어, 도메인 확장 시 사전 갱신이 필요합니다. - `contextual_relevancy`(규칙 기반): 질문-컨텍스트 토큰 겹침으로 \"컨텍스트가 질문과 맞는가\"를 빠르게 점검합니다. 의미적 유사(동의어/패러프레이즈)에는 약해, **retriever 디버깅용 보조 신호**로 보는 것이 안전합니다. - `no_answer_accuracy`, `exact_match`, `f1_score`(규칙/문자열 기반): 재현성이 높아 회귀 테스트에는 유리하지만, 문제 정의가 조금만 바뀌면(표현 다양성/서술형 답변) 타당도가 급격히 떨어질 수 있습니다. - `mrr`, `ndcg`, `hit_rate`(규칙 기반): retrieval 순위 품질을 보기 위한 지표이지만, 관련성 판정이 토큰 겹침 기반이므로 **ground_truth 표현과의 어휘 불일치**가 크면 과소평가될 수 있습니다. - `confidence_score`(휴리스틱 결합): 여러 신호를 가중 결합한 참고 지표로, \"품질 확정\"보다는 **리뷰 우선순위/에스컬레이션 신호**로 사용하는 편이 안전합니다.",
12
- "**실사용 주의사항** - LLM 기반 지표(RAGAS 계열)는 단일 run의 점수만으로 결론을 내리지 말고, 최소 샘플 수/분산(재실행) 관점에서 함께 보세요. - `contexts`가 부정확/누락/중복되면 대부분의 지표가 왜곡됩니다. 특히 `faithfulness`/`summary_faithfulness` 저하는 \"모델 문제\"가 아니라 \"근거 입력 문제\"일 수 있습니다. - `ground_truth`가 필요한 지표(`context_precision`, `context_recall`, `factual_correctness`, `semantic_similarity`, 일부 커스텀)는 레퍼런스 품질이 곧 지표 품질입니다. 레퍼런스 정의가 불안정하면 지표가 흔들립니다. - 일부 전처리 경로는 레퍼런스(`reference`) 누락을 질문/답변/컨텍스트 기반으로 보완할 수 있습니다. 이 경우 지표가 \"정답 대비\"가 아니라 \"입력으로 만든 준-정답 대비\"가 되어 타당도가 달라질 수 있으므로, 가능하면 데이터셋에 명시적 `ground_truth`/`reference`를 제공하세요. - `metadata.summary_tags`에 의존하는 지표(`summary_risk_coverage`, `summary_needs_followup`)는 태그가 없으면 점수가 과대평가되거나(항상 1.0), 반대로 불필요한 패널티가 생길 수 있습니다. - 규칙 기반 지표(문자열/정규식/사전)는 안정적이지만, 도메인이 바뀌면 키워드/패턴/사전 갱신이 필요합니다(미갱신 상태에서의 점수는 \"측정 실패\"일 수 있음).\n\n**개선 제안(옵션)** - LLM 기반 지표의 변동을 줄이려면: 동일 모델/프롬프트/프로필을 고정하고, 프롬프트 스냅샷(`--db`, `--ragas-prompts`, `--system-prompt*`)을 함께 저장해 비교 조건을 고정하세요. - 요약 품질을 더 안전하게 보려면: `summary_faithfulness`(근거) + `entity_preservation`/`summary_accuracy`(핵심 엔티티) + `summary_risk_coverage`(리스크 태그)처럼 **서로 다른 실패 모드를 커버하는 조합**으로 해석하세요. - `summary_risk_coverage`/`summary_needs_followup`를 쓰려면: 데이터셋 생성 단계에서 `metadata.summary_tags` 정책을 정의하고(누가/어떻게 태깅), 누락률을 모니터링하세요. - 규칙 기반 지표를 도메인에 맞추려면: 사전(`terms_dictionary.json`)·키워드·정규식을 도메인/제품 정책에 맞춰 확장하고, 변경 시 `custom_metric_snapshot`을 통해 구현 해시가 기록되는지 확인하세요. - RAGAS 점수와 실사용 만족도의 정합성을 높이려면: 대표 샘플링 + 인간 평가로 보정하는 절차를 병행하세요(참고: `docs/guides/RAGAS_HUMAN_FEEDBACK_CALIBRATION_GUIDE.md`).\n\nEvalVault는 JSON/CSV/Excel을 지원합니다. **threshold는 데이터셋에 포함**되며, 값이 없으면 기본값 `0.7`을 사용합니다. 
Domain Memory를 켜면 신뢰도에 따라 자동 조정될 수 있습니다.\n\nJSON 예시는 아래와 같습니다.\n\n```json { \"name\": \"insurance_qa_korean\", \"version\": \"1.0.0\", \"thresholds\": {\"faithfulness\": 0.8}, \"test_cases\": [ { \"id\": \"tc-001\", \"question\": \"보험 해지 환급금은 어떻게 계산하나요?\", \"answer\": \"...\", \"contexts\": [\"...\"], \"ground_truth\": \"...\" } ] } ```\n\n- `thresholds`: 메트릭별 pass 기준 (0.0~1.0) - `ground_truth`: `context_precision`, `context_recall`, `factual_correctness`, `semantic_similarity`에 필요\n\nCSV/Excel의 경우 `id,question,answer,contexts,ground_truth` 컬럼을 포함하고, 선택적으로 `threshold_*` 컬럼을 넣을 수 있습니다. `threshold_*` 값은 **첫 번째로 채워진 행 기준**으로 데이터셋 전체 임계값으로 사용됩니다. `contexts`는 JSON 배열 문자열 또는 `|` 로 구분합니다. 대용량 파일은 `--stream` 옵션으로 스트리밍 평가를 활성화하세요.\n\n#### 데이터셋 템플릿\n\n빈 템플릿은 아래 위치에서 사용할 수 있습니다. 필요한 값만 채워 바로 사용할 수 있습니다.\n\n- 프로젝트 초기화 시: `dataset_templates/` 폴더에 JSON/CSV/XLSX 템플릿 생성 - 문서 저장소: `docs/templates/dataset_template.json` - 문서 저장소: `docs/templates/dataset_template.csv` - 문서 저장소: `docs/templates/dataset_template.xlsx`\n\nJSON 템플릿의 `thresholds` 값은 `null`로 비워져 있으므로 사용 전 숫자로 채우거나 삭제하세요. CSV/Excel은 `threshold_*` 컬럼에 값을 채우면 동일하게 적용됩니다.\n\n#### 실행 결과 엑셀(컬럼 설명) - 시트/컬럼 상세: `docs/guides/EVALVAULT_RUN_EXCEL_SHEETS.md`\n\n---\n\n## CLI 명령어 참조\n\n### 루트 명령어\n\n#### `init` - 프로젝트 초기화\n\n```bash uv run evalvault init uv run evalvault init --output-dir ./my-project uv run evalvault init --skip-env --skip-sample ```\n\n- `.env` 템플릿과 `sample_dataset.json`을 생성합니다. - `dataset_templates/`에 JSON/CSV/XLSX 템플릿을 생성합니다. - `--output-dir`로 생성 위치를 지정할 수 있습니다. - `--skip-env`/`--skip-sample`/`--skip-templates`로 단계별 생성을 끌 수 있습니다.\n\n#### `run` - 평가 실행\n\n```bash uv run evalvault run tests/fixtures/e2e/insurance_qa_korean.json \\ --metrics faithfulness,answer_relevancy \\ --tracker phoenix \\ --profile dev \\ --db data/db/evalvault.db \\ --auto-analyze ```\n\n**주요 옵션**:",
12
+ "**실사용 주의사항** - LLM 기반 지표(RAGAS 계열)는 단일 run의 점수만으로 결론을 내리지 말고, 최소 샘플 수/분산(재실행) 관점에서 함께 보세요. - `contexts`가 부정확/누락/중복되면 대부분의 지표가 왜곡됩니다. 특히 `faithfulness`/`summary_faithfulness` 저하는 \"모델 문제\"가 아니라 \"근거 입력 문제\"일 수 있습니다. - `ground_truth`가 필요한 지표(`context_precision`, `context_recall`, `factual_correctness`, `semantic_similarity`, 일부 커스텀)는 레퍼런스 품질이 곧 지표 품질입니다. 레퍼런스 정의가 불안정하면 지표가 흔들립니다. - 일부 전처리 경로는 레퍼런스(`reference`) 누락을 질문/답변/컨텍스트 기반으로 보완할 수 있습니다. 이 경우 지표가 \"정답 대비\"가 아니라 \"입력으로 만든 준-정답 대비\"가 되어 타당도가 달라질 수 있으므로, 가능하면 데이터셋에 명시적 `ground_truth`/`reference`를 제공하세요. - `metadata.summary_tags`에 의존하는 지표(`summary_risk_coverage`, `summary_needs_followup`)는 태그가 없으면 점수가 과대평가되거나(항상 1.0), 반대로 불필요한 패널티가 생길 수 있습니다. - 규칙 기반 지표(문자열/정규식/사전)는 안정적이지만, 도메인이 바뀌면 키워드/패턴/사전 갱신이 필요합니다(미갱신 상태에서의 점수는 \"측정 실패\"일 수 있음).\n\n**개선 제안(옵션)** - LLM 기반 지표의 변동을 줄이려면: 동일 모델/프롬프트/프로필을 고정하고, 프롬프트 스냅샷(`--db`, `--ragas-prompts`, `--system-prompt*`)을 함께 저장해 비교 조건을 고정하세요. - 요약 품질을 더 안전하게 보려면: `summary_faithfulness`(근거) + `entity_preservation`/`summary_accuracy`(핵심 엔티티) + `summary_risk_coverage`(리스크 태그)처럼 **서로 다른 실패 모드를 커버하는 조합**으로 해석하세요. - `summary_risk_coverage`/`summary_needs_followup`를 쓰려면: 데이터셋 생성 단계에서 `metadata.summary_tags` 정책을 정의하고(누가/어떻게 태깅), 누락률을 모니터링하세요. - 규칙 기반 지표를 도메인에 맞추려면: 사전(`terms_dictionary.json`)·키워드·정규식을 도메인/제품 정책에 맞춰 확장하고, 변경 시 `custom_metric_snapshot`을 통해 구현 해시가 기록되는지 확인하세요. - RAGAS 점수와 실사용 만족도의 정합성을 높이려면: 대표 샘플링 + 인간 평가로 보정하는 절차를 병행하세요(참고: `docs/guides/RAGAS_HUMAN_FEEDBACK_CALIBRATION_GUIDE.md`).\n\nEvalVault는 JSON/CSV/Excel을 지원합니다. **threshold는 데이터셋에 포함**되며, 값이 없으면 기본값 `0.7`을 사용합니다. 
Domain Memory를 켜면 신뢰도에 따라 자동 조정될 수 있습니다.\n\nJSON 예시는 아래와 같습니다.\n\n```json { \"name\": \"insurance_qa_korean\", \"version\": \"1.0.0\", \"thresholds\": {\"faithfulness\": 0.8}, \"test_cases\": [ { \"id\": \"tc-001\", \"question\": \"보험 해지 환급금은 어떻게 계산하나요?\", \"answer\": \"...\", \"contexts\": [\"...\"], \"ground_truth\": \"...\" } ] } ```\n\n- `thresholds`: 메트릭별 pass 기준 (0.0~1.0) - `ground_truth`: `context_precision`, `context_recall`, `factual_correctness`, `semantic_similarity`에 필요\n\nCSV/Excel의 경우 `id,question,answer,contexts,ground_truth` 컬럼을 포함하고, 선택적으로 `threshold_*` 컬럼을 넣을 수 있습니다. `threshold_*` 값은 **첫 번째로 채워진 행 기준**으로 데이터셋 전체 임계값으로 사용됩니다. `contexts`는 JSON 배열 문자열 또는 `|` 로 구분합니다.\n\n요약 메트릭용 선택 컬럼: - `summary_tags`: 콤마/파이프 구분 또는 JSON 배열 문자열 - `summary_intent`: 예) `agent_notes` - `metadata`: JSON 객체 문자열 (예: `{ \"priority\": \"high\" }`)\n\n대용량 파일은 `--stream` 옵션으로 스트리밍 평가를 활성화하세요.\n\n#### 데이터셋 템플릿\n\n빈 템플릿은 아래 위치에서 사용할 수 있습니다. 필요한 값만 채워 바로 사용할 수 있습니다.\n\n- 프로젝트 초기화 시: `dataset_templates/` 폴더에 JSON/CSV/XLSX 템플릿 생성 - 문서 저장소: `docs/templates/dataset_template.json` - 문서 저장소: `docs/templates/dataset_template.csv` - 문서 저장소: `docs/templates/dataset_template.xlsx`\n\nJSON 템플릿의 `thresholds` 값은 `null`로 비워져 있으므로 사용 전 숫자로 채우거나 삭제하세요. CSV/Excel은 `threshold_*` 컬럼에 값을 채우면 동일하게 적용됩니다.\n\n#### 실행 결과 엑셀(컬럼 설명) - 시트/컬럼 상세: `docs/guides/EVALVAULT_RUN_EXCEL_SHEETS.md`\n\n---\n\n## CLI 명령어 참조\n\n### 루트 명령어\n\n#### `init` - 프로젝트 초기화\n\n```bash uv run evalvault init uv run evalvault init --output-dir ./my-project uv run evalvault init --skip-env --skip-sample ```\n\n- `.env` 템플릿과 `sample_dataset.json`을 생성합니다. - `dataset_templates/`에 JSON/CSV/XLSX 템플릿을 생성합니다. - `--output-dir`로 생성 위치를 지정할 수 있습니다. 
- `--skip-env`/`--skip-sample`/`--skip-templates`로 단계별 생성을 끌 수 있습니다.\n\n#### `run` - 평가 실행\n\n```bash uv run evalvault run tests/fixtures/e2e/insurance_qa_korean.json \\ --metrics faithfulness,answer_relevancy \\ --tracker phoenix \\ --profile dev \\ --db data/db/evalvault.db \\ --auto-analyze ```\n\n**주요 옵션**:",
13
13
  "- `--metrics, -m`: 쉼표로 구분된 메트릭 목록 - `--preset`: `quick`/`production`/`comprehensive` 프리셋 적용 - `--mode`: `simple`/`full` 실행 모드 - `--auto-analyze`: 평가 완료 후 통합 분석을 자동 실행하고 보고서를 저장 - `--analysis-json`: 자동 분석 JSON 결과 파일 경로 (기본값: `reports/analysis`) - `--analysis-report`: 자동 분석 Markdown 보고서 경로 (기본값: `reports/analysis`) - `--analysis-dir`: 자동 분석 결과 저장 디렉터리 (기본: `reports/analysis`) - `--parallel, -P`: 병렬 평가 활성화 - `--batch-size, -b`: 배치 크기 (기본: 5) - `--stream, -s`: 대용량 데이터셋 스트리밍 평가 - `--stream-chunk-size`: 스트리밍 청크 크기 (기본: 200) - `--tracker, -t`: 추적기 선택 (`none`, `langfuse`, `mlflow`, `phoenix`) - `--db, -D`: SQLite 저장소 지정 - `--use-domain-memory`: Domain Memory 기반 threshold/컨텍스트 보강 활성화 - `--memory-domain`: Domain Memory 도메인 이름 - `--memory-language`: Domain Memory 언어 코드 (기본: ko) - `--augment-context`: Domain Memory 사실을 컨텍스트에 추가 - `--memory-db, -M`: Domain Memory DB 경로 - `--retriever, -r`: 리트리버 선택 (`bm25`, `dense`, `hybrid`, `graphrag`) - `--retriever-docs`: 리트리버 문서 파일 (.json/.jsonl/.txt) - `--kg, -k`: GraphRAG용 Knowledge Graph JSON 파일 - `--retriever-top-k`: 리트리버 Top-K (기본: 5) - `--phoenix-dataset`: Phoenix Dataset 이름 - `--phoenix-experiment`: Phoenix Experiment 이름 - `--prompt-manifest`: Phoenix prompt manifest JSON 경로 - `--prompt-files`: 프롬프트 파일 목록 (쉼표로 구분) - `--system-prompt`: 시스템 프롬프트 텍스트 - `--system-prompt-file`: 시스템 프롬프트 파일 경로 - `--ragas-prompts`: Ragas 프롬프트 오버라이드 YAML 파일\n\n**Run Modes**:\n\n| 모드 | 명령 | 동작 | |------|------|------| | Simple | `uv run evalvault run --mode simple DATASET.json`<br>`uv run evalvault run-simple DATASET.json` | `faithfulness,answer_relevancy` 메트릭 + Phoenix tracker 고정, Domain Memory/Prompt manifest 비활성 | | Full | `uv run evalvault run --mode full DATASET.json`<br>`uv run evalvault run-full DATASET.json` | 모든 Typer 옵션(프로파일, Prompt manifest, Phoenix dataset/experiment, Domain Memory, streaming)을 노출 |\n\n**Evaluation Presets**:\n\n| 프리셋 | 설명 | 기본 메트릭 | |--------|------|-------------| | `quick` | 빠른 반복 평가 (parallel, batch_size=10) | `faithfulness` 
| | `production` | 프로덕션 밸런스 (parallel, batch_size=5) | `faithfulness`, `answer_relevancy`, `context_precision`, `context_recall` | | `comprehensive` | 전체 메트릭 평가 (parallel, batch_size=3) | `faithfulness`, `answer_relevancy`, `context_precision`, `context_recall`, `factual_correctness`, `semantic_similarity` |\n\n#### `pipeline` - 분석 파이프라인 실행\n\n```bash uv run evalvault pipeline analyze \"요약해줘\" --run <RUN_ID> --db data/db/evalvault.db ```\n\n의도 분류 후 DAG 모듈을 실행하여 통계/NLP/인과 분석을 수행합니다.\n\n#### `history` - 평가 이력 조회\n\n```bash uv run evalvault history --limit 20 --db data/db/evalvault.db uv run evalvault history --mode simple --db data/db/evalvault.db ```\n\n#### `analyze` - 단일 실행 분석\n\n```bash uv run evalvault analyze <RUN_ID> \\ --db data/db/evalvault.db \\ --nlp --causal \\ --output analysis.json \\ --report analysis.md ```\n\n**옵션**: - `--nlp, -N`: NLP 분석 포함 - `--causal, -c`: 인과 분석 포함 - `--playbook, -B`: 플레이북 기반 개선 분석 포함 - `--enable-llm, -L`: LLM 기반 인사이트 생성 활성화 - `--output, -o`: 출력 JSON 파일 - `--report, -r`: 출력 보고서 파일 (*.md or *.html) - `--save, -S`: 분석 결과를 데이터베이스에 저장\n\n#### `analyze-compare` - A/B 비교 분석\n\n```bash uv run evalvault analyze-compare <RUN_A> <RUN_B> \\ --db data/db/evalvault.db \\ --metrics faithfulness,answer_relevancy \\ --test t-test ```\n\n기본 저장 위치: - JSON 결과: `reports/comparison/comparison_<RUN_A>_<RUN_B>.json` - Markdown 보고서: `reports/comparison/comparison_<RUN_A>_<RUN_B>.md`\n\n비교 보고서는 **프롬프트 변경 요약 + 통계 비교 + 개선 제안**을 자동으로 포함합니다.\n\n#### `generate` - 테스트셋 생성\n\n```bash uv run evalvault generate --from-docs documents.txt --output dataset.json ```\n\n#### `gate` - 품질 게이트 검사\n\n```bash uv run evalvault gate <RUN_ID> --db data/db/evalvault.db --format github-actions ```\n\n#### `agent` - 에이전트 관리\n\n```bash uv run evalvault agent list uv run evalvault agent info <agent_type> uv run evalvault agent run <agent_type> --project-dir . 
```\n\n#### `experiment` - 실험 관리\n\n```bash uv run evalvault experiment-create --name \"A/B Test\" --db data/db/evalvault.db uv run evalvault experiment-add-group <EXPERIMENT_ID> --name \"baseline\" uv run evalvault experiment-add-run <EXPERIMENT_ID> <GROUP_NAME> <RUN_ID> --db data/db/evalvault.db uv run evalvault experiment-compare <EXPERIMENT_ID> --db data/db/evalvault.db ```\n\n#### `config` - 설정 확인\n\n```bash uv run evalvault config ```\n\n현재 설정 상태를 확인합니다.\n\n#### `langfuse` - Langfuse 설정 확인\n\n```bash uv run evalvault langfuse-dashboard ```\n\n#### `serve-api` - FastAPI 서버 실행\n\n```bash uv run evalvault serve-api --reload ```\n\n### 서브앱 명령어\n\n#### `kg` - Knowledge Graph\n\n```bash uv run evalvault kg build ./docs --output data/kg/knowledge_graph.json uv run evalvault kg stats ./docs --use-llm --profile dev ```\n\n#### `domain` - Domain Memory\n\n```bash uv run evalvault domain memory stats --db data/db/evalvault_memory.db uv run evalvault domain memory ingest-embeddings phoenix.csv --domain insurance --language ko ```\n\n#### `benchmark` - KMMLU 벤치마크 실행\n\nlm-evaluation-harness를 사용하여 KMMLU(Korean MMLU) 벤치마크를 실행합니다.\n\n```bash # Ollama 백엔드로 실행 uv run evalvault benchmark kmmlu -s Insurance --backend ollama -m gemma3:1b\n\n# Thinking 모델로 실행 (gpt-oss-safeguard, deepseek-r1 등) uv run evalvault benchmark kmmlu -s Accounting --backend ollama -m gpt-oss-safeguard:20b --limit 10\n\n# Phoenix 트레이싱 활성화 uv run evalvault benchmark kmmlu -s Insurance --backend ollama -m gemma3:1b --phoenix\n\n# vLLM 백엔드로 실행 uv run evalvault benchmark kmmlu -s Insurance --backend vllm\n\n# 여러 도메인 동시 실행 uv run evalvault benchmark kmmlu -s \"Insurance,Finance\" -m llama2\n\n# 테스트용 샘플 제한 uv run evalvault benchmark kmmlu -s Insurance --limit 10 -o results.json ```\n\n**주요 옵션**: - `-s, --subjects`: 평가할 KMMLU 도메인 (Insurance, Finance, Accounting 등) - `--backend`: 백엔드 선택 (`ollama`, `vllm`, `hf`, `openai`) - `-m, --model`: 모델 이름 - `--limit`: 테스트 샘플 수 제한 - `--phoenix`: Phoenix 트레이싱 활성화 - `-o, 
--output`: 결과 JSON 파일 경로\n\n**Thinking Model 지원**: Ollama의 thinking 모델(예: `gpt-oss-safeguard:20b`, `deepseek-r1:*`)은 자동으로 감지됩니다. - `max_gen_toks`가 8192로 증가 (thinking 토큰 포함) - Stop sequence가 `[\"Q:\", \"\\n\\n\\n\"]`로 수정 - MCQ 응답에서 첫 번째 A/B/C/D를 자동 추출\n\n#### `method` - 메서드 플러그인\n\n```bash uv run evalvault method list uv run evalvault method run data.json --method my_team_method --metrics faithfulness ```\n\n#### `phoenix` - Phoenix 연동\n\n```bash uv run evalvault phoenix export-embeddings --dataset ds_123 --output embeddings.csv uv run evalvault phoenix prompt-link agent/prompts/baseline.txt --prompt-id pr-428 uv run evalvault phoenix prompt-diff baseline.txt system.txt --manifest manifest.json ```\n\n#### `prompts` - 프롬프트 관리\n\n```bash uv run evalvault prompts show <RUN_ID> --db data/db/evalvault.db uv run evalvault prompts diff <RUN_A> <RUN_B> --db data/db/evalvault.db ```\n\n프롬프트 언어 기본값은 `ko`이며, 필요 시 API/SDK에서 다음 옵션으로 영어를 지정할 수 있습니다. - 평가/요약 판정: `language=\"en\"` - 프롬프트 후보 평가: `prompt_language=\"en\"`\n\n#### `stage` - 단계별 성능 평가\n\n```bash uv run evalvault stage ingest examples/stage_events.jsonl --db data/db/evalvault.db uv run evalvault stage summary <RUN_ID> --db data/db/evalvault.db uv run evalvault stage compute-metrics <RUN_ID> --db data/db/evalvault.db uv run evalvault stage report <RUN_ID> --db data/db/evalvault.db ```\n\n#### `debug` - 디버그 리포트\n\n```bash uv run evalvault debug report <RUN_ID> --db data/db/evalvault.db ```\n\n### 공통 옵션\n\n| 옵션 | 설명 | 사용 예 | |------|------|---------| | `--profile, -p` | `config/models.yaml`에 정의된 프로필을 적용합니다. | `uv run evalvault run dataset.json -p dev` | | `--db, -D` | 평가 결과를 저장할 SQLite 경로입니다. 기본값은 `EVALVAULT_DB_PATH` 또는 `data/db/evalvault.db`. | `uv run evalvault history -D reports/evalvault.db` | | `--memory-db, -M` | 도메인 메모리 SQLite 경로입니다. 기본값은 `EVALVAULT_MEMORY_DB_PATH` 또는 `data/db/evalvault_memory.db`. 
| `uv run evalvault domain memory stats -M data/memory.db` |\n\n---\n\n## Web UI\n\n### 실행 방법\n\n```bash # Terminal 1: API 서버 uv run evalvault serve-api --reload\n\n# Terminal 2: React 프론트엔드 cd frontend npm install npm run dev ```\n\n- 기본 접속: http://localhost:5173 - API 기본: http://127.0.0.1:8000 - Vite dev 서버는 `/api`를 API로 프록시합니다.\n\n### 주요 기능\n\n- **Evaluation Studio**: 데이터셋 업로드, 평가 실행, 결과 확인 - **Analysis Lab**: 분석 파이프라인 실행, 결과 저장/불러오기 - **Reports**: 평가 결과 보고서, 히스토리, 비교 뷰\n\n### Web UI와 CLI 연동\n\nCLI와 Web UI가 동일한 DB(`--db` 또는 `EVALVAULT_DB_PATH`)를 사용하면: - CLI에서 실행한 평가 결과를 Web UI에서 바로 확인 가능 - Web UI에서 실행한 평가 결과를 CLI `history` 명령으로 확인 가능 - 분석 결과도 양쪽에서 공유\n\n### 보고서 언어 옵션\n\nLLM 보고서는 기본 한국어이며, 필요 시 영어로 요청할 수 있습니다.\n\n- `GET /api/v1/runs/{run_id}/report?language=en` (기본값: `ko`)\n\n### 피드백 집계 규칙\n\nWeb UI의 별점/Thumb 피드백 집계는 다음 규칙을 따릅니다.\n\n- 집계 기준: 동일 `rater_id` + `test_case_id`의 **최신 피드백만** 반영 - 취소(`thumb_feedback=none` 또는 빈 값)는 집계에서 제외\n\n---\n\n## 분석 워크플로\n\n### 자동 분석 (옵션 방식)\n\n평가 완료 후 자동으로 분석을 실행하려면 `--auto-analyze` 옵션을 사용합니다:\n\n```bash uv run evalvault run data.json \\ --metrics faithfulness,answer_relevancy \\ --db data/db/evalvault.db \\ --auto-analyze ```\n\n### 기본 저장 위치",
14
14
  "- JSON 결과: `reports/analysis/analysis_<run_id>.json` - Markdown 보고서: `reports/analysis/analysis_<run_id>.md` - 아티팩트 인덱스: `reports/analysis/artifacts/analysis_<run_id>/index.json` - 노드별 결과: `reports/analysis/artifacts/analysis_<run_id>/<node_id>.json`\n\n### 저장 위치 커스터마이즈\n\n```bash uv run evalvault run data.json \\ --db data/db/evalvault.db \\ --auto-analyze \\ --analysis-dir reports/custom \\ --analysis-json reports/custom/run_001.json \\ --analysis-report reports/custom/run_001.md ```\n\n### 단일 실행 분석 (수동)\n\n```bash uv run evalvault analyze RUN_ID \\ --db data/db/evalvault.db \\ --nlp --causal ```\n\n필요 시 `--output`, `--report`로 파일 저장 가능합니다.\n\n### A/B 직접 비교 분석\n\n```bash uv run evalvault analyze-compare RUN_A RUN_B \\ --db data/db/evalvault.db \\ --metrics faithfulness,answer_relevancy \\ --test t-test ```\n\n기본 저장 위치: - JSON 결과: `reports/comparison/comparison_<run_a>_<run_b>.json` - Markdown 보고서: `reports/comparison/comparison_<run_a>_<run_b>.md`\n\n비교 보고서는 **프롬프트 변경 요약 + 통계 비교 + 개선 제안**을 자동으로 포함합니다.\n\n### 분석 결과에 포함되는 내용\n\n- **통계 요약**: 평균/분산/상관관계/통과율 - **Ragas 요약**: 메트릭별 평균, 케이스별 점수 - **저성과 케이스**: 낮은 점수 샘플, 우선순위 케이스 - **진단/원인 분석**: 문제 원인 가설 + 개선 힌트 - **패턴/트렌드**: 키워드/질문 유형 패턴, 실행 이력 추세 - **A/B 변경 사항**: 시스템 프롬프트, Ragas 프롬프트, 모델/옵션 차이 - **LLM 종합 보고서**: 원인 분석 + 개선 방향 + 다음 실험 제안\n\n### 평가 → 분석 전체 흐름\n\n1. **평가 실행** - `evalvault run data.json --db ...` 2. **자동 분석 (옵션)** - `--auto-analyze`로 즉시 보고서 생성 3. **추가 분석** - 필요 시 `evalvault analyze`로 상세 분석 4. **A/B 비교** - `evalvault analyze-compare`로 비교 보고서 생성 5. **프롬프트/메트릭 개선** - 보고서의 개선 제안을 반영해 다음 실행\n\n### 품질 확보 팁\n\n- A/B 비교는 **데이터셋 동일** 조건에서 수행하세요. - 프롬프트 변경은 프롬프트 관리 섹션의 흐름대로 스냅샷 저장하세요. 
- 비교 결과가 애매하면 샘플 수를 늘리고 재실행하세요.\n\n---\n\n## Domain Memory 활용\n\nDomain Memory는 과거 평가 결과에서 도메인 지식/패턴을 축적하여 다음 평가에 활용하는 시스템입니다.\n\n### 기본 사용법\n\n```bash uv run evalvault run data.json \\ --metrics faithfulness,answer_relevancy \\ --use-domain-memory \\ --memory-domain insurance \\ --memory-language ko \\ --augment-context \\ --db data/db/evalvault.db ```\n\n**옵션 설명**: - `--use-domain-memory`: Domain Memory 활성화 - `--memory-domain`: 도메인 이름 (기본값: 데이터셋 메타데이터에서 추출) - `--memory-language`: 언어 코드 (기본: ko) - `--augment-context`: 관련 사실을 컨텍스트에 추가 - `--memory-db, -M`: Domain Memory DB 경로\n\n### 동작 원리\n\n1. **Threshold 자동 조정**: Domain Memory의 신뢰도 점수에 따라 메트릭 임계값을 자동 조정 2. **컨텍스트 보강**: 각 테스트 케이스의 질문으로 관련 사실을 검색하여 컨텍스트에 추가 3. **학습**: 평가 완료 후 Domain Learning Hook이 결과에서 사실/패턴/행동을 추출하여 저장\n\n### MemoryBasedAnalysis\n\n과거 학습 메모리와 현재 결과를 비교하여 추세와 추천을 생성합니다:\n\n```bash uv run evalvault analyze <RUN_ID> \\ --db data/db/evalvault.db \\ --use-domain-memory ```\n\n**제한 사항**: - Streaming 모드(`--stream`)에서는 Domain Memory를 사용할 수 없습니다. - Web UI 인사이트: Domain Memory/MemoryBasedAnalysis 인사이트는 CLI 출력 기준으로만 제공됩니다.\n\n---\n\n## 관측성 & Phoenix\n\n### 트레이싱 활성화\n\n1. `uv sync --extra phoenix` 2. `.env` 에 `PHOENIX_ENABLED=true`, `PHOENIX_ENDPOINT`, `PHOENIX_SAMPLE_RATE`, `PHOENIX_API_TOKEN(선택)` 설정 3. 
CLI 실행 시 `--tracker phoenix` 또는 `--phoenix-max-traces` 사용\n\nPhoenix 트레이스는 OpenTelemetry 스팬으로 생성되며 `tracker_metadata[\"phoenix\"][\"trace_url\"]` 에 링크가 저장됩니다.\n\n### Dataset/Experiment 동기화\n\n```bash uv run evalvault run tests/fixtures/e2e/insurance_qa_korean.json \\ --metrics faithfulness,answer_relevancy \\ --tracker phoenix \\ --phoenix-dataset insurance-qa-ko \\ --phoenix-dataset-description \"보험 QA v2025.01\" \\ --phoenix-experiment gemma3-ko-baseline \\ --phoenix-experiment-description \"Gemma3 vs OpenAI 비교\" ```\n\n- `--phoenix-dataset`: EvalVault Dataset을 Phoenix Dataset으로 업로드 - `--phoenix-experiment`: Phoenix Experiment 생성 및 메트릭/Pass Rate/Domain Memory 메타데이터 포함 - 생성된 URL은 JSON 출력과 Web UI 히스토리에서 확인할 수 있습니다.\n\n### Open RAG Trace 표준 연동\n\n외부/내부 RAG 시스템을 EvalVault·Phoenix와 동일한 스키마로 연결하려면 OpenTelemetry + OpenInference 기반의 **Open RAG Trace 표준**을 따르세요.\n\n**핵심 규칙** - `rag.module`로 모듈 단위를 식별 (retrieve/llm/eval 등) - 로그는 span event로 흡수 - 표준 필드 외 데이터는 `custom.*`로 보존 - 객체 배열은 `*_json`으로 직렬화 (`retrieval.documents_json` 등)\n\n**연동 순서** 1. Collector 실행 ```bash docker run --rm \\ -p 4317:4317 -p 4318:4318 \\ -e PHOENIX_OTLP_ENDPOINT=http://host.docker.internal:6006 \\ -v \"$(pwd)/scripts/dev/otel-collector-config.yaml:/etc/otelcol/config.yaml\" \\ otel/opentelemetry-collector:latest \\ --config=/etc/otelcol/config.yaml ``` 2. 계측 래퍼 적용 - `OpenRagTraceAdapter`, `trace_module`, `install_open_rag_log_handler` - `build_retrieval_attributes`, `build_llm_attributes` 등 헬퍼 사용 3. OTLP 전송 - Collector: `http://localhost:4318/v1/traces` - Phoenix 직접: `http://localhost:6006/v1/traces` 4. 검증 스크립트 실행 ```bash python3 scripts/dev/validate_open_rag_trace.py --input traces.json ```\n\n**관련 문서** - `docs/architecture/open-rag-trace-spec.md` - `docs/architecture/open-rag-trace-collector.md` - `docs/guides/OPEN_RAG_TRACE_INTERNAL_ADAPTER.md` - `docs/guides/OPEN_RAG_TRACE_SAMPLES.md`\n\n### 임베딩 분석 & 내보내기\n\nPhoenix 12.27.0의 Embeddings Analysis 뷰는 드리프트/클러스터/3D 시각화를 제공합니다. 
업로드된 Dataset/Experiment 화면에서 \"Embeddings\" 탭을 열면 EvalVault 질문/답변 벡터 및 Domain Memory 태그를 확인할 수 있습니다.\n\n오프라인 분석이 필요하면 CLI로 내보내세요: ```bash uv run evalvault phoenix export-embeddings \\ --dataset phoenix-dataset-id \\ --endpoint http://localhost:6006 \\ --output tmp/phoenix_embeddings.csv ```\n\nUMAP/HDBSCAN 라이브러리가 없는 경우 자동으로 PCA/DBSCAN으로 대체합니다.\n\n### Prompt Manifest 루프\n\nPrompt Playground와 EvalVault 실행을 동기화하려면 `agent/prompts/prompt_manifest.json`과 전용 명령을 사용합니다.\n\n1. **프롬프트 ↔ Phoenix ID 연결** ```bash uv run evalvault phoenix prompt-link agent/prompts/baseline.txt \\ --prompt-id pr-428 \\ --experiment-id exp-20250115 \\ --notes \"Gemma3 베이스라인\" ``` 2. **Diff 확인** ```bash uv run evalvault phoenix prompt-diff \\ agent/prompts/baseline.txt agent/prompts/system.txt \\ --manifest agent/prompts/prompt_manifest.json --format table ``` 3. **평가 실행에 Prompt 정보 주입** ```bash DATASET=\"tests/fixtures/e2e/insurance_qa_korean.json\" uv run evalvault run \"$DATASET\" --metrics faithfulness \\ --profile prod \\ --tracker phoenix \\ --prompt-files agent/prompts/baseline.txt,agent/prompts/system.txt \\ --prompt-manifest agent/prompts/prompt_manifest.json ```\n\n`tracker_metadata[\"phoenix\"][\"prompts\"]` 에 파일 상태/체크섬/diff가 기록되어 Slack 릴리즈 노트, 히스토리, Web UI에 그대로 노출됩니다.\n\n> **Tip**: Prompt Playground 연동 시에는 Phoenix tool-calling을 지원하는 `prod` 프로필(`gpt-oss-safeguard:20b`)을 사용하면 \"does not support tools\" 오류 없이 메타데이터가 기록됩니다.\n\n### 드리프트 감시 & 릴리스 노트\n\n- `scripts/ops/phoenix_watch.py`: Phoenix Dataset을 주기적으로 조회하여 `embedding_drift_score` 초과 시 Slack 알림 또는 `uv run evalvault gate <run_id>`/회귀 테스트 실행 ```bash uv run python scripts/ops/phoenix_watch.py \\ --endpoint http://localhost:6006 \\ --dataset-id ds_123 \\ --drift-key embedding_drift_score \\ --drift-threshold 0.18 \\ --slack-webhook https://hooks.slack.com/services/... 
\\ --gate-command \"uv run evalvault gate RUN_ID --format github-actions --db data/db/evalvault.db\" \\ --run-regressions threshold \\ --regression-config config/regressions/default.json ``` - `scripts/reports/generate_release_notes.py`: `uv run evalvault run --output run.json` 결과를 Markdown/Slack 형식 릴리스 노트로 변환하고 Phoenix 링크를 삽입합니다.\n\n---\n\n## 프롬프트 관리",
15
15
  "EvalVault는 **시스템 프롬프트**와 **Ragas 메트릭 프롬프트**를 실행 단위로 스냅샷 저장하고, 실행 간 변경점을 비교할 수 있도록 설계되어 있습니다.\n\n### 저장되는 프롬프트 범위\n\n- **시스템 프롬프트**: 대상 LLM에 실제로 주입한 시스템 메시지 - **Ragas 메트릭 프롬프트**: faithfulness 등 평가 메트릭용 프롬프트 오버라이드 - **Prompt Set 스냅샷**: 위 프롬프트들을 `run_id`와 함께 DB에 저장 (비교/회귀 추적용)\n\n> **중요**: Prompt Set 저장은 `--db` 옵션이 있어야 동작합니다.\n\n### 시스템 프롬프트 등록\n\n#### 텍스트 직접 입력\n\n```bash uv run evalvault run data.json \\ --system-prompt \"당신은 보험 약관 전문가입니다...\" \\ --prompt-set-name \"sys-v2\" \\ --db data/db/evalvault.db ```\n\n#### 파일로 입력\n\n```bash uv run evalvault run data.json \\ --system-prompt-file agent/prompts/system.txt \\ --system-prompt-name sys-v2 \\ --prompt-set-name \"sys-v2\" \\ --db data/db/evalvault.db ```\n\n### Ragas 프롬프트 YAML 오버라이드\n\n#### YAML 예시\n\n```yaml faithfulness: | 너는 답변의 근거가 컨텍스트에 있는지 평가한다...\n\nanswer_relevancy: | 질문 의도와 답변의 연관성을 평가한다... ```\n\n#### 실행 예시\n\n```bash uv run evalvault run data.json \\ --ragas-prompts config/ragas_prompts.yaml \\ --prompt-set-name \"ragas-v3\" \\ --db data/db/evalvault.db ```\n\n> YAML에 있는 메트릭이 `--metrics`에 없으면 경고가 출력됩니다.\n\n### 저장된 프롬프트 확인/비교\n\n#### 스냅샷 보기\n\n```bash uv run evalvault prompts show RUN_ID --db data/db/evalvault.db ```\n\n#### 두 실행 간 비교\n\n```bash uv run evalvault prompts diff RUN_A RUN_B --db data/db/evalvault.db ```\n\n#### 비교 분석 보고서에서 자동 반영\n\n```bash uv run evalvault analyze-compare RUN_A RUN_B --db data/db/evalvault.db ```\n\n`analyze-compare` 결과에는 **프롬프트 변경 요약 + 메트릭 변화**가 함께 포함됩니다.\n\n### 운영 팁\n\n- **Prompt Set 이름 규칙화**: `sys-v3`, `ragas-v2`, `release-2025-02` 등으로 관리 - **A/B 비교 시 데이터셋 고정**: 데이터셋이 바뀌면 비교 해석이 왜곡됩니다 - **Prompt Manifest 활용**: Phoenix Prompt Playground와 연결하려면 관측성 & Phoenix 섹션의 Prompt Manifest 절을 참고하세요.\n\n---\n\n## 성능 튜닝\n\n### TL;DR (우선순위 요약)\n\n1. **병렬 평가 + 배치 크기 조절**로 처리량 확보 2. **느린 메트릭 제외** (특히 `factual_correctness`, `semantic_similarity`) 3. **빠른 LLM/임베딩 모델**로 교체 (프로필/옵션 조정) 4. **컨텍스트 길이/개수 줄이기** (retriever/top_k, 데이터 전처리) 5. 
**부가 기능 끄기** (Domain Memory, Tracker)\n\n### 병렬 평가와 배치 크기\n\nEvalVault는 배치 단위 `asyncio.gather`로 병렬 평가를 수행합니다. 동시성은 `batch_size`가 결정하며, `parallel`은 병렬 활성화 스위치입니다.\n\n**CLI 예시** ```bash uv run evalvault run data.json --metrics faithfulness --parallel --batch-size 10 ```\n\n> 권장: 로컬 Ollama는 5~10, 외부 API는 레이트리밋에 맞춰 단계적으로 증가\n\n### 메트릭 최소화 (속도 영향 큼)\n\n현재 EvalVault는 메트릭을 **순차적으로 평가**합니다. 필요한 메트릭만 선택해 호출 수를 줄이는 것이 가장 큰 효과를 냅니다.\n\n| 메트릭 | 호출 성격 | 속도 영향 | |--------|-----------|-----------| | `faithfulness` | LLM 호출 | 중 | | `answer_relevancy` | LLM + 임베딩 | 중~높음 | | `context_precision` | LLM | 중 | | `context_recall` | LLM | 중 | | `semantic_similarity` | 임베딩 | 높음 (임베딩 모델 속도 영향) | | `factual_correctness` | LLM 다중 호출 (claim 분해/검증) | 매우 높음 | | 커스텀 메트릭 | 규칙 기반/비LLM | 낮음 |\n\n> 빠른 반복 평가 단계에서는 `faithfulness` 단일 메트릭만으로 시작하세요.\n\n### LLM/임베딩 모델 선택\n\n평가 속도는 모델이 좌우합니다. 빠른 모델을 별도 프로필로 두고 사용하세요.\n\n```bash # 빠른 모델 프로필로 전환 EVALVAULT_PROFILE=dev uv run evalvault run data.json --metrics faithfulness ```\n\n**Ollama**: - `config/models.yaml`에서 `think_level`을 낮추거나 제거하면 속도 개선 - 임베딩 모델은 소형 모델(`qwen3-embedding:0.6b` 등) 권장\n\n### 컨텍스트 길이/개수 줄이기\n\n프롬프트 토큰이 늘어날수록 평가 속도는 급격히 느려집니다.\n\n- 데이터셋의 `contexts`를 **짧게 유지** - `retriever`를 사용할 경우 `top_k`를 낮춤 - 중복/불필요한 컨텍스트 제거\n\n**CLI 예시** ```bash uv run evalvault run data.json \\ --metrics faithfulness \\ --retriever bm25 \\ --retriever-docs docs.jsonl \\ --retriever-top-k 3 ```\n\n> Web UI는 현재 `top_k=5` 고정이므로, 더 낮추려면 CLI 또는 API 사용이 필요합니다.\n\n### 부가 기능 비활성화\n\n아래 기능은 평가 속도에 직접적인 부하를 더합니다.\n\n- Domain Memory (`--use-domain-memory` OFF) - Tracker (`--tracker none`) - Phoenix 자동 트레이싱 (`PHOENIX_ENABLED=false`) - Retriever (컨텍스트가 이미 있으면 비활성화)\n\n---\n\n## 메서드 플러그인\n\nEvalVault는 메서드 플러그인 인터페이스를 지원하여 팀별 RAG 파이프라인을 공유 기본 데이터셋에 대해 실행하고, 표준 메트릭 및 분석 도구로 출력을 평가할 수 있습니다.\n\n### 소스\n\n- **내부 레지스트리**: `config/methods.yaml` - **외부 패키지**: `evalvault.methods` entry points\n\n### 기본 데이터셋 템플릿\n\n`dataset_templates/method_input_template.json`의 질문 우선 템플릿을 사용하세요. 
`question/ground_truth/contexts/metadata`만 필요하며 팀 간 안정적으로 유지됩니다.\n\n### 내부 레지스트리 예시\n\n```yaml methods: baseline_oracle: class_path: \"evalvault.adapters.outbound.methods.baseline_oracle:BaselineOracleMethod\" description: \"Use ground truth as the answer when available.\" tags: [\"baseline\", \"oracle\"] ```\n\n### Entry Point 예시 (외부 패키지)\n\n```toml [project.entry-points.\"evalvault.methods\"] my_team_method = \"my_team_pkg.methods:MyTeamMethod\" ```\n\n`examples/method_plugin_template`에서 작동하는 스캐폴드를 참고하세요.\n\n### 외부 명령 (의존성 격리)\n\n메서드 의존성이 충돌할 때 별도 venv/컨테이너에서 실행합니다. `config/methods.yaml`에 명령 기반 메서드를 구성하세요:\n\n```yaml methods: team_method_external: runner: external command: \"bash -lc 'cd ../team_method && uv run python -m team_method.run --input \\\"$EVALVAULT_METHOD_INPUT\\\" --output \\\"$EVALVAULT_METHOD_OUTPUT\\\"'\" shell: true timeout_seconds: 3600 description: \"Team method executed in its own env\" ```",
@@ -1939,6 +1939,21 @@
1939
1939
  "문자열",
1940
1940
  "구분",
1941
1941
  "하",
1942
+ "요약",
1943
+ "메",
1944
+ "트릭",
1945
+ "용",
1946
+ "선택",
1947
+ "컬럼",
1948
+ "콤마",
1949
+ "파이프",
1950
+ "구분",
1951
+ "배열",
1952
+ "문자열",
1953
+ "예",
1954
+ "객체",
1955
+ "문자열",
1956
+ "예",
1942
1957
  "대",
1943
1958
  "용량",
1944
1959
  "파일",
@@ -0,0 +1,118 @@
1
+ # EvalVault Offline Deployment - Docker Compose Configuration
2
+ # Air-gapped environment: PostgreSQL + EvalVault API + Web UI
3
+ #
4
+ # Usage:
5
+ # docker compose -f docker-compose.offline.yml --env-file .env.offline.example config
6
+ # docker compose -f docker-compose.offline.yml --env-file .env.offline up -d
7
+ #
8
+ # Prerequisites:
9
+ # - External LLM server (Ollama/vLLM) must be accessible within the air-gapped network
10
+ # - Model weights are NOT shipped; configure OLLAMA_BASE_URL or VLLM_BASE_URL in .env.offline
11
+ #
12
+ # Architecture (linux/amd64 assumed for most air-gapped deployments):
13
+ # - postgres: Evaluation data storage
14
+ # - evalvault-api: FastAPI backend (port 8000 internal)
15
+ # - evalvault-web: Nginx reverse proxy + React frontend (port 5173 exposed on 127.0.0.1)
16
+
17
+ services:
18
+ # PostgreSQL database for evaluation storage
19
+ postgres:
20
+ image: postgres:16-alpine
21
+ container_name: evalvault-postgres
22
+ restart: unless-stopped
23
+ environment:
24
+ POSTGRES_USER: ${POSTGRES_USER:-evalvault}
25
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-evalvault}
26
+ POSTGRES_DB: ${POSTGRES_DB:-evalvault}
27
+ volumes:
28
+ - postgres_data:/var/lib/postgresql/data
29
+ healthcheck:
30
+ test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-evalvault}"]
31
+ interval: 10s
32
+ timeout: 5s
33
+ retries: 5
34
+ networks:
35
+ - evalvault-net
36
+
37
+ # EvalVault API service (FastAPI backend)
38
+ evalvault-api:
39
+ image: evalvault-api:offline
40
+ build:
41
+ context: .
42
+ dockerfile: Dockerfile
43
+ container_name: evalvault-api
44
+ restart: unless-stopped
45
+ depends_on:
46
+ postgres:
47
+ condition: service_healthy
48
+ environment:
49
+ # Profile selection (dev/prod/vllm - see config/models.yaml)
50
+ EVALVAULT_PROFILE: ${EVALVAULT_PROFILE:-dev}
51
+ # SQLite paths (for local file-based storage if needed)
52
+ EVALVAULT_DB_PATH: ${EVALVAULT_DB_PATH:-data/db/evalvault.db}
53
+ EVALVAULT_MEMORY_DB_PATH: ${EVALVAULT_MEMORY_DB_PATH:-data/db/evalvault_memory.db}
54
+ # PostgreSQL connection
55
+ EVALVAULT_DB_HOST: postgres
56
+ EVALVAULT_DB_PORT: 5432
57
+ EVALVAULT_DB_NAME: ${POSTGRES_DB:-evalvault}
58
+ EVALVAULT_DB_USER: ${POSTGRES_USER:-evalvault}
59
+ EVALVAULT_DB_PASSWORD: ${POSTGRES_PASSWORD:-evalvault}
60
+ # Ollama configuration (air-gapped LLM server - user must provide URL)
61
+ OLLAMA_BASE_URL: ${OLLAMA_BASE_URL:-}
62
+ OLLAMA_TIMEOUT: ${OLLAMA_TIMEOUT:-120}
63
+ OLLAMA_TOOL_MODELS: ${OLLAMA_TOOL_MODELS:-}
64
+ # vLLM configuration (alternative air-gapped LLM server - user must provide URL)
65
+ VLLM_BASE_URL: ${VLLM_BASE_URL:-}
66
+ VLLM_API_KEY: ${VLLM_API_KEY:-}
67
+ VLLM_MODEL: ${VLLM_MODEL:-}
68
+ VLLM_EMBEDDING_MODEL: ${VLLM_EMBEDDING_MODEL:-}
69
+ VLLM_EMBEDDING_BASE_URL: ${VLLM_EMBEDDING_BASE_URL:-}
70
+ # Faithfulness fallback (optional)
71
+ FAITHFULNESS_FALLBACK_PROVIDER: ${FAITHFULNESS_FALLBACK_PROVIDER:-}
72
+ FAITHFULNESS_FALLBACK_MODEL: ${FAITHFULNESS_FALLBACK_MODEL:-}
73
+ # API authentication (optional)
74
+ API_AUTH_TOKENS: ${API_AUTH_TOKENS:-}
75
+ KNOWLEDGE_READ_TOKENS: ${KNOWLEDGE_READ_TOKENS:-}
76
+ KNOWLEDGE_WRITE_TOKENS: ${KNOWLEDGE_WRITE_TOKENS:-}
77
+ CORS_ORIGINS: ${CORS_ORIGINS:-http://localhost:5173,http://127.0.0.1:5173}
78
+ volumes:
79
+ - ./data:/app/data
80
+ - ./config:/app/config:ro
81
+ # Override entrypoint to run API server
82
+ command: ["serve-api", "--host", "0.0.0.0", "--port", "8000"]
83
+ healthcheck:
84
+ test: ["CMD-SHELL", "curl -f http://localhost:8000/health || exit 1"]
85
+ interval: 30s
86
+ timeout: 10s
87
+ start_period: 10s
88
+ retries: 3
89
+ networks:
90
+ - evalvault-net
91
+
92
+ # EvalVault Web UI (Nginx reverse proxy + React frontend)
93
+ # Nginx proxies /api/ to evalvault-api:8000
94
+ evalvault-web:
95
+ image: evalvault-web:offline
96
+ build:
97
+ context: ./frontend
98
+ dockerfile: Dockerfile
99
+ container_name: evalvault-web
100
+ restart: unless-stopped
101
+ depends_on:
102
+ evalvault-api:
103
+ condition: service_healthy
104
+ ports:
105
+ - "127.0.0.1:5173:80"
106
+ environment:
107
+ # API backend URL for nginx proxy_pass (used by nginx config)
108
+ API_BACKEND_URL: http://evalvault-api:8000
109
+ networks:
110
+ - evalvault-net
111
+
112
+ volumes:
113
+ postgres_data:
114
+ driver: local
115
+
116
+ networks:
117
+ evalvault-net:
118
+ driver: bridge
@@ -16,6 +16,7 @@
16
16
  - CLI 실행 시나리오 가이드: `guides/RAG_CLI_WORKFLOW_TEMPLATES.md`
17
17
  - 사용자 가이드(운영 포함): `guides/USER_GUIDE.md`
18
18
  - 개발/기여: `guides/DEV_GUIDE.md`
19
+ - 폐쇄망 Docker: `guides/OFFLINE_DOCKER.md`
19
20
  - 진단 플레이북: `guides/EVALVAULT_DIAGNOSTIC_PLAYBOOK.md` (문제→분석→해석→액션 흐름)
20
21
  - RAG 성능 개선 제안서: `guides/RAG_PERFORMANCE_IMPROVEMENT_PROPOSAL.md` (목적/미션·KPI·로드맵)
21
22
  - RAGAS 인간 피드백 보정: `guides/RAGAS_HUMAN_FEEDBACK_CALIBRATION_GUIDE.md`
@@ -0,0 +1,158 @@
1
+ # 폐쇄망(에어갭) Docker 배포 가이드
2
+
3
+ EvalVault를 외부망 없이 운영하기 위한 **오프라인 Docker 패키지** 구성 가이드입니다.
4
+ 모델 가중치는 폐쇄망 내부에 이미 존재한다는 전제로, EvalVault는 **외부 모델 서버**를 호출합니다.
5
+
6
+ ## 목표 구성
7
+
8
+ - EvalVault API + Web UI를 docker-compose로 실행
9
+ - 모델 서버(vLLM/Ollama)는 **외부 엔드포인트**로 연결
10
+ - Postgres는 compose 기본 구성에 포함되어 항상 함께 실행 (API가 postgres healthy 상태에 의존)
11
+
12
+ ## 핵심 파일
13
+
14
+ - `docker-compose.offline.yml`: 오프라인용 compose
15
+ - `.env.offline.example`: 환경 변수 템플릿
16
+ - `frontend/Dockerfile`: Web UI 정적 서빙 이미지
17
+ - `frontend/nginx.conf`: `/api/*` 프록시 + SPA 라우팅
18
+ - `scripts/offline/*.sh`: 이미지 export/import/smoke-test
19
+
20
+ ## 1) 환경 파일 준비
21
+
22
+ ```bash
23
+ cp .env.offline.example .env.offline
24
+ ```
25
+
26
+ `.env.offline`에 아래 항목을 **직접 입력**하세요.
27
+
28
+ - `EVALVAULT_PROFILE` (dev/prod/vllm)
29
+ - `OLLAMA_BASE_URL` 또는 `VLLM_BASE_URL`
30
+ - `CORS_ORIGINS` (기본: http://localhost:5173)
31
+
32
+ ### 폐쇄망 사용자에게 전달할 필수 정보
33
+
34
+ 아래 내용을 그대로 전달하면 됩니다.
35
+
36
+ **필수 입력값**
37
+ - `EVALVAULT_PROFILE`: `dev`(Ollama) / `prod`(OpenAI) / `vllm` 중 선택
38
+ - `OLLAMA_BASE_URL` 또는 `OPENAI_API_KEY` 또는 `VLLM_BASE_URL` 중 하나 이상
39
+ - `CORS_ORIGINS`: 기본 `http://localhost:5173`
40
+
41
+ **포트 안내**
42
+ - API: 호스트에 직접 공개되지 않음 — nginx 프록시 `http://127.0.0.1:5173/api/` 경유
43
+ - Web UI: `http://127.0.0.1:5173` (compose가 127.0.0.1에만 바인딩)
44
+
45
+ **실행 명령**
46
+ ```bash
47
+ cp .env.offline.example .env.offline
48
+ # .env.offline 편집 후
49
+ docker compose --env-file .env.offline -f docker-compose.offline.yml up -d
50
+ ```
51
+
52
+ **검증 명령**
53
+ ```bash
54
+ docker exec evalvault-api curl -f http://localhost:8000/health
55
+ curl -f http://127.0.0.1:5173/api/v1/config/profiles
56
+ curl -f http://127.0.0.1:5173/api/v1/runs/options/datasets
57
+ curl -I http://127.0.0.1:5173/
58
+ ```
59
+
60
+ **참고**
61
+ - 모델 서버는 폐쇄망 내부에 이미 존재한다고 가정합니다.
62
+ - vLLM은 폐쇄망에서 사용할 수 있으며, 로컬(macOS)에서는 테스트하지 않았습니다.
63
+
64
+ ### vLLM 사용자 안내
65
+
66
+ 폐쇄망에서 vLLM을 사용할 경우 다음을 설정합니다.
67
+
68
+ **필수 설정**
69
+ - `EVALVAULT_PROFILE=vllm`
70
+ - `VLLM_BASE_URL=http://<VLLM_HOST>:8000/v1`
71
+
72
+ **선택 설정**
73
+ - `VLLM_API_KEY`: vLLM 서버가 인증을 요구할 때만 사용
74
+ - `VLLM_MODEL`: 서버 기본 모델과 다를 때 지정
75
+ - `VLLM_EMBEDDING_MODEL`, `VLLM_EMBEDDING_BASE_URL`: 임베딩 서버를 분리 운용할 때 지정
76
+
77
+ **검증 명령**
78
+ ```bash
79
+ curl -f http://127.0.0.1:5173/api/v1/config/profiles
80
+ ```
81
+
82
+ `vllm` 프로필이 보이고, `VLLM_BASE_URL`이 실제 vLLM 서버를 가리키면 정상입니다.
83
+
84
+ ## 2) 온라인 빌드/패키징
85
+
86
+ 스크립트를 실행하기 전 권한을 부여하세요.
87
+
88
+ ```bash
89
+ chmod +x scripts/offline/*.sh
90
+ ```
91
+
92
+ ```bash
93
+ ./scripts/offline/export_images.sh
94
+ ```
95
+
96
+ - 산출물: `dist/evalvault_offline.tar`
97
+ - 체크섬: `dist/evalvault_offline.tar.sha256`
98
+
99
+ Postgres 이미지를 함께 포함하려면:
100
+
101
+ ```bash
102
+ INCLUDE_POSTGRES=1 ./scripts/offline/export_images.sh
103
+ ```
104
+
105
+ ## 3) 폐쇄망 반입 및 로드
106
+
107
+ ```bash
108
+ ./scripts/offline/import_images.sh dist/evalvault_offline.tar
109
+ ```
110
+
111
+ ## 4) 오프라인 실행
112
+
113
+ ```bash
114
+ docker compose --env-file .env.offline -f docker-compose.offline.yml up -d
115
+ ```
116
+
117
+ - API: nginx 프록시 경유 `http://127.0.0.1:5173/api/` (8000 포트는 호스트에 공개되지 않음)
118
+ - Web UI: `http://127.0.0.1:5173`
119
+
120
+ Postgres는 기본 구성에 포함되어 있어 별도 프로필 지정 없이 함께 시작됩니다:
121
+
122
+ ```bash
123
+ docker compose --env-file .env.offline -f docker-compose.offline.yml up -d
124
+ ```
125
+
126
+ ## 5) 간단 스모크 테스트
127
+
128
+ ```bash
129
+ ./scripts/offline/smoke_test.sh
130
+ ```
131
+
132
+ 스모크 테스트가 실패하면 다음을 확인하세요.
133
+ - Docker Desktop 실행 상태
134
+ - `.env.offline`의 모델 서버 주소
135
+ - 포트 충돌 여부 (5173)
136
+
137
+ ## 데이터 포함 정책
138
+
139
+ `data/`는 이미지에 포함됩니다.
139
+ 단, `/app/data`를 볼륨으로 마운트하면 **이미지에 포함된 데이터가 가려집니다**.
140
+ 기본 `docker-compose.offline.yml`은 이미 `./data:/app/data`를 바인드 마운트하므로, 이미지에 내장된 데이터를 쓰려면 해당 마운트를 제거하거나 호스트 `./data`를 채워 두세요. 필요 시 아래처럼 선택적으로 마운트하세요.
142
+
143
+ ```yaml
144
+ # docker-compose.override.yml 예시
145
+ services:
146
+ evalvault-api:
147
+ volumes:
148
+ - evalvault_data:/app/data
149
+
150
+ volumes:
151
+ evalvault_data:
152
+ ```
153
+
154
+ ## 참고 문서 (공식 Docker)
155
+
156
+ - Docker image save: https://docs.docker.com/reference/cli/docker/image/save/
157
+ - Docker image load: https://docs.docker.com/reference/cli/docker/image/load/
158
+ - Docker compose pull: https://docs.docker.com/reference/cli/docker/compose/pull/
@@ -497,6 +497,12 @@ JSON 예시는 아래와 같습니다.
497
497
  CSV/Excel의 경우 `id,question,answer,contexts,ground_truth` 컬럼을 포함하고,
498
498
  선택적으로 `threshold_*` 컬럼을 넣을 수 있습니다. `threshold_*` 값은 **첫 번째로 채워진 행 기준**으로
499
499
  데이터셋 전체 임계값으로 사용됩니다. `contexts`는 JSON 배열 문자열 또는 `|` 로 구분합니다.
500
+
501
+ 요약 메트릭용 선택 컬럼:
502
+ - `summary_tags`: 콤마/파이프 구분 또는 JSON 배열 문자열
503
+ - `summary_intent`: 예) `agent_notes`
504
+ - `metadata`: JSON 객체 문자열 (예: `{ "priority": "high" }`)
505
+
500
506
  대용량 파일은 `--stream` 옵션으로 스트리밍 평가를 활성화하세요.
501
507
 
502
508
  #### 데이터셋 템플릿
@@ -92,6 +92,20 @@ uv run evalvault analyze-compare <RUN_A> <RUN_B> --db data/db/evalvault.db
92
92
 
93
93
  > Stage 이벤트는 `--stage-events`(JSONL로 내보내기), `--stage-store`(DB 저장) 같은 옵션과 연결된다.
94
94
 
95
+ 최소 스키마(필수):
96
+ - `run_id` (string)
97
+ - `stage_type` (string, 소문자 정규화)
98
+
99
+ 권장 필드:
100
+ - `stage_id`, `parent_stage_id`
101
+ - `status` (예: success/failed)
102
+ - `started_at`, `finished_at`, `duration_ms`
103
+ - `attributes`, `metadata`
104
+ - `trace_id`, `span_id`
105
+
106
+ 표준 stage_type 최소 집합:
107
+ - `system_prompt`, `input`, `retrieval`, `output`
108
+
95
109
  ### 3.4 자동 분석(`--auto-analyze`)
96
110
 
97
111
  CLI 구현 흐름(요약):
@@ -94,6 +94,15 @@ docker compose -f docker-compose.phoenix.yaml up
94
94
  - 옵션: `--stage-events <path>`
95
95
  - 근거: `src/evalvault/adapters/inbound/cli/commands/run.py`
96
96
 
97
+ 최소 스키마(필수):
98
+ - `run_id` (string)
99
+ - `stage_type` (string, 소문자 정규화)
100
+
101
+ 표준 stage_type 최소 집합:
102
+ - `system_prompt`, `input`, `retrieval`, `output`
103
+
104
+ 필드가 누락되면 `stage summary`에서 `missing_required_stage_types`로 표시된다.
105
+
97
106
  ### 3.2 Stage Metrics(단계 지표)
98
107
 
99
108
  - `StageMetricService.build_metrics(...)`가 stage 이벤트에서
@@ -0,0 +1,20 @@
1
+ # syntax=docker/dockerfile:1
2
+
3
+ FROM node:20-alpine AS build
4
+
5
+ WORKDIR /app
6
+
7
+ COPY package.json package-lock.json ./
8
+ RUN npm ci
9
+
10
+ COPY . ./
11
+ RUN npm run build
12
+
13
+ FROM nginx:1.27-alpine
14
+
15
+ COPY nginx.conf /etc/nginx/conf.d/default.conf
16
+ COPY --from=build /app/dist /usr/share/nginx/html
17
+
18
+ EXPOSE 80
19
+
20
+ CMD ["nginx", "-g", "daemon off;"]
@@ -0,0 +1,20 @@
1
+ server {
2
+ listen 80;
3
+ server_name _;
4
+
5
+ root /usr/share/nginx/html;
6
+ index index.html;
7
+
8
+ location /api/ {
9
+ proxy_pass http://evalvault-api:8000;
10
+ proxy_http_version 1.1;
11
+ proxy_set_header Host $host;
12
+ proxy_set_header X-Real-IP $remote_addr;
13
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
14
+ proxy_set_header X-Forwarded-Proto $scheme;
15
+ }
16
+
17
+ location / {
18
+ try_files $uri $uri/ /index.html;
19
+ }
20
+ }
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "evalvault"
3
- version = "1.67.0"
3
+ version = "1.68.0"
4
4
  description = "RAG evaluation system using Ragas with Phoenix/Langfuse tracing"
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.12"