evalscope 0.10.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (606)
  1. evalscope/__init__.py +4 -1
  2. evalscope/api/benchmark/__init__.py +11 -0
  3. evalscope/api/benchmark/adapters/__init__.py +7 -0
  4. evalscope/api/benchmark/adapters/agent_adapter.py +8 -0
  5. evalscope/api/benchmark/adapters/default_data_adapter.py +754 -0
  6. evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
  7. evalscope/api/benchmark/adapters/multi_choice_adapter.py +86 -0
  8. evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
  9. evalscope/api/benchmark/adapters/text2image_adapter.py +157 -0
  10. evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
  11. evalscope/api/benchmark/benchmark.py +404 -0
  12. evalscope/api/benchmark/meta.py +124 -0
  13. evalscope/api/dataset/__init__.py +2 -0
  14. evalscope/api/dataset/dataset.py +370 -0
  15. evalscope/api/dataset/loader.py +266 -0
  16. evalscope/api/dataset/utils.py +143 -0
  17. evalscope/api/evaluator/__init__.py +3 -0
  18. evalscope/api/evaluator/cache.py +382 -0
  19. evalscope/api/evaluator/evaluator.py +61 -0
  20. evalscope/api/evaluator/state.py +280 -0
  21. evalscope/api/filter/__init__.py +1 -0
  22. evalscope/api/filter/filter.py +72 -0
  23. evalscope/api/messages/__init__.py +12 -0
  24. evalscope/api/messages/chat_message.py +248 -0
  25. evalscope/api/messages/content.py +102 -0
  26. evalscope/api/messages/utils.py +35 -0
  27. evalscope/api/metric/__init__.py +2 -0
  28. evalscope/api/metric/metric.py +60 -0
  29. evalscope/api/metric/scorer.py +113 -0
  30. evalscope/api/mixin/__init__.py +2 -0
  31. evalscope/api/mixin/llm_judge_mixin.py +170 -0
  32. evalscope/api/mixin/sandbox_mixin.py +182 -0
  33. evalscope/api/model/__init__.py +12 -0
  34. evalscope/api/model/generate_config.py +161 -0
  35. evalscope/api/model/model.py +386 -0
  36. evalscope/api/model/model_output.py +285 -0
  37. evalscope/api/registry.py +182 -0
  38. evalscope/api/tool/__init__.py +3 -0
  39. evalscope/api/tool/tool_call.py +101 -0
  40. evalscope/api/tool/tool_info.py +173 -0
  41. evalscope/api/tool/utils.py +64 -0
  42. evalscope/app/__init__.py +28 -0
  43. evalscope/app/app.py +38 -0
  44. evalscope/app/arguments.py +11 -0
  45. evalscope/app/constants.py +22 -0
  46. evalscope/app/ui/__init__.py +20 -0
  47. evalscope/app/ui/app_ui.py +53 -0
  48. evalscope/app/ui/multi_model.py +353 -0
  49. evalscope/app/ui/sidebar.py +42 -0
  50. evalscope/app/ui/single_model.py +220 -0
  51. evalscope/app/ui/visualization.py +36 -0
  52. evalscope/app/utils/data_utils.py +195 -0
  53. evalscope/app/utils/env_utils.py +12 -0
  54. evalscope/app/utils/localization.py +221 -0
  55. evalscope/app/utils/text_utils.py +119 -0
  56. evalscope/app/utils/visualization.py +96 -0
  57. evalscope/arguments.py +32 -9
  58. evalscope/backend/opencompass/api_meta_template.py +2 -1
  59. evalscope/backend/opencompass/backend_manager.py +10 -7
  60. evalscope/backend/rag_eval/__init__.py +1 -1
  61. evalscope/backend/rag_eval/backend_manager.py +23 -6
  62. evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +33 -21
  63. evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
  64. evalscope/backend/rag_eval/cmteb/arguments.py +14 -1
  65. evalscope/backend/rag_eval/cmteb/task_template.py +19 -3
  66. evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +1 -1
  67. evalscope/backend/rag_eval/ragas/arguments.py +0 -1
  68. evalscope/backend/rag_eval/ragas/task_template.py +2 -1
  69. evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
  70. evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
  71. evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +9 -3
  72. evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -6
  73. evalscope/backend/rag_eval/utils/embedding.py +125 -32
  74. evalscope/backend/rag_eval/utils/llm.py +16 -16
  75. evalscope/backend/vlm_eval_kit/backend_manager.py +8 -3
  76. evalscope/benchmarks/__init__.py +17 -5
  77. evalscope/benchmarks/aa_lcr/__init__.py +0 -0
  78. evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
  79. evalscope/benchmarks/ai2d/__init__.py +0 -0
  80. evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
  81. evalscope/benchmarks/aime/__init__.py +0 -0
  82. evalscope/benchmarks/aime/aime24_adapter.py +55 -0
  83. evalscope/benchmarks/aime/aime25_adapter.py +181 -0
  84. evalscope/benchmarks/aime/grader.py +307 -0
  85. evalscope/{metrics/math_accuracy.py → benchmarks/aime/math_normalize.py} +61 -72
  86. evalscope/benchmarks/alpaca_eval/__init__.py +0 -0
  87. evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +133 -0
  88. evalscope/benchmarks/amc/__init__.py +0 -0
  89. evalscope/benchmarks/amc/amc_adapter.py +51 -0
  90. evalscope/benchmarks/arc/arc_adapter.py +34 -149
  91. evalscope/benchmarks/arena_hard/__init__.py +0 -0
  92. evalscope/benchmarks/arena_hard/arena_hard_adapter.py +149 -0
  93. evalscope/benchmarks/arena_hard/utils.py +186 -0
  94. evalscope/benchmarks/bbh/bbh_adapter.py +117 -157
  95. evalscope/benchmarks/bfcl/__init__.py +0 -0
  96. evalscope/benchmarks/bfcl/v3/__init__.py +0 -0
  97. evalscope/benchmarks/bfcl/v3/bfcl_v3_adapter.py +370 -0
  98. evalscope/benchmarks/bfcl/v3/generation.py +222 -0
  99. evalscope/benchmarks/bfcl/v3/utils.py +23 -0
  100. evalscope/benchmarks/bfcl/v4/__init__.py +0 -0
  101. evalscope/benchmarks/bfcl/v4/bfcl_v4_adapter.py +229 -0
  102. evalscope/benchmarks/bfcl/v4/utils.py +410 -0
  103. evalscope/benchmarks/biomix_qa/__init__.py +0 -0
  104. evalscope/benchmarks/biomix_qa/biomix_qa_adapter.py +36 -0
  105. evalscope/benchmarks/blink/__init__.py +0 -0
  106. evalscope/benchmarks/blink/blink_adapter.py +61 -0
  107. evalscope/benchmarks/ceval/ceval_adapter.py +93 -174
  108. evalscope/benchmarks/chartqa/__init__.py +0 -0
  109. evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
  110. evalscope/benchmarks/chartqa/utils.py +38 -0
  111. evalscope/benchmarks/chinese_simple_qa/__init__.py +0 -0
  112. evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +170 -0
  113. evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -140
  114. evalscope/benchmarks/coin_flip/__init__.py +0 -0
  115. evalscope/benchmarks/coin_flip/coin_flip_adapter.py +128 -0
  116. evalscope/benchmarks/commonsense_qa/__init__.py +0 -0
  117. evalscope/benchmarks/commonsense_qa/commonsense_qa_adapter.py +32 -0
  118. evalscope/benchmarks/competition_math/competition_math_adapter.py +64 -112
  119. evalscope/benchmarks/data_collection/__init__.py +0 -0
  120. evalscope/benchmarks/data_collection/data_collection_adapter.py +215 -0
  121. evalscope/benchmarks/docmath/__init__.py +0 -0
  122. evalscope/benchmarks/docmath/docmath_adapter.py +143 -0
  123. evalscope/benchmarks/docmath/utils.py +219 -0
  124. evalscope/benchmarks/docvqa/__init__.py +0 -0
  125. evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
  126. evalscope/benchmarks/drivelology/__init__.py +0 -0
  127. evalscope/benchmarks/drivelology/drivelology_binary_adapter.py +170 -0
  128. evalscope/benchmarks/drivelology/drivelology_multilabel_adapter.py +254 -0
  129. evalscope/benchmarks/drivelology/drivelology_selection_adapter.py +49 -0
  130. evalscope/benchmarks/drivelology/drivelology_writing_adapter.py +218 -0
  131. evalscope/benchmarks/drop/__init__.py +0 -0
  132. evalscope/benchmarks/drop/drop_adapter.py +155 -0
  133. evalscope/benchmarks/drop/utils.py +156 -0
  134. evalscope/benchmarks/frames/__init__.py +0 -0
  135. evalscope/benchmarks/frames/frames_adapter.py +175 -0
  136. evalscope/benchmarks/frames/utils.py +37 -0
  137. evalscope/benchmarks/general_arena/__init__.py +0 -0
  138. evalscope/benchmarks/general_arena/general_arena_adapter.py +454 -0
  139. evalscope/benchmarks/general_arena/utils.py +223 -0
  140. evalscope/benchmarks/general_mcq/__init__.py +0 -0
  141. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +58 -0
  142. evalscope/benchmarks/general_qa/general_qa_adapter.py +75 -107
  143. evalscope/benchmarks/gpqa/__init__.py +0 -0
  144. evalscope/benchmarks/gpqa/gpqa_adapter.py +90 -0
  145. evalscope/benchmarks/gpqa/prompt.py +88 -0
  146. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +77 -144
  147. evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
  148. evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +159 -0
  149. evalscope/benchmarks/halu_eval/__init__.py +0 -0
  150. evalscope/benchmarks/halu_eval/halu_eval_adapter.py +128 -0
  151. evalscope/benchmarks/halu_eval/halu_eval_instructions.py +84 -0
  152. evalscope/benchmarks/healthbench/__init__.py +0 -0
  153. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  154. evalscope/benchmarks/healthbench/utils.py +102 -0
  155. evalscope/benchmarks/hellaswag/hellaswag_adapter.py +36 -134
  156. evalscope/benchmarks/hle/__init__.py +0 -0
  157. evalscope/benchmarks/hle/hle_adapter.py +153 -0
  158. evalscope/benchmarks/humaneval/humaneval_adapter.py +80 -88
  159. evalscope/benchmarks/humaneval/utils.py +235 -0
  160. evalscope/benchmarks/ifeval/ifeval_adapter.py +71 -45
  161. evalscope/benchmarks/ifeval/instructions.py +112 -68
  162. evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
  163. evalscope/benchmarks/ifeval/instructions_util.py +2 -3
  164. evalscope/benchmarks/ifeval/utils.py +6 -7
  165. evalscope/benchmarks/image_edit/__init__.py +0 -0
  166. evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
  167. evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
  168. evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
  169. evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
  170. evalscope/benchmarks/infovqa/__init__.py +0 -0
  171. evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
  172. evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -58
  173. evalscope/benchmarks/live_code_bench/__init__.py +0 -0
  174. evalscope/benchmarks/live_code_bench/evaluate_utils.py +195 -0
  175. evalscope/benchmarks/live_code_bench/extract_utils.py +70 -0
  176. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +150 -0
  177. evalscope/benchmarks/live_code_bench/load_utils.py +63 -0
  178. evalscope/benchmarks/live_code_bench/pass_k_utils.py +56 -0
  179. evalscope/benchmarks/live_code_bench/prompts.py +207 -0
  180. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  181. evalscope/benchmarks/live_code_bench/testing_util.py +544 -0
  182. evalscope/benchmarks/logi_qa/__int__.py +0 -0
  183. evalscope/benchmarks/logi_qa/logi_qa_adapter.py +41 -0
  184. evalscope/benchmarks/maritime_bench/__init__.py +0 -0
  185. evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +56 -0
  186. evalscope/benchmarks/math_500/__init__.py +0 -0
  187. evalscope/benchmarks/math_500/math_500_adapter.py +55 -0
  188. evalscope/benchmarks/math_qa/__init__.py +0 -0
  189. evalscope/benchmarks/math_qa/math_qa_adapter.py +35 -0
  190. evalscope/benchmarks/math_verse/__init__.py +0 -0
  191. evalscope/benchmarks/math_verse/math_verse_adapter.py +105 -0
  192. evalscope/benchmarks/math_vision/__init__.py +0 -0
  193. evalscope/benchmarks/math_vision/math_vision_adapter.py +116 -0
  194. evalscope/benchmarks/math_vista/__init__.py +0 -0
  195. evalscope/benchmarks/math_vista/math_vista_adapter.py +114 -0
  196. evalscope/benchmarks/med_mcqa/__init__.py +0 -0
  197. evalscope/benchmarks/med_mcqa/med_mcqa_adapter.py +32 -0
  198. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  199. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +53 -0
  200. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  201. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  202. evalscope/benchmarks/mm_star/__init__.py +0 -0
  203. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  204. evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -210
  205. evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +87 -103
  206. evalscope/benchmarks/mmlu_redux/__init__.py +0 -0
  207. evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +139 -0
  208. evalscope/benchmarks/mmmu/__init__.py +0 -0
  209. evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
  210. evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
  211. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
  212. evalscope/benchmarks/mri_mcqa/__init__.py +0 -0
  213. evalscope/benchmarks/mri_mcqa/mri_mcqa_adapter.py +34 -0
  214. evalscope/benchmarks/multi_if/__init__.py +0 -0
  215. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  216. evalscope/benchmarks/multi_if/metrics.py +120 -0
  217. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  218. evalscope/benchmarks/music_trivia/__init__.py +0 -0
  219. evalscope/benchmarks/music_trivia/music_trivia_adapter.py +36 -0
  220. evalscope/benchmarks/musr/__init__.py +0 -0
  221. evalscope/benchmarks/musr/musr_adapter.py +43 -0
  222. evalscope/benchmarks/needle_haystack/__init__.py +0 -0
  223. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +389 -0
  224. evalscope/benchmarks/needle_haystack/utils.py +79 -0
  225. evalscope/benchmarks/ner/__init__.py +0 -0
  226. evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
  227. evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
  228. evalscope/benchmarks/ner/copious_adapter.py +85 -0
  229. evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
  230. evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
  231. evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
  232. evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
  233. evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
  234. evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
  235. evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
  236. evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
  237. evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
  238. evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
  239. evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
  240. evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
  241. evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
  242. evalscope/benchmarks/ocr_bench/__init__.py +0 -0
  243. evalscope/benchmarks/ocr_bench/ocr_bench/__init__.py +0 -0
  244. evalscope/benchmarks/ocr_bench/ocr_bench/ocr_bench_adapter.py +101 -0
  245. evalscope/benchmarks/ocr_bench/ocr_bench_v2/IoUscore_metric.py +87 -0
  246. evalscope/benchmarks/ocr_bench/ocr_bench_v2/TEDS_metric.py +963 -0
  247. evalscope/benchmarks/ocr_bench/ocr_bench_v2/__init__.py +0 -0
  248. evalscope/benchmarks/ocr_bench/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
  249. evalscope/benchmarks/ocr_bench/ocr_bench_v2/page_ocr_metric.py +50 -0
  250. evalscope/benchmarks/ocr_bench/ocr_bench_v2/parallel.py +46 -0
  251. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/__init__.py +0 -0
  252. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/readme.txt +26 -0
  253. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
  254. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/script.py +481 -0
  255. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_metric.py +179 -0
  256. evalscope/benchmarks/ocr_bench/ocr_bench_v2/utils.py +433 -0
  257. evalscope/benchmarks/ocr_bench/ocr_bench_v2/vqa_metric.py +254 -0
  258. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  259. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  260. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  261. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  262. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  263. evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
  264. evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
  265. evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
  266. evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
  267. evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
  268. evalscope/benchmarks/piqa/__init__.py +0 -0
  269. evalscope/benchmarks/piqa/piqa_adapter.py +32 -0
  270. evalscope/benchmarks/poly_math/__init__.py +0 -0
  271. evalscope/benchmarks/poly_math/poly_math_adapter.py +132 -0
  272. evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
  273. evalscope/benchmarks/pope/__init__.py +0 -0
  274. evalscope/benchmarks/pope/pope_adapter.py +112 -0
  275. evalscope/benchmarks/process_bench/__init__.py +0 -0
  276. evalscope/benchmarks/process_bench/process_bench_adapter.py +171 -0
  277. evalscope/benchmarks/pumed_qa/__init__.py +0 -0
  278. evalscope/benchmarks/pumed_qa/pubmed_qa_adapter.py +175 -0
  279. evalscope/benchmarks/qasc/__init__.py +0 -0
  280. evalscope/benchmarks/qasc/qasc_adapter.py +35 -0
  281. evalscope/benchmarks/race/race_adapter.py +33 -120
  282. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  283. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  284. evalscope/benchmarks/sciq/__init__.py +0 -0
  285. evalscope/benchmarks/sciq/sciq_adapter.py +36 -0
  286. evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
  287. evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
  288. evalscope/benchmarks/simple_qa/__init__.py +0 -0
  289. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +169 -0
  290. evalscope/benchmarks/simple_vqa/__init__.py +0 -0
  291. evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
  292. evalscope/benchmarks/siqa/__init__.py +0 -0
  293. evalscope/benchmarks/siqa/siqa_adapter.py +39 -0
  294. evalscope/benchmarks/super_gpqa/__init__.py +0 -0
  295. evalscope/benchmarks/super_gpqa/prompt.py +88 -0
  296. evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +165 -0
  297. evalscope/benchmarks/super_gpqa/utils.py +86 -0
  298. evalscope/benchmarks/tau_bench/__init__.py +0 -0
  299. evalscope/benchmarks/tau_bench/tau2_bench/__init__.py +0 -0
  300. evalscope/benchmarks/tau_bench/tau2_bench/generation.py +158 -0
  301. evalscope/benchmarks/tau_bench/tau2_bench/tau2_bench_adapter.py +146 -0
  302. evalscope/benchmarks/tau_bench/tau_bench/__init__.py +0 -0
  303. evalscope/benchmarks/tau_bench/tau_bench/generation.py +147 -0
  304. evalscope/benchmarks/tau_bench/tau_bench/tau_bench_adapter.py +168 -0
  305. evalscope/benchmarks/text2image/__init__.py +0 -0
  306. evalscope/benchmarks/text2image/evalmuse_adapter.py +78 -0
  307. evalscope/benchmarks/text2image/genai_bench_adapter.py +53 -0
  308. evalscope/benchmarks/text2image/general_t2i_adapter.py +42 -0
  309. evalscope/benchmarks/text2image/hpdv2_adapter.py +52 -0
  310. evalscope/benchmarks/text2image/tifa_adapter.py +27 -0
  311. evalscope/benchmarks/tool_bench/__init__.py +0 -0
  312. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +102 -0
  313. evalscope/benchmarks/tool_bench/utils.py +203 -0
  314. evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -118
  315. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -270
  316. evalscope/benchmarks/visu_logic/__init__.py +0 -0
  317. evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
  318. evalscope/benchmarks/winogrande/__init__.py +0 -0
  319. evalscope/benchmarks/winogrande/winogrande_adapter.py +34 -0
  320. evalscope/benchmarks/wmt/__init__.py +0 -0
  321. evalscope/benchmarks/wmt/wmt24_adapter.py +294 -0
  322. evalscope/benchmarks/zerobench/__init__.py +0 -0
  323. evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
  324. evalscope/cli/cli.py +2 -0
  325. evalscope/cli/start_app.py +12 -2
  326. evalscope/cli/start_eval.py +4 -3
  327. evalscope/cli/start_perf.py +10 -2
  328. evalscope/cli/start_server.py +6 -3
  329. evalscope/collections/__init__.py +27 -3
  330. evalscope/collections/sampler.py +12 -11
  331. evalscope/collections/schema.py +13 -12
  332. evalscope/config.py +218 -147
  333. evalscope/constants.py +78 -82
  334. evalscope/evaluator/__init__.py +1 -1
  335. evalscope/evaluator/evaluator.py +334 -318
  336. evalscope/filters/__init__.py +2 -0
  337. evalscope/filters/extraction.py +126 -0
  338. evalscope/filters/selection.py +57 -0
  339. evalscope/metrics/__init__.py +59 -3
  340. evalscope/metrics/bert_score/__init__.py +0 -0
  341. evalscope/metrics/bert_score/scorer.py +338 -0
  342. evalscope/metrics/bert_score/utils.py +697 -0
  343. evalscope/metrics/bundled_rouge_score/rouge_scorer.py +20 -15
  344. evalscope/metrics/llm_judge.py +211 -0
  345. evalscope/metrics/math_parser.py +545 -0
  346. evalscope/metrics/metric.py +611 -0
  347. evalscope/metrics/metrics.py +112 -23
  348. evalscope/metrics/rouge_metric.py +11 -13
  349. evalscope/metrics/t2v_metrics/__init__.py +0 -0
  350. evalscope/metrics/t2v_metrics/clipscore.py +14 -0
  351. evalscope/metrics/t2v_metrics/constants.py +12 -0
  352. evalscope/metrics/t2v_metrics/itmscore.py +14 -0
  353. evalscope/metrics/t2v_metrics/models/__init__.py +0 -0
  354. evalscope/metrics/t2v_metrics/models/clipscore_models/__init__.py +30 -0
  355. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/__init__.py +0 -0
  356. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/base_model.py +6 -0
  357. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +134 -0
  358. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +282 -0
  359. evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +115 -0
  360. evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +87 -0
  361. evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +86 -0
  362. evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +62 -0
  363. evalscope/metrics/t2v_metrics/models/itmscore_models/__init__.py +26 -0
  364. evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +85 -0
  365. evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +99 -0
  366. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +176 -0
  367. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/__init__.py +0 -0
  368. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +82 -0
  369. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +74 -0
  370. evalscope/metrics/t2v_metrics/models/model.py +45 -0
  371. evalscope/metrics/t2v_metrics/models/utils.py +25 -0
  372. evalscope/metrics/t2v_metrics/models/vqascore_models/__init__.py +22 -0
  373. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/__init__.py +0 -0
  374. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/__init__.py +1 -0
  375. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +306 -0
  376. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/builder.py +12 -0
  377. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +84 -0
  378. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_projector/builder.py +50 -0
  379. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +223 -0
  380. evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +153 -0
  381. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/__init__.py +26 -0
  382. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +465 -0
  383. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +141 -0
  384. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +24 -0
  385. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +190 -0
  386. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +100 -0
  387. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +313 -0
  388. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +416 -0
  389. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +8 -0
  390. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +192 -0
  391. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +320 -0
  392. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/default.yaml +10 -0
  393. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_flant5xl.yaml +42 -0
  394. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt2.7b.yaml +42 -0
  395. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt6.7b.yaml +42 -0
  396. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_coco.yaml +36 -0
  397. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xl.yaml +43 -0
  398. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xxl.yaml +43 -0
  399. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna13b.yaml +43 -0
  400. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml +43 -0
  401. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml +36 -0
  402. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml +42 -0
  403. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml +42 -0
  404. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml +42 -0
  405. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml +43 -0
  406. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml +42 -0
  407. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml +42 -0
  408. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml +42 -0
  409. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml +37 -0
  410. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml +43 -0
  411. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml +43 -0
  412. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json +21 -0
  413. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json +22 -0
  414. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json +21 -0
  415. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +212 -0
  416. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/base_model.py +231 -0
  417. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +1111 -0
  418. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/__init__.py +0 -0
  419. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2.py +211 -0
  420. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_image_text_matching.py +109 -0
  421. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +457 -0
  422. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +370 -0
  423. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +765 -0
  424. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +274 -0
  425. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +896 -0
  426. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +1876 -0
  427. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +83 -0
  428. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +58 -0
  429. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_caption.py +212 -0
  430. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_classification.py +164 -0
  431. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_feature_extractor.py +202 -0
  432. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +187 -0
  433. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +179 -0
  434. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +115 -0
  435. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_pretrain.py +371 -0
  436. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +348 -0
  437. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +870 -0
  438. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +273 -0
  439. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +514 -0
  440. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +1291 -0
  441. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +476 -0
  442. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +35 -0
  443. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/base_processor.py +27 -0
  444. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/blip_processors.py +233 -0
  445. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +393 -0
  446. evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +129 -0
  447. evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +18 -0
  448. evalscope/metrics/t2v_metrics/score.py +78 -0
  449. evalscope/metrics/t2v_metrics/vqascore.py +14 -0
  450. evalscope/models/__init__.py +23 -13
  451. evalscope/models/image_edit_model.py +125 -0
  452. evalscope/models/mockllm.py +65 -0
  453. evalscope/models/model_apis.py +69 -0
  454. evalscope/models/modelscope.py +455 -0
  455. evalscope/models/openai_compatible.py +144 -0
  456. evalscope/models/text2image_model.py +124 -0
  457. evalscope/models/utils/openai.py +708 -0
  458. evalscope/perf/__init__.py +0 -1
  459. evalscope/perf/arguments.py +103 -69
  460. evalscope/perf/benchmark.py +114 -163
  461. evalscope/perf/http_client.py +59 -89
  462. evalscope/perf/main.py +91 -18
  463. evalscope/perf/plugin/__init__.py +3 -2
  464. evalscope/perf/plugin/api/__init__.py +4 -3
  465. evalscope/perf/plugin/api/base.py +27 -7
  466. evalscope/perf/plugin/api/custom_api.py +170 -57
  467. evalscope/perf/plugin/api/dashscope_api.py +4 -10
  468. evalscope/perf/plugin/api/default_api.py +214 -0
  469. evalscope/perf/plugin/api/openai_api.py +120 -41
  470. evalscope/perf/plugin/datasets/__init__.py +10 -6
  471. evalscope/perf/plugin/datasets/base.py +43 -1
  472. evalscope/perf/plugin/datasets/custom.py +22 -3
  473. evalscope/perf/plugin/datasets/flickr8k.py +5 -27
  474. evalscope/perf/plugin/datasets/kontext_bench.py +28 -0
  475. evalscope/perf/plugin/datasets/line_by_line.py +7 -3
  476. evalscope/perf/plugin/datasets/longalpaca.py +7 -3
  477. evalscope/perf/plugin/datasets/openqa.py +13 -14
  478. evalscope/perf/plugin/datasets/random_dataset.py +67 -0
  479. evalscope/perf/plugin/datasets/random_vl_dataset.py +80 -0
  480. evalscope/perf/plugin/datasets/speed_benchmark.py +11 -0
  481. evalscope/perf/plugin/registry.py +36 -16
  482. evalscope/perf/utils/analysis_result.py +24 -23
  483. evalscope/perf/utils/benchmark_util.py +95 -55
  484. evalscope/perf/utils/db_util.py +115 -78
  485. evalscope/perf/utils/local_server.py +12 -47
  486. evalscope/perf/utils/log_utils.py +63 -0
  487. evalscope/perf/utils/rich_display.py +192 -0
  488. evalscope/report/__init__.py +46 -3
  489. evalscope/report/combinator.py +143 -32
  490. evalscope/report/generator.py +74 -34
  491. evalscope/report/report.py +238 -0
  492. evalscope/run.py +71 -46
  493. evalscope/summarizer.py +5 -5
  494. evalscope/third_party/longbench_write/infer.py +1 -1
  495. evalscope/third_party/thinkbench/__init__.py +3 -0
  496. evalscope/third_party/thinkbench/eval.py +441 -0
  497. evalscope/third_party/thinkbench/infer.py +130 -0
  498. evalscope/third_party/thinkbench/resources/critique_template.txt +17 -0
  499. evalscope/third_party/thinkbench/resources/reformat_template.txt +31 -0
  500. evalscope/third_party/thinkbench/tools/__init__.py +0 -0
  501. evalscope/third_party/thinkbench/tools/llm.py +48 -0
  502. evalscope/third_party/thinkbench/tools/utils.py +13 -0
  503. evalscope/third_party/toolbench_static/llm/swift_infer.py +46 -20
  504. evalscope/third_party/toolbench_static/toolbench_static.py +2 -1
  505. evalscope/utils/__init__.py +82 -2
  506. evalscope/utils/argument_utils.py +64 -0
  507. evalscope/utils/chat_service.py +8 -6
  508. evalscope/utils/deprecation_utils.py +53 -0
  509. evalscope/utils/function_utils.py +266 -0
  510. evalscope/utils/import_utils.py +154 -0
  511. evalscope/utils/io_utils.py +336 -8
  512. evalscope/utils/json_schema.py +231 -0
  513. evalscope/utils/logger.py +121 -31
  514. evalscope/utils/model_utils.py +57 -1
  515. evalscope/utils/multi_choices.py +303 -0
  516. evalscope/utils/ner.py +377 -0
  517. evalscope/utils/url_utils.py +65 -0
  518. evalscope/version.py +2 -2
  519. evalscope-1.2.0.dist-info/METADATA +553 -0
  520. evalscope-1.2.0.dist-info/RECORD +628 -0
  521. {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/WHEEL +1 -1
  522. {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/top_level.txt +0 -1
  523. evalscope/backend/vlm_eval_kit/custom_dataset.py +0 -46
  524. evalscope/benchmarks/arc/ai2_arc.py +0 -151
  525. evalscope/benchmarks/benchmark.py +0 -76
  526. evalscope/benchmarks/ceval/ceval_exam.py +0 -146
  527. evalscope/benchmarks/ceval/samples.jsonl +0 -1
  528. evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
  529. evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
  530. evalscope/benchmarks/competition_math/competition_math.py +0 -79
  531. evalscope/benchmarks/data_adapter.py +0 -291
  532. evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
  533. evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
  534. evalscope/benchmarks/humaneval/humaneval.py +0 -79
  535. evalscope/benchmarks/mmlu/mmlu.py +0 -160
  536. evalscope/benchmarks/mmlu/samples.jsonl +0 -5
  537. evalscope/benchmarks/race/race.py +0 -104
  538. evalscope/benchmarks/race/samples.jsonl +0 -5
  539. evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
  540. evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
  541. evalscope/collections/evaluator.py +0 -198
  542. evalscope/evaluator/rating_eval.py +0 -157
  543. evalscope/evaluator/reviewer/__init__.py +0 -1
  544. evalscope/evaluator/reviewer/auto_reviewer.py +0 -391
  545. evalscope/metrics/code_metric.py +0 -98
  546. evalscope/metrics/named_metrics.py +0 -17
  547. evalscope/metrics/resources/gpt2-zhcn3-v4.bpe +0 -58485
  548. evalscope/metrics/resources/gpt2-zhcn3-v4.json +0 -1
  549. evalscope/models/base_adapter.py +0 -52
  550. evalscope/models/chat_adapter.py +0 -138
  551. evalscope/models/choice_adapter.py +0 -211
  552. evalscope/models/custom/__init__.py +0 -3
  553. evalscope/models/custom/custom_model.py +0 -53
  554. evalscope/models/custom/dummy_model.py +0 -63
  555. evalscope/models/custom_adapter.py +0 -67
  556. evalscope/models/local_model.py +0 -74
  557. evalscope/models/model.py +0 -229
  558. evalscope/models/server_adapter.py +0 -111
  559. evalscope/registry/__init__.py +0 -1
  560. evalscope/registry/config/cfg_arena.yaml +0 -77
  561. evalscope/registry/config/cfg_arena_zhihu.yaml +0 -63
  562. evalscope/registry/config/cfg_pairwise_baseline.yaml +0 -83
  563. evalscope/registry/config/cfg_single.yaml +0 -78
  564. evalscope/registry/data/prompt_template/lmsys_v2.jsonl +0 -8
  565. evalscope/registry/data/prompt_template/prompt_templates.jsonl +0 -8
  566. evalscope/registry/data/qa_browser/battle.jsonl +0 -634
  567. evalscope/registry/data/qa_browser/category_mapping.yaml +0 -10
  568. evalscope/registry/data/question.jsonl +0 -80
  569. evalscope/registry/tasks/arc.yaml +0 -28
  570. evalscope/registry/tasks/bbh.yaml +0 -26
  571. evalscope/registry/tasks/bbh_mini.yaml +0 -26
  572. evalscope/registry/tasks/ceval.yaml +0 -27
  573. evalscope/registry/tasks/ceval_mini.yaml +0 -26
  574. evalscope/registry/tasks/cmmlu.yaml +0 -27
  575. evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +0 -28
  576. evalscope/registry/tasks/general_qa.yaml +0 -27
  577. evalscope/registry/tasks/gsm8k.yaml +0 -29
  578. evalscope/registry/tasks/mmlu.yaml +0 -29
  579. evalscope/registry/tasks/mmlu_mini.yaml +0 -27
  580. evalscope/report/app.py +0 -506
  581. evalscope/report/utils.py +0 -133
  582. evalscope/run_arena.py +0 -202
  583. evalscope/utils/arena_utils.py +0 -217
  584. evalscope/utils/completion_parsers.py +0 -82
  585. evalscope/utils/utils.py +0 -301
  586. evalscope-0.10.0.dist-info/METADATA +0 -565
  587. evalscope-0.10.0.dist-info/RECORD +0 -286
  588. tests/__init__.py +0 -1
  589. tests/cli/__init__.py +0 -1
  590. tests/cli/test_collection.py +0 -57
  591. tests/cli/test_run.py +0 -165
  592. tests/perf/__init__.py +0 -1
  593. tests/perf/test_perf.py +0 -101
  594. tests/rag/test_clip_benchmark.py +0 -85
  595. tests/rag/test_mteb.py +0 -138
  596. tests/rag/test_ragas.py +0 -120
  597. tests/swift/__init__.py +0 -1
  598. tests/swift/test_run_swift_eval.py +0 -145
  599. tests/swift/test_run_swift_vlm_eval.py +0 -127
  600. tests/swift/test_run_swift_vlm_jugde_eval.py +0 -156
  601. tests/test_run_all.py +0 -12
  602. tests/vlm/__init__.py +0 -1
  603. tests/vlm/test_vlmeval.py +0 -60
  604. {tests/rag → evalscope/api}/__init__.py +0 -0
  605. {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/entry_points.txt +0 -0
  606. {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info/licenses}/LICENSE +0 -0
evalscope/report/app.py DELETED
@@ -1,506 +0,0 @@
- import glob
- import gradio as gr
- import numpy as np
- import os
- import pandas as pd
- import plotly.express as px
- import plotly.graph_objects as go
- from dataclasses import dataclass
- from typing import Any, List, Union
-
- from evalscope.constants import DataCollection
- from evalscope.report import Report, ReportKey, get_data_frame, get_report_list
- from evalscope.utils.io_utils import OutputsStructure, yaml_to_dict
- from evalscope.utils.logger import get_logger
-
- logger = get_logger()
-
-
- def scan_for_report_folders(root_path):
-     """Scan for folders containing reports subdirectories"""
-     logger.debug(f'Scanning for report folders in {root_path}')
-     if not os.path.exists(root_path):
-         return []
-
-     reports = []
-     # Iterate over all folders in the root path
-     for folder in glob.glob(os.path.join(root_path, '*')):
-         # Check if reports folder exists
-         reports_path = os.path.join(folder, OutputsStructure.REPORTS_DIR)
-         if not os.path.exists(reports_path):
-             continue
-
-         # Iterate over all items in reports folder
-         for model_item in glob.glob(os.path.join(reports_path, '*')):
-             if not os.path.isdir(model_item):
-                 continue
-             datasets = []
-             for dataset_item in glob.glob(os.path.join(model_item, '*.json')):
-                 datasets.append(os.path.basename(dataset_item).split('.')[0])
-             datasets = ','.join(datasets)
-             reports.append(f'{os.path.basename(folder)}@{os.path.basename(model_item)}:{datasets}')
-
-     reports = sorted(reports, reverse=True)
-     logger.debug(f'reports: {reports}')
-     return reports
-
-
- def process_report_name(report_name: str):
-     prefix, report_name = report_name.split('@')
-     model_name, datasets = report_name.split(':')
-     datasets = datasets.split(',')
-     return prefix, model_name, datasets
-
-
- def load_single_report(root_path: str, report_name: str):
-     prefix, model_name, datasets = process_report_name(report_name)
-     report_path_list = os.path.join(root_path, prefix, OutputsStructure.REPORTS_DIR, model_name)
-     report_list = get_report_list([report_path_list])
-
-     task_cfg_path = glob.glob(os.path.join(root_path, prefix, OutputsStructure.CONFIGS_DIR, '*.yaml'))[0]
-     task_cfg = yaml_to_dict(task_cfg_path)
-     return report_list, datasets, task_cfg
-
-
- def load_multi_report(root_path: str, report_names: List[str]):
-     report_list = []
-     for report_name in report_names:
-         prefix, model_name, datasets = process_report_name(report_name)
-         report_path_list = os.path.join(root_path, prefix, OutputsStructure.REPORTS_DIR, model_name)
-         reports = get_report_list([report_path_list])
-         report_list.extend(reports)
-     return report_list
-
-
- def get_acc_report_df(report_list: List[Report]):
-     data_dict = []
-     for report in report_list:
-         if report.name == DataCollection.NAME:
-             for metric in report.metrics:
-                 for category in metric.categories:
-                     item = {
-                         ReportKey.model_name: report.model_name,
-                         ReportKey.dataset_name: '/'.join(category.name),
-                         ReportKey.score: category.score,
-                         ReportKey.num: category.num,
-                     }
-                     data_dict.append(item)
-         else:
-             item = {
-                 ReportKey.model_name: report.model_name,
-                 ReportKey.dataset_name: report.dataset_name,
-                 ReportKey.score: report.score,
-                 ReportKey.num: report.metrics[0].num,
-             }
-             data_dict.append(item)
-     df = pd.DataFrame.from_dict(data_dict, orient='columns')
-     return df
-
-
- def get_compare_report_df(acc_df: pd.DataFrame):
-     df = acc_df.pivot_table(index=ReportKey.model_name, columns=ReportKey.dataset_name, values=ReportKey.score)
-     df.reset_index(inplace=True)
-     styler = df.style.background_gradient(cmap='RdYlGn', vmin=0.0, vmax=1.0, axis=0)
-     styler.format(precision=4)
-     return styler
-
-
- def plot_single_report_scores(df: pd.DataFrame):
-     plot = px.bar(
-         df,
-         x=df[ReportKey.dataset_name],
-         y=df[ReportKey.score],
-         color=df[ReportKey.dataset_name],
-         template='plotly_dark')
-     return plot
-
-
- def plot_single_report_sunburst(report_list: List[Report]):
-     if report_list[0].name == DataCollection.NAME:
-         df = get_data_frame(report_list)
-         categories = sorted([i for i in df.columns if i.startswith(ReportKey.category_prefix)])
-         path = categories + [ReportKey.subset_name]
-     else:
-         df = get_data_frame(report_list, flatten_metrics=False)
-         categories = sorted([i for i in df.columns if i.startswith(ReportKey.category_prefix)])
-         path = [ReportKey.dataset_name] + categories + [ReportKey.subset_name]
-     logger.debug(f'df: {df}')
-     df[categories] = df[categories].fillna('default')  # NOTE: fillna for empty categories
-     plot = px.sunburst(
-         df,
-         path=path,
-         values=ReportKey.num,
-         color=ReportKey.score,
-         color_continuous_scale='RdYlGn',  # see https://plotly.com/python/builtin-colorscales/
-         color_continuous_midpoint=np.average(df[ReportKey.score], weights=df[ReportKey.num]),
-         template='plotly_dark',
-         maxdepth=3)
-     plot.update_traces(insidetextorientation='radial')
-     plot.update_layout(margin=dict(t=10, l=10, r=10, b=10), coloraxis=dict(cmin=0, cmax=1))
-     return plot
-
-
- def get_single_dataset_data(df: pd.DataFrame, dataset_name: str):
-     return df[df[ReportKey.dataset_name] == dataset_name]
-
-
- def plot_single_dataset_scores(df: pd.DataFrame):
-     # TODO: add metric radio and replace category name
-     plot = px.bar(
-         df,
-         x=df[ReportKey.metric_name],
-         y=df[ReportKey.score],
-         color=df[ReportKey.subset_name],
-         template='plotly_dark',
-         barmode='group')
-     return plot
-
-
- def plot_multi_report_radar(df: pd.DataFrame):
-     fig = go.Figure()
-
-     grouped = df.groupby(ReportKey.model_name)
-     common_datasets = set.intersection(*[set(group[ReportKey.dataset_name]) for _, group in grouped])
-
-     for model_name, group in grouped:
-         common_group = group[group[ReportKey.dataset_name].isin(common_datasets)]
-         fig.add_trace(
-             go.Scatterpolar(
-                 r=common_group[ReportKey.score],
-                 theta=common_group[ReportKey.dataset_name],
-                 name=model_name,
-                 fill='toself'))
-
-     fig.update_layout(
-         template='plotly_dark',
-         polar=dict(radialaxis=dict(visible=True, range=[0, 1])),
-         margin=dict(t=20, l=20, r=20, b=20))
-     return fig
-
-
- def dict_to_markdown(data) -> str:
-     markdown_lines = []
-
-     for key, value in data.items():
-         bold_key = f'**{key}**'
-
-         if isinstance(value, list):
-             value_str = '\n' + '\n'.join([f' - {item}' for item in value])
-         elif isinstance(value, dict):
-             value_str = dict_to_markdown(value)
-         else:
-             value_str = str(value)
-
-         value_str = process_string(value_str)
-         markdown_line = f'{bold_key}: {value_str}'
-         markdown_lines.append(markdown_line)
-
-     return '\n\n'.join(markdown_lines)
-
-
- def process_string(string: str, max_length: int = 2048) -> str:
-     if len(string) > max_length:
-         return f'{string[:max_length // 2]}......{string[-max_length // 2:]}'
-     return string
-
-
- def process_model_prediction(item: Any):
-     if isinstance(item, dict):
-         return dict_to_markdown(item)
-     elif isinstance(item, list):
-         return '\n'.join([process_model_prediction(item) for item in item])
-     else:
-         return process_string(str(item))
-
-
- def normalize_score(score):
-     if isinstance(score, bool):
-         return 1.0 if score else 0.0
-     elif isinstance(score, dict):
-         for key in score:
-             return float(score[key])
-         return 0.0
-     else:
-         try:
-             return float(score)
-         except (ValueError, TypeError):
-             return 0.0
-
-
- def get_model_prediction(work_dir: str, model_name: str, dataset_name: str, subset_name: str):
-     data_path = os.path.join(work_dir, OutputsStructure.REVIEWS_DIR, model_name)
-     subset_name = subset_name.replace('/', '_')  # for collection report
-     origin_df = pd.read_json(os.path.join(data_path, f'{dataset_name}_{subset_name}.jsonl'), lines=True)
-     ds = []
-     for i, item in origin_df.iterrows():
-         raw_input = item['raw_input']
-         raw_pred_answer = item['choices'][0]['message']['content']
-         parsed_gold_answer = item['choices'][0]['review']['gold']
-         parsed_pred_answer = item['choices'][0]['review']['pred']
-         score = item['choices'][0]['review']['result']
-         raw_d = {
-             'Input': raw_input,
-             'Generated': raw_pred_answer,
-             'Gold': parsed_gold_answer if parsed_gold_answer != raw_input else '*Same as Input*',
-             'Pred': parsed_pred_answer if parsed_pred_answer != raw_pred_answer else '*Same as Generated*',
-             'Score': score,
-             'NScore': normalize_score(score)
-         }
-         ds.append(raw_d)
-
-     df_subset = pd.DataFrame(ds)
-     return df_subset
-
-
- def get_table_data(data_review_df: pd.DataFrame, page: int = 1, rows_per_page: int = 1) -> pd.DataFrame:
-     if data_review_df is None:
-         return None
-
-     logger.debug(f'page: {page}, rows_per_page: {rows_per_page}')
-     start = (page - 1) * rows_per_page
-     end = start + rows_per_page
-     df_subset = data_review_df.iloc[start:end].copy()
-     df_subset['Input'] = df_subset['Input'].map(process_model_prediction).astype(str)
-     df_subset['Score'] = df_subset['Score'].map(process_model_prediction).astype(str)
-     return df_subset
-
-
- @dataclass
- class SidebarComponents:
-     root_path: gr.Textbox
-     reports_dropdown: gr.Dropdown
-     load_btn: gr.Button
-
-
- def create_sidebar():
-     gr.Markdown('## Settings')
-     root_path = gr.Textbox(label='Report(s) Root Path', value='./outputs', placeholder='./outputs', lines=1)
-     reports_dropdown = gr.Dropdown(label='Select Report(s)', choices=[], multiselect=True, interactive=True)
-     load_btn = gr.Button('Load & View')
-     gr.Markdown('### Note: Select report(s) and click `Load & View` to view the data!')
-
-     @reports_dropdown.focus(inputs=[root_path], outputs=[reports_dropdown])
-     def update_dropdown_choices(root_path):
-         folders = scan_for_report_folders(root_path)
-         if len(folders) == 0:
-             gr.Warning('No reports found, please check the path', duration=3)
-         return gr.update(choices=folders)
-
-     return SidebarComponents(
-         root_path=root_path,
-         reports_dropdown=reports_dropdown,
-         load_btn=load_btn,
-     )
-
-
- @dataclass
- class SingleModelComponents:
-     report_name: gr.Dropdown
-
-
- def create_single_model_tab(sidebar: SidebarComponents):
-     report_name = gr.Dropdown(label='Select Report', choices=[], interactive=True)
-     work_dir = gr.State(None)
-     model_name = gr.State(None)
-
-     with gr.Accordion('Task Config', open=False):
-         task_config = gr.JSON(value=None)
-
-     report_list = gr.State([])
-
-     with gr.Tab('Datasets Overview'):
-         gr.Markdown('### Dataset Components')
-         sunburst_plot = gr.Plot(value=None, scale=1, label='Components')
-         gr.Markdown('### Dataset Scores')
-         score_plot = gr.Plot(value=None, scale=1, label='Scores')
-         gr.Markdown('### Dataset Scores Table')
-         score_table = gr.DataFrame(value=None)
-
-     with gr.Tab('Dataset Details'):
-         dataset_radio = gr.Radio(label='Select Dataset', choices=[], show_label=True, interactive=True)
-         gr.Markdown('### Dataset Scores')
-         dataset_plot = gr.Plot(value=None, scale=1, label='Scores')
-         gr.Markdown('### Dataset Scores Table')
-         dataset_table = gr.DataFrame(value=None)
-
-         gr.Markdown('### Model Prediction')
-         subset_radio = gr.Radio(label='Select Subset', choices=[], show_label=True, interactive=True)
-         with gr.Row():
-             answer_mode_radio = gr.Radio(
-                 label='Answer Mode', choices=['All', 'Pass', 'Fail'], value='All', interactive=True)
-             page_number = gr.Number(value=1, label='Page', minimum=1, maximum=1, step=1, interactive=True)
-             answer_mode_counts = gr.Markdown('', label='Counts')
-         data_review_df = gr.State(None)
-         filtered_review_df = gr.State(None)
-         data_review_table = gr.DataFrame(
-             value=None,
-             datatype=['markdown', 'markdown', 'markdown', 'markdown', 'markdown', 'number'],
-             # column_widths=['500px', '500px'],
-             wrap=True,
-             latex_delimiters=[{
-                 'left': '$$',
-                 'right': '$$',
-                 'display': True
-             }, {
-                 'left': '$',
-                 'right': '$',
-                 'display': False
-             }, {
-                 'left': '\\(',
-                 'right': '\\)',
-                 'display': False
-             }, {
-                 'left': '\\[',
-                 'right': '\\]',
-                 'display': True
-             }],
-             max_height=500)
-
-     @report_name.change(
-         inputs=[sidebar.root_path, report_name],
-         outputs=[report_list, task_config, dataset_radio, work_dir, model_name])
-     def update_single_report_data(root_path, report_name):
-         report_list, datasets, task_cfg = load_single_report(root_path, report_name)
-         work_dir = os.path.join(root_path, report_name.split('@')[0])
-         model_name = report_name.split('@')[1].split(':')[0]
-         return (report_list, task_cfg, gr.update(choices=datasets, value=datasets[0]), work_dir, model_name)
-
-     @report_list.change(inputs=[report_list], outputs=[score_plot, score_table, sunburst_plot])
-     def update_single_report_score(report_list):
-         report_score_df = get_acc_report_df(report_list)
-         report_score_plot = plot_single_report_scores(report_score_df)
-         report_sunburst_plot = plot_single_report_sunburst(report_list)
-         return report_score_plot, report_score_df, report_sunburst_plot
-
-     @gr.on(
-         triggers=[dataset_radio.change, report_list.change],
-         inputs=[dataset_radio, report_list],
-         outputs=[dataset_plot, dataset_table, subset_radio])
-     def update_single_report_dataset(dataset_name, report_list):
-         logger.debug(f'Updating single report dataset: {dataset_name}')
-         report_df = get_data_frame(report_list)
-         data_score_df = get_single_dataset_data(report_df, dataset_name)
-         data_score_plot = plot_single_dataset_scores(data_score_df)
-         subsets = data_score_df[ReportKey.subset_name].unique().tolist()
-         logger.debug(f'subsets: {subsets}')
-         return data_score_plot, data_score_df, gr.update(choices=subsets, value=subsets[0])
-
-     @subset_radio.change(
-         inputs=[work_dir, model_name, dataset_radio, subset_radio], outputs=[data_review_df, page_number])
-     def update_single_report_subset(work_dir, model_name, dataset_name, subset_name):
-         if not subset_name:
-             return gr.skip()
-         data_review_df = get_model_prediction(work_dir, model_name, dataset_name, subset_name)
-         return data_review_df, 1
-
-     @gr.on(
-         triggers=[data_review_df.change, answer_mode_radio.change],
-         inputs=[data_review_df, answer_mode_radio],
-         outputs=[filtered_review_df, page_number, answer_mode_counts])
-     def filter_data(data_review_df, answer_mode):
-         if data_review_df is None:
-             return None, gr.update(value=1, maximum=1), ''
-
-         all_count = len(data_review_df)
-         pass_df = data_review_df[data_review_df['NScore'] >= 0.99]
-         pass_count = len(pass_df)
-         fail_count = all_count - pass_count
-
-         counts_text = f'### All: {all_count} | Pass: {pass_count} | Fail: {fail_count}'
-
-         if answer_mode == 'Pass':
-             filtered_df = pass_df
-         elif answer_mode == 'Fail':
-             filtered_df = data_review_df[data_review_df['NScore'] < 0.99]
-         else:
-             filtered_df = data_review_df
-
-         max_page = max(1, len(filtered_df))
-
-         return (filtered_df, gr.update(value=1, maximum=max_page), counts_text)
-
-     @gr.on(
-         triggers=[filtered_review_df.change, page_number.change],
-         inputs=[filtered_review_df, page_number],
-         outputs=[data_review_table])
-     def update_table(filtered_df, page_number):
-         subset_df = get_table_data(filtered_df, page_number)
-         if subset_df is None:
-             return gr.skip()
-         return subset_df
-
-     return SingleModelComponents(report_name=report_name)
-
-
- @dataclass
- class MultiModelComponents:
-     multi_report_name: gr.Dropdown
-
-
- def create_multi_model_tab(sidebar: SidebarComponents):
-     multi_report_name = gr.Dropdown(label='Select Reports', choices=[], multiselect=True, interactive=True)
-     gr.Markdown('### Model Radar')
-     radar_plot = gr.Plot(value=None)
-     gr.Markdown('### Model Scores')
-     score_table = gr.DataFrame(value=None)
-
-     @multi_report_name.change(inputs=[sidebar.root_path, multi_report_name], outputs=[radar_plot, score_table])
-     def update_multi_report_data(root_path, multi_report_name):
-         if not multi_report_name:
-             return gr.skip()
-         report_list = load_multi_report(root_path, multi_report_name)
-         report_df = get_acc_report_df(report_list)
-         report_radar_plot = plot_multi_report_radar(report_df)
-         report_compare_df = get_compare_report_df(report_df)
-         return report_radar_plot, report_compare_df
-
-     return MultiModelComponents(multi_report_name=multi_report_name)
-
-
- def create_app():
-     with gr.Blocks(title='Evalscope Dashboard') as demo:
-         with gr.Row():
-             with gr.Column(scale=0, min_width=35):
-                 toggle_btn = gr.Button('<')
-             with gr.Column(scale=1):
-                 gr.HTML('<h1 style="text-align: left;">Evalscope Dashboard</h1>')  # text column
-
-         with gr.Row():
-             with gr.Column(scale=1) as sidebar_column:
-                 sidebar_visible = gr.State(True)
-                 sidebar = create_sidebar()
-
-             with gr.Column(scale=5):
-
-                 with gr.Column(visible=True):
-                     gr.Markdown('## Visualization')
-                     with gr.Tabs():
-                         with gr.Tab('Single Model'):
-                             single = create_single_model_tab(sidebar)
-
-                         with gr.Tab('Multi Model'):
-                             multi = create_multi_model_tab(sidebar)
-
-         @sidebar.load_btn.click(
-             inputs=[sidebar.reports_dropdown], outputs=[single.report_name, multi.multi_report_name])
-         def update_displays(reports_dropdown):
-             if not reports_dropdown:
-                 gr.Warning('No reports found, please check the path', duration=3)
-                 return gr.skip()
-
-             return (
-                 gr.update(choices=reports_dropdown, value=reports_dropdown[0]),  # update single model dropdown
-                 gr.update(choices=reports_dropdown, value=reports_dropdown)  # update multi model dropdown
-             )
-
-         @toggle_btn.click(inputs=[sidebar_visible], outputs=[sidebar_column, sidebar_visible, toggle_btn])
-         def toggle_sidebar(visible):
-             new_visible = not visible
-             text = '<' if new_visible else '>'
-             return gr.update(visible=new_visible), new_visible, gr.update(value=text)
-
-     demo.launch()
-
-
- if __name__ == '__main__':
-     create_app()
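
For context on the code above: the removed dashboard addressed each report with a single string. scan_for_report_folders emits entries of the form '<run-folder>@<model-name>:<comma-separated datasets>', which process_report_name later splits back apart. A minimal sketch of that round trip, using hypothetical folder, model, and dataset names:

    # Entry format produced by scan_for_report_folders above:
    #   '<run-folder>@<model-name>:<ds1>,<ds2>,...'
    report_entry = '20240101_120000@qwen2-7b-instruct:gsm8k,mmlu'  # hypothetical names

    # Same parsing steps as the removed process_report_name()
    prefix, rest = report_entry.split('@')
    model_name, datasets = rest.split(':')
    datasets = datasets.split(',')

    assert prefix == '20240101_120000'
    assert model_name == 'qwen2-7b-instruct'
    assert datasets == ['gsm8k', 'mmlu']

Note the format implies that '@' and ':' cannot appear in folder or model names, and that dataset names come from the report JSON file basenames.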
evalscope/report/utils.py DELETED
@@ -1,133 +0,0 @@
- import json
- import pandas as pd
- from collections import defaultdict
- from dataclasses import asdict, dataclass, field
- from typing import Any, Dict, List
-
- from evalscope.metrics import macro_mean, micro_mean
- from evalscope.utils import normalize_score
-
-
- @dataclass
- class Subset:
-     name: str = 'default_subset'
-     score: float = 0.0
-     num: int = 0
-
-     def __post_init__(self):
-         self.score = normalize_score(self.score)
-
-
- @dataclass
- class Category:
-     name: tuple[str] = field(default_factory=tuple)
-     num: int = 0
-     score: float = 0.0
-     macro_score: float = 0.0
-     subsets: List[Subset] = field(default_factory=list)
-
-     def __post_init__(self):
-         if isinstance(self.name, str):
-             # ensure name is tuple format
-             self.name = (self.name, )
-         self.num = sum(subset.num for subset in self.subsets)
-         self.score = normalize_score(micro_mean(self.subsets))
-         self.macro_score = normalize_score(macro_mean(self.subsets))
-
-     @classmethod
-     def from_dict(cls, data: dict):
-         subsets = [Subset(**subset) for subset in data.get('subsets', [])]
-         return cls(name=data['name'], subsets=subsets)
-
-
- @dataclass
- class Metric:
-     name: str = 'default_metric'
-     num: int = 0
-     score: float = 0.0
-     macro_score: float = 0.0
-     categories: List[Category] = field(default_factory=list)
-
-     def __post_init__(self):
-         self.num = sum(category.num for category in self.categories)
-         self.score = normalize_score(micro_mean(self.categories))
-         self.macro_score = normalize_score(macro_mean(self.categories))
-
-     @classmethod
-     def from_dict(cls, data: dict):
-         categories = [Category.from_dict(category) for category in data.get('categories', [])]
-         return cls(name=data['name'], categories=categories)
-
-
- class ReportKey:
-     model_name = 'Model'
-     dataset_name = 'Dataset'
-     metric_name = 'Metric'
-     category_name = 'Category'
-     category_prefix = 'Cat.'
-     subset_name = 'Subset'
-     num = 'Num'
-     score = 'Score'
-
-
- @dataclass
- class Report:
-     name: str = 'default_report'
-     dataset_name: str = 'default_dataset'
-     model_name: str = 'default_model'
-     score: float = 0.0
-     metrics: List[Metric] = field(default_factory=list)
-
-     def __post_init__(self):
-         self.score = self.metrics[0].score  # NOTE: only use the first metric by default
-
-     def to_dict(self) -> Dict[str, Any]:
-         return asdict(self)
-
-     @classmethod
-     def from_dict(cls, data: dict):
-         metrics = [Metric.from_dict(metric) for metric in data.get('metrics', [])]
-         return cls(
-             name=data['name'],
-             score=data['score'],
-             metrics=metrics,
-             dataset_name=data['dataset_name'],
-             model_name=data['model_name'])
-
-     @classmethod
-     def from_json(cls, json_file: str):
-         with open(json_file, 'r') as f:
-             data = json.load(f)
-         return cls.from_dict(data)
-
-     def to_dataframe(self, flatten_metrics: bool = True, flatten_categories: bool = True):
-         table = defaultdict(list)
-         for metric in self.metrics:
-             for category in metric.categories:
-                 for subset in category.subsets:
-                     table[ReportKey.model_name].append(self.model_name)
-                     table[ReportKey.dataset_name].append(self.dataset_name)
-                     table[ReportKey.metric_name].append(metric.name)
-                     table[ReportKey.category_name].append(category.name)
-                     table[ReportKey.subset_name].append(subset.name)
-                     table[ReportKey.num].append(subset.num)
-                     table[ReportKey.score].append(subset.score)  # TODO: convert to percentage
-             # NOTE: only flatten metrics if needed, use the first metric by default
-             if not flatten_metrics:
-                 break
-         df = pd.DataFrame.from_dict(table, orient='columns')
-         if flatten_categories:
-             df = self._flatten_categories(df)
-         return df
-
-     def _flatten_categories(self, df: pd.DataFrame):
-         # expand categories to multiple rows
-         df_categories = df.copy()
-         # multi-level aggregation for categories
-         max_depth = df_categories[ReportKey.category_name].apply(len).max()
-         for level in range(max_depth):
-             df_categories[f'{ReportKey.category_prefix}{level}'] = df_categories[ReportKey.category_name].apply(
-                 lambda x: x[level] if len(x) > level else None)
-
-         df_categories.drop(columns=[ReportKey.category_name], inplace=True)
-         return df_categories
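
For reference, a minimal sketch of how this removed Report container was consumed before 1.2.0 (it appears to be superseded by the new evalscope/report/report.py listed above). The sketch only runs against the 0.10.0 wheel, where the module still exists, and every field value below is a made-up illustration:

    from evalscope.report.utils import Report  # module removed in 1.2.0

    # Dict shape matches Report.from_dict / Metric.from_dict / Category.from_dict above;
    # all names and numbers here are hypothetical.
    report = Report.from_dict({
        'name': 'gsm8k',
        'dataset_name': 'gsm8k',
        'model_name': 'my-model',
        'score': 0.82,
        'metrics': [{
            'name': 'AverageAccuracy',
            'categories': [{
                'name': 'default',
                'subsets': [{'name': 'main', 'score': 0.82, 'num': 1319}],
            }],
        }],
    })

    # One row per (metric, category, subset); category tuples expand to 'Cat.0', ...
    df = report.to_dataframe()
    print(df[['Model', 'Dataset', 'Metric', 'Subset', 'Num', 'Score']])

Note that scores are recomputed on construction: each Category and Metric derives its micro/macro means from its children via micro_mean and macro_mean, so the 'score' values stored in the JSON are effectively advisory.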