evalscope 1.0.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (324)
  1. evalscope/api/benchmark/__init__.py +9 -1
  2. evalscope/api/benchmark/adapters/__init__.py +4 -0
  3. evalscope/api/benchmark/adapters/agent_adapter.py +8 -0
  4. evalscope/api/benchmark/adapters/default_data_adapter.py +75 -4
  5. evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
  6. evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
  7. evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
  8. evalscope/api/benchmark/adapters/text2image_adapter.py +12 -10
  9. evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
  10. evalscope/api/benchmark/benchmark.py +85 -2
  11. evalscope/api/benchmark/meta.py +10 -1
  12. evalscope/api/dataset/dataset.py +27 -6
  13. evalscope/api/dataset/loader.py +8 -3
  14. evalscope/api/evaluator/cache.py +31 -4
  15. evalscope/api/evaluator/evaluator.py +5 -0
  16. evalscope/api/evaluator/state.py +17 -1
  17. evalscope/api/messages/__init__.py +1 -0
  18. evalscope/api/messages/chat_message.py +52 -2
  19. evalscope/api/metric/__init__.py +1 -1
  20. evalscope/api/metric/metric.py +6 -1
  21. evalscope/api/metric/scorer.py +15 -7
  22. evalscope/api/mixin/__init__.py +1 -1
  23. evalscope/api/mixin/llm_judge_mixin.py +2 -0
  24. evalscope/api/mixin/sandbox_mixin.py +182 -0
  25. evalscope/api/model/generate_config.py +10 -6
  26. evalscope/api/model/model.py +5 -2
  27. evalscope/api/tool/tool_info.py +1 -1
  28. evalscope/app/app.py +3 -0
  29. evalscope/app/ui/multi_model.py +6 -1
  30. evalscope/app/ui/single_model.py +11 -5
  31. evalscope/app/utils/data_utils.py +8 -7
  32. evalscope/app/utils/env_utils.py +12 -0
  33. evalscope/app/utils/text_utils.py +14 -12
  34. evalscope/app/utils/visualization.py +2 -2
  35. evalscope/arguments.py +8 -4
  36. evalscope/backend/opencompass/backend_manager.py +0 -2
  37. evalscope/backend/rag_eval/utils/embedding.py +9 -1
  38. evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
  39. evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
  40. evalscope/benchmarks/aime/aime24_adapter.py +5 -0
  41. evalscope/benchmarks/aime/aime25_adapter.py +136 -1
  42. evalscope/benchmarks/aime/grader.py +307 -0
  43. evalscope/benchmarks/aime/math_normalize.py +189 -0
  44. evalscope/benchmarks/amc/amc_adapter.py +51 -0
  45. evalscope/benchmarks/arena_hard/arena_hard_adapter.py +1 -0
  46. evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
  47. evalscope/benchmarks/bfcl/{bfcl_adapter.py → v3/bfcl_v3_adapter.py} +131 -19
  48. evalscope/benchmarks/bfcl/{generation.py → v3/generation.py} +9 -9
  49. evalscope/benchmarks/bfcl/v3/utils.py +23 -0
  50. evalscope/benchmarks/bfcl/v4/__init__.py +0 -0
  51. evalscope/benchmarks/bfcl/v4/bfcl_v4_adapter.py +229 -0
  52. evalscope/benchmarks/bfcl/v4/utils.py +410 -0
  53. evalscope/benchmarks/biomix_qa/__init__.py +0 -0
  54. evalscope/benchmarks/biomix_qa/biomix_qa_adapter.py +36 -0
  55. evalscope/benchmarks/blink/__init__.py +0 -0
  56. evalscope/benchmarks/blink/blink_adapter.py +61 -0
  57. evalscope/benchmarks/ceval/ceval_adapter.py +1 -2
  58. evalscope/benchmarks/chartqa/__init__.py +0 -0
  59. evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
  60. evalscope/benchmarks/chartqa/utils.py +38 -0
  61. evalscope/benchmarks/coin_flip/__init__.py +0 -0
  62. evalscope/benchmarks/coin_flip/coin_flip_adapter.py +128 -0
  63. evalscope/benchmarks/commonsense_qa/__init__.py +0 -0
  64. evalscope/benchmarks/commonsense_qa/commonsense_qa_adapter.py +32 -0
  65. evalscope/benchmarks/competition_math/competition_math_adapter.py +5 -0
  66. evalscope/benchmarks/data_collection/data_collection_adapter.py +24 -19
  67. evalscope/benchmarks/docvqa/__init__.py +0 -0
  68. evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
  69. evalscope/benchmarks/drivelology/__init__.py +0 -0
  70. evalscope/benchmarks/drivelology/drivelology_binary_adapter.py +170 -0
  71. evalscope/benchmarks/drivelology/drivelology_multilabel_adapter.py +254 -0
  72. evalscope/benchmarks/drivelology/drivelology_selection_adapter.py +49 -0
  73. evalscope/benchmarks/drivelology/drivelology_writing_adapter.py +218 -0
  74. evalscope/benchmarks/drop/drop_adapter.py +15 -44
  75. evalscope/benchmarks/drop/utils.py +97 -0
  76. evalscope/benchmarks/frames/frames_adapter.py +2 -1
  77. evalscope/benchmarks/general_arena/general_arena_adapter.py +7 -2
  78. evalscope/benchmarks/general_arena/utils.py +2 -1
  79. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +1 -1
  80. evalscope/benchmarks/general_qa/general_qa_adapter.py +1 -1
  81. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +25 -9
  82. evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
  83. evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +159 -0
  84. evalscope/benchmarks/halu_eval/__init__.py +0 -0
  85. evalscope/benchmarks/halu_eval/halu_eval_adapter.py +128 -0
  86. evalscope/benchmarks/halu_eval/halu_eval_instructions.py +84 -0
  87. evalscope/benchmarks/healthbench/__init__.py +0 -0
  88. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  89. evalscope/benchmarks/healthbench/utils.py +102 -0
  90. evalscope/benchmarks/hle/hle_adapter.py +3 -2
  91. evalscope/benchmarks/humaneval/humaneval_adapter.py +24 -52
  92. evalscope/benchmarks/humaneval/utils.py +235 -0
  93. evalscope/benchmarks/ifeval/instructions_util.py +2 -3
  94. evalscope/benchmarks/image_edit/__init__.py +0 -0
  95. evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
  96. evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
  97. evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
  98. evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
  99. evalscope/benchmarks/infovqa/__init__.py +0 -0
  100. evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
  101. evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
  102. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +66 -54
  103. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  104. evalscope/benchmarks/logi_qa/__int__.py +0 -0
  105. evalscope/benchmarks/logi_qa/logi_qa_adapter.py +41 -0
  106. evalscope/benchmarks/math_500/math_500_adapter.py +5 -1
  107. evalscope/benchmarks/math_qa/__init__.py +0 -0
  108. evalscope/benchmarks/math_qa/math_qa_adapter.py +35 -0
  109. evalscope/benchmarks/math_verse/__init__.py +0 -0
  110. evalscope/benchmarks/math_verse/math_verse_adapter.py +105 -0
  111. evalscope/benchmarks/math_vision/__init__.py +0 -0
  112. evalscope/benchmarks/math_vision/math_vision_adapter.py +116 -0
  113. evalscope/benchmarks/math_vista/__init__.py +0 -0
  114. evalscope/benchmarks/math_vista/math_vista_adapter.py +114 -0
  115. evalscope/benchmarks/med_mcqa/__init__.py +0 -0
  116. evalscope/benchmarks/med_mcqa/med_mcqa_adapter.py +32 -0
  117. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  118. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +53 -0
  119. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  120. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  121. evalscope/benchmarks/mm_star/__init__.py +0 -0
  122. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  123. evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +1 -1
  124. evalscope/benchmarks/mmmu/__init__.py +0 -0
  125. evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
  126. evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
  127. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
  128. evalscope/benchmarks/mri_mcqa/__init__.py +0 -0
  129. evalscope/benchmarks/mri_mcqa/mri_mcqa_adapter.py +34 -0
  130. evalscope/benchmarks/multi_if/__init__.py +0 -0
  131. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  132. evalscope/benchmarks/multi_if/metrics.py +120 -0
  133. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  134. evalscope/benchmarks/music_trivia/__init__.py +0 -0
  135. evalscope/benchmarks/music_trivia/music_trivia_adapter.py +36 -0
  136. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +7 -6
  137. evalscope/benchmarks/ner/__init__.py +0 -0
  138. evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
  139. evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
  140. evalscope/benchmarks/ner/copious_adapter.py +85 -0
  141. evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
  142. evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
  143. evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
  144. evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
  145. evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
  146. evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
  147. evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
  148. evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
  149. evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
  150. evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
  151. evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
  152. evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
  153. evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
  154. evalscope/benchmarks/ocr_bench/__init__.py +0 -0
  155. evalscope/benchmarks/ocr_bench/ocr_bench/__init__.py +0 -0
  156. evalscope/benchmarks/ocr_bench/ocr_bench/ocr_bench_adapter.py +101 -0
  157. evalscope/benchmarks/ocr_bench/ocr_bench_v2/IoUscore_metric.py +87 -0
  158. evalscope/benchmarks/ocr_bench/ocr_bench_v2/TEDS_metric.py +963 -0
  159. evalscope/benchmarks/ocr_bench/ocr_bench_v2/__init__.py +0 -0
  160. evalscope/benchmarks/ocr_bench/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
  161. evalscope/benchmarks/ocr_bench/ocr_bench_v2/page_ocr_metric.py +50 -0
  162. evalscope/benchmarks/ocr_bench/ocr_bench_v2/parallel.py +46 -0
  163. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/__init__.py +0 -0
  164. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/readme.txt +26 -0
  165. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
  166. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/script.py +481 -0
  167. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_metric.py +179 -0
  168. evalscope/benchmarks/ocr_bench/ocr_bench_v2/utils.py +433 -0
  169. evalscope/benchmarks/ocr_bench/ocr_bench_v2/vqa_metric.py +254 -0
  170. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  171. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  172. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  173. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  174. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  175. evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
  176. evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
  177. evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
  178. evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
  179. evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
  180. evalscope/benchmarks/piqa/__init__.py +0 -0
  181. evalscope/benchmarks/piqa/piqa_adapter.py +32 -0
  182. evalscope/benchmarks/poly_math/__init__.py +0 -0
  183. evalscope/benchmarks/poly_math/poly_math_adapter.py +132 -0
  184. evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
  185. evalscope/benchmarks/pope/__init__.py +0 -0
  186. evalscope/benchmarks/pope/pope_adapter.py +112 -0
  187. evalscope/benchmarks/process_bench/process_bench_adapter.py +1 -0
  188. evalscope/benchmarks/pumed_qa/__init__.py +0 -0
  189. evalscope/benchmarks/pumed_qa/pubmed_qa_adapter.py +175 -0
  190. evalscope/benchmarks/qasc/__init__.py +0 -0
  191. evalscope/benchmarks/qasc/qasc_adapter.py +35 -0
  192. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  193. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  194. evalscope/benchmarks/sciq/__init__.py +0 -0
  195. evalscope/benchmarks/sciq/sciq_adapter.py +36 -0
  196. evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
  197. evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
  198. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +1 -1
  199. evalscope/benchmarks/simple_vqa/__init__.py +0 -0
  200. evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
  201. evalscope/benchmarks/siqa/__init__.py +0 -0
  202. evalscope/benchmarks/siqa/siqa_adapter.py +39 -0
  203. evalscope/benchmarks/tau_bench/tau2_bench/__init__.py +0 -0
  204. evalscope/benchmarks/tau_bench/tau2_bench/generation.py +158 -0
  205. evalscope/benchmarks/tau_bench/tau2_bench/tau2_bench_adapter.py +146 -0
  206. evalscope/benchmarks/tau_bench/tau_bench/__init__.py +0 -0
  207. evalscope/benchmarks/tau_bench/{generation.py → tau_bench/generation.py} +1 -1
  208. evalscope/benchmarks/tau_bench/{tau_bench_adapter.py → tau_bench/tau_bench_adapter.py} +29 -29
  209. evalscope/benchmarks/text2image/__init__.py +0 -0
  210. evalscope/benchmarks/{aigc/t2i → text2image}/evalmuse_adapter.py +3 -1
  211. evalscope/benchmarks/{aigc/t2i → text2image}/genai_bench_adapter.py +2 -2
  212. evalscope/benchmarks/{aigc/t2i → text2image}/general_t2i_adapter.py +1 -1
  213. evalscope/benchmarks/{aigc/t2i → text2image}/hpdv2_adapter.py +7 -2
  214. evalscope/benchmarks/{aigc/t2i → text2image}/tifa_adapter.py +1 -0
  215. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +3 -3
  216. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +1 -2
  217. evalscope/benchmarks/visu_logic/__init__.py +0 -0
  218. evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
  219. evalscope/benchmarks/wmt/__init__.py +0 -0
  220. evalscope/benchmarks/wmt/wmt24_adapter.py +294 -0
  221. evalscope/benchmarks/zerobench/__init__.py +0 -0
  222. evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
  223. evalscope/cli/start_app.py +7 -1
  224. evalscope/cli/start_perf.py +7 -1
  225. evalscope/config.py +103 -18
  226. evalscope/constants.py +18 -0
  227. evalscope/evaluator/evaluator.py +138 -82
  228. evalscope/metrics/bert_score/__init__.py +0 -0
  229. evalscope/metrics/bert_score/scorer.py +338 -0
  230. evalscope/metrics/bert_score/utils.py +697 -0
  231. evalscope/metrics/llm_judge.py +19 -7
  232. evalscope/metrics/math_parser.py +14 -0
  233. evalscope/metrics/metric.py +317 -13
  234. evalscope/metrics/metrics.py +37 -0
  235. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
  236. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  237. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
  238. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
  239. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
  240. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
  241. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  242. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  243. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
  244. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
  245. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
  246. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
  247. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
  248. evalscope/models/image_edit_model.py +125 -0
  249. evalscope/models/model_apis.py +22 -0
  250. evalscope/models/openai_compatible.py +21 -0
  251. evalscope/models/text2image_model.py +2 -2
  252. evalscope/models/utils/openai.py +16 -6
  253. evalscope/perf/arguments.py +26 -4
  254. evalscope/perf/benchmark.py +76 -89
  255. evalscope/perf/http_client.py +31 -16
  256. evalscope/perf/main.py +15 -2
  257. evalscope/perf/plugin/api/base.py +9 -7
  258. evalscope/perf/plugin/api/custom_api.py +13 -58
  259. evalscope/perf/plugin/api/default_api.py +188 -79
  260. evalscope/perf/plugin/api/openai_api.py +85 -20
  261. evalscope/perf/plugin/datasets/base.py +21 -0
  262. evalscope/perf/plugin/datasets/custom.py +2 -3
  263. evalscope/perf/plugin/datasets/flickr8k.py +2 -2
  264. evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
  265. evalscope/perf/plugin/datasets/line_by_line.py +2 -3
  266. evalscope/perf/plugin/datasets/longalpaca.py +2 -3
  267. evalscope/perf/plugin/datasets/openqa.py +2 -4
  268. evalscope/perf/plugin/datasets/random_dataset.py +1 -3
  269. evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
  270. evalscope/perf/utils/benchmark_util.py +43 -27
  271. evalscope/perf/utils/db_util.py +14 -19
  272. evalscope/perf/utils/local_server.py +3 -44
  273. evalscope/perf/utils/log_utils.py +21 -6
  274. evalscope/report/__init__.py +13 -3
  275. evalscope/report/combinator.py +91 -20
  276. evalscope/report/generator.py +8 -87
  277. evalscope/report/report.py +8 -4
  278. evalscope/run.py +13 -5
  279. evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
  280. evalscope/utils/argument_utils.py +1 -1
  281. evalscope/utils/chat_service.py +1 -1
  282. evalscope/utils/function_utils.py +249 -12
  283. evalscope/utils/import_utils.py +73 -1
  284. evalscope/utils/io_utils.py +132 -7
  285. evalscope/utils/json_schema.py +25 -2
  286. evalscope/utils/logger.py +69 -18
  287. evalscope/utils/model_utils.py +4 -3
  288. evalscope/utils/multi_choices.py +39 -7
  289. evalscope/utils/ner.py +377 -0
  290. evalscope/version.py +2 -2
  291. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/METADATA +252 -408
  292. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/RECORD +290 -154
  293. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/WHEEL +1 -1
  294. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/top_level.txt +0 -1
  295. evalscope/api/mixin/dataset_mixin.py +0 -105
  296. evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +0 -44
  297. tests/__init__.py +0 -1
  298. tests/aigc/__init__.py +0 -1
  299. tests/aigc/test_t2i.py +0 -142
  300. tests/benchmark/__init__.py +0 -1
  301. tests/benchmark/test_eval.py +0 -386
  302. tests/cli/__init__.py +0 -1
  303. tests/cli/test_all.py +0 -229
  304. tests/cli/test_collection.py +0 -96
  305. tests/cli/test_custom.py +0 -268
  306. tests/perf/__init__.py +0 -1
  307. tests/perf/test_perf.py +0 -176
  308. tests/rag/test_clip_benchmark.py +0 -90
  309. tests/rag/test_mteb.py +0 -213
  310. tests/rag/test_ragas.py +0 -128
  311. tests/swift/__init__.py +0 -1
  312. tests/swift/test_run_swift_eval.py +0 -146
  313. tests/swift/test_run_swift_vlm_eval.py +0 -128
  314. tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
  315. tests/test_run_all.py +0 -12
  316. tests/utils.py +0 -13
  317. tests/vlm/__init__.py +0 -1
  318. tests/vlm/test_vlmeval.py +0 -102
  319. /evalscope/benchmarks/{aigc → aa_lcr}/__init__.py +0 -0
  320. /evalscope/benchmarks/{aigc/i2i → ai2d}/__init__.py +0 -0
  321. /evalscope/benchmarks/{aigc/t2i → amc}/__init__.py +0 -0
  322. {tests/rag → evalscope/benchmarks/bfcl/v3}/__init__.py +0 -0
  323. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/entry_points.txt +0 -0
  324. {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info/licenses}/LICENSE +0 -0
@@ -0,0 +1,64 @@
+import re
+from typing import Any, Dict, List
+
+from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.io_utils import bytes_to_base64
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+SUBSET_LIST = ['default']
+
+OPEN_PROMPT = (
+    'Read the picture and solve the following problem step by step.'
+    'The last line of your response should be of the form'
+    ' "ANSWER: $ANSWER" (without quotes) where $ANSWER is the answer to the problem.\n\n'
+    '{question}\n\n'
+    'Remember to put your answer on its own line at the end in the form'
+    ' "ANSWER: $ANSWER" (without quotes) where $ANSWER is the answer to the problem,'
+    ' and you do not need to use a \\boxed command.'
+)
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='real_world_qa',
+        pretty_name='RealWorldQA',
+        tags=[Tags.MULTI_MODAL, Tags.KNOWLEDGE, Tags.QA],
+        description=
+        'RealWorldQA is a benchmark designed to evaluate the real-world spatial understanding capabilities of multimodal AI models, contributed by XAI. It assesses how well these models comprehend physical environments. The benchmark consists of 700+ images, each accompanied by a question and a verifiable answer. These images are drawn from real-world scenarios, including those captured from vehicles. The goal is to advance AI models\' understanding of our physical world.',  # noqa: E501
+        dataset_id='lmms-lab/RealWorldQA',
+        subset_list=SUBSET_LIST,
+        metric_list=['acc'],
+        eval_split='test',
+        prompt_template=OPEN_PROMPT,
+    )
+)
+class RealWorldQAAdapter(VisionLanguageAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        content_list: list[Content] = [ContentText(text=OPEN_PROMPT.format(question=record['question']))]
+        image = record.get('image')
+        if image:
+            image_base64 = bytes_to_base64(image['bytes'], format='webp', add_header=True)
+            content_list.append(ContentImage(image=image_base64))
+        return Sample(
+            input=[ChatMessageUser(content=content_list)],
+            target=record['answer'],
+            metadata={'image_path': record['image_path']}
+        )
+
+    def extract_answer(self, prediction: str, task_state: TaskState) -> str:
+        pattern = r'ANSWER:\s*(.*)'
+        match = re.search(pattern, prediction)
+        if match:
+            return match.group(1).strip()
+        return ''
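
The adapter above registers the dataset under name='real_world_qa'. As a rough orientation for readers of this diff, the sketch below shows how such a newly registered benchmark is typically invoked through evalscope's documented TaskConfig/run_task interface; the model name, endpoint, API key, and limit are placeholder values and are not part of this release.

# Minimal usage sketch (illustrative only; placeholder model/endpoint values).
from evalscope import TaskConfig, run_task
from evalscope.constants import EvalType

task_cfg = TaskConfig(
    model='my-vlm-endpoint',              # placeholder: an OpenAI-compatible vision-language model
    api_url='http://127.0.0.1:8801/v1',   # placeholder service URL
    api_key='EMPTY',                      # placeholder key
    eval_type=EvalType.SERVICE,
    datasets=['real_world_qa'],           # name registered by RealWorldQAAdapter above
    limit=5,                              # small smoke-test run
)
run_task(task_cfg=task_cfg)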
File without changes
@@ -0,0 +1,36 @@
+from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.multi_choices import MultipleChoiceTemplate
+
+DESCRIPTION = (
+    'The SciQ dataset contains crowdsourced science exam questions about Physics, '
+    'Chemistry and Biology, among others. For the majority of the questions, '
+    'an additional paragraph with supporting evidence for the correct answer is provided.'
+)  # noqa: E501
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='sciq',
+        pretty_name='SciQ',
+        tags=[Tags.READING_COMPREHENSION, Tags.KNOWLEDGE, Tags.MULTIPLE_CHOICE],
+        description=DESCRIPTION.strip(),
+        dataset_id='extraordinarylab/sciq',
+        metric_list=['acc'],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='test',
+        prompt_template=MultipleChoiceTemplate.SINGLE_ANSWER,
+    )
+)
+class SciQAdapter(MultiChoiceAdapter):
+
+    def record_to_sample(self, record) -> Sample:
+        return Sample(
+            input=record['question'],
+            choices=record['choices'],
+            target=record['answer'],
+            metadata={},
+        )
File without changes
@@ -0,0 +1,72 @@
+# flake8: noqa: E501
+import re
+from typing import Any, Dict, List
+
+from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.io_utils import bytes_to_base64
+from evalscope.utils.logger import get_logger
+from evalscope.utils.multi_choices import MultipleChoiceTemplate, parse_answers, prompt
+
+logger = get_logger()
+
+MULT_CHOICE_PROMPT = MultipleChoiceTemplate.SINGLE_ANSWER_COT
+
+SUBSET_LIST = ['chart', 'web', 'map']
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='seed_bench_2_plus',
+        pretty_name='SEED-Bench-2-Plus',
+        dataset_id='evalscope/SEED-Bench-2-Plus',
+        tags=[Tags.KNOWLEDGE, Tags.REASONING, Tags.MULTIPLE_CHOICE, Tags.MULTI_MODAL],
+        description=
+        'SEED-Bench-2-Plus is a large-scale benchmark to evaluate Multimodal Large Language Models (MLLMs). It consists of 2.3K multiple-choice questions with precise human annotations, spanning three broad categories: Charts, Maps, and Webs, each of which covers a wide spectrum of text-rich scenarios in the real world.',
+        subset_list=SUBSET_LIST,
+        metric_list=['acc'],
+        eval_split='test',
+        prompt_template=MULT_CHOICE_PROMPT,
+    )
+)
+class SeedBench2PlusAdapter(VisionLanguageAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.reformat_subset = True
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        question = record['question']
+        answers_list = [record['choice_A'], record['choice_B'], record['choice_C'], record['choice_D']]
+        input_text = prompt(question=question, choices=answers_list, template=self.prompt_template)
+        content_list: List[Content] = [ContentText(text=input_text)]
+        image = record['image']
+        if image:
+            image_base64 = bytes_to_base64(image['bytes'], format='png', add_header=True)
+            content_list.append(ContentImage(image=image_base64))
+        metadata = {
+            'data_id': record['data_id'],
+            'question_id': record['question_id'],
+            'question_image_subtype': record['question_image_subtype'],
+            'data_source': record['data_source'],
+            'data_type': record['data_type'],
+            'level': record['level'],
+            'subpart': record['subpart'],
+            'version': record['version'],
+        }
+        label_answer = record['answer']
+        return Sample(
+            input=[ChatMessageUser(content=content_list)],
+            choices=answers_list,
+            target=label_answer,
+            subset_key=record['question_image_type'],
+            metadata=metadata,
+        )
+
+    def extract_answer(self, prediction: str, task_state: TaskState) -> str:
+        answers = parse_answers(task_state)
+        return ''.join(sorted(list(answers)))
@@ -101,7 +101,7 @@ Just return the letters "A", "B", or "C", with no text around it.
         tags=[Tags.KNOWLEDGE, Tags.QA],
         description=
         'SimpleQA is a benchmark designed to evaluate the performance of language models on simple question-answering tasks. It includes a set of straightforward questions that require basic reasoning and understanding capabilities.',  # noqa: E501
-        dataset_id='AI-ModelScope/SimpleQA',
+        dataset_id='evalscope/SimpleQA',
         metric_list=['is_correct', 'is_incorrect', 'is_not_attempted'],
         few_shot_num=0,
         train_split=None,
File without changes
@@ -0,0 +1,169 @@
+# flake8: noqa: E501
+import re
+from typing import Any, Dict
+
+from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+from evalscope.api.metric.scorer import Score
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+GRADER_TEMPLATE = """
+Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"].
+First, I will give examples of each grade, and then you will grade a new example.
+
+
+The following are examples of CORRECT predicted answers.
+```
+Question: What are the names of Barack Obama's children?
+Gold target: Malia Obama and Sasha Obama
+Predicted answer 1: sasha and malia obama
+Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check
+Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001.
+```
+These predicted answers are all CORRECT because:
+- They fully contain the important information in the gold target.
+- They do not contain any information that contradicts the gold target.
+- Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter.
+- Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions.
+
+
+The following are examples of INCORRECT predicted answers.
+```
+Question: What are the names of Barack Obama's children?
+Gold target: Malia and Sasha
+Predicted answer 1: Malia.
+Predicted answer 2: Malia, Sasha, and Susan.
+Predicted answer 3: Barack Obama does not have any children.
+Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia.
+Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children.
+Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer?
+Predicted answer 6: It may be the case that Obama's child is named James. However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information.
+```
+These predicted answers are all INCORRECT because:
+- A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect.
+
+
+The following are examples of NOT_ATTEMPTED predicted answers.
+```
+Question: What are the names of Barack Obama's children?
+Gold target: Malia and Sasha
+Predicted answer 1: I don't know.
+Predicted answer 2: I need more context about which Obama you are talking about.
+Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children.
+Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one.
+```
+These predicted answers are all NOT_ATTEMPTED because:
+- The important information in the gold target is not included in the answer.
+- No statements in the answer contradict the gold target.
+
+
+Also note the following things:
+- For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k".
+- Predicted answers "120k", "124k", and 115k" are all CORRECT.
+- Predicted answers "100k" and "113k" are INCORRECT.
+- Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target.
+- The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question.
+- For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer.
+- Do not punish predicted answers if they omit information that would be clearly inferred from the question.
+- For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California".
+- Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question.
+- For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question.
+- For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed.
+- Do not punish for typos in people's name if it's clearly the same name.
+- For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung".
+
+
+Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT_ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+```
+Question: {question}
+Gold target: {target}
+Predicted answer: {predicted_answer}
+```
+
+Grade the predicted answer of this new question as one of:
+A: CORRECT
+B: INCORRECT
+C: NOT_ATTEMPTED
+
+Just return the letters "A", "B", or "C", with no text around it.
+""".strip()  # noqa: E501
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='simple_vqa',
+        pretty_name='SimpleVQA',
+        dataset_id='m-a-p/SimpleVQA',
+        tags=[Tags.REASONING, Tags.MULTI_MODAL, Tags.QA],
+        description=
+        'SimpleVQA, the first comprehensive multi-modal benchmark to evaluate the factuality ability of MLLMs to answer natural language short questions. SimpleVQA is characterized by six key features: it covers multiple tasks and multiple scenarios, ensures high quality and challenging queries, maintains static and timeless reference answers, and is straightforward to evaluate.',
+        metric_list=['acc'],
+        eval_split='test',
+        prompt_template='Answer the question:\n\n{question}',
+    )
+)
+class SimpleVQAAdapter(VisionLanguageAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._use_llm_judge = True  # Use LLM as a judge by default
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        content_list: list[Content] = [ContentText(text=self.prompt_template.format(question=record['question']))]
+        image_base64 = record['image']
+        content_list.append(ContentImage(image=f'data:image/jpeg;base64,{image_base64}'))
+        return Sample(
+            input=[ChatMessageUser(content=content_list)],
+            target=record['answer'],
+            metadata={
+                'data_id': record['data_id'],
+                'image_description': record['image_description'],
+                'language': record['language'],
+                'original_category': record['original_category'],
+                'source': record['source'],
+                'atomic_question': record['atomic_question'],
+                'atomic_fact': record['atomic_fact'],
+            }
+        )
+
+    def llm_match_score(
+        self,
+        original_prediction: str,
+        filtered_prediction: str,
+        reference: str,
+        task_state: TaskState,
+    ) -> Score:
+        score = Score(
+            extracted_prediction=filtered_prediction,
+            prediction=original_prediction,
+        )
+
+        question = task_state.input_text
+
+        # Request judge and obtain score
+        prompt = GRADER_TEMPLATE.format(question=question, target=reference, predicted_answer=filtered_prediction)
+        judge_response = self.llm_judge.judge(prompt)
+        # parse grading response
+        match = re.search(r'(A|B|C)', judge_response)
+        res = match.group(0) if match else 'C'
+
+        # Set score based on the match result
+        score.value = {
+            'is_correct': 1 if res == 'A' else 0,
+            'is_incorrect': 1 if res == 'B' else 0,
+            'is_not_attempted': 1 if res == 'C' else 0,
+        }
+        score.explanation = f'LLM judge: {judge_response}'
+        score.metadata = {
+            'source': 'llm_judge',
+            'judge_strategy': self.judge_strategy,
+            'model': self.llm_judge.model_id
+        }
+        score.main_score_name = 'is_correct'
+        return score
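
SimpleVQAAdapter.llm_match_score reduces the judge's free-text reply to a single letter and maps it onto three mutually exclusive flags, falling back to NOT_ATTEMPTED when nothing parses. A standalone sketch of just that mapping (the function name is illustrative, not part of the package):

import re
from typing import Dict

def grade_to_flags(judge_response: str) -> Dict[str, int]:
    # Mirrors SimpleVQAAdapter.llm_match_score: the first A/B/C in the judge
    # reply wins; anything unparseable is counted as NOT_ATTEMPTED ('C').
    match = re.search(r'(A|B|C)', judge_response)
    res = match.group(0) if match else 'C'
    return {
        'is_correct': int(res == 'A'),
        'is_incorrect': int(res == 'B'),
        'is_not_attempted': int(res == 'C'),
    }

assert grade_to_flags('A') == {'is_correct': 1, 'is_incorrect': 0, 'is_not_attempted': 0}
assert grade_to_flags('no grade given') == {'is_correct': 0, 'is_incorrect': 0, 'is_not_attempted': 1}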
File without changes
@@ -0,0 +1,39 @@
+from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.multi_choices import MultipleChoiceTemplate
+
+DESCRIPTION = (
+    'Social Interaction QA (SIQA) is a question-answering benchmark for testing social commonsense intelligence. '
+    'Contrary to many prior benchmarks that focus on physical or taxonomic knowledge, Social IQa focuses on '
+    "reasoning about people's actions and their social implications."
+)
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='siqa',
+        pretty_name='SIQA',
+        tags=[Tags.COMMONSENSE, Tags.REASONING, Tags.MULTIPLE_CHOICE],
+        description=DESCRIPTION.strip(),
+        dataset_id='extraordinarylab/siqa',
+        metric_list=['acc'],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='validation',
+        prompt_template=MultipleChoiceTemplate.SINGLE_ANSWER,
+    )
+)
+class SIQAAdapter(MultiChoiceAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def record_to_sample(self, record) -> Sample:
+        return Sample(
+            input=record['question'],
+            choices=record['choices'],
+            target=record['answer'],
+            metadata={},
+        )
File without changes
@@ -0,0 +1,158 @@
+import json
+import sys
+import tau2.utils.llm_utils as tau_llm_utils
+from tau2.data_model.message import AssistantMessage, Message, ToolCall
+from tau2.data_model.tasks import Task
+from tau2.environment.tool import Tool
+from tau2.run import run_task
+from tau2.utils.llm_utils import to_litellm_messages
+from typing import Any, Callable, Dict, List, Optional, Sequence
+
+from evalscope.api.dataset.dataset import Sample
+from evalscope.api.messages.chat_message import dict_to_chat_message
+from evalscope.api.model import GenerateConfig, get_model
+from evalscope.api.model.model import Model
+from evalscope.api.model.model_output import ChatCompletionChoice, ModelOutput
+from evalscope.api.tool.tool_info import ToolInfo
+from evalscope.constants import EvalType
+from evalscope.models.utils.openai import openai_chat_choices
+from evalscope.utils.function_utils import run_once
+
+MODEL_DICT: Dict[str, Model] = {
+    'agent': None,
+    'user': None,
+}
+
+_MODEL_PATCHED: bool = False
+_ORIGINAL_TAU2_GENERATE: Optional[Callable[..., Any]] = None
+
+
+def _patch_tau2_generate(new_generate: Callable[..., Any]) -> None:
+    """Fan-out monkey patch for Tau2 when consumers did `from ... import generate`."""
+    global _MODEL_PATCHED, _ORIGINAL_TAU2_GENERATE
+    if _MODEL_PATCHED:
+        return
+
+    original = getattr(tau_llm_utils, 'generate', None)
+    if original is None:
+        raise RuntimeError('tau2.utils.llm_utils.generate not found')
+
+    # Replace on the source module first
+    if original is not new_generate:
+        tau_llm_utils.generate = new_generate
+
+    # Fan-out to all tau2 submodules that may hold a direct reference
+    for mod_name, mod in list(sys.modules.items()):
+        if not (isinstance(mod_name, str) and mod_name.startswith('tau2')):
+            continue
+        mod_obj = sys.modules.get(mod_name)
+        if mod_obj is None:
+            continue
+        try:
+            # Common direct binding: `generate` at module top-level
+            if getattr(mod_obj, 'generate', None) is original:
+                setattr(mod_obj, 'generate', new_generate)
+            # Replace any other aliases that equal the original function
+            for attr, val in list(vars(mod_obj).items()):
+                if val is original:
+                    setattr(mod_obj, attr, new_generate)
+        except Exception:
+            # Best-effort: ignore modules that disallow setattr or have weird loaders
+            pass
+
+    _ORIGINAL_TAU2_GENERATE = original
+    _MODEL_PATCHED = True
+
+
+@run_once
+def build_model(agent_model, adapter_instance):
+
+    user_server = get_model(
+        model=adapter_instance.user_model,
+        eval_type=EvalType.SERVICE,
+        base_url=adapter_instance.api_base,
+        api_key=adapter_instance.api_key,
+        config=GenerateConfig(**adapter_instance.generation_config)
+    )
+    MODEL_DICT['user'] = user_server
+    MODEL_DICT['agent'] = agent_model
+    # Patch Tau2 generate function for `from ... import generate` consumers
+    _patch_tau2_generate(patched_generate)
+
+
+def patched_generate(
+    model: str,
+    messages: List[Message],
+    tools: Optional[List[Tool]] = None,
+    tool_choice: Optional[Any] = None,
+    **kwargs: Any,
+) -> AssistantMessage:
+    """
+    Generate a response via an OpenAI-compatible /chat/completions call.
+
+    - Reads EVALSCOPE_API_KEY and EVALSCOPE_BASE_URL from environment.
+    - Uses OpenAI chat format for messages/tools/tool_choice.
+    - Returns Tau2 AssistantMessage with optional tool_calls and usage.
+    """
+    global MODEL_DICT
+
+    oa_model = MODEL_DICT.get(model)
+    assert oa_model is not None, f'Model {model} not found in MODEL_DICT'
+
+    oa_messages = to_litellm_messages(messages)
+    tools = [tool.openai_schema for tool in tools] if tools else None
+    if tools and tool_choice is None:
+        tool_choice = 'auto'
+
+    # Perform request
+    completion = oa_model.generate(
+        input=[dict_to_chat_message(msg) for msg in oa_messages],
+        tools=[ToolInfo.model_validate(tool['function']) for tool in tools] if tools else None,
+        tool_choice=tool_choice,
+    )
+
+    oa_choices = openai_chat_choices(completion.choices, include_reasoning=False)
+    choice = oa_choices[0]
+    msg = choice.message
+
+    tool_calls = msg.tool_calls or []
+    tool_calls = [
+        ToolCall(
+            id=tool_call.id,
+            name=tool_call.function.name,
+            arguments=json.loads(tool_call.function.arguments),
+        ) for tool_call in tool_calls
+    ]
+    tool_calls = tool_calls or None
+    usage = completion.usage.model_dump(exclude_none=True)
+
+    return AssistantMessage(
+        role='assistant',
+        content=msg.content,
+        tool_calls=tool_calls,
+        cost=None,
+        usage=usage,
+        raw_data=completion.model_dump(),
+    )
+
+
+def predict(model: Model, sample: Sample, adapter_instance) -> ModelOutput:
+
+    build_model(agent_model=model, adapter_instance=adapter_instance)
+
+    domain = sample.subset_key
+    task = Task.model_validate(sample.metadata)
+    res = run_task(
+        domain=domain,
+        task=task,
+        agent='llm_agent_gt',
+        user='user_simulator',
+        llm_agent='agent',
+        llm_user='user',
+    )
+
+    sample.metadata['task_result'] = res.reward_info.model_dump()
+    return ModelOutput(
+        model=model.name,
+        choices=[ChatCompletionChoice.from_content(res.model_dump_json(indent=2))],
+    )
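
`_patch_tau2_generate` exists because `from tau2.utils.llm_utils import generate` binds the function object directly in each importing module, so patching only the defining module would miss those call sites; the loop therefore rebinds every alias held by already-loaded tau2 modules. A self-contained illustration of that fan-out pattern, using invented module names rather than tau2 itself:

import sys
import types

# Stand-ins for a defining module and a consumer that did `from ... import generate`.
source = types.ModuleType('demo_pkg.llm_utils')
source.generate = lambda: 'original'
consumer = types.ModuleType('demo_pkg.agent')
consumer.generate = source.generate  # direct binding, as `from ... import generate` creates
sys.modules['demo_pkg.llm_utils'] = source
sys.modules['demo_pkg.agent'] = consumer


def patched():
    return 'patched'


# Fan-out: rebind on the defining module, then on every loaded module in the
# package that still holds a reference to the original function object.
original = source.generate
source.generate = patched
for name, mod in list(sys.modules.items()):
    if name.startswith('demo_pkg') and mod is not None:
        for attr, val in list(vars(mod).items()):
            if val is original:
                setattr(mod, attr, patched)

assert consumer.generate() == 'patched'  # the direct import now sees the patched function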