evalscope 0.16.3__py3-none-any.whl → 0.17.1__py3-none-any.whl

This diff compares the contents of publicly released package versions as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release.


Files changed (157)
  1. evalscope/app/app.py +9 -762
  2. evalscope/app/constants.py +1 -0
  3. evalscope/app/ui/__init__.py +20 -0
  4. evalscope/app/ui/app_ui.py +52 -0
  5. evalscope/app/ui/multi_model.py +323 -0
  6. evalscope/app/ui/sidebar.py +42 -0
  7. evalscope/app/ui/single_model.py +202 -0
  8. evalscope/app/ui/visualization.py +36 -0
  9. evalscope/app/utils/data_utils.py +178 -0
  10. evalscope/app/utils/localization.py +221 -0
  11. evalscope/app/utils/text_utils.py +119 -0
  12. evalscope/app/utils/visualization.py +91 -0
  13. evalscope/backend/opencompass/backend_manager.py +2 -1
  14. evalscope/backend/rag_eval/backend_manager.py +2 -1
  15. evalscope/backend/rag_eval/utils/embedding.py +1 -1
  16. evalscope/backend/vlm_eval_kit/backend_manager.py +4 -1
  17. evalscope/benchmarks/__init__.py +15 -1
  18. evalscope/benchmarks/aime/aime24_adapter.py +2 -1
  19. evalscope/benchmarks/aime/aime25_adapter.py +2 -1
  20. evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +1 -1
  21. evalscope/benchmarks/arc/arc_adapter.py +1 -1
  22. evalscope/benchmarks/arena_hard/arena_hard_adapter.py +1 -1
  23. evalscope/benchmarks/arena_hard/utils.py +0 -12
  24. evalscope/benchmarks/bfcl/bfcl_adapter.py +1 -1
  25. evalscope/benchmarks/ceval/ceval_adapter.py +5 -16
  26. evalscope/benchmarks/cmmlu/cmmlu_adapter.py +9 -21
  27. evalscope/benchmarks/competition_math/competition_math_adapter.py +2 -1
  28. evalscope/benchmarks/data_adapter.py +29 -9
  29. evalscope/benchmarks/general_arena/__init__.py +0 -0
  30. evalscope/benchmarks/general_arena/general_arena_adapter.py +411 -0
  31. evalscope/benchmarks/general_arena/utils.py +226 -0
  32. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +3 -2
  33. evalscope/benchmarks/general_qa/general_qa_adapter.py +44 -30
  34. evalscope/benchmarks/hellaswag/hellaswag_adapter.py +1 -1
  35. evalscope/benchmarks/hle/__init__.py +0 -0
  36. evalscope/benchmarks/hle/hle_adapter.py +118 -0
  37. evalscope/benchmarks/humaneval/humaneval_adapter.py +5 -21
  38. evalscope/benchmarks/ifeval/ifeval_adapter.py +2 -4
  39. evalscope/benchmarks/iquiz/iquiz_adapter.py +1 -1
  40. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +0 -6
  41. evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +1 -1
  42. evalscope/benchmarks/math_500/math_500_adapter.py +2 -1
  43. evalscope/benchmarks/mmlu/mmlu_adapter.py +2 -2
  44. evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +1 -1
  45. evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +1 -1
  46. evalscope/benchmarks/musr/musr_adapter.py +1 -1
  47. evalscope/benchmarks/race/race_adapter.py +1 -1
  48. evalscope/benchmarks/tau_bench/__init__.py +0 -0
  49. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +110 -0
  50. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +7 -1
  51. evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +9 -4
  52. evalscope/benchmarks/utils.py +2 -2
  53. evalscope/benchmarks/winogrande/winogrande_adapter.py +1 -1
  54. evalscope/config.py +8 -123
  55. evalscope/constants.py +5 -21
  56. evalscope/evaluator/__init__.py +1 -1
  57. evalscope/evaluator/evaluator.py +20 -15
  58. evalscope/metrics/__init__.py +9 -1
  59. evalscope/{utils/utils.py → metrics/completion_parsers.py} +71 -176
  60. evalscope/metrics/llm_judge.py +106 -20
  61. evalscope/metrics/metrics.py +20 -8
  62. evalscope/models/__init__.py +4 -8
  63. evalscope/models/adapters/__init__.py +4 -9
  64. evalscope/models/adapters/base_adapter.py +4 -0
  65. evalscope/models/adapters/bfcl_adapter.py +2 -0
  66. evalscope/models/adapters/chat_adapter.py +3 -0
  67. evalscope/models/adapters/choice_adapter.py +4 -0
  68. evalscope/models/adapters/custom_adapter.py +7 -3
  69. evalscope/models/adapters/server_adapter.py +4 -2
  70. evalscope/models/adapters/t2i_adapter.py +3 -0
  71. evalscope/models/adapters/tau_bench_adapter.py +189 -0
  72. evalscope/models/custom/dummy_model.py +3 -3
  73. evalscope/models/register.py +0 -14
  74. evalscope/perf/arguments.py +15 -16
  75. evalscope/perf/benchmark.py +38 -39
  76. evalscope/perf/http_client.py +30 -86
  77. evalscope/perf/main.py +3 -3
  78. evalscope/perf/plugin/__init__.py +3 -2
  79. evalscope/perf/plugin/api/__init__.py +4 -3
  80. evalscope/perf/plugin/api/base.py +22 -4
  81. evalscope/perf/plugin/api/custom_api.py +212 -55
  82. evalscope/perf/plugin/api/dashscope_api.py +4 -10
  83. evalscope/perf/plugin/api/default_api.py +105 -0
  84. evalscope/perf/plugin/api/openai_api.py +17 -19
  85. evalscope/perf/plugin/datasets/__init__.py +10 -7
  86. evalscope/perf/plugin/datasets/base.py +22 -1
  87. evalscope/perf/plugin/datasets/custom.py +2 -1
  88. evalscope/perf/plugin/datasets/flickr8k.py +4 -27
  89. evalscope/perf/plugin/datasets/kontext_bench.py +28 -0
  90. evalscope/perf/plugin/datasets/line_by_line.py +2 -1
  91. evalscope/perf/plugin/datasets/longalpaca.py +2 -1
  92. evalscope/perf/plugin/datasets/openqa.py +2 -1
  93. evalscope/perf/plugin/datasets/random_dataset.py +15 -4
  94. evalscope/perf/plugin/datasets/random_vl_dataset.py +80 -0
  95. evalscope/perf/plugin/registry.py +36 -16
  96. evalscope/perf/utils/analysis_result.py +24 -23
  97. evalscope/perf/utils/benchmark_util.py +14 -20
  98. evalscope/perf/utils/db_util.py +79 -61
  99. evalscope/report/__init__.py +1 -1
  100. evalscope/report/utils.py +34 -15
  101. evalscope/run.py +1 -1
  102. evalscope/summarizer.py +1 -2
  103. evalscope/utils/__init__.py +63 -2
  104. evalscope/utils/argument_utils.py +64 -0
  105. evalscope/utils/import_utils.py +16 -0
  106. evalscope/utils/io_utils.py +55 -4
  107. evalscope/utils/model_utils.py +37 -1
  108. evalscope/version.py +2 -2
  109. {evalscope-0.16.3.dist-info → evalscope-0.17.1.dist-info}/METADATA +100 -51
  110. {evalscope-0.16.3.dist-info → evalscope-0.17.1.dist-info}/RECORD +129 -133
  111. tests/aigc/test_t2i.py +1 -1
  112. tests/cli/test_all.py +68 -4
  113. tests/cli/test_collection.py +1 -1
  114. tests/cli/test_custom.py +261 -0
  115. tests/cli/test_run.py +34 -70
  116. tests/perf/test_perf.py +31 -4
  117. tests/rag/test_clip_benchmark.py +2 -1
  118. tests/rag/test_mteb.py +3 -1
  119. tests/rag/test_ragas.py +3 -1
  120. tests/swift/test_run_swift_eval.py +2 -1
  121. tests/swift/test_run_swift_vlm_eval.py +2 -1
  122. tests/swift/test_run_swift_vlm_jugde_eval.py +2 -1
  123. tests/utils.py +13 -0
  124. tests/vlm/test_vlmeval.py +8 -2
  125. evalscope/evaluator/rating_eval.py +0 -157
  126. evalscope/evaluator/reviewer/__init__.py +0 -1
  127. evalscope/evaluator/reviewer/auto_reviewer.py +0 -391
  128. evalscope/models/model.py +0 -189
  129. evalscope/registry/__init__.py +0 -1
  130. evalscope/registry/config/cfg_arena.yaml +0 -77
  131. evalscope/registry/config/cfg_arena_zhihu.yaml +0 -63
  132. evalscope/registry/config/cfg_pairwise_baseline.yaml +0 -83
  133. evalscope/registry/config/cfg_single.yaml +0 -78
  134. evalscope/registry/data/prompt_template/lmsys_v2.jsonl +0 -8
  135. evalscope/registry/data/prompt_template/prompt_templates.jsonl +0 -8
  136. evalscope/registry/data/qa_browser/battle.jsonl +0 -634
  137. evalscope/registry/data/qa_browser/category_mapping.yaml +0 -10
  138. evalscope/registry/data/question.jsonl +0 -80
  139. evalscope/registry/tasks/arc.yaml +0 -28
  140. evalscope/registry/tasks/bbh.yaml +0 -26
  141. evalscope/registry/tasks/bbh_mini.yaml +0 -26
  142. evalscope/registry/tasks/ceval.yaml +0 -27
  143. evalscope/registry/tasks/ceval_mini.yaml +0 -26
  144. evalscope/registry/tasks/cmmlu.yaml +0 -27
  145. evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +0 -28
  146. evalscope/registry/tasks/general_qa.yaml +0 -27
  147. evalscope/registry/tasks/gsm8k.yaml +0 -29
  148. evalscope/registry/tasks/mmlu.yaml +0 -29
  149. evalscope/registry/tasks/mmlu_mini.yaml +0 -27
  150. evalscope/run_arena.py +0 -202
  151. evalscope/utils/arena_utils.py +0 -217
  152. evalscope/utils/completion_parsers.py +0 -82
  153. /evalscope/{utils → benchmarks}/filters.py +0 -0
  154. {evalscope-0.16.3.dist-info → evalscope-0.17.1.dist-info}/LICENSE +0 -0
  155. {evalscope-0.16.3.dist-info → evalscope-0.17.1.dist-info}/WHEEL +0 -0
  156. {evalscope-0.16.3.dist-info → evalscope-0.17.1.dist-info}/entry_points.txt +0 -0
  157. {evalscope-0.16.3.dist-info → evalscope-0.17.1.dist-info}/top_level.txt +0 -0
evalscope/evaluator/rating_eval.py (deleted)
@@ -1,157 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
-
- import pandas as pd
- import pyarrow as pa
- from typing import List, Union
-
- from evalscope.constants import MetricMembers
- from evalscope.utils.arena_utils import compute_elo
- from evalscope.utils.io_utils import jsonl_to_list
- from evalscope.utils.logger import get_logger
-
- logger = get_logger()
-
- DEFAULT_COLUMNS_MAPPING = {'model_a': 'model_a', 'model_b': 'model_b', 'win': 'win', 'tstamp': 'ts', 'language': 'lang'}
-
-
- class RatingEvaluate(object):
-
-     def __init__(self, metrics: list, baseline_model: str = None, **kwargs):
-         self.metrics = metrics
-         self.baseline_model = baseline_model
-         self.kwargs = kwargs
-
-     def preprocess(self, raw_data_df: pd.DataFrame, **kwargs):
-
-         # Get battles data
-         raw_data_df = raw_data_df.sort_values(ascending=True, by=['tstamp'])
-         battles = raw_data_df[raw_data_df['anony']].reset_index(drop=True)
-
-         return battles
-
-     def compute_elo_rating(self, raw_data):
-         battles = self.preprocess(raw_data_df=raw_data)
-         elo_ratings = compute_elo(battles)
-         col_model = 'Model'
-         col_elo_rating = 'Elo_Rating'
-         elo_ratings_res = pd.DataFrame([[n, elo_ratings[n]] for n in elo_ratings.keys()],
-                                        columns=[col_model, col_elo_rating]).sort_values(
-                                            col_elo_rating, ascending=False).reset_index(drop=True)
-         elo_ratings_res = elo_ratings_res.round({col_elo_rating: 1})
-         return elo_ratings_res
-
-     def get_single_pairwise_rating(self, row: pd.Series):
-         tie = False
-         if 'win' in row:
-             win = row['win']
-             if win == 'tie':
-                 tie = True
-             else:
-                 if win == 'model_a':
-                     winner = row['model_a']
-                     loser = row['model_b']
-                 else:
-                     winner = row['model_b']
-                     loser = row['model_a']
-         elif 'win_1' in row:
-             win_1 = row['win_1']
-             win_2 = row['win_2']
-             if win_1 == 'tie' or win_1 != win_2:
-                 tie = True
-             else:
-                 if win_1 == 'model_a':
-                     winner = row['model_a']
-                     loser = row['model_b']
-                 else:
-                     winner = row['model_b']
-                     loser = row['model_a']
-         else:
-             raise ValueError('Unsupported data format')
-
-         if tie:
-             return [{
-                 'model': row['model_a'],
-                 'win': 0,
-                 'loss': 0,
-                 'tie': 1
-             }, {
-                 'model': row['model_b'],
-                 'win': 0,
-                 'loss': 0,
-                 'tie': 1
-             }]
-         else:
-             return [{'model': winner, 'win': 1, 'loss': 0, 'tie': 0}, {'model': loser, 'win': 0, 'loss': 1, 'tie': 0}]
-
-     def compute_pairwise_rating(self, raw_data):
-         df_all = self.preprocess(raw_data_df=raw_data)
-         model_list = (df_all['model_a'].unique().tolist() + df_all['model_b'].unique().tolist())
-         model_list = list(set(model_list))
-
-         list_res = []
-         # traverse df row by row
-         for index, row in df_all.iterrows():
-             if self.baseline_model is not None:
-                 if self.baseline_model not in [row['model_a'], row['model_b']]:
-                     logger.warning(
-                         f'One of the models in the battle should be the baseline model: {self.baseline_model}')
-                     continue
-             rating = self.get_single_pairwise_rating(row)
-             list_res = list_res + rating
-
-         df = pd.DataFrame(list_res)
-         df = df.groupby(['model']).sum()
-
-         # remove baseline model
-         if self.baseline_model is not None:
-             df = df[df.index != self.baseline_model]
-         # add win rate
-         df['win_rate'] = df['win'] / (df['win'] + df['loss'] + df['tie'])
-         df['loss_rate'] = df['loss'] / (df['win'] + df['loss'] + df['tie'])
-         df['tie_rate'] = df['tie'] / (df['win'] + df['loss'] + df['tie'])
-         return df.sort_values(by='win_rate', ascending=False)
-
-     def compute_score_rating(self, raw_data):
-         df_all = self.preprocess(raw_data_df=raw_data)
-         df = df_all[['model', 'score']]
-
-         df_score = df.groupby(['model']).mean()
-         return df_score.sort_values(by='score', ascending=False)
-
-     def eval_samples(self, data_list: list):
-         res_all = []
-
-         raw_data: pd.DataFrame = None
-
-         if len(data_list) > 0:
-             raw_data = data_list[0]
-
-         for metric in self.metrics:
-
-             if metric == MetricMembers.ELO:
-                 res = self.compute_elo_rating(raw_data)
-                 res_all.append(res)
-
-             elif metric == MetricMembers.PAIRWISE:
-                 res = self.compute_pairwise_rating(raw_data)
-                 res_all.append(res)
-
-             elif metric == MetricMembers.SCORE:
-                 res = self.compute_score_rating(raw_data)
-                 res_all.append(res)
-
-             else:
-                 raise ValueError(f'Unsupported metric: {metric}')
-
-         return res_all
-
-     def run(self, prompts: Union[str, list], **kwargs) -> List[pd.DataFrame]:
-         """
-         Load the predicted samples and evaluate them in arena mode.
-         """
-         # raw_data = pd.read_json(prompts)
-         data_list = jsonl_to_list(prompts)
-         data_df = pa.Table.from_pylist(data_list).to_pandas()
-         res_list = self.eval_samples([data_df])
-
-         return res_list
evalscope/evaluator/reviewer/__init__.py (deleted)
@@ -1 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
evalscope/evaluator/reviewer/auto_reviewer.py (deleted)
@@ -1,391 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- # flake8: noqa
-
- import os
- import pandas as pd
- import random
- import sys
- import time
- from abc import ABC, abstractmethod
- from functools import partial
- from typing import Any, List, Tuple
-
- from evalscope.constants import ArenaMode, EvalConfigKeys, FnCompletionParser, PositionBiasMitigation
- from evalscope.models import OpenAIModel
- from evalscope.utils import completion_parsers, random_seeded_choice
- from evalscope.utils.arena_utils import get_battle_pairs, merge_ques_ans, shuffle_pairwise_preferences
- from evalscope.utils.io_utils import dump_jsonl_data, jsonl_to_list
- from evalscope.utils.logger import get_logger
-
- logger = get_logger()
-
-
- class BaseReviewer(ABC):
-
-     def __init__(self, **kwargs):
-         ...
-
-     @abstractmethod
-     def run(self, *args, **kwargs):
-         """
-         Run pairwise battles with given models.
-         """
-         raise NotImplementedError('run() method must be implemented in your subclass.')
-
-
- class AutoReviewerGpt4(BaseReviewer):
-     """
-     Auto-review target answers(models) pairwise with GPT-4.
-
-     Args:
-         prompt_file: path to prompt templates file.
-         answer_file_list: list of paths to answer files.
-         review_result_file: path to review result file.
-         reviewer_args: config for reviewer(GPT-4).
-
-     Examples:
-         >>> from evalscope.evaluator.reviewer.auto_reviewer import AutoReviewerGpt4
-         >>> input_kwargs = dict(prompt_file='/path/to/prompt_file.jsonl', answer_file_list=['/path/to/ans1_file.jsonl',
-                                 '/path/to/ans2_file.jsonl', ...], review_file='/path/to/review_file.jsonl',
-                                 reviewer_args={'model': 'gpt-4', 'mode': 'single'})
-         >>> auto_reviewer = AutoReviewerGpt4(**input_kwargs)
-         >>> auto_reviewer.run(dry_run=False)
-     """
-
-     MODEL_NAME = 'gpt-4'
-
-     def __init__(self,
-                  prompt_file: str,
-                  answer_file_list: list,
-                  review_result_file: str,
-                  baseline_file: str = None,
-                  reference_file: str = None,
-                  reviewer_args: dict = None,
-                  cache_file: str = None,
-                  **kwargs):
-         super().__init__(**kwargs)
-
-         self.review_result_file = review_result_file
-         self.prompt_list = jsonl_to_list(prompt_file)
-         self.answer_list = [jsonl_to_list(answer_file) for answer_file in answer_file_list]
-         self.reference_list = jsonl_to_list(reference_file) if reference_file else []
-         self.cache_list = jsonl_to_list(cache_file) if cache_file and os.path.isfile(cache_file) else []
-
-         self.reviewer_args = reviewer_args if reviewer_args \
-             else self._get_default_args()
-
-         self.review_mode = self.reviewer_args.pop('mode', ArenaMode.PAIRWISE)
-         if self.review_mode == ArenaMode.PAIRWISE_BASELINE:
-             assert baseline_file is not None, f'baseline_file is required for {ArenaMode.PAIRWISE_BASELINE} mode'
-             self.answer_list.append(jsonl_to_list(baseline_file))
-             self.baseline_idx = len(self.answer_list) - 1
-
-         self.position_bias_mitigation = self.reviewer_args.pop(EvalConfigKeys.POSITION_BIAS_MITIGATION,
-                                                                PositionBiasMitigation.NONE)
-         if self.position_bias_mitigation == PositionBiasMitigation.RANDOMIZE_ORDER:
-             self.random_seed = self.reviewer_args.pop(EvalConfigKeys.RANDOM_SEED, 123)
-
-         fn_completion_parser = self.reviewer_args.pop(EvalConfigKeys.FN_COMPLETION_PARSER,
-                                                       FnCompletionParser.LMSYS_PARSER)
-         completion_parser_kwargs = self.reviewer_args.pop(EvalConfigKeys.COMPLETION_PARSER_KWARGS, {})
-         if isinstance(fn_completion_parser, str):
-             fn_completion_parser = getattr(completion_parsers, fn_completion_parser)
-
-         self.fn_completion_parser = partial(fn_completion_parser, **completion_parser_kwargs)
-         self.gpt_predictor = OpenAIModel(model_cfg=self.reviewer_args)
-
-     @staticmethod
-     def _get_default_args():
-         return dict(
-             model=AutoReviewerGpt4.MODEL_NAME,
-             mode=ArenaMode.PAIRWISE,
-             position_bias_mitigation=PositionBiasMitigation.NONE,
-             fn_completion_parser=FnCompletionParser.LMSYS_PARSER,
-             random_seed=123,
-         )
-
-     @staticmethod
-     def gen_prompt(prompts_list: list,
-                    type: str,
-                    category: str,
-                    ques: str,
-                    ans1: str,
-                    ans2: str = None,
-                    ans_ref: str = None):
-         """
-         Generate prompt for Auto-reviewer with GPT-4.
-         """
-
-         # Default to general category (idx 0)
-         target_prompt_dict = prompts_list[0]
-         for item in prompts_list:
-             is_category_match = category in item['category'] if isinstance(item['category'],
-                                                                            list) else item['category'] == category
-             is_type_match = item.get('type', ArenaMode.PAIRWISE) == type
-             if is_category_match and is_type_match:
-                 target_prompt_dict = item
-                 break
-             elif is_type_match and target_prompt_dict.get('type', ArenaMode.PAIRWISE) != type:
-                 target_prompt_dict = item  # fallback to type match
-
-         sys_prompt = target_prompt_dict['system_prompt']
-         prompt_template = target_prompt_dict['prompt_template']
-         defaults = target_prompt_dict.get('defaults', dict({}))
-         output_format = target_prompt_dict.get('output_format', '[[rating_a,rating_b]]')
-
-         if type == ArenaMode.SINGLE:
-             user_prompt = prompt_template.format(question=ques, answer=ans1, ref_answer_1=ans_ref, **defaults)
-         else:
-             user_prompt = prompt_template.format(
-                 question=ques, answer_a=ans1, answer_b=ans2, ref_answer_1=ans_ref, **defaults)
-
-         return sys_prompt, user_prompt, output_format
-
-     def get_review_cache(self, model_a, model_b, question) -> list:
-         if model_b:
-             cache_hit = next((r for r in self.cache_list
-                               if r['model_a'] == model_a and r['model_b'] == model_b and r['question'] == question),
-                              None)
-         else:
-             cache_hit = next((r for r in self.cache_list if r['model'] == model_a and r['question'] == question), None)
-         return cache_hit
-
-     def get_review_pair(self, item: List[dict], dry_run=False, **kwargs) -> dict:
-
-         question = item[0]['text']
-         question_id = item[0]['question_id']
-         category = item[0]['category']
-
-         model_a = item[0]['model_id']
-         model_b = item[1]['model_id']
-
-         ans1 = item[0]['answer']
-         ans2 = item[1]['answer']
-
-         review_cache = self.get_review_cache(model_a, model_b, question)
-         if review_cache:
-             logger.info(f'Use cache review for {model_a} vs {model_b} ...')
-             return review_cache
-
-         if self.position_bias_mitigation == PositionBiasMitigation.SWAP_POSITION:
-             review_text_1, winner_1, score_1 = self._get_review_pair(
-                 model_a, model_b, question, category, ans1, ans2, dry_run=dry_run, **kwargs)
-             review_text_2, winner_2, score_2 = self._get_review_pair(
-                 model_b, model_a, question, category, ans2, ans1, dry_run=dry_run, **kwargs)
-
-             # Swap winner for the second round.
-             if winner_2 == 'model_a':
-                 winner_2 = 'model_b'
-             elif winner_2 == 'model_b':
-                 winner_2 = 'model_a'
-             review_result = dict(
-                 model_a=model_a,
-                 model_b=model_b,
-                 win_1=winner_1,
-                 win_2=winner_2,
-                 anony=True,
-                 tstamp=time.time(),
-                 language=item[0].get('language', 'NA'),
-                 question_id=question_id,
-                 category=category,
-                 question=question,
-                 review_text_1=review_text_1,
-                 review_text_2=review_text_2)
-         else:
-             review_text, winner, scores = self._get_review_pair(
-                 model_a, model_b, question, category, ans1, ans2, dry_run=dry_run, **kwargs)
-
-             if dry_run:
-                 scores = [round(random.random(), 1), round(random.random(), 1)]
-                 winner = 'model_a' if scores[0] > scores[1] else 'model_b'
-
-             review_result = dict(
-                 model_a=model_a,
-                 model_b=model_b,
-                 scores=scores,
-                 win=winner,
-                 anony=True,
-                 tstamp=time.time(),
-                 language=item[0].get('language', 'NA'),
-                 question_id=question_id,
-                 category=category,
-                 question=question,
-                 review_text=review_text)
-         return review_result
-
-     def get_review_single(self, row: List[dict], dry_run: bool = False, **kwargs):
-         item = row[0]
-         model = item['model_id']
-         question = item['text']
-         question_id = item['question_id']
-         category = item['category']
-         answer = item['answer']
-
-         review_cache = self.get_review_cache(model, None, question)
-         if review_cache:
-             logger.info(f'Use cache review for {model} ...')
-             return review_cache
-
-         review_text, score = self._get_review_single(model, question, category, answer, dry_run=dry_run, **kwargs)
-
-         review_result = dict(
-             model=model,
-             score=score,
-             anony=True,
-             tstamp=time.time(),
-             language=item.get('language', 'NA'),
-             question_id=question_id,
-             category=category,
-             question=question,
-             review_text=review_text)
-         return review_result
-
-     def _get_review_pair(self,
-                          model_a,
-                          model_b,
-                          question,
-                          category,
-                          ans1,
-                          ans2,
-                          dry_run=False,
-                          **kwargs) -> Tuple[str, Any]:
-         input_msg = dict(ques=question, category=category, ans1=ans1, ans2=ans2)
-
-         if self.reference_list:
-             ans_ref = next((ref for ref in self.reference_list if ref.get('text') == question), None)
-             assert ans_ref['answer']
-             input_msg['ans_ref'] = ans_ref['answer']
-
-         sys_prompt, user_prompt, output_format = AutoReviewerGpt4.gen_prompt(
-             prompts_list=self.prompt_list,
-             type=ArenaMode.SINGLE if self.review_mode == ArenaMode.SINGLE else ArenaMode.PAIRWISE,
-             **input_msg)
-
-         if dry_run:
-             review_text = self._get_reviewer_prediction_dummy(sys_prompt, user_prompt, output_format)
-         else:
-             review_text = self._get_reviewer_prediction(sys_prompt, user_prompt, **kwargs)
-
-         result = self.fn_completion_parser(review_text, output_format=output_format)
-         if not isinstance(result, tuple):
-             result = (result, None)
-         return review_text, *result
-
-     def _get_review_single(self, model, question, category, answer, dry_run=False, **kwargs) -> Tuple[str, Any]:
-         input_msg = dict(ques=question, category=category, ans1=answer)
-
-         if self.reference_list:
-             ans_ref = next((ref for ref in self.reference_list if ref.get('text') == question), None)
-             assert ans_ref['answer']
-             input_msg['ans_ref'] = ans_ref['answer']
-
-         sys_prompt, user_prompt, output_format = AutoReviewerGpt4.gen_prompt(
-             prompts_list=self.prompt_list,
-             type=ArenaMode.SINGLE if self.review_mode == ArenaMode.SINGLE else ArenaMode.PAIRWISE,
-             **input_msg)
-
-         if dry_run:
-             review_text = self._get_reviewer_prediction_dummy(sys_prompt, user_prompt, output_format)
-         else:
-             review_text = self._get_reviewer_prediction(sys_prompt, user_prompt, **kwargs)
-
-         score = self.fn_completion_parser(review_text, output_format)
-         return review_text, score
-
-     def _get_reviewer_prediction_dummy(self, sys_prompt: str, user_prompt: str, output_format) -> str:
-         logger.info('Get dummy scores for input prompt ...')
-         if output_format == '[[rating]]':
-             return f'[[{round(random.random(), 2)}]]'
-         if output_format == '[[rating_a,rating_b]]':
-             ans_list = [round(random.random(), 2), round(random.random(), 2)]
-             return ' '.join(str(element) for element in ans_list)
-         elif output_format == '[[A]]':
-             return random.choice(['[[A]]', '[[B]]', '[[C]]'])
-         elif output_format == "[{'model': <model-name>, 'rank': <model-rank>}, " \
-                 "{'model': <model-name>, 'rank': <model-rank>}]":
-             rank_1 = random.choice([1, 2])
-             rank_2 = 1 if rank_1 == 2 else 2
-             return f"[{{'model': 'model_a', 'rank': {rank_1}}}, {{'model': 'model_b', 'rank': {rank_2}}}]"
-
-     def _get_reviewer_prediction(self, sys_prompt: str, user_prompt: str, **kwargs) -> str:
-
-         input_msg = dict(sys_prompt=sys_prompt, user_prompt=user_prompt)
-
-         # Call GPT-4 predictor
-         # TODO: Add more reviewer implementation
-         resp = self.gpt_predictor.predict(model_id=self.MODEL_NAME, inputs=input_msg, **kwargs)
-
-         if resp is None or len(resp) == 0:
-             logger.error(f'Failed to get response from {self.MODEL_NAME} for input: {input_msg}')
-
-         ans_text = resp['ans_text']
-         # model_id = resp['model_id']
-
-         return ans_text
-
-     def run(self, dry_run: bool = False, **kwargs):
-         print(f'Run battles for models with dry_run={dry_run} ...')
-
-         os.makedirs(os.path.dirname(self.review_result_file), exist_ok=True)
-
-         if len(self.answer_list) == 0:
-             raise Exception('The answer list cannot be empty.')
-
-         merge_key = 'question_id'
-         merged_ans_df = merge_ques_ans(self.answer_list, merge_key=merge_key)
-         merged_ans_df = merged_ans_df.drop(columns=['question_id'])
-
-         if self.review_mode == ArenaMode.PAIRWISE:
-             battle_pairs = get_battle_pairs(merged_ans_df.columns)
-         elif self.review_mode == ArenaMode.PAIRWISE_BASELINE:
-             battle_pairs = get_battle_pairs(merged_ans_df.columns, self.baseline_idx)
-         elif self.review_mode == ArenaMode.SINGLE:
-             battle_pairs = [(col, ) for col in merged_ans_df.columns]
-         else:
-             raise Exception(f'NotSupported mode: {self.review_mode}')
-
-         res_list = []
-         for t in battle_pairs:
-             pair_df = merged_ans_df[list(t)]
-             if self.position_bias_mitigation == PositionBiasMitigation.RANDOMIZE_ORDER:
-                 pair_df.columns = ['output_1', 'output_2']
-                 pair_df['is_switched_outputs'] = pair_df.apply(
-                     lambda x: random_seeded_choice(
-                         seed='is_switched_outputs' + x[0]['text'] + str(self.random_seed),
-                         choices=[False, True],
-                     ),
-                     axis=1,
-                 )
-                 pair_df = shuffle_pairwise_preferences(pair_df, pair_df['is_switched_outputs'])
-
-             for index, row in pair_df.iterrows():
-                 row_result = self.get_review_pair(row.to_list(), dry_run=dry_run, **kwargs) \
-                     if self.review_mode != ArenaMode.SINGLE \
-                     else self.get_review_single(row.to_list(), dry_run=dry_run, **kwargs)
-                 res_list.append(row_result)
-                 dump_jsonl_data(res_list, self.review_result_file)
-
-
- if __name__ == '__main__':
-     from pathlib import Path
-
-     work_path = os.path.join(Path(__file__).absolute().parent, '../../../')
-     prompt_template_path = os.path.join(work_path, 'evalscope/registry/data/prompt_template/prompt_templates.jsonl')
-     answer_file_list = [
-         os.path.join(work_path, 'outputs/arena/default/answers/answer_chatglm2-6b.jsonl'),
-         os.path.join(work_path, 'outputs/arena/default/answers/answer_llama2-7b.jsonl')
-     ]
-     review_result_file_path = os.path.join(work_path, 'outputs/arena/default/reviews/review_gpt4.jsonl')
-
-     input_kwargs = dict(
-         prompt_file=prompt_template_path,
-         answer_file_list=answer_file_list,
-         review_result_file=review_result_file_path,
-         reviewer_args={},
-         baseline_file='',
-         reference_file='',
-         cache_file='',
-     )
-
-     auto_reviewer = AutoReviewerGpt4(**input_kwargs)
-     auto_reviewer.run(dry_run=True)