evalscope 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of evalscope might be problematic.
- evalscope/api/benchmark/__init__.py +1 -1
- evalscope/api/benchmark/adapters/__init__.py +2 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +7 -4
- evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
- evalscope/api/benchmark/adapters/text2image_adapter.py +12 -10
- evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
- evalscope/api/benchmark/benchmark.py +62 -2
- evalscope/api/benchmark/meta.py +9 -0
- evalscope/api/dataset/dataset.py +6 -6
- evalscope/api/dataset/loader.py +2 -1
- evalscope/api/evaluator/cache.py +24 -1
- evalscope/api/evaluator/evaluator.py +5 -0
- evalscope/api/evaluator/state.py +17 -1
- evalscope/api/messages/__init__.py +1 -0
- evalscope/api/messages/chat_message.py +52 -2
- evalscope/api/metric/scorer.py +15 -7
- evalscope/api/mixin/__init__.py +1 -1
- evalscope/api/mixin/llm_judge_mixin.py +2 -0
- evalscope/api/mixin/sandbox_mixin.py +204 -0
- evalscope/api/model/generate_config.py +1 -6
- evalscope/api/model/model.py +5 -2
- evalscope/api/tool/tool_info.py +1 -1
- evalscope/app/app.py +3 -0
- evalscope/app/ui/single_model.py +3 -3
- evalscope/app/utils/data_utils.py +7 -7
- evalscope/app/utils/env_utils.py +12 -0
- evalscope/app/utils/text_utils.py +14 -12
- evalscope/arguments.py +8 -4
- evalscope/backend/opencompass/backend_manager.py +0 -2
- evalscope/backend/rag_eval/utils/embedding.py +9 -1
- evalscope/benchmarks/ai2d/ai2d_adapter.py +53 -0
- evalscope/benchmarks/amc/amc_adapter.py +46 -0
- evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
- evalscope/benchmarks/bfcl/bfcl_adapter.py +142 -7
- evalscope/benchmarks/bfcl/generation.py +9 -9
- evalscope/benchmarks/ceval/ceval_adapter.py +1 -2
- evalscope/benchmarks/data_collection/data_collection_adapter.py +23 -19
- evalscope/benchmarks/drop/drop_adapter.py +1 -1
- evalscope/benchmarks/frames/frames_adapter.py +2 -1
- evalscope/benchmarks/general_arena/general_arena_adapter.py +5 -1
- evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
- evalscope/benchmarks/healthbench/utils.py +102 -0
- evalscope/benchmarks/humaneval/humaneval_adapter.py +19 -35
- evalscope/benchmarks/humaneval/utils.py +235 -0
- evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- evalscope/benchmarks/image_edit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +60 -37
- evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
- evalscope/benchmarks/math_500/math_500_adapter.py +0 -1
- evalscope/benchmarks/math_vista/__init__.py +0 -0
- evalscope/benchmarks/math_vista/math_vista_adapter.py +129 -0
- evalscope/benchmarks/minerva_math/__init__.py +0 -0
- evalscope/benchmarks/minerva_math/minerva_math_adapter.py +48 -0
- evalscope/benchmarks/mm_bench/__init__.py +0 -0
- evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
- evalscope/benchmarks/mm_star/__init__.py +0 -0
- evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
- evalscope/benchmarks/mmmu/__init__.py +0 -0
- evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
- evalscope/benchmarks/multi_if/__init__.py +0 -0
- evalscope/benchmarks/multi_if/ifeval.py +3354 -0
- evalscope/benchmarks/multi_if/metrics.py +120 -0
- evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +6 -5
- evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
- evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
- evalscope/benchmarks/olympiad_bench/utils.py +565 -0
- evalscope/benchmarks/omni_bench/__init__.py +0 -0
- evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
- evalscope/benchmarks/real_world_qa/__init__.py +0 -0
- evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
- evalscope/benchmarks/tau_bench/generation.py +1 -1
- evalscope/benchmarks/tau_bench/tau_bench_adapter.py +20 -19
- evalscope/benchmarks/text2image/__init__.py +0 -0
- evalscope/benchmarks/{aigc/t2i → text2image}/evalmuse_adapter.py +3 -1
- evalscope/benchmarks/{aigc/t2i → text2image}/genai_bench_adapter.py +2 -2
- evalscope/benchmarks/{aigc/t2i → text2image}/general_t2i_adapter.py +1 -1
- evalscope/benchmarks/{aigc/t2i → text2image}/hpdv2_adapter.py +7 -2
- evalscope/benchmarks/{aigc/t2i → text2image}/tifa_adapter.py +1 -0
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +1 -2
- evalscope/cli/start_app.py +7 -1
- evalscope/cli/start_perf.py +7 -1
- evalscope/config.py +96 -14
- evalscope/constants.py +11 -0
- evalscope/evaluator/evaluator.py +30 -10
- evalscope/metrics/llm_judge.py +19 -7
- evalscope/metrics/metric.py +27 -2
- evalscope/models/image_edit_model.py +125 -0
- evalscope/models/model_apis.py +22 -0
- evalscope/models/openai_compatible.py +3 -0
- evalscope/models/text2image_model.py +2 -2
- evalscope/models/utils/openai.py +8 -6
- evalscope/perf/arguments.py +2 -0
- evalscope/perf/benchmark.py +2 -0
- evalscope/perf/plugin/api/base.py +2 -2
- evalscope/perf/plugin/api/default_api.py +7 -7
- evalscope/perf/plugin/api/openai_api.py +83 -19
- evalscope/perf/plugin/datasets/flickr8k.py +2 -2
- evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
- evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
- evalscope/perf/utils/benchmark_util.py +7 -5
- evalscope/perf/utils/local_server.py +3 -0
- evalscope/report/__init__.py +0 -1
- evalscope/report/combinator.py +0 -25
- evalscope/report/generator.py +8 -87
- evalscope/report/report.py +8 -4
- evalscope/run.py +9 -5
- evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
- evalscope/utils/chat_service.py +1 -1
- evalscope/utils/function_utils.py +41 -0
- evalscope/utils/import_utils.py +73 -1
- evalscope/utils/io_utils.py +56 -7
- evalscope/utils/json_schema.py +23 -2
- evalscope/utils/logger.py +19 -0
- evalscope/utils/model_utils.py +4 -3
- evalscope/utils/multi_choices.py +23 -6
- evalscope/version.py +2 -2
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/METADATA +17 -24
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/RECORD +145 -103
- tests/benchmark/test_eval.py +80 -37
- tests/benchmark/test_image_edit.py +65 -0
- tests/benchmark/test_sandbox.py +81 -0
- tests/benchmark/test_vlm.py +137 -0
- tests/cli/test_all.py +83 -43
- tests/cli/test_collection.py +8 -5
- tests/cli/test_reasoning.py +81 -0
- tests/common.py +73 -0
- tests/perf/test_perf.py +44 -14
- tests/rag/test_clip_benchmark.py +0 -3
- evalscope/api/mixin/dataset_mixin.py +0 -105
- evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +0 -44
- tests/aigc/__init__.py +0 -1
- /evalscope/benchmarks/{aigc → ai2d}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/i2i → amc}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/t2i → healthbench}/__init__.py +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/LICENSE +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/WHEEL +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/entry_points.txt +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/top_level.txt +0 -0
- /tests/{aigc → benchmark}/test_t2i.py +0 -0

evalscope/benchmarks/healthbench/utils.py (new file, +102)

@@ -0,0 +1,102 @@
+import json
+import re
+from collections import defaultdict
+
+from evalscope.utils import get_logger
+
+logger = get_logger()
+
+
+def parse_json_to_dict(json_string: str) -> dict:
+    # Remove markdown-style ```json``` markers if present
+    json_cleaned = re.sub(r'^```json\s*|\s*```$', '', json_string.strip())
+
+    try:
+        return json.loads(json_cleaned)
+    except json.JSONDecodeError as e:
+        logger.warning(f'JSON decoding failed: {e}')
+        return {}
+
+
+class RubricItem:
+
+    def __init__(self, criterion: str, points: float, tags: list[str]):
+        self.criterion = criterion
+        self.points = points
+        self.tags = tags
+
+    def __str__(self):
+        return f'[{self.points}] {self.criterion}'
+
+    def to_dict(self):
+        return {
+            'criterion': self.criterion,
+            'points': self.points,
+            'tags': self.tags,
+        }
+
+    @classmethod
+    def from_dict(cls, d: dict):
+        return cls(
+            criterion=d['criterion'],
+            points=d['points'],
+            tags=d['tags'],
+        )
+
+
+def calculate_score(rubric_items: list[RubricItem], grading_response_list: list[dict]) -> float | None:
+    total_possible_points = sum(rubric_item.points for rubric_item in rubric_items if rubric_item.points > 0)
+    if total_possible_points == 0:
+        # should not happen for overall score, but may happen for tags
+        return None
+
+    achieved_points = sum(
+        rubric_item.points
+        for rubric_item, grading_response in zip(rubric_items, grading_response_list, strict=True)
+        if grading_response['criteria_met']
+    )
+    overall_score = achieved_points / total_possible_points
+    return overall_score
+
+
+def calculate_rubric_tag_scores(rubric_items: list[RubricItem], grading_response_list: list[dict]) -> dict[str, float]:
+    rubric_tag_items_grades = defaultdict(list)
+    axis_grades = defaultdict(list)
+    for rubric_item, grading_response in zip(rubric_items, grading_response_list):
+        curr_item_tags = set()  # Ensure no duplicates in a rubric item.
+        for tag in rubric_item.tags:
+            rubric_tag_items_grades[tag].append((rubric_item, grading_response))
+            assert tag not in curr_item_tags
+            curr_item_tags.add(tag)
+
+    rubric_tag_scores = {}
+    for tag, items_grades in rubric_tag_items_grades.items():
+        items, grades = zip(*items_grades)
+        score = calculate_score(items, grades)
+        if score is not None:  # implies at least one positive criterion
+            rubric_tag_scores[tag] = score
+            if tag.startswith('axis:'):
+                axis_grades[tag.split(':')[1]] = score
+
+    return rubric_tag_scores, axis_grades
+
+
+def construct_readable_explanation(rubric_items: list[RubricItem], grading_response_list: list[dict]) -> str:
+    rubric_items_with_grades = []
+    readable_explanation_list = []
+    for rubric_item, grading_response in zip(rubric_items, grading_response_list):
+        explanation = grading_response.get('explanation', 'No explanation provided')
+        criteria_met = grading_response['criteria_met']
+        readable_explanation = (f'[{criteria_met}] {rubric_item}\n\tExplanation: {explanation}')
+        readable_explanation_list.append(readable_explanation)
+        rubric_items_with_grades.append({
+            **rubric_item.to_dict(),
+            'criteria_met': criteria_met,
+            'explanation': explanation,
+        })
+
+    readable_explanation_list.sort(key=lambda x: x.startswith('[False]'), reverse=True)
+    readable_explanation_str = '\n\n'.join(readable_explanation_list)
+    readable_explanation_str = f'\n\n{readable_explanation_str}'
+
+    return readable_explanation_str
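A minimal usage sketch of the new HealthBench rubric helpers (illustrative only, not part of the diff): the rubric items and grader responses below are invented, while the function names and the {'criteria_met': ..., 'explanation': ...} shape come from the file above.

from evalscope.benchmarks.healthbench.utils import RubricItem, calculate_score, construct_readable_explanation

# Hypothetical rubric: one positive criterion, one penalty criterion.
rubric_items = [
    RubricItem(criterion='Recommends seeing a clinician for red-flag symptoms', points=5, tags=['axis:safety']),
    RubricItem(criterion='States fabricated statistics', points=-2, tags=['axis:accuracy']),
]
# Hypothetical judge output, one entry per rubric item.
grading_response_list = [
    {'criteria_met': True, 'explanation': 'The answer advises urgent medical review.'},
    {'criteria_met': False, 'explanation': 'No fabricated statistics found.'},
]

# Overall score = achieved points / total positive points; negative items only subtract when met.
print(calculate_score(rubric_items, grading_response_list))  # 5 / 5 = 1.0
print(construct_readable_explanation(rubric_items, grading_response_list))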

evalscope/benchmarks/humaneval/humaneval_adapter.py (+19 -35)

@@ -14,9 +14,6 @@ from evalscope.utils.logger import get_logger
 
 logger = get_logger()
 
-# Example:
-# {"task_id": "HumanEval/0", "prompt": "from typing import List\n\n\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\n \"\"\" Check if in given list of numbers, are any two numbers closer to each other than\n given threshold.\n >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\n False\n >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\n True\n \"\"\"\n", "entry_point": "has_close_elements", "canonical_solution": " for idx, elem in enumerate(numbers):\n for idx2, elem2 in enumerate(numbers):\n if idx != idx2:\n distance = abs(elem - elem2)\n if distance < threshold:\n return True\n\n return False\n", "test": "\n\nMETADATA = {\n 'author': 'jt',\n 'dataset': 'test'\n}\n\n\ndef check(candidate):\n assert candidate([1.0, 2.0, 3.9, 4.0, 5.0, 2.2], 0.3) == True\n assert candidate([1.0, 2.0, 3.9, 4.0, 5.0, 2.2], 0.05) == False\n assert candidate([1.0, 2.0, 5.9, 4.0, 5.0], 0.95) == True\n assert candidate([1.0, 2.0, 5.9, 4.0, 5.0], 0.8) == False\n assert candidate([1.0, 2.0, 3.0, 4.0, 5.0, 2.0], 0.1) == True\n assert candidate([1.1, 2.2, 3.1, 4.1, 5.1], 1.0) == True\n assert candidate([1.1, 2.2, 3.1, 4.1, 5.1], 0.5) == False\n\n"} # noqa
-
 
 @register_benchmark(
     BenchmarkMeta(

@@ -31,10 +28,7 @@ logger = get_logger()
         eval_split='test',
         prompt_template=
         'Read the following function signature and docstring, and fully implement the function described. Your response should only contain the code for this function.\n{question}',
-        extra_params={
-            'num_workers': 4,
-            'timeout': 4
-        },
+        review_timeout=4,
     )
 )
 class HumanevalAdapter(DefaultDataAdapter):

@@ -42,27 +36,6 @@ class HumanevalAdapter(DefaultDataAdapter):
     HumanEval adapter using the new data processing framework.
     """
 
-    def __init__(self, **kwargs):
-        try:
-            from human_eval.data import stream_jsonl, write_jsonl
-            from human_eval.evaluation import check_correctness
-        except ImportError:
-            raise ImportError(
-                'Please install human_eval:'
-                'https://github.com/openai/human-eval/tree/master#installation , '
-                'Note that you need to enable the execution code in the human_eval/execution.py first.'
-            )
-        super().__init__(**kwargs)
-
-        extra_params = kwargs.get('extra_params', {})
-        self.k = [1]
-        self.num_workers = extra_params.get('num_workers', 4)
-        self.timeout = extra_params.get('timeout', 4)
-
-        self.read_problems_func = stream_jsonl
-        self.write_jsonl_func = write_jsonl
-        self.eval_func = check_correctness
-
     def record_to_sample(self, record: Dict[str, Any]) -> Sample:
         """Convert a data record to a Sample object."""
         query = record['prompt']

@@ -94,18 +67,29 @@ class HumanevalAdapter(DefaultDataAdapter):
     def match_score(
         self, original_prediction: str, filtered_prediction: str, reference: str, task_state: TaskState
     ) -> Score:
+
         score = Score(
             extracted_prediction=filtered_prediction,
             prediction=original_prediction,
         )
-
-
-
-
-
+        problem = task_state.metadata
+        completion = filtered_prediction
+
+        if not self.use_sandbox:
+            from .utils import check_correctness
+
+            # Execute the code and check correctness
+            res = check_correctness(problem=problem, completion=completion, timeout=self.review_timeout)
+            passed = res['passed']
+        else:
+            check_program = (
+                problem['prompt'] + completion + '\n' + problem['test'] + '\n' + f"check({problem['entry_point']})"
+            )
+            res = self.execute_code_in_sandbox(code=check_program, timeout=self.review_timeout, language='python')
+            passed = res.get('status') == 'success'
+        # Set score values
         score.value = {'pass': passed}
-        score.
-        score.metadata = {'task_id': task_state.metadata['task_id'], 'timeout': self.timeout, 'execution_result': res}
+        score.metadata = {'task_id': problem['task_id'], 'timeout': self.review_timeout, 'execution_result': res}
         score.main_score_name = 'pass'
 
         return score
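For orientation (not part of the diff), a sketch of how the adapter's local, non-sandbox path checks a completion. The toy problem and names below are made up; the record shape, the prompt + completion + tests + check(entry_point) concatenation, and the check_correctness call mirror the code above.

from evalscope.benchmarks.humaneval.utils import check_correctness

if __name__ == '__main__':  # needed because check_correctness runs the check in a subprocess
    # Toy problem in the HumanEval record shape (prompt + hidden tests + entry point).
    problem = {
        'task_id': 'HumanEval/toy-add',
        'prompt': 'def add(a, b):\n    """Return the sum of a and b."""\n',
        'test': 'def check(candidate):\n    assert candidate(1, 2) == 3\n',
        'entry_point': 'add',
    }
    completion = '    return a + b\n'  # what the model is expected to produce

    # Executes prompt + completion + tests + "check(add)" under reliability_guard() and a timeout.
    res = check_correctness(problem=problem, completion=completion, timeout=4)
    print(res['passed'], res['result'])  # expected: True 'passed'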

evalscope/benchmarks/humaneval/utils.py (new file, +235)

@@ -0,0 +1,235 @@
+import contextlib
+import faulthandler
+import io
+import multiprocessing
+import os
+import platform
+import signal
+import tempfile
+from typing import Dict, Optional
+
+
+def unsafe_execute(problem: Dict, completion: str, timeout: float, result):
+    with create_tempdir():
+
+        # These system calls are needed when cleaning up tempdir.
+        import os
+        import shutil
+
+        rmtree = shutil.rmtree
+        rmdir = os.rmdir
+        chdir = os.chdir
+
+        # Disable functionalities that can make destructive changes to the test.
+        reliability_guard()
+
+        # Construct the check program and run it.
+        check_program = (
+            problem['prompt'] + completion + '\n' + problem['test'] + '\n' + f"check({problem['entry_point']})"
+        )
+
+        try:
+            exec_globals = {}
+            with swallow_io():
+                with time_limit(timeout):
+                    # WARNING
+                    # This program exists to execute untrusted model-generated code. Although
+                    # it is highly unlikely that model-generated code will do something overtly
+                    # malicious in response to this test suite, model-generated code may act
+                    # destructively due to a lack of model capability or alignment.
+                    # Users are strongly encouraged to sandbox this evaluation suite so that it
+                    # does not perform destructive actions on their host or network. For more
+                    # information on how OpenAI sandboxes its code, see the accompanying paper.
+                    # Once you have read this disclaimer and taken appropriate precautions,
+                    # uncomment the following line and proceed at your own risk:
+                    exec(check_program, exec_globals)
+            result.append('passed')
+        except TimeoutException:
+            result.append('timed out')
+        except BaseException as e:
+            result.append(f'failed: {e}')
+
+        # Needed for cleaning up.
+        shutil.rmtree = rmtree
+        os.rmdir = rmdir
+        os.chdir = chdir
+
+
+def check_correctness(problem: Dict, completion: str, timeout: float, completion_id: Optional[int] = None) -> Dict:
+    """
+    Evaluates the functional correctness of a completion by running the test
+    suite provided in the problem.
+
+    :param completion_id: an optional completion ID so we can match
+        the results later even if execution finishes asynchronously.
+    """
+
+    manager = multiprocessing.Manager()
+    result = manager.list()
+
+    p = multiprocessing.Process(target=unsafe_execute, args=(problem, completion, timeout, result))
+    p.start()
+    p.join(timeout=timeout + 1)
+    if p.is_alive():
+        p.kill()
+
+    if not result:
+        result.append('timed out')
+
+    return dict(
+        task_id=problem['task_id'],
+        passed=result[0] == 'passed',
+        result=result[0],
+        completion_id=completion_id,
+    )
+
+
+@contextlib.contextmanager
+def time_limit(seconds: float):
+
+    def signal_handler(signum, frame):
+        raise TimeoutException('Timed out!')
+
+    signal.setitimer(signal.ITIMER_REAL, seconds)
+    signal.signal(signal.SIGALRM, signal_handler)
+    try:
+        yield
+    finally:
+        signal.setitimer(signal.ITIMER_REAL, 0)
+
+
+@contextlib.contextmanager
+def swallow_io():
+    stream = WriteOnlyStringIO()
+    with contextlib.redirect_stdout(stream):
+        with contextlib.redirect_stderr(stream):
+            with redirect_stdin(stream):
+                yield
+
+
+@contextlib.contextmanager
+def create_tempdir():
+    with tempfile.TemporaryDirectory() as dirname:
+        with chdir(dirname):
+            yield dirname
+
+
+class TimeoutException(Exception):
+    pass
+
+
+class WriteOnlyStringIO(io.StringIO):
+    """StringIO that throws an exception when it's read from"""
+
+    def read(self, *args, **kwargs):
+        raise IOError
+
+    def readline(self, *args, **kwargs):
+        raise IOError
+
+    def readlines(self, *args, **kwargs):
+        raise IOError
+
+    def readable(self, *args, **kwargs):
+        """Returns True if the IO object can be read."""
+        return False
+
+
+class redirect_stdin(contextlib._RedirectStream):  # type: ignore
+    _stream = 'stdin'
+
+
+@contextlib.contextmanager
+def chdir(root):
+    if root == '.':
+        yield
+        return
+    cwd = os.getcwd()
+    os.chdir(root)
+    try:
+        yield
+    except BaseException as exc:
+        raise exc
+    finally:
+        os.chdir(cwd)
+
+
+def reliability_guard(maximum_memory_bytes: Optional[int] = None):
+    """
+    This disables various destructive functions and prevents the generated code
+    from interfering with the test (e.g. fork bomb, killing other processes,
+    removing filesystem files, etc.)
+
+    WARNING
+    This function is NOT a security sandbox. Untrusted code, including, model-
+    generated code, should not be blindly executed outside of one. See the
+    Codex paper for more information about OpenAI's code sandbox, and proceed
+    with caution.
+    """
+
+    if maximum_memory_bytes is not None:
+        import resource
+
+        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
+        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
+        if not platform.uname().system == 'Darwin':
+            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))
+
+    faulthandler.disable()
+
+    import builtins
+
+    builtins.exit = None
+    builtins.quit = None
+
+    import os
+
+    os.environ['OMP_NUM_THREADS'] = '1'
+
+    os.kill = None
+    os.system = None
+    os.putenv = None
+    os.remove = None
+    os.removedirs = None
+    os.rmdir = None
+    os.fchdir = None
+    os.setuid = None
+    os.fork = None
+    os.forkpty = None
+    os.killpg = None
+    os.rename = None
+    os.renames = None
+    os.truncate = None
+    os.replace = None
+    os.unlink = None
+    os.fchmod = None
+    os.fchown = None
+    os.chmod = None
+    os.chown = None
+    os.chroot = None
+    os.fchdir = None
+    os.lchflags = None
+    os.lchmod = None
+    os.lchown = None
+    os.getcwd = None
+    os.chdir = None
+
+    import shutil
+
+    shutil.rmtree = None
+    shutil.move = None
+    shutil.chown = None
+
+    import subprocess
+
+    subprocess.Popen = None  # type: ignore
+
+    __builtins__['help'] = None
+
+    import sys
+
+    sys.modules['ipdb'] = None
+    sys.modules['joblib'] = None
+    sys.modules['resource'] = None
+    sys.modules['psutil'] = None
+    sys.modules['tkinter'] = None
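A small sketch (not part of the diff) of the signal-based timeout guard that unsafe_execute relies on; it only works on the main thread of Unix-like systems because it uses SIGALRM.

import time

from evalscope.benchmarks.humaneval.utils import TimeoutException, time_limit

try:
    with time_limit(0.5):    # arms signal.ITIMER_REAL for 0.5 s
        time.sleep(2)        # stands in for a runaway completion
except TimeoutException:
    print('timed out')       # the same outcome unsafe_execute records as 'timed out'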

evalscope/benchmarks/ifeval/instructions_util.py (+2 -3)

@@ -14,7 +14,6 @@
 """Utility library of instructions."""
 
 import functools
-import immutabledict
 import nltk
 import os
 import random

@@ -1551,7 +1550,7 @@ WORD_LIST = [
 ]  # pylint: disable=line-too-long
 
 # ISO 639-1 codes to language names.
-LANGUAGE_CODES = immutabledict.immutabledict({
+LANGUAGE_CODES = {
     'en': 'English',
     'es': 'Spanish',
     'pt': 'Portuguese',

@@ -1582,7 +1581,7 @@ LANGUAGE_CODES = immutabledict.immutabledict({
     'pa': 'Punjabi',
     'ml': 'Malayalam',
     'fi': 'Finnish',
-})
+}
 
 _ALPHABETS = '([A-Za-z])'
 _PREFIXES = '(Mr|St|Mrs|Ms|Dr)[.]'
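The practical effect of dropping immutabledict is that LANGUAGE_CODES becomes an ordinary (mutable) dict; lookups are unchanged, e.g.:

from evalscope.benchmarks.ifeval.instructions_util import LANGUAGE_CODES

print(LANGUAGE_CODES['fi'])   # 'Finnish'
print(type(LANGUAGE_CODES))   # <class 'dict'> rather than an immutabledict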

evalscope/benchmarks/image_edit/gedit/gedit_adapter.py (new file, +138)

@@ -0,0 +1,138 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+import copy
+import os
+from typing import Any, Dict, List
+
+from evalscope.api.benchmark import BenchmarkMeta, ImageEditAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator.state import TaskState
+from evalscope.api.messages import ChatMessage, ChatMessageUser, Content, ContentImage, ContentText
+from evalscope.api.metric.scorer import Score
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import FileConstants, Tags
+from evalscope.utils.io_utils import bytes_to_base64
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+SUBSET_LIST = [
+    'background_change', 'color_alter', 'material_alter', 'motion_change', 'ps_human', 'style_change', 'subject-add',
+    'subject-remove', 'subject-replace', 'text_change', 'tone_transfer'
+]
+
+LANGUAGE_LIST = ['en', 'cn']
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='gedit',
+        pretty_name='GEdit-Bench',
+        dataset_id='stepfun-ai/GEdit-Bench',
+        description='GEdit-Bench Image Editing Benchmark, grounded in real-world '
+        'usages is developed to support more authentic and '
+        'comprehensive evaluation of image editing models.',
+        tags=[Tags.IMAGE_EDITING],
+        subset_list=SUBSET_LIST,
+        metric_list=['Semantic Consistency', 'Perceptual Similarity'],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='train',
+        extra_params={'language': f'# language of the instruction, choose from {LANGUAGE_LIST}, default to `en`'}
+    )
+)
+class GEditAdapter(ImageEditAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self.language = self.extra_params.get('language', 'en')
+        if self.language not in LANGUAGE_LIST:
+            logger.warning(f"Invalid language '{self.language}', fallback to 'en'")
+            self.language = 'en'
+        self.reformat_subset = True
+        self._use_llm_judge = True
+
+        self.load_prompt()
+
+    def load_prompt(self):
+        from . import vie_prompts
+
+        self.context = vie_prompts._context_no_delimit
+        self.SC_prompt = '\n'.join([
+            self.context, vie_prompts._prompts_0shot_two_image_edit_rule, vie_prompts._prompts_0shot_tie_rule_SC
+        ])
+        self.PQ_prompt = '\n'.join([self.context, vie_prompts._prompts_0shot_rule_PQ])
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        record = copy.deepcopy(record)
+
+        # Process instruction and image
+        instruction = record['instruction']
+        image_bytes = record['input_image']['bytes']
+        input_image = bytes_to_base64(image_bytes, format='png', add_header=True)
+        record['input_image'] = input_image
+        record[FileConstants.ID] = record['key']
+        del record['input_image_raw']
+
+        text_content = ContentText(text=instruction)
+        image_content = ContentImage(image=input_image)
+
+        messages: List[ChatMessage] = [
+            ChatMessageUser(content=[text_content, image_content]),
+        ]
+
+        return Sample(input=messages, subset_key=record['task_type'], metadata=record)
+
+    def sample_filter(self, sample: Sample) -> bool:
+        language = sample.metadata.get('instruction_language', 'en')
+        return super().sample_filter(sample) and language == self.language
+
+    def llm_match_score(self, original_prediction, filtered_prediction, reference, task_state: TaskState) -> Score:
+        import math
+
+        from .utils import mllm_output_to_dict
+
+        metadata = task_state.metadata
+        text_prompt = metadata['instruction']
+        input_image = metadata['input_image']  # base64 image
+        edited_image = metadata[FileConstants.IMAGE_PATH]  # local image path
+        _SC_prompt = self.SC_prompt.replace('<instruction>', text_prompt)
+
+        # Initialize the score object with prediction details
+        score = Score(
+            extracted_prediction=edited_image,
+            prediction=edited_image,
+        )
+
+        # Build prompts
+        SC_prompt_final = [
+            ChatMessageUser(
+                content=[
+                    ContentImage(image=input_image),
+                    ContentImage(image=edited_image),
+                    ContentText(text=_SC_prompt)
+                ]
+            )
+        ]
+        PQ_prompt_final = [
+            ChatMessageUser(content=[ContentImage(image=edited_image),
+                                     ContentText(text=self.PQ_prompt)])
+        ]
+
+        guess_if_cannot_parse = True
+        result_SC = self.llm_judge.judge(messages=SC_prompt_final)
+        result_PQ = self.llm_judge.judge(messages=PQ_prompt_final)
+        SC_dict = mllm_output_to_dict(result_SC, give_up_parsing=guess_if_cannot_parse)
+        PQ_dict = mllm_output_to_dict(result_PQ, give_up_parsing=guess_if_cannot_parse)
+
+        SC_score = min(SC_dict['score'])
+        PQ_score = min(PQ_dict['score'])
+        O_score = math.sqrt(SC_score * PQ_score)
+
+        score.value = {'Semantic Consistency': SC_score, 'Perceptual Quality': PQ_score, 'Overall': O_score}
+        score.main_score_name = 'Overall'
+        score.metadata = {
+            'SC_dict': SC_dict,
+            'PQ_dict': PQ_dict,
+        }
+        return score
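To make the scoring in llm_match_score concrete (illustrative only, values invented): the parsed judge output carries a list of scores per aspect, the minimum is kept, and the overall score is the geometric mean of semantic consistency and perceptual quality.

import math

# Hypothetical parsed judge outputs (shape as returned by mllm_output_to_dict).
SC_dict = {'score': [7, 8]}   # semantic consistency of the edit
PQ_dict = {'score': [6]}      # perceptual quality of the edited image

SC_score = min(SC_dict['score'])
PQ_score = min(PQ_dict['score'])
O_score = math.sqrt(SC_score * PQ_score)   # geometric mean, as in GEditAdapter

print({'Semantic Consistency': SC_score, 'Perceptual Quality': PQ_score, 'Overall': round(O_score, 2)})
# {'Semantic Consistency': 7, 'Perceptual Quality': 6, 'Overall': 6.48}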