eval_framework-0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eval_framework/__init__.py +7 -0
- eval_framework/base_config.py +36 -0
- eval_framework/context/__init__.py +0 -0
- eval_framework/context/determined.py +170 -0
- eval_framework/context/eval.py +114 -0
- eval_framework/context/local.py +52 -0
- eval_framework/evaluation_generator.py +231 -0
- eval_framework/exceptions.py +2 -0
- eval_framework/external/ifeval_impl/README.md +5 -0
- eval_framework/external/ifeval_impl/instructions.py +1523 -0
- eval_framework/external/ifeval_impl/instructions_registry.py +161 -0
- eval_framework/external/ifeval_impl/instructions_util.py +1689 -0
- eval_framework/external/ifeval_impl/utils.py +135 -0
- eval_framework/llm/__init__.py +0 -0
- eval_framework/llm/aleph_alpha.py +323 -0
- eval_framework/llm/base.py +58 -0
- eval_framework/llm/huggingface.py +332 -0
- eval_framework/llm/mistral.py +73 -0
- eval_framework/llm/models.py +16 -0
- eval_framework/llm/openai.py +205 -0
- eval_framework/llm/vllm.py +438 -0
- eval_framework/logger.py +3 -0
- eval_framework/main.py +187 -0
- eval_framework/metrics/__init__.py +0 -0
- eval_framework/metrics/base.py +40 -0
- eval_framework/metrics/completion/__init__.py +1 -0
- eval_framework/metrics/completion/accuracy_completion.py +16 -0
- eval_framework/metrics/completion/bleu.py +76 -0
- eval_framework/metrics/completion/chrf.py +62 -0
- eval_framework/metrics/completion/code_assertion.py +44 -0
- eval_framework/metrics/completion/code_execution_pass_at_one.py +126 -0
- eval_framework/metrics/completion/comet.py +56 -0
- eval_framework/metrics/completion/concordance_index.py +38 -0
- eval_framework/metrics/completion/csv_format.py +102 -0
- eval_framework/metrics/completion/cwe_accuracy.py +49 -0
- eval_framework/metrics/completion/exponential_similarity.py +65 -0
- eval_framework/metrics/completion/f1.py +42 -0
- eval_framework/metrics/completion/format_checker.py +56 -0
- eval_framework/metrics/completion/grid_difference.py +77 -0
- eval_framework/metrics/completion/ifeval.py +73 -0
- eval_framework/metrics/completion/json_format.py +171 -0
- eval_framework/metrics/completion/language_checker.py +74 -0
- eval_framework/metrics/completion/length_control.py +83 -0
- eval_framework/metrics/completion/math_reasoning_completion.py +303 -0
- eval_framework/metrics/completion/niah_accuracy.py +163 -0
- eval_framework/metrics/completion/placeholder_checker.py +27 -0
- eval_framework/metrics/completion/repetition.py +88 -0
- eval_framework/metrics/completion/rouge_1.py +35 -0
- eval_framework/metrics/completion/rouge_2.py +45 -0
- eval_framework/metrics/completion/rouge_geometric_mean.py +36 -0
- eval_framework/metrics/completion/rouge_l.py +52 -0
- eval_framework/metrics/completion/struct_eval_metrics.py +248 -0
- eval_framework/metrics/completion/ter.py +67 -0
- eval_framework/metrics/completion/text_counter.py +182 -0
- eval_framework/metrics/efficiency/__init__.py +0 -0
- eval_framework/metrics/efficiency/bytes_per_sequence_position.py +48 -0
- eval_framework/metrics/llm/__init__.py +0 -0
- eval_framework/metrics/llm/base.py +8 -0
- eval_framework/metrics/llm/graders/chatbot_style_grader.py +92 -0
- eval_framework/metrics/llm/graders/comparison_grader.py +146 -0
- eval_framework/metrics/llm/graders/conciseness_grader.py +93 -0
- eval_framework/metrics/llm/graders/contains_names_grader.py +71 -0
- eval_framework/metrics/llm/graders/format_correctness_grader.py +109 -0
- eval_framework/metrics/llm/graders/instruction_grader.py +177 -0
- eval_framework/metrics/llm/graders/language.py +56 -0
- eval_framework/metrics/llm/graders/long_context_grader.py +72 -0
- eval_framework/metrics/llm/graders/models.py +74 -0
- eval_framework/metrics/llm/graders/refusal_grader.py +57 -0
- eval_framework/metrics/llm/graders/sql_quality_grader.py +145 -0
- eval_framework/metrics/llm/graders/summary_world_knowledge_grader.py +103 -0
- eval_framework/metrics/llm/llm_judge_chatbot_style.py +36 -0
- eval_framework/metrics/llm/llm_judge_completion_accuracy.py +39 -0
- eval_framework/metrics/llm/llm_judge_conciseness.py +37 -0
- eval_framework/metrics/llm/llm_judge_contains_names.py +36 -0
- eval_framework/metrics/llm/llm_judge_format_correctness.py +43 -0
- eval_framework/metrics/llm/llm_judge_instruction.py +58 -0
- eval_framework/metrics/llm/llm_judge_mtbench_pair.py +205 -0
- eval_framework/metrics/llm/llm_judge_mtbench_single.py +188 -0
- eval_framework/metrics/llm/llm_judge_refusal.py +35 -0
- eval_framework/metrics/llm/llm_judge_sql.py +394 -0
- eval_framework/metrics/llm/llm_judge_world_knowledge.py +37 -0
- eval_framework/metrics/loglikelihood/__init__.py +0 -0
- eval_framework/metrics/loglikelihood/accuracy_loglikelihood.py +51 -0
- eval_framework/metrics/loglikelihood/probability_mass.py +56 -0
- eval_framework/py.typed +0 -0
- eval_framework/response_generator.py +416 -0
- eval_framework/result_processors/__init__.py +0 -0
- eval_framework/result_processors/base.py +74 -0
- eval_framework/result_processors/hf_processor.py +87 -0
- eval_framework/result_processors/result_processor.py +129 -0
- eval_framework/run.py +314 -0
- eval_framework/run_direct.py +42 -0
- eval_framework/shared/types.py +227 -0
- eval_framework/tasks/__init__.py +6 -0
- eval_framework/tasks/base.py +314 -0
- eval_framework/tasks/benchmarks/__init__.py +0 -0
- eval_framework/tasks/benchmarks/arc.py +46 -0
- eval_framework/tasks/benchmarks/arc_de.py +46 -0
- eval_framework/tasks/benchmarks/arc_fi.py +46 -0
- eval_framework/tasks/benchmarks/belebele.py +60 -0
- eval_framework/tasks/benchmarks/bigcodebench.py +155 -0
- eval_framework/tasks/benchmarks/casehold.py +47 -0
- eval_framework/tasks/benchmarks/chembench.py +85 -0
- eval_framework/tasks/benchmarks/copa.py +39 -0
- eval_framework/tasks/benchmarks/duc.py +91 -0
- eval_framework/tasks/benchmarks/flores200.py +62 -0
- eval_framework/tasks/benchmarks/flores_plus.py +84 -0
- eval_framework/tasks/benchmarks/gpqa.py +177 -0
- eval_framework/tasks/benchmarks/gsm8k.py +148 -0
- eval_framework/tasks/benchmarks/hellaswag.py +44 -0
- eval_framework/tasks/benchmarks/hellaswag_de.py +52 -0
- eval_framework/tasks/benchmarks/humaneval.py +97 -0
- eval_framework/tasks/benchmarks/ifeval.py +78 -0
- eval_framework/tasks/benchmarks/include.py +119 -0
- eval_framework/tasks/benchmarks/infinitebench.py +302 -0
- eval_framework/tasks/benchmarks/math_reasoning.py +569 -0
- eval_framework/tasks/benchmarks/mbpp.py +192 -0
- eval_framework/tasks/benchmarks/mmlu.py +190 -0
- eval_framework/tasks/benchmarks/mmlu_de.py +109 -0
- eval_framework/tasks/benchmarks/mmlu_pro.py +139 -0
- eval_framework/tasks/benchmarks/mmmlu.py +529 -0
- eval_framework/tasks/benchmarks/openbookqa.py +37 -0
- eval_framework/tasks/benchmarks/opengptx_eu20.py +363 -0
- eval_framework/tasks/benchmarks/pawsx.py +65 -0
- eval_framework/tasks/benchmarks/piqa.py +39 -0
- eval_framework/tasks/benchmarks/quality.py +56 -0
- eval_framework/tasks/benchmarks/sciq.py +44 -0
- eval_framework/tasks/benchmarks/sphyr.py +75 -0
- eval_framework/tasks/benchmarks/squad.py +89 -0
- eval_framework/tasks/benchmarks/struct_eval.py +110 -0
- eval_framework/tasks/benchmarks/tablebench.py +117 -0
- eval_framework/tasks/benchmarks/triviaqa.py +42 -0
- eval_framework/tasks/benchmarks/truthfulqa.py +95 -0
- eval_framework/tasks/benchmarks/winogender.py +39 -0
- eval_framework/tasks/benchmarks/winogrande.py +44 -0
- eval_framework/tasks/benchmarks/winox.py +57 -0
- eval_framework/tasks/benchmarks/wmt.py +160 -0
- eval_framework/tasks/benchmarks/zero_scrolls.py +197 -0
- eval_framework/tasks/eval_config.py +112 -0
- eval_framework/tasks/perturbation.py +83 -0
- eval_framework/tasks/registry.py +186 -0
- eval_framework/tasks/task_loader.py +80 -0
- eval_framework/tasks/task_names.py +138 -0
- eval_framework/tasks/utils.py +578 -0
- eval_framework/utils/constants.py +9 -0
- eval_framework/utils/generate_task_docs.py +229 -0
- eval_framework/utils/helpers.py +3 -0
- eval_framework/utils/logging.py +50 -0
- eval_framework/utils/packaging.py +52 -0
- eval_framework-0.2.0.dist-info/METADATA +514 -0
- eval_framework-0.2.0.dist-info/RECORD +161 -0
- eval_framework-0.2.0.dist-info/WHEEL +4 -0
- eval_framework-0.2.0.dist-info/entry_points.txt +3 -0
- template_formatting/README.md +83 -0
- template_formatting/__init__.py +0 -0
- template_formatting/formatter.py +536 -0
- template_formatting/mistral_formatter.py +159 -0
- template_formatting/py.typed +0 -0
- template_formatting/tests/test_formatter_eval.py +408 -0
- template_formatting/tests/test_formatter_scaling.py +253 -0
- template_formatting/tests/test_mistral_formatter.py +136 -0

eval_framework/utils/generate_task_docs.py
@@ -0,0 +1,229 @@

```python
import argparse
import inspect
import os
import re
from pathlib import Path

import tqdm

from eval_framework.tasks.registry import get_task, registered_task_names
from eval_framework.tasks.task_loader import load_extra_tasks
from template_formatting.formatter import BaseFormatter, ConcatFormatter, Llama3Formatter

DEFAULT_OUTPUT_DOCS_DIRECTORY = Path("docs/tasks")

EXCLUDED_TASKS = [
    "SQUAD",  # fails to load the dataset: Feature type 'List' not found
    "SQUAD2",  # fails to load the dataset: Feature type 'List' not found
]


def parse_args(cli_args: list[str] | None = None) -> argparse.Namespace:
    """Parse command line arguments for the script."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--add-prompt-examples",
        action="store_true",
        default=False,
        required=False,
        help="If set, example prompts for each of the formatters will be added to the generated docs.",
    )
    parser.add_argument(
        "--exclude-tasks",
        nargs="*",
        type=str,
        default=[],
        required=False,
        help="List of task names to exclude from documentation generation.",
    )
    parser.add_argument(
        "--extra-task-modules",
        nargs="*",
        type=str,
        default=[],
        required=False,
        help="List of files and folders containing additional task definitions.",
    )
    parser.add_argument(
        "--formatter",
        nargs="*",
        type=str,
        required=False,
        default=["ConcatFormatter", "Llama3Formatter"],
        help="Specify which formatters to use for formatting the task samples. "
        "If not explicitly specified, the default formatters will be used.",
    )
    parser.add_argument(
        "--only-tasks",
        nargs="*",
        type=str,
        default=[],
        required=False,
        help="List of task names to generate documentation for. If empty, all tasks will be processed.",
    )
    return parser.parse_args(args=cli_args)


def generate_docs_for_task(
    output_docs_directory: Path, task_name: str, formatters: list[BaseFormatter], add_prompt_examples: bool
) -> None:
    """Generate documentation for a specific task."""
    task_class = get_task(task_name)

    try:
        num_fewshot = 1
        task = task_class(num_fewshot=num_fewshot)
    except Exception:
        try:
            num_fewshot = 0
            task = task_class(num_fewshot=num_fewshot)
        except Exception as e:
            print(f"Failed to instantiate task {task_name}: {e}")
            return

    with open(f"{output_docs_directory}/{task_name}.md", "w") as f:
        f.write(f"# {task_name}\n\n")
        dataset_path = getattr(task, "DATASET_PATH", None)
        http_path = f"https://huggingface.co/datasets/{dataset_path}" if dataset_path else None

        f.write("````\n")  # fence with 4 backticks because some prompts contain code blocks with 3 backticks
        f.write(f"NAME = {task_name}".strip() + "\n")
        if hasattr(task, "DATASET_PATH"):
            f.write(f"DATASET_PATH = {task.DATASET_PATH}".strip() + "\n")
        if hasattr(task, "SAMPLE_SPLIT"):
            f.write(f"SAMPLE_SPLIT = {task.SAMPLE_SPLIT}".strip() + "\n")
        if hasattr(task, "FEWSHOT_SPLIT"):
            f.write(f"FEWSHOT_SPLIT = {task.FEWSHOT_SPLIT}".strip() + "\n")
        if hasattr(task, "RESPONSE_TYPE"):
            f.write(f"RESPONSE_TYPE = {task.RESPONSE_TYPE.name}".strip() + "\n")
        if hasattr(task, "METRICS"):
            metrics_list = [f"{m.__name__}" for m in task.METRICS]
            f.write(f"METRICS = [{', '.join(metrics_list)}]".strip() + "\n")
        if hasattr(task, "SUBJECTS"):
            f.write(f"SUBJECTS = {repr(task.SUBJECTS)}".strip() + "\n")
        if hasattr(task, "LANGUAGE"):
            f.write(f"LANGUAGE = {repr(task.LANGUAGE)}".strip() + "\n")
        f.write("````\n\n")

        f.write(f"- Module: [{task_class.__module__}]({task_class.__module__})\n\n")
        task_file = re.sub(".*eval_framework/", "src/eval_framework/", inspect.getfile(task_class))
        f.write(f"- File: [{task_file}](../../{task_file})\n\n")

        if http_path:
            f.write(f"- Link to dataset: [{http_path}]({http_path})\n\n")

        if not add_prompt_examples:
            f.write(
                f"More detailed documentation, with prompt examples and ground truth completions, can be generated "
                f"with `uv run python src/eval_framework/utils/generate_task_docs.py --add-prompt-examples "
                f'--only-tasks "{task_name}"`.\n'
            )
        else:
            s = next(iter(task.iterate_samples(1)))
            for split in task.dataset:
                f.write(f"- `{split}` has {len(task.dataset[split])} samples\n\n")

            for formatter in formatters:
                f.write(f"## Example prompt with {formatter.__class__.__name__} ({num_fewshot}-shot)\n\n")
                formatted_sample = formatter.format(s.messages, output_mode="string")
                f.write("````\n")
                f.write(f'"{formatted_sample}"')
                f.write("\n````\n\n")

            f.write("## Possible completions:\n\n")
            f.write("````\n")
            if s.possible_completions:
                for item in (
                    s.possible_completions if isinstance(s.possible_completions, list) else [s.possible_completions]
                ):
                    f.write(f'- "{item}"\n')
            else:
                f.write("None\n")
            f.write("````\n\n")

            f.write("## Ground truth:\n\n")
            f.write("````\n")
            if s.ground_truth:
                for item in s.ground_truth if isinstance(s.ground_truth, list) else [s.ground_truth]:
                    f.write(f'- "{item}"\n')
            else:
                f.write("None\n")
            f.write("````\n")


def generate_readme_list(output_docs_directory: Path) -> None:
    """Generate a README file listing all tasks based on the list of files present in the target directory."""
    with open(f"{output_docs_directory}/README.md", "w") as f:
        f.write(
            "# Task documentation\n\n"
            "This directory contains the generated documentation for all tasks available in `eval-framework`.\n\n"
            "The documentation can be generated or updated with "
            "`uv run python src/eval_framework/utils/generate_task_docs.py`.\n\n"
            "NOTE: This is an automatically generated file. Any manual modifications will not be preserved when "
            "the file is next updated.\n\n"
        )

        f.write("## List of tasks\n\n")
        # sort files alphabetically and ignore README.md
        for file in sorted(os.listdir(output_docs_directory)):
            if file.endswith(".md") and file != "README.md":
                task_name = file[:-3]
                f.write(f"- [{task_name}]({task_name}.md)\n")


def generate_all_docs(args: argparse.Namespace, output_docs_directory: Path) -> None:
    # Load extra tasks if specified
    if args.extra_task_modules:
        print(f"Loading extra tasks from: {args.extra_task_modules}")
        load_extra_tasks(args.extra_task_modules)

    # List the tasks to process
    filtered_tasks = []
    for task_name in registered_task_names():
        if args.only_tasks and task_name not in args.only_tasks:
            continue
        if task_name in args.exclude_tasks or task_name in EXCLUDED_TASKS:
            continue
        filtered_tasks.append(task_name)
    filtered_tasks.sort()

    print(f"Found {len(filtered_tasks)} tasks to process: {', '.join(filtered_tasks)}")

    # List the formatters to use
    supported_formatters = {f.__class__.__name__: f for f in [ConcatFormatter(), Llama3Formatter()]}
    formatters = []
    for f in args.formatter:
        if f in supported_formatters:
            formatters.append(supported_formatters[f])
        else:
            raise ValueError(f"Unsupported formatter: {f}")

    # Create the output directory if it does not exist
    os.makedirs(output_docs_directory, exist_ok=True)

    for task_name in tqdm.tqdm(filtered_tasks, desc="Generating documentation for tasks"):
        try:
            generate_docs_for_task(
                output_docs_directory=output_docs_directory,
                task_name=task_name,
                formatters=formatters,
                add_prompt_examples=args.add_prompt_examples,
            )
        except Exception as e:
            print("---")
            print(f"Failed generating documentation for task {task_name}: {e}")
            file_path = f"{output_docs_directory}/{task_name}.md"
            if os.path.exists(file_path):
                os.remove(file_path)
            print("---")

    generate_readme_list(output_docs_directory=output_docs_directory)


if __name__ == "__main__":
    print("Generating task documentation...")
    args = parse_args()
    generate_all_docs(args, output_docs_directory=DEFAULT_OUTPUT_DOCS_DIRECTORY)
```
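Besides the CLI entry point shown in the `__main__` block, the generator can be driven programmatically. Below is a minimal sketch of that, not code from the package: the task name "MMLU" and the output path are illustrative assumptions; real task names come from `registered_task_names()`.

```python
# Hypothetical usage sketch: build docs for one task, with prompt examples.
# "MMLU" is an illustrative task name, not necessarily a registered one.
from pathlib import Path

from eval_framework.utils.generate_task_docs import generate_all_docs, parse_args

args = parse_args(["--add-prompt-examples", "--only-tasks", "MMLU"])
generate_all_docs(args, output_docs_directory=Path("docs/tasks"))
```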
eval_framework/utils/logging.py
@@ -0,0 +1,50 @@

```python
import logging
import sys
from pathlib import Path


def setup_logging(
    output_dir: Path | None = None, log_level: int = logging.INFO, log_filename: str = "evaluation.log"
) -> logging.Logger:
    """
    Set up centralized logging configuration for the entire framework.

    Args:
        output_dir: Directory to save log files. If None, logs only to console.
        log_level: Logging level (default: INFO)
        log_filename: Name of the log file

    Returns:
        Configured root logger
    """
    # Create formatter
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S")

    # Get root logger and clear any existing handlers
    root_logger = logging.getLogger()
    root_logger.handlers.clear()
    root_logger.setLevel(log_level)

    # Console handler (always present)
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(log_level)
    console_handler.setFormatter(formatter)
    root_logger.addHandler(console_handler)

    # File handler (if output directory provided)
    if output_dir:
        output_dir.mkdir(parents=True, exist_ok=True)
        log_file = output_dir / log_filename

        file_handler = logging.FileHandler(log_file, mode="w")
        file_handler.setLevel(log_level)
        file_handler.setFormatter(formatter)
        root_logger.addHandler(file_handler)

        root_logger.info(f"Logging configured. File: {log_file}")
    else:
        root_logger.info("Logging configured (console only)")

    print(f"Output directory for logs: {output_dir if output_dir else 'None'}")

    return root_logger
```
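A minimal usage sketch for `setup_logging`, assuming the import path `eval_framework.utils.logging` implied by the manifest; the output directory is an illustrative value:

```python
# Hypothetical usage sketch: log to stdout and to outputs/run-01/evaluation.log.
import logging
from pathlib import Path

from eval_framework.utils.logging import setup_logging

logger = setup_logging(output_dir=Path("outputs/run-01"), log_level=logging.DEBUG)
logger.info("evaluation started")  # emitted by both the console and file handlers
```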
eval_framework/utils/packaging.py
@@ -0,0 +1,52 @@

```python
import importlib
import importlib.metadata
from collections.abc import Sequence

from packaging.requirements import Requirement
from packaging.version import Version


def validate_package_extras(extras: str | Sequence[str], /, *, package: str = "eval_framework") -> Sequence[str]:
    """Validate that the specified extras are valid for the given package."""
    if isinstance(extras, str):
        extras = [extras]

    metadata = importlib.metadata.metadata(package)
    package_extras = set(metadata.get_all("Provides-Extra") or [])
    for extra in extras:
        if extra not in package_extras:
            raise ValueError(f"Invalid extra: {extra}. Options are {package_extras}")

    return extras


def extra_requires(extra: str, /, *, package: str = "eval_framework") -> list[str]:
    """Return a list of requirements for the specified extra."""
    validate_package_extras(extra, package=package)
    dist = importlib.metadata.distribution(package)
    requires = dist.requires or []
    extra_str = f"extra == '{extra}'"
    return [r.split(";")[0].strip() for r in requires if r.endswith(extra_str)]


def _dependency_satisfied(dep: str, /) -> bool:
    """Return True if the dependency string is satisfied.

    Args:
        dep: A dependency string, for example "torch~=2.0".
    """
    try:
        req = Requirement(dep)
        dist = importlib.metadata.distribution(req.name)
        installed_version = Version(dist.version)
        return installed_version in req.specifier
    except Exception:  # includes importlib.metadata.PackageNotFoundError
        return False


def is_extra_installed(extra: str, package: str = "eval_framework") -> bool:
    """Return `True` if all dependencies for a given extra are installed."""
    for req in extra_requires(extra, package=package):
        if not _dependency_satisfied(req):
            return False
    return True
```