isa-model 0.0.1.tar.gz → 0.0.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. {isa_model-0.0.1/isa_model.egg-info → isa_model-0.0.3}/PKG-INFO +1 -1
  2. isa_model-0.0.3/isa_model/eval/__init__.py +56 -0
  3. isa_model-0.0.3/isa_model/eval/benchmarks.py +469 -0
  4. isa_model-0.0.3/isa_model/eval/factory.py +582 -0
  5. isa_model-0.0.3/isa_model/eval/metrics.py +628 -0
  6. isa_model-0.0.3/isa_model/training/__init__.py +44 -0
  7. isa_model-0.0.3/isa_model/training/factory.py +393 -0
  8. {isa_model-0.0.1 → isa_model-0.0.3/isa_model.egg-info}/PKG-INFO +1 -1
  9. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model.egg-info/SOURCES.txt +6 -0
  10. {isa_model-0.0.1 → isa_model-0.0.3}/pyproject.toml +1 -1
  11. {isa_model-0.0.1 → isa_model-0.0.3}/LICENSE +0 -0
  12. {isa_model-0.0.1 → isa_model-0.0.3}/MANIFEST.in +0 -0
  13. {isa_model-0.0.1 → isa_model-0.0.3}/README.md +0 -0
  14. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/__init__.py +0 -0
  15. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/core/model_manager.py +0 -0
  16. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/core/model_registry.py +0 -0
  17. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/core/model_router.py +0 -0
  18. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/core/model_storage.py +0 -0
  19. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/core/model_version.py +0 -0
  20. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/core/resource_manager.py +0 -0
  21. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/core/storage/hf_storage.py +0 -0
  22. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/core/storage/local_storage.py +0 -0
  23. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/core/storage/minio_storage.py +0 -0
  24. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/deployment/gpu_fp16_ds8/models/deepseek_r1/1/model.py +0 -0
  25. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/deployment/gpu_fp16_ds8/scripts/download_model.py +0 -0
  26. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/deployment/gpu_int8_ds8/app/server.py +0 -0
  27. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/deployment/gpu_int8_ds8/scripts/test_client.py +0 -0
  28. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/deployment/gpu_int8_ds8/scripts/test_client_os.py +0 -0
  29. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/__init__.py +0 -0
  30. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/adapter/unified_api.py +0 -0
  31. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/ai_factory.py +0 -0
  32. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/base.py +0 -0
  33. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/providers/__init__.py +0 -0
  34. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/providers/base_provider.py +0 -0
  35. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/providers/model_cache_manager.py +0 -0
  36. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/providers/ollama_provider.py +0 -0
  37. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/providers/openai_provider.py +0 -0
  38. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/providers/replicate_provider.py +0 -0
  39. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/providers/triton_provider.py +0 -0
  40. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/__init__.py +0 -0
  41. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/audio/base_stt_service.py +0 -0
  42. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/audio/base_tts_service.py +0 -0
  43. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/audio/openai_tts_service.py +0 -0
  44. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/base_service.py +0 -0
  45. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/embedding/ollama_embed_service.py +0 -0
  46. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/embedding/openai_embed_service.py +0 -0
  47. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/llm/__init__.py +0 -0
  48. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/llm/base_llm_service.py +0 -0
  49. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/llm/ollama_llm_service.py +0 -0
  50. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/llm/openai_llm_service.py +0 -0
  51. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/others/table_transformer_service.py +0 -0
  52. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/vision/__init__.py +0 -0
  53. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/vision/helpers/image_utils.py +0 -0
  54. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/vision/helpers/text_splitter.py +0 -0
  55. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/vision/ollama_vision_service.py +0 -0
  56. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/vision/openai_vision_service.py +0 -0
  57. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/services/vision/replicate_image_gen_service.py +0 -0
  58. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/utils/conversion/bge_rerank_convert.py +0 -0
  59. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/utils/conversion/onnx_converter.py +0 -0
  60. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/inference/utils/conversion/torch_converter.py +0 -0
  61. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/scripts/inference_tracker.py +0 -0
  62. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/scripts/mlflow_manager.py +0 -0
  63. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/scripts/model_registry.py +0 -0
  64. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/scripts/start_mlflow.py +0 -0
  65. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/scripts/training_tracker.py +0 -0
  66. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/engine/llama_factory/__init__.py +0 -0
  67. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/engine/llama_factory/config.py +0 -0
  68. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/engine/llama_factory/data_adapter.py +0 -0
  69. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/engine/llama_factory/examples/__init__.py +0 -0
  70. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/engine/llama_factory/examples/finetune_with_tracking.py +0 -0
  71. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/engine/llama_factory/examples/rlhf_with_tracking.py +0 -0
  72. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/engine/llama_factory/factory.py +0 -0
  73. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/engine/llama_factory/rl.py +0 -0
  74. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/engine/llama_factory/trainer.py +0 -0
  75. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/image_model/configs/create_config.py +0 -0
  76. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/image_model/configs/create_flux_config.py +0 -0
  77. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/image_model/configs/create_lora_config.py +0 -0
  78. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/image_model/prepare_massed_compute.py +0 -0
  79. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/image_model/prepare_upload.py +0 -0
  80. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/image_model/raw_data/create_captions.py +0 -0
  81. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/image_model/raw_data/create_lora_captions.py +0 -0
  82. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/image_model/raw_data/pre_processing.py +0 -0
  83. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/image_model/train/train.py +0 -0
  84. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/image_model/train/train_flux.py +0 -0
  85. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/image_model/train/train_lora.py +0 -0
  86. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/image_model/train_main.py +0 -0
  87. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/llm_model/annotation/annotation_schema.py +0 -0
  88. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/llm_model/annotation/processors/annotation_processor.py +0 -0
  89. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/llm_model/annotation/storage/dataset_manager.py +0 -0
  90. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/llm_model/annotation/storage/dataset_schema.py +0 -0
  91. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/llm_model/annotation/tests/test_annotation_flow.py +0 -0
  92. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/llm_model/annotation/tests/test_minio copy.py +0 -0
  93. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/llm_model/annotation/tests/test_minio_upload.py +0 -0
  94. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model/training/llm_model/annotation/views/annotation_controller.py +0 -0
  95. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model.egg-info/dependency_links.txt +0 -0
  96. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model.egg-info/requires.txt +0 -0
  97. {isa_model-0.0.1 → isa_model-0.0.3}/isa_model.egg-info/top_level.txt +0 -0
  98. {isa_model-0.0.1 → isa_model-0.0.3}/setup.cfg +0 -0
  99. {isa_model-0.0.1 → isa_model-0.0.3}/setup.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: isa-model
-Version: 0.0.1
+Version: 0.0.3
 Summary: Unified AI model serving framework
 Author-email: isA_Model Contributors <your.email@example.com>
 License-Expression: MIT
@@ -0,0 +1,56 @@
+"""
+ISA Model Evaluation Framework
+
+This module provides comprehensive evaluation capabilities for AI models:
+- LLM evaluation (perplexity, BLEU, ROUGE, custom metrics)
+- Image model evaluation (FID, IS, LPIPS)
+- Benchmark testing (MMLU, HellaSwag, ARC, etc.)
+- Custom evaluation pipelines
+
+Usage:
+    from isa_model.eval import EvaluationFactory
+
+    # Create evaluation factory
+    evaluator = EvaluationFactory()
+
+    # Evaluate LLM performance
+    results = evaluator.evaluate_llm(
+        model_path="path/to/model",
+        dataset_path="test_data.json",
+        metrics=["perplexity", "bleu", "rouge"]
+    )
+
+    # Run benchmark tests
+    benchmark_results = evaluator.run_benchmark(
+        model_path="path/to/model",
+        benchmark="mmlu"
+    )
+"""
+
+from .factory import EvaluationFactory
+from .metrics import (
+    LLMMetrics,
+    ImageMetrics,
+    BenchmarkRunner,
+    MetricType
+)
+from .benchmarks import (
+    MMLU,
+    HellaSwag,
+    ARC,
+    GSM8K,
+    BenchmarkConfig
+)
+
+__all__ = [
+    "EvaluationFactory",
+    "LLMMetrics",
+    "ImageMetrics",
+    "BenchmarkRunner",
+    "MetricType",
+    "MMLU",
+    "HellaSwag",
+    "ARC",
+    "GSM8K",
+    "BenchmarkConfig"
+]
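
Because eval/__init__.py re-exports the factory, metrics, and benchmark classes, consumers of 0.0.3 can import everything from the isa_model.eval package root rather than the individual submodules. A minimal sketch (not part of the diff), assuming isa-model 0.0.3 is installed and that the unshown factory.py and metrics.py modules import cleanly; the no-argument EvaluationFactory() call mirrors the docstring above, but its real signature is not visible in this diff:

    # Sketch: exercising the public surface declared in isa_model/eval/__init__.py
    from isa_model.eval import EvaluationFactory, MMLU, BenchmarkConfig

    evaluator = EvaluationFactory()           # constructor arguments assumed, per the docstring
    benchmark = MMLU(subjects=["astronomy"])  # MMLU is re-exported from .benchmarks
    print(isinstance(benchmark.config, BenchmarkConfig))  # True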
@@ -0,0 +1,469 @@
+"""
+Standard AI Benchmarks for ISA Model Framework
+
+This module provides implementations of standard AI benchmarks:
+- MMLU (Massive Multitask Language Understanding)
+- HellaSwag (Commonsense Reasoning)
+- ARC (AI2 Reasoning Challenge)
+- GSM8K (Grade School Math)
+"""
+
+import os
+import json
+import logging
+from typing import Dict, List, Any, Optional
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class BenchmarkConfig:
+    """Configuration for benchmark evaluation."""
+    name: str
+    description: str
+    num_choices: int = 4
+    few_shot_examples: int = 5
+    max_samples: Optional[int] = None
+    subjects: Optional[List[str]] = None
+
+
+class BaseBenchmark(ABC):
+    """Base class for all benchmarks."""
+
+    def __init__(self, config: BenchmarkConfig):
+        self.config = config
+        self.name = config.name
+        self.data = None
+
+    @abstractmethod
+    def load_data(self, max_samples: Optional[int] = None) -> List[Dict[str, Any]]:
+        """Load benchmark data."""
+        pass
+
+    @abstractmethod
+    def evaluate_sample(self, sample: Dict[str, Any], prediction: str) -> bool:
+        """Evaluate a single sample."""
+        pass
+
+    def format_prompt(self, sample: Dict[str, Any], few_shot_examples: Optional[List[Dict[str, Any]]] = None) -> str:
+        """Format prompt for the sample."""
+        prompt = ""
+
+        # Add few-shot examples if provided
+        if few_shot_examples:
+            for example in few_shot_examples:
+                prompt += self._format_single_example(example, include_answer=True) + "\n\n"
+
+        # Add the actual question
+        prompt += self._format_single_example(sample, include_answer=False)
+
+        return prompt
+
+    @abstractmethod
+    def _format_single_example(self, sample: Dict[str, Any], include_answer: bool = False) -> str:
+        """Format a single example."""
+        pass
+
+
+class MMLU(BaseBenchmark):
+    """
+    MMLU (Massive Multitask Language Understanding) Benchmark
+
+    Tests knowledge across 57 subjects including mathematics, history,
+    computer science, law, and more.
+    """
+
+    def __init__(self, subjects: Optional[List[str]] = None):
+        config = BenchmarkConfig(
+            name="MMLU",
+            description="Massive Multitask Language Understanding",
+            num_choices=4,
+            few_shot_examples=5,
+            subjects=subjects
+        )
+        super().__init__(config)
+
+        # MMLU subjects
+        self.all_subjects = [
+            "abstract_algebra", "anatomy", "astronomy", "business_ethics",
+            "clinical_knowledge", "college_biology", "college_chemistry",
+            "college_computer_science", "college_mathematics", "college_medicine",
+            "college_physics", "computer_security", "conceptual_physics",
+            "econometrics", "electrical_engineering", "elementary_mathematics",
+            "formal_logic", "global_facts", "high_school_biology",
+            "high_school_chemistry", "high_school_computer_science",
+            "high_school_european_history", "high_school_geography",
+            "high_school_government_and_politics", "high_school_macroeconomics",
+            "high_school_mathematics", "high_school_microeconomics",
+            "high_school_physics", "high_school_psychology", "high_school_statistics",
+            "high_school_us_history", "high_school_world_history", "human_aging",
+            "human_sexuality", "international_law", "jurisprudence",
+            "logical_fallacies", "machine_learning", "management", "marketing",
+            "medical_genetics", "miscellaneous", "moral_disputes", "moral_scenarios",
+            "nutrition", "philosophy", "prehistory", "professional_accounting",
+            "professional_law", "professional_medicine", "professional_psychology",
+            "public_relations", "security_studies", "sociology", "us_foreign_policy",
+            "virology", "world_religions"
+        ]
+
+        self.subjects = subjects or self.all_subjects[:10]  # Use first 10 subjects by default
+
+    def load_data(self, max_samples: Optional[int] = None) -> List[Dict[str, Any]]:
+        """Load MMLU data (simplified implementation)."""
+        # This is a simplified implementation
+        # In practice, you'd load from the actual MMLU dataset
+
+        data = []
+
+        for subject in self.subjects:
+            # Generate sample questions for each subject
+            for i in range(min(10, max_samples // len(self.subjects) if max_samples else 10)):
+                sample = {
+                    "subject": subject,
+                    "question": f"Sample {subject} question {i+1}",
+                    "choices": [
+                        f"Option A for {subject}",
+                        f"Option B for {subject}",
+                        f"Option C for {subject}",
+                        f"Option D for {subject}"
+                    ],
+                    "answer": "A",  # Simplified
+                    "id": f"{subject}_{i}"
+                }
+                data.append(sample)
+
+        if max_samples:
+            data = data[:max_samples]
+
+        logger.info(f"Loaded {len(data)} MMLU samples across {len(self.subjects)} subjects")
+        return data
+
+    def evaluate_sample(self, sample: Dict[str, Any], prediction: str) -> bool:
+        """Evaluate a single MMLU sample."""
+        # Extract the letter choice from prediction
+        prediction = prediction.strip().upper()
+
+        # Handle various response formats
+        if prediction in ["A", "B", "C", "D"]:
+            return prediction == sample["answer"]
+        elif prediction.startswith("(") and prediction.endswith(")"):
+            letter = prediction[1]
+            return letter == sample["answer"]
+        else:
+            # Try to find A, B, C, or D in the response
+            for choice in ["A", "B", "C", "D"]:
+                if choice in prediction:
+                    return choice == sample["answer"]
+
+        return False
+
+    def _format_single_example(self, sample: Dict[str, Any], include_answer: bool = False) -> str:
+        """Format a single MMLU example."""
+        prompt = f"Subject: {sample['subject'].replace('_', ' ').title()}\n"
+        prompt += f"Question: {sample['question']}\n"
+
+        choices = sample['choices']
+        for i, choice in enumerate(choices):
+            letter = chr(65 + i)  # A, B, C, D
+            prompt += f"{letter}. {choice}\n"
+
+        if include_answer:
+            prompt += f"Answer: {sample['answer']}"
+        else:
+            prompt += "Answer:"
+
+        return prompt
+
+
+class HellaSwag(BaseBenchmark):
+    """
+    HellaSwag Benchmark
+
+    Tests commonsense reasoning about physical situations.
+    """
+
+    def __init__(self):
+        config = BenchmarkConfig(
+            name="HellaSwag",
+            description="Commonsense Reasoning about Physical Situations",
+            num_choices=4,
+            few_shot_examples=10
+        )
+        super().__init__(config)
+
+    def load_data(self, max_samples: Optional[int] = None) -> List[Dict[str, Any]]:
+        """Load HellaSwag data (simplified implementation)."""
+        # This is a simplified implementation
+        # In practice, you'd load from the actual HellaSwag dataset
+
+        data = []
+
+        sample_contexts = [
+            "A person is washing dishes in the kitchen",
+            "Someone is riding a bicycle down a hill",
+            "A chef is preparing ingredients for cooking",
+            "A student is taking notes in class",
+            "A gardener is planting flowers"
+        ]
+
+        for i, context in enumerate(sample_contexts):
+            if max_samples and i >= max_samples:
+                break
+
+            sample = {
+                "context": context,
+                "question": "What happens next?",
+                "choices": [
+                    f"They continue with the logical next step for scenario {i+1}",
+                    f"They do something completely unrelated to scenario {i+1}",
+                    f"They stop and do something random in scenario {i+1}",
+                    f"They repeat the same action in scenario {i+1}"
+                ],
+                "answer": "A",  # First choice is usually most logical
+                "id": f"hellaswag_{i}"
+            }
+            data.append(sample)
+
+        logger.info(f"Loaded {len(data)} HellaSwag samples")
+        return data
+
+    def evaluate_sample(self, sample: Dict[str, Any], prediction: str) -> bool:
+        """Evaluate a single HellaSwag sample."""
+        prediction = prediction.strip().upper()
+
+        if prediction in ["A", "B", "C", "D"]:
+            return prediction == sample["answer"]
+
+        # Try to extract choice from longer response
+        for choice in ["A", "B", "C", "D"]:
+            if choice in prediction:
+                return choice == sample["answer"]
+
+        return False
+
+    def _format_single_example(self, sample: Dict[str, Any], include_answer: bool = False) -> str:
+        """Format a single HellaSwag example."""
+        prompt = f"Context: {sample['context']}\n"
+        prompt += f"Question: {sample['question']}\n"
+
+        choices = sample['choices']
+        for i, choice in enumerate(choices):
+            letter = chr(65 + i)  # A, B, C, D
+            prompt += f"{letter}. {choice}\n"
+
+        if include_answer:
+            prompt += f"Answer: {sample['answer']}"
+        else:
+            prompt += "Answer:"
+
+        return prompt
+
+
+class ARC(BaseBenchmark):
+    """
+    ARC (AI2 Reasoning Challenge) Benchmark
+
+    Tests scientific reasoning with grade-school level science questions.
+    """
+
+    def __init__(self, challenge_set: str = "easy"):
+        config = BenchmarkConfig(
+            name=f"ARC-{challenge_set}",
+            description=f"AI2 Reasoning Challenge ({challenge_set})",
+            num_choices=4,
+            few_shot_examples=25
+        )
+        super().__init__(config)
+        self.challenge_set = challenge_set  # "easy" or "challenge"
+
+    def load_data(self, max_samples: Optional[int] = None) -> List[Dict[str, Any]]:
+        """Load ARC data (simplified implementation)."""
+        # This is a simplified implementation
+        # In practice, you'd load from the actual ARC dataset
+
+        data = []
+
+        sample_questions = [
+            {
+                "question": "What happens to water when it freezes?",
+                "choices": ["It becomes ice", "It becomes gas", "It disappears", "It becomes hot"],
+                "answer": "A"
+            },
+            {
+                "question": "Which planet is closest to the Sun?",
+                "choices": ["Earth", "Mars", "Mercury", "Venus"],
+                "answer": "C"
+            },
+            {
+                "question": "What do plants need to make their own food?",
+                "choices": ["Sunlight and water", "Only water", "Only sunlight", "Soil only"],
+                "answer": "A"
+            },
+            {
+                "question": "What is the main gas in Earth's atmosphere?",
+                "choices": ["Oxygen", "Carbon dioxide", "Nitrogen", "Hydrogen"],
+                "answer": "C"
+            },
+            {
+                "question": "How many legs does a spider have?",
+                "choices": ["6", "8", "10", "12"],
+                "answer": "B"
+            }
+        ]
+
+        for i, q in enumerate(sample_questions):
+            if max_samples and i >= max_samples:
+                break
+
+            sample = {
+                "question": q["question"],
+                "choices": q["choices"],
+                "answer": q["answer"],
+                "challenge_set": self.challenge_set,
+                "id": f"arc_{self.challenge_set}_{i}"
+            }
+            data.append(sample)
+
+        logger.info(f"Loaded {len(data)} ARC-{self.challenge_set} samples")
+        return data
+
+    def evaluate_sample(self, sample: Dict[str, Any], prediction: str) -> bool:
+        """Evaluate a single ARC sample."""
+        prediction = prediction.strip().upper()
+
+        if prediction in ["A", "B", "C", "D"]:
+            return prediction == sample["answer"]
+
+        # Try to extract choice from longer response
+        for choice in ["A", "B", "C", "D"]:
+            if choice in prediction:
+                return choice == sample["answer"]
+
+        return False
+
+    def _format_single_example(self, sample: Dict[str, Any], include_answer: bool = False) -> str:
+        """Format a single ARC example."""
+        prompt = f"Question: {sample['question']}\n"
+
+        choices = sample['choices']
+        for i, choice in enumerate(choices):
+            letter = chr(65 + i)  # A, B, C, D
+            prompt += f"{letter}. {choice}\n"
+
+        if include_answer:
+            prompt += f"Answer: {sample['answer']}"
+        else:
+            prompt += "Answer:"
+
+        return prompt
+
+
+class GSM8K(BaseBenchmark):
+    """
+    GSM8K Benchmark
+
+    Tests mathematical reasoning with grade school math word problems.
+    """
+
+    def __init__(self):
+        config = BenchmarkConfig(
+            name="GSM8K",
+            description="Grade School Math 8K",
+            num_choices=1,  # Open-ended numerical answers
+            few_shot_examples=8
+        )
+        super().__init__(config)
+
+    def load_data(self, max_samples: Optional[int] = None) -> List[Dict[str, Any]]:
+        """Load GSM8K data (simplified implementation)."""
+        # This is a simplified implementation
+        # In practice, you'd load from the actual GSM8K dataset
+
+        data = []
+
+        sample_problems = [
+            {
+                "question": "Janet has 12 apples. She gives 3 apples to her friend and eats 2 apples. How many apples does Janet have left?",
+                "answer": "7"
+            },
+            {
+                "question": "A school has 24 students in each class. If there are 5 classes, how many students are there in total?",
+                "answer": "120"
+            },
+            {
+                "question": "Tom buys 4 books for $8 each. How much money does Tom spend in total?",
+                "answer": "32"
+            },
+            {
+                "question": "Sarah has 36 stickers. She wants to put them equally into 6 albums. How many stickers will be in each album?",
+                "answer": "6"
+            },
+            {
+                "question": "A rectangle has a length of 15 cm and a width of 8 cm. What is the area of the rectangle?",
+                "answer": "120"
+            }
+        ]
+
+        for i, problem in enumerate(sample_problems):
+            if max_samples and i >= max_samples:
+                break
+
+            sample = {
+                "question": problem["question"],
+                "answer": problem["answer"],
+                "id": f"gsm8k_{i}"
+            }
+            data.append(sample)
+
+        logger.info(f"Loaded {len(data)} GSM8K samples")
+        return data
+
+    def evaluate_sample(self, sample: Dict[str, Any], prediction: str) -> bool:
+        """Evaluate a single GSM8K sample."""
+        # Extract numerical answer from prediction
+        prediction = prediction.strip()
+
+        # Try to find the numerical answer
+        import re
+        numbers = re.findall(r'\d+', prediction)
+
+        if numbers:
+            # Take the last number found (often the final answer)
+            predicted_answer = numbers[-1]
+            return predicted_answer == sample["answer"]
+
+        return False
+
+    def _format_single_example(self, sample: Dict[str, Any], include_answer: bool = False) -> str:
+        """Format a single GSM8K example."""
+        prompt = f"Problem: {sample['question']}\n"
+
+        if include_answer:
+            prompt += f"Answer: {sample['answer']}"
+        else:
+            prompt += "Answer:"
+
+        return prompt
+
+
+# Convenience functions for creating benchmark instances
+def create_mmlu_benchmark(subjects: Optional[List[str]] = None) -> MMLU:
+    """Create MMLU benchmark instance."""
+    return MMLU(subjects=subjects)
+
+
+def create_hellaswag_benchmark() -> HellaSwag:
+    """Create HellaSwag benchmark instance."""
+    return HellaSwag()
+
+
+def create_arc_benchmark(challenge_set: str = "easy") -> ARC:
+    """Create ARC benchmark instance."""
+    return ARC(challenge_set=challenge_set)
+
+
+def create_gsm8k_benchmark() -> GSM8K:
+    """Create GSM8K benchmark instance."""
+    return GSM8K()
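
All four benchmark classes added above share the drive loop implied by BaseBenchmark: load samples, format a prompt (optionally with few-shot examples), obtain a model prediction, and score it with evaluate_sample. A minimal sketch of that loop, using only the MMLU class defined in this file; fake_predict is a hypothetical stand-in for a real model call (in the package, predictions would presumably come from the EvaluationFactory or the inference services rather than a hard-coded string):

    # Sketch of the BaseBenchmark evaluation loop (not part of the package).
    from isa_model.eval.benchmarks import MMLU

    benchmark = MMLU(subjects=["astronomy", "machine_learning"])
    samples = benchmark.load_data(max_samples=4)   # 2 placeholder questions per subject
    few_shot = samples[:1]                         # reuse one sample as a few-shot example

    def fake_predict(prompt: str) -> str:
        # Hypothetical stand-in for a real model call; always answers "A".
        return "A"

    correct = 0
    for sample in samples:
        prompt = benchmark.format_prompt(sample, few_shot_examples=few_shot)
        if benchmark.evaluate_sample(sample, fake_predict(prompt)):
            correct += 1

    print(f"{benchmark.name} accuracy: {correct}/{len(samples)}")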