wisent 0.7.701__py3-none-any.whl → 0.7.1045__py3-none-any.whl

This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
Files changed (391)
  1. wisent/__init__.py +1 -1
  2. wisent/comparison/__init__.py +1 -0
  3. wisent/comparison/detect_bos_features.py +275 -0
  4. wisent/comparison/fgaa.py +465 -0
  5. wisent/comparison/lora.py +669 -0
  6. wisent/comparison/lora_dpo.py +592 -0
  7. wisent/comparison/main.py +444 -0
  8. wisent/comparison/ours.py +76 -0
  9. wisent/comparison/sae.py +304 -0
  10. wisent/comparison/utils.py +381 -0
  11. wisent/core/activations/activation_cache.py +393 -0
  12. wisent/core/activations/activations.py +3 -3
  13. wisent/core/activations/activations_collector.py +12 -7
  14. wisent/core/activations/classifier_inference_strategy.py +12 -11
  15. wisent/core/activations/extraction_strategy.py +260 -84
  16. wisent/core/classifiers/classifiers/core/atoms.py +3 -2
  17. wisent/core/cli/__init__.py +2 -1
  18. wisent/core/cli/agent/train_classifier.py +16 -3
  19. wisent/core/cli/check_linearity.py +35 -3
  20. wisent/core/cli/cluster_benchmarks.py +4 -6
  21. wisent/core/cli/create_steering_vector.py +6 -4
  22. wisent/core/cli/diagnose_vectors.py +7 -4
  23. wisent/core/cli/estimate_unified_goodness_time.py +6 -4
  24. wisent/core/cli/generate_pairs_from_task.py +9 -56
  25. wisent/core/cli/generate_vector_from_task.py +11 -20
  26. wisent/core/cli/geometry_search.py +137 -0
  27. wisent/core/cli/get_activations.py +2 -2
  28. wisent/core/cli/method_optimizer.py +4 -3
  29. wisent/core/cli/modify_weights.py +3 -2
  30. wisent/core/cli/optimize_sample_size.py +1 -1
  31. wisent/core/cli/optimize_steering.py +14 -16
  32. wisent/core/cli/optimize_weights.py +2 -1
  33. wisent/core/cli/preview_pairs.py +203 -0
  34. wisent/core/cli/steering_method_trainer.py +3 -3
  35. wisent/core/cli/tasks.py +19 -76
  36. wisent/core/cli/train_unified_goodness.py +3 -3
  37. wisent/core/contrastive_pairs/diagnostics/control_vectors.py +4 -4
  38. wisent/core/contrastive_pairs/diagnostics/linearity.py +7 -0
  39. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/agentic_search.py +37 -347
  40. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/aider_polyglot.py +113 -136
  41. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/codeforces.py +2 -12
  42. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/coding_benchmarks.py +124 -504
  43. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/faithbench.py +40 -63
  44. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/flames.py +46 -89
  45. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/flores.py +15 -4
  46. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/frames.py +36 -20
  47. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/hallucinations_leaderboard.py +3 -45
  48. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/livemathbench.py +42 -4
  49. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/longform_writing.py +2 -112
  50. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/math500.py +39 -4
  51. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/medium_priority_benchmarks.py +475 -525
  52. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/mercury.py +65 -42
  53. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/olympiadbench.py +2 -12
  54. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/planbench.py +78 -219
  55. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/polymath.py +37 -4
  56. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/recode.py +84 -69
  57. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/refusalbench.py +168 -160
  58. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/simpleqa.py +44 -25
  59. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/tau_bench.py +3 -103
  60. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/toolbench.py +3 -97
  61. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/toolemu.py +48 -182
  62. wisent/core/contrastive_pairs/lm_eval_pairs/lm_extractor_manifest.py +3 -0
  63. wisent/core/contrastive_pairs/lm_eval_pairs/lm_extractor_registry.py +19 -1
  64. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/aclue.py +1 -3
  65. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/acp_bench.py +1 -3
  66. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/acp_bench_hard.py +1 -3
  67. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/advanced.py +2 -4
  68. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/aexams.py +1 -3
  69. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/afrimmlu.py +1 -3
  70. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/afrixnli.py +2 -2
  71. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabculture.py +1 -3
  72. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic.py +1 -3
  73. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic_exams.py +1 -3
  74. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic_leaderboard_complete.py +1 -3
  75. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic_leaderboard_light.py +1 -3
  76. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabicmmlu.py +1 -3
  77. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/aradice.py +1 -3
  78. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arc.py +1 -3
  79. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arc_challenge.py +1 -2
  80. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arc_easy.py +1 -2
  81. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arithmetic.py +2 -2
  82. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/asdiv.py +2 -2
  83. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/babi.py +36 -2
  84. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/basque_bench.py +1 -3
  85. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bbq.py +1 -3
  86. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/belebele.py +1 -3
  87. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/benchmarks.py +1 -3
  88. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bertaqa.py +1 -3
  89. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bhs.py +1 -3
  90. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bhtc.py +3 -5
  91. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/blimp.py +1 -3
  92. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/blimp_nl.py +1 -3
  93. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/boolq.py +22 -5
  94. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/c4.py +1 -3
  95. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cabbq.py +1 -3
  96. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/careqa.py +1 -3
  97. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/catalan_bench.py +1 -3
  98. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/catalanqa.py +1 -3
  99. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/catcola.py +1 -3
  100. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cb.py +10 -3
  101. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ceval.py +1 -3
  102. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ceval_valid.py +1 -3
  103. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/chain.py +1 -3
  104. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/chartqa.py +1 -3
  105. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/claim.py +1 -3
  106. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/click.py +1 -3
  107. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cmmlu.py +1 -3
  108. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cnn.py +1 -3
  109. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cocoteros.py +1 -3
  110. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/coedit.py +1 -3
  111. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/commonsense.py +1 -3
  112. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/commonsense_qa.py +1 -3
  113. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/copa.py +2 -2
  114. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/copal_id.py +1 -3
  115. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/coqa.py +3 -4
  116. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/csatqa.py +1 -3
  117. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cycle.py +1 -3
  118. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/darija_bench.py +1 -3
  119. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/darijahellaswag.py +2 -6
  120. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/darijammlu.py +1 -3
  121. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/dbpedia.py +1 -3
  122. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/discrim_eval.py +1 -3
  123. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/doc.py +1 -3
  124. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/drop.py +2 -2
  125. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/epec.py +1 -3
  126. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq.py +1 -3
  127. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq_bench.py +1 -3
  128. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq_bench_ca.py +1 -3
  129. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq_bench_es.py +1 -3
  130. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/esbbq.py +1 -3
  131. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ethics.py +1 -3
  132. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus.py +1 -3
  133. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_exams.py +1 -3
  134. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_proficiency.py +1 -3
  135. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_reading.py +1 -3
  136. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_trivia.py +1 -3
  137. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/evalita_llm.py +1 -3
  138. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/financial.py +1 -3
  139. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/flan.py +1 -3
  140. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/french_bench.py +1 -3
  141. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/galician_bench.py +1 -3
  142. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/gaokao.py +2 -2
  143. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/glianorex.py +1 -3
  144. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/global_mmlu.py +1 -3
  145. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/global_piqa.py +1 -3
  146. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/gpt3.py +1 -3
  147. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/groundcocoa.py +1 -3
  148. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/haerae.py +1 -3
  149. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/headqa.py +2 -2
  150. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hellaswag.py +2 -2
  151. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hendrycks_ethics.py +5 -9
  152. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hendrycks_math.py +63 -16
  153. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/histoires_morales.py +1 -3
  154. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hrm8k.py +1 -3
  155. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/humaneval_infilling.py +1 -3
  156. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/icelandic_winogrande.py +1 -3
  157. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/inverse.py +1 -3
  158. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/inverse_scaling.py +1 -3
  159. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ja.py +1 -3
  160. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/japanese_leaderboard.py +1 -3
  161. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/japanese_leaderboard_mc.py +1 -1
  162. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kmmlu.py +1 -3
  163. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kobest.py +1 -3
  164. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kormedmcqa.py +5 -17
  165. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lambada_cloze.py +1 -3
  166. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lambada_multilingual.py +1 -3
  167. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/law.py +1 -3
  168. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/leaderboard.py +1 -3
  169. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lingoly.py +1 -3
  170. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/llama3.py +1 -3
  171. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lm_syneval.py +1 -3
  172. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/logiqa.py +2 -2
  173. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/logiqa2.py +2 -2
  174. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/longbench.py +1 -3
  175. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/longbenchv2.py +1 -3
  176. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mastermind.py +2 -4
  177. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mc-taco.py +2 -2
  178. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/med_concepts_qa.py +2 -4
  179. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/meddialog.py +1 -3
  180. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/medical.py +1 -3
  181. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/medmcqa.py +1 -3
  182. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/medqa.py +2 -2
  183. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mela.py +2 -2
  184. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/metabench.py +1 -3
  185. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/minerva_math.py +1 -3
  186. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mmlu.py +1 -3
  187. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mmlusr.py +3 -4
  188. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mrpc.py +2 -2
  189. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/multiblimp.py +2 -5
  190. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/multirc.py +2 -2
  191. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mutual.py +2 -2
  192. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/non.py +1 -3
  193. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval.py +1 -3
  194. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_exact.py +1 -3
  195. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_gen_exact.py +1 -3
  196. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_mc.py +4 -8
  197. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_mc_log_likelihoods.py +4 -8
  198. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/nq_open.py +2 -2
  199. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_arc_multilingual.py +1 -3
  200. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_hellaswag_multilingual.py +1 -3
  201. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_mmlu_multilingual.py +1 -3
  202. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_truthfulqa_multilingual.py +2 -5
  203. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/olaph.py +1 -3
  204. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/openbookqa.py +2 -2
  205. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/option.py +1 -3
  206. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/parafraseja.py +1 -3
  207. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/parafrases.py +1 -3
  208. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/paws.py +1 -3
  209. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/paws_x.py +1 -3
  210. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/pawsx.py +2 -2
  211. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/persona.py +1 -3
  212. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/phrases.py +1 -3
  213. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/pile.py +1 -3
  214. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/piqa.py +2 -2
  215. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/portuguese_bench.py +1 -3
  216. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/prompt.py +1 -3
  217. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/prost.py +2 -2
  218. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/pubmedqa.py +2 -2
  219. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qa4mre.py +2 -2
  220. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qasper.py +2 -2
  221. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qasper_bool.py +2 -2
  222. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qnli.py +2 -2
  223. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qnlieu.py +1 -3
  224. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qqp.py +2 -2
  225. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/race.py +2 -2
  226. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/random.py +1 -3
  227. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/record.py +2 -2
  228. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/reversed.py +1 -3
  229. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/rte.py +2 -2
  230. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ruler.py +1 -3
  231. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sciq.py +2 -2
  232. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/score.py +1 -3
  233. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/scrolls.py +1 -3
  234. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/scrolls_mc.py +1 -3
  235. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/self.py +1 -3
  236. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sglue.py +1 -3
  237. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sglue_rte.py +2 -1
  238. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/siqa.py +4 -7
  239. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/social_iqa.py +2 -2
  240. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/spanish_bench.py +1 -3
  241. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/storycloze.py +2 -6
  242. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/summarization.py +1 -3
  243. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/super.py +1 -3
  244. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/super_glue.py +1 -3
  245. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/swag.py +2 -2
  246. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/swde.py +1 -3
  247. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sycophancy.py +1 -3
  248. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/t0.py +1 -3
  249. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/teca.py +1 -3
  250. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinyarc.py +1 -3
  251. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinybenchmarks.py +1 -3
  252. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinygsm8k.py +1 -3
  253. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinyhellaswag.py +1 -3
  254. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinymmlu.py +1 -3
  255. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinytruthfulqa.py +1 -3
  256. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinywinogrande.py +1 -3
  257. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tmmluplus.py +1 -3
  258. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/triviaqa.py +2 -2
  259. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/truthfulqa.py +1 -3
  260. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/truthfulqa_mc1.py +9 -4
  261. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/truthfulqa_mc2.py +1 -3
  262. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/turblimp_core.py +1 -3
  263. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/turkishmmlu.py +1 -3
  264. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/turkishmmlu_mc.py +0 -2
  265. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/unscramble.py +1 -3
  266. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/vaxx.py +2 -2
  267. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/webqs.py +2 -2
  268. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wic.py +3 -4
  269. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/winogrande.py +2 -2
  270. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wmdp.py +1 -3
  271. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wnli.py +2 -2
  272. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wsc.py +2 -2
  273. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wsc273.py +1 -3
  274. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xcopa.py +1 -3
  275. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xlsum.py +1 -3
  276. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xnli.py +2 -2
  277. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xquad.py +2 -4
  278. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xstorycloze.py +2 -3
  279. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xwinograd.py +2 -2
  280. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/zhoblimp.py +1 -3
  281. wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_pairs_generation.py +173 -6
  282. wisent/core/data_loaders/loaders/lm_loader.py +12 -1
  283. wisent/core/geometry_runner.py +995 -0
  284. wisent/core/geometry_search_space.py +237 -0
  285. wisent/core/hyperparameter_optimizer.py +1 -1
  286. wisent/core/main.py +3 -0
  287. wisent/core/models/core/atoms.py +5 -3
  288. wisent/core/models/wisent_model.py +1 -1
  289. wisent/core/optuna/classifier/optuna_classifier_optimizer.py +2 -2
  290. wisent/core/parser_arguments/check_linearity_parser.py +12 -2
  291. wisent/core/parser_arguments/generate_vector_from_synthetic_parser.py +2 -2
  292. wisent/core/parser_arguments/generate_vector_from_task_parser.py +6 -13
  293. wisent/core/parser_arguments/geometry_search_parser.py +61 -0
  294. wisent/core/parser_arguments/get_activations_parser.py +5 -14
  295. wisent/core/parser_arguments/main_parser.py +8 -0
  296. wisent/core/parser_arguments/train_unified_goodness_parser.py +2 -2
  297. wisent/core/steering.py +5 -3
  298. wisent/core/steering_methods/methods/hyperplane.py +2 -1
  299. wisent/core/synthetic/generators/nonsense_generator.py +30 -18
  300. wisent/core/trainers/steering_trainer.py +2 -2
  301. wisent/core/utils/device.py +27 -27
  302. wisent/core/utils/layer_combinations.py +70 -0
  303. wisent/examples/__init__.py +1 -0
  304. wisent/examples/scripts/__init__.py +1 -0
  305. wisent/examples/scripts/count_all_benchmarks.py +121 -0
  306. wisent/examples/scripts/discover_directions.py +469 -0
  307. wisent/examples/scripts/extract_benchmark_info.py +71 -0
  308. wisent/examples/scripts/search_all_short_names.py +31 -0
  309. wisent/examples/scripts/test_all_benchmarks.py +138 -0
  310. wisent/examples/scripts/test_all_benchmarks_new.py +28 -0
  311. wisent/examples/scripts/test_contrastive_pairs_all_supported.py +230 -0
  312. wisent/examples/scripts/test_nonsense_baseline.py +261 -0
  313. wisent/examples/scripts/test_one_benchmark.py +324 -0
  314. wisent/examples/scripts/test_one_coding_benchmark.py +293 -0
  315. wisent/parameters/lm_eval/broken_in_lm_eval.json +179 -2
  316. wisent/parameters/lm_eval/category_directions.json +137 -0
  317. wisent/parameters/lm_eval/repair_plan.json +282 -0
  318. wisent/parameters/lm_eval/weak_contrastive_pairs.json +38 -0
  319. wisent/parameters/lm_eval/working_benchmarks.json +206 -0
  320. wisent/parameters/lm_eval/working_benchmarks_categorized.json +236 -0
  321. wisent/tests/test_detector_accuracy.py +1 -1
  322. wisent/tests/visualize_geometry.py +1 -1
  323. {wisent-0.7.701.dist-info → wisent-0.7.1045.dist-info}/METADATA +5 -1
  324. {wisent-0.7.701.dist-info → wisent-0.7.1045.dist-info}/RECORD +328 -358
  325. wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/browsecomp.py +0 -245
  326. wisent/examples/contrastive_pairs/humanization_human_vs_ai.json +0 -2112
  327. wisent/examples/scripts/1/test_basqueglue_evaluation.json +0 -51
  328. wisent/examples/scripts/1/test_basqueglue_pairs.json +0 -14
  329. wisent/examples/scripts/1/test_bec2016eu_evaluation.json +0 -51
  330. wisent/examples/scripts/1/test_bec2016eu_pairs.json +0 -14
  331. wisent/examples/scripts/1/test_belebele_evaluation.json +0 -51
  332. wisent/examples/scripts/1/test_belebele_pairs.json +0 -14
  333. wisent/examples/scripts/1/test_benchmarks_evaluation.json +0 -51
  334. wisent/examples/scripts/1/test_benchmarks_pairs.json +0 -14
  335. wisent/examples/scripts/1/test_bertaqa_evaluation.json +0 -51
  336. wisent/examples/scripts/1/test_bertaqa_pairs.json +0 -14
  337. wisent/examples/scripts/1/test_bhtc_v2_evaluation.json +0 -30
  338. wisent/examples/scripts/1/test_bhtc_v2_pairs.json +0 -8
  339. wisent/examples/scripts/1/test_boolq-seq2seq_evaluation.json +0 -30
  340. wisent/examples/scripts/1/test_boolq-seq2seq_pairs.json +0 -8
  341. wisent/examples/scripts/1/test_cabreu_evaluation.json +0 -30
  342. wisent/examples/scripts/1/test_cabreu_pairs.json +0 -8
  343. wisent/examples/scripts/1/test_careqa_en_evaluation.json +0 -30
  344. wisent/examples/scripts/1/test_careqa_en_pairs.json +0 -8
  345. wisent/examples/scripts/1/test_careqa_evaluation.json +0 -30
  346. wisent/examples/scripts/1/test_careqa_pairs.json +0 -8
  347. wisent/examples/scripts/1/test_catalanqa_evaluation.json +0 -30
  348. wisent/examples/scripts/1/test_catalanqa_pairs.json +0 -8
  349. wisent/examples/scripts/1/test_catcola_evaluation.json +0 -30
  350. wisent/examples/scripts/1/test_catcola_pairs.json +0 -8
  351. wisent/examples/scripts/1/test_chartqa_evaluation.json +0 -30
  352. wisent/examples/scripts/1/test_chartqa_pairs.json +0 -8
  353. wisent/examples/scripts/1/test_claim_stance_topic_evaluation.json +0 -30
  354. wisent/examples/scripts/1/test_claim_stance_topic_pairs.json +0 -8
  355. wisent/examples/scripts/1/test_cnn_dailymail_evaluation.json +0 -30
  356. wisent/examples/scripts/1/test_cnn_dailymail_pairs.json +0 -8
  357. wisent/examples/scripts/1/test_cocoteros_es_evaluation.json +0 -30
  358. wisent/examples/scripts/1/test_cocoteros_es_pairs.json +0 -8
  359. wisent/examples/scripts/1/test_coedit_gec_evaluation.json +0 -30
  360. wisent/examples/scripts/1/test_coedit_gec_pairs.json +0 -8
  361. wisent/examples/scripts/1/test_cola_evaluation.json +0 -30
  362. wisent/examples/scripts/1/test_cola_pairs.json +0 -8
  363. wisent/examples/scripts/1/test_coqcat_evaluation.json +0 -30
  364. wisent/examples/scripts/1/test_coqcat_pairs.json +0 -8
  365. wisent/examples/scripts/1/test_dbpedia_14_evaluation.json +0 -30
  366. wisent/examples/scripts/1/test_dbpedia_14_pairs.json +0 -8
  367. wisent/examples/scripts/1/test_epec_koref_bin_evaluation.json +0 -30
  368. wisent/examples/scripts/1/test_epec_koref_bin_pairs.json +0 -8
  369. wisent/examples/scripts/1/test_ethos_binary_evaluation.json +0 -30
  370. wisent/examples/scripts/1/test_ethos_binary_pairs.json +0 -8
  371. wisent/examples/scripts/2/test_afrimgsm_direct_amh_evaluation.json +0 -30
  372. wisent/examples/scripts/2/test_afrimgsm_direct_amh_pairs.json +0 -8
  373. wisent/examples/scripts/2/test_afrimmlu_direct_amh_evaluation.json +0 -30
  374. wisent/examples/scripts/2/test_afrimmlu_direct_amh_pairs.json +0 -8
  375. wisent/examples/scripts/2/test_afrixnli_en_direct_amh_evaluation.json +0 -30
  376. wisent/examples/scripts/2/test_afrixnli_en_direct_amh_pairs.json +0 -8
  377. wisent/examples/scripts/2/test_arc_ar_evaluation.json +0 -30
  378. wisent/examples/scripts/2/test_arc_ar_pairs.json +0 -8
  379. wisent/examples/scripts/2/test_atis_evaluation.json +0 -30
  380. wisent/examples/scripts/2/test_atis_pairs.json +0 -8
  381. wisent/examples/scripts/2/test_babi_evaluation.json +0 -30
  382. wisent/examples/scripts/2/test_babi_pairs.json +0 -8
  383. wisent/examples/scripts/2/test_babilong_evaluation.json +0 -30
  384. wisent/examples/scripts/2/test_babilong_pairs.json +0 -8
  385. wisent/examples/scripts/2/test_bangla_mmlu_evaluation.json +0 -30
  386. wisent/examples/scripts/2/test_bangla_mmlu_pairs.json +0 -8
  387. wisent/examples/scripts/2/test_basque-glue_pairs.json +0 -14
  388. {wisent-0.7.701.dist-info → wisent-0.7.1045.dist-info}/WHEEL +0 -0
  389. {wisent-0.7.701.dist-info → wisent-0.7.1045.dist-info}/entry_points.txt +0 -0
  390. {wisent-0.7.701.dist-info → wisent-0.7.1045.dist-info}/licenses/LICENSE +0 -0
  391. {wisent-0.7.701.dist-info → wisent-0.7.1045.dist-info}/top_level.txt +0 -0
wisent/parameters/lm_eval/broken_in_lm_eval.json
@@ -1,10 +1,187 @@
  [
+ "20_newsgroups",
+ "acp_bench_hard",
+ "acpbench",
+ "african_flores",
+ "afrimgsm",
+ "afrobench_adr",
+ "ag_news",
+ "agentharm",
+ "agieval",
+ "aime2024",
+ "aime2025",
+ "alpaca_eval",
+ "anagrams1",
+ "anagrams2",
+ "aradice",
+ "argument_topic",
+ "assin_entailment",
+ "banking77",
+ "basque_bench",
+ "benchmarks",
+ "bfcl",
+ "bhs",
+ "bhtc",
+ "bigbench",
+ "bigbench_generate_until",
+ "cabbq",
+ "cabreu_abstractive",
+ "chain",
+ "claim_stance_topic",
+ "click",
+ "cluewsc",
+ "cnn_dailymail",
+ "cocoteros_va",
+ "code2text",
  "code_x_glue",
+ "codeforces",
+ "codexglue_code_to_text_go",
+ "codexglue_code_to_text_java",
+ "codexglue_code_to_text_javascript",
+ "codexglue_code_to_text_php",
+ "codexglue_code_to_text_python",
+ "codexglue_code_to_text_ruby",
+ "coedit",
+ "copa_ca",
+ "cycle_letters",
+ "dbpedia_14",
+ "discrim_eval",
+ "doc",
+ "egyhellaswag",
+ "egymmlu",
+ "epec",
  "epec_koref_bin",
+ "esbbq",
+ "escola",
+ "ethos_binary",
+ "evalita-mp",
+ "evalita-sp",
+ "financial_tweets",
+ "flan",
  "flan_held_in",
+ "flores_ca-eu",
+ "gpt3_translation_benchmarks",
+ "harmbench",
+ "hle",
+ "icelandic_winogrande",
+ "ifeval",
+ "instruct_humaneval",
+ "instructhumaneval",
+ "jailbreakbench",
+ "japanese_leaderboard",
+ "jsonschema_bench",
+ "kmmlu",
+ "kmmlu_accounting",
+ "law_stack_exchange",
+ "leaderboard",
+ "ledgar",
+ "libra",
+ "librusec_history",
+ "livemathbench_cnmo_zh",
+ "llama3",
+ "lm_syneval",
+ "long_context_multiq",
+ "longbench",
+ "longbenchv2",
+ "matreshka_names",
+ "mbpp",
+ "mbpp_plus",
+ "mc-taco",
+ "medical_abstracts",
+ "mediqa_qa2019",
+ "medtext",
+ "meqsum",
+ "mgsm_direct_eu",
+ "mimic_repsum",
+ "minerva_math",
+ "mmlu_redux",
+ "mmmlu",
+ "mts_dialog",
+ "multi_swe_bench",
+ "multiblimp",
+ "multilingual",
+ "ncb",
+ "niah_single_1",
+ "norbelebele_p0",
+ "norec_document_p0",
+ "noreval",
+ "noropenbookqa_nno_p0",
+ "norrewrite_instruct",
+ "norsumm_nno_p0",
+ "norsummarize_instruct",
+ "okapi",
+ "okapi/arc_multilingual",
+ "olaph",
+ "olympiadbench",
+ "openbookqa_ca",
+ "or_bench",
+ "parafraseja",
+ "parafrases_gl",
+ "passkey",
+ "penn_treebank",
+ "pile",
+ "pile_10k",
+ "piqa_eu",
+ "portuguese_bench",
+ "ptb",
+ "qnlieu",
+ "quac",
+ "random",
+ "reversed",
+ "ru_2wikimultihopqa",
  "ruler",
+ "scrolls",
+ "sglue",
+ "social_iqa",
+ "sorry_bench",
+ "squad2",
+ "summarization_gl",
+ "super-glue-t5-prompt",
+ "super_glue",
+ "super_glue-wsc-t5-prompt",
+ "superglue",
+ "supergpqa",
+ "swe_bench_multilingual",
+ "sycophancy_eval",
+ "t0",
  "t0_eval",
+ "tatoeba_eng_nno_p0",
+ "teca",
+ "tinyArc",
+ "tinyBenchmarks",
+ "tinyGSM8k",
+ "tinyHellaswag",
+ "tinyMMLU",
+ "tinyTruthfulQA",
+ "tinyWinogrande",
+ "tinyarc",
+ "tinybenchmarks",
+ "tinygsm8k",
+ "tinyhellaswag",
+ "tinymmlu",
+ "tinytruthfulqa",
+ "tinywinogrande",
  "tmlu",
+ "trasnlation_all_flores",
+ "truthfulqa-multi",
+ "truthfulqa_gl_mc1",
+ "turblimp_core",
+ "turkishmmlu",
+ "turkishmmlu_biology",
+ "twenty_newsgroups",
+ "unfair_tos",
+ "unitxt",
+ "unscramble",
+ "vaxx",
  "vaxx_stance",
- "wiceu"
- ]
+ "wiceu",
+ "wikitext103",
+ "wildguard",
+ "wmt16-en-ro",
+ "wmt16-ro-en",
+ "wmt16_en_ro",
+ "wmt16_ro_en",
+ "wmt2014",
+ "xlsum_es",
+ "xnli_gl"
+ ]
wisent/parameters/lm_eval/category_directions.json
@@ -0,0 +1,137 @@
+ {
+ "coding": {
+ "description": "Code generation, understanding, and debugging capabilities",
+ "hypothesized_directions": [
+ "code_correctness",
+ "code_completeness",
+ "algorithmic_thinking",
+ "code_style"
+ ]
+ },
+ "math": {
+ "description": "Mathematical reasoning and computation",
+ "hypothesized_directions": [
+ "numerical_accuracy",
+ "algebraic_reasoning",
+ "problem_decomposition",
+ "mathematical_rigor"
+ ]
+ },
+ "reasoning_logic": {
+ "description": "Logical deduction and multi-step reasoning",
+ "hypothesized_directions": [
+ "deductive_reasoning",
+ "causal_reasoning",
+ "planning",
+ "constraint_satisfaction"
+ ]
+ },
+ "hallucination_factuality": {
+ "description": "Truthfulness and factual accuracy",
+ "hypothesized_directions": [
+ "factual_recall",
+ "uncertainty_awareness",
+ "source_grounding",
+ "confabulation_resistance"
+ ]
+ },
+ "safety_bias": {
+ "description": "Safety, fairness, and bias mitigation",
+ "hypothesized_directions": [
+ "harm_avoidance",
+ "stereotype_resistance",
+ "fairness",
+ "toxicity_avoidance"
+ ]
+ },
+ "multilingual": {
+ "description": "Cross-lingual and language-specific capabilities",
+ "hypothesized_directions": [
+ "language_transfer",
+ "cultural_awareness",
+ "script_handling",
+ "cross_lingual_consistency"
+ ]
+ },
+ "knowledge_qa": {
+ "description": "World knowledge and question answering",
+ "hypothesized_directions": [
+ "factual_knowledge",
+ "knowledge_retrieval",
+ "answer_precision",
+ "domain_expertise"
+ ]
+ },
+ "reading_comprehension": {
+ "description": "Understanding and extracting information from text",
+ "hypothesized_directions": [
+ "information_extraction",
+ "inference_making",
+ "context_tracking",
+ "summarization"
+ ]
+ },
+ "commonsense": {
+ "description": "Everyday reasoning and world understanding",
+ "hypothesized_directions": [
+ "physical_intuition",
+ "social_reasoning",
+ "temporal_reasoning",
+ "spatial_reasoning"
+ ]
+ },
+ "science_medical": {
+ "description": "Scientific and medical domain knowledge",
+ "hypothesized_directions": [
+ "scientific_accuracy",
+ "medical_knowledge",
+ "technical_precision",
+ "evidence_based_reasoning"
+ ]
+ },
+ "instruction_following": {
+ "description": "Following complex instructions and user intent",
+ "hypothesized_directions": [
+ "instruction_adherence",
+ "format_compliance",
+ "constraint_following",
+ "intent_understanding"
+ ]
+ },
+ "tool_use_agents": {
+ "description": "Using tools and acting as an agent",
+ "hypothesized_directions": [
+ "tool_selection",
+ "api_usage",
+ "action_planning",
+ "error_recovery"
+ ]
+ },
+ "language_understanding": {
+ "description": "Core linguistic competence",
+ "hypothesized_directions": [
+ "syntactic_knowledge",
+ "semantic_understanding",
+ "pragmatic_competence",
+ "lexical_knowledge"
+ ]
+ },
+ "translation": {
+ "description": "Cross-lingual translation",
+ "hypothesized_directions": [
+ "translation_accuracy",
+ "fluency",
+ "terminology_handling",
+ "style_preservation"
+ ]
+ },
+ "ethics_values": {
+ "description": "Ethical reasoning and value alignment",
+ "hypothesized_directions": [
+ "moral_reasoning",
+ "value_consistency",
+ "norm_awareness",
+ "ethical_sensitivity"
+ ]
+ }
+ }
wisent/parameters/lm_eval/repair_plan.json
@@ -0,0 +1,282 @@
+ {
+ "summary": {
+ "total_tested": 321,
+ "ok": 179,
+ "failed": 142,
+ "success_rate": "56%"
+ },
+ "case_sensitive_fix": {
+ "description": "Task exists in lm-eval but our code uses wrong case",
+ "repair": "Fix case in manifest or loader",
+ "tasks": [
+ {"our_name": "AraDiCE_ArabicMMLU_lev", "correct_name": "AraDiCE_ArabicMMLU_lev", "note": "timeout issue, not case"},
+ {"our_name": "aexams_IslamicStudies", "correct_name": "aexams_IslamicStudies", "note": "our code lowercases to aexams_islamicstudies"}
+ ]
+ },
+ "task_name_mappings": {
+ "description": "Task exists in lm-eval but under different name - verified with lm_eval --tasks",
+ "repair": "Update manifest to use correct lm-eval task name",
+ "tasks": [
+ {
+ "our_name": "mc-taco",
+ "correct_name": "mc_taco",
+ "reason": "lm_eval --tasks mc-taco fails, lm_eval --tasks mc_taco works (hyphen vs underscore)"
+ }
+ ]
+ },
+ "lm_eval_bugs": {
+ "description": "Folder exists in lm-eval/tasks but group/task name not registered - bug in lm-eval harness",
+ "repair": "Use one of the existing subtasks, or wait for lm-eval fix, or move to HF_EXTRACTORS",
+ "tasks": [
+ {
+ "our_name": "acpbench",
+ "folder": "lm_eval/tasks/acpbench",
+ "note": "Folder exists but task 'acpbench' not registered. README says task is 'acp_bench'",
+ "available_groups": ["acp_bench", "acp_bench_hard", "acp_bench_hard_with_pddl"],
+ "available_subtasks": ["acp_bool_cot_2shot", "acp_gen_2shot", "acp_mcq_cot_2shot", "acp_app_bool", "etc."]
+ },
+ {
+ "our_name": "afrimgsm",
+ "folder": "lm_eval/tasks/afrimgsm",
+ "note": "Folder README says 'afrimgsm: All afrimgsm tasks' but this group is NOT defined in any YAML. YAMLs define 'afrimgsm-irokobench' etc.",
+ "available_groups": ["afrimgsm-irokobench", "afrimgsm_cot-irokobench", "afrimgsm_tt-irokobench", "afrimgsm_tt_cot-irokobench"],
+ "available_subtasks": ["afrimgsm_tasks_prompt_1", "afrimgsm_amh_prompt_1", "etc."]
+ },
+ {
+ "our_name": "afrimmlu",
+ "folder": "lm_eval/tasks/afrimmlu",
+ "note": "Same issue as afrimgsm - folder exists, group 'afrimmlu' not registered",
+ "available_groups": ["afrimmlu-irokobench"]
+ },
+ {
+ "our_name": "llama3",
+ "folder": "lm_eval/tasks/llama3",
+ "note": "Folder exists but no 'llama3' group. Contains MMLU variants for Llama evaluation",
+ "available_groups": ["mmlu_llama", "mmlu_cot_llama", "mmlu_de_llama", "mmlu_es_llama", "mmlu_fr_llama", "etc."]
+ },
+ {
+ "our_name": "mmmlu",
+ "folder": "lm_eval/tasks/okapi/mmlu_multilingual",
+ "note": "No 'mmmlu' task. Multilingual MMLU is 'm_mmlu' group in okapi folder",
+ "available_groups": ["m_mmlu"],
+ "available_subtasks": ["m_mmlu_ar", "m_mmlu_de", "m_mmlu_es", "m_mmlu_fr", "etc."]
+ },
+ {
+ "our_name": "okapi",
+ "folder": "lm_eval/tasks/okapi",
+ "note": "Folder exists but no 'okapi' group. Contains multilingual variants of benchmarks",
+ "available_groups": ["m_arc", "m_hellaswag", "m_mmlu", "m_truthfulqa"],
+ "subfolders": ["arc_multilingual", "hellaswag_multilingual", "mmlu_multilingual", "truthfulqa_multilingual"]
+ },
+ {
+ "our_name": "sglue",
+ "folder": "lm_eval/tasks/super_glue",
+ "note": "No 'sglue' group. Super GLUE tasks are registered individually or as 'super-glue-lm-eval-v1'",
+ "available_groups": ["super-glue-lm-eval-v1", "super-glue-lm-eval-v1-seq2seq", "super-glue-t5-prompt"],
+ "available_subtasks": ["boolq", "cb", "copa", "multirc", "record", "sglue_rte", "wic", "wsc"]
+ },
+ {
+ "our_name": "superglue",
+ "folder": "lm_eval/tasks/super_glue",
+ "note": "Same as sglue - no 'superglue' group",
+ "available_groups": ["super-glue-lm-eval-v1", "super-glue-lm-eval-v1-seq2seq", "super-glue-t5-prompt"]
+ }
+ ]
+ },
+ "truly_not_in_lmeval": {
+ "description": "Task truly does not exist in lm-eval harness - need to move to HF_EXTRACTORS or remove",
+ "repair": "Move to HF_EXTRACTORS with custom data loader, or remove from manifest",
+ "tasks": [
+ "bhs",
+ "bhtc",
+ "cabbq",
+ "chain",
+ "click",
+ "code2text",
+ "coedit",
+ "discrim_eval",
+ "doc",
+ "egyhellaswag",
+ "egymmlu",
+ "epec",
+ "esbbq",
+ "evalita-sp",
+ "flan",
+ "icelandic_winogrande",
+ "libra",
+ "librusec_history",
+ "lm_syneval",
+ "long_context_multiq",
+ "longbenchv2",
+ "matreshka_names",
+ "multiblimp",
+ "multilingual",
+ "passkey",
+ "quac",
+ "random",
+ "reversed",
+ "ru_2wikimultihopqa",
+ "t0",
+ "tinybenchmarks",
+ "truthfulqa-multi",
+ "turblimp_core",
+ "twenty_newsgroups",
+ "vaxx",
+ "wmt2014"
+ ]
+ },
+ "needs_unitxt": {
+ "description": "Requires unitxt package installation",
+ "repair": "pip install unitxt",
+ "tasks": [
+ "20_newsgroups",
+ "ag_news",
+ "argument_topic",
+ "banking77",
+ "claim_stance_topic",
+ "cnn_dailymail",
+ "dbpedia_14",
+ "ethos_binary",
+ "financial_tweets",
+ "law_stack_exchange",
+ "ledgar",
+ "medical_abstracts",
+ "unfair_tos",
+ "unitxt"
+ ]
+ },
+ "import_error": {
+ "description": "Extractor module cannot be imported - likely missing dependencies or syntax error",
+ "repair": "Fix the extractor module import issues",
+ "tasks": [
+ {"name": "african_flores", "module": "flores"},
+ {"name": "afrobench_adr", "module": "afrobench"},
+ {"name": "agieval", "module": "agieval"},
+ {"name": "evalita-mp", "module": "evalita_mp"},
+ {"name": "flores_ca-eu", "module": "flores"},
+ {"name": "super-glue-t5-prompt", "module": "super_glue_t5_prompt"},
+ {"name": "super_glue-wsc-t5-prompt", "module": "super_glue_t5_prompt"},
+ {"name": "trasnlation_all_flores", "module": "flores"}
+ ]
+ },
+ "no_pairs_returned": {
+ "description": "Extractor ran but returned no contrastive pairs - likely data loading or extraction logic issue",
+ "repair": "Debug extractor to ensure pairs are generated from the dataset",
+ "tasks": [
+ "anagrams1",
+ "anagrams2",
+ "assin_entailment",
+ "cabreu_abstractive",
+ "cocoteros_va",
+ "copa_ca",
+ "cycle_letters",
+ "escola",
+ "gpt3_translation_benchmarks",
+ "kmmlu_accounting",
+ "mgsm_direct_eu",
+ "ncb",
+ "norbelebele_p0",
+ "norec_document_p0",
+ "noropenbookqa_nno_p0",
+ "norrewrite_instruct",
+ "norsummarize_instruct",
+ "openbookqa_ca",
+ "parafraseja",
+ "parafrases_gl",
+ "piqa_eu",
+ "qnlieu",
+ "summarization_gl",
+ "tatoeba_eng_nno_p0",
+ "teca",
+ "truthfulqa_gl_mc1",
+ "turkishmmlu_biology",
+ "unscramble",
+ "wikitext103",
+ "wmt16-en-ro",
+ "wmt16-ro-en",
+ "xlsum_es",
+ "xnli_gl"
+ ]
+ },
+ "timeout": {
+ "description": "Task loading exceeded 30 second timeout",
+ "repair": "Increase timeout or optimize data loading",
+ "tasks": [
+ "bigbench_generate_until",
+ "portuguese_bench",
+ "scrolls"
+ ]
+ },
+ "missing_dependencies": {
+ "description": "Missing Python packages required by lm-eval tasks",
+ "repair_commands": {
+ "tinyBenchmarks": "pip install git+https://github.com/felipemaiapolo/tinyBenchmarks",
+ "langdetect": "pip install langdetect",
+ "emoji": "pip install lm_eval[japanese_leaderboard]",
+ "jsonschema": "pip install jsonschema[format]",
+ "longbench": "pip install lm_eval[longbench]",
+ "bert_score": "pip install evaluate bert-score",
+ "minerva_math": "pip install sympy math_verify antlr4-python3-runtime==4.11",
+ "ruler": "pip install lm_eval[ruler]",
+ "noreval": "pip install sacrebleu bert_score rouge_score"
+ },
+ "tasks": [
+ {"name": "benchmarks", "dep": "tinyBenchmarks"},
+ {"name": "ifeval", "dep": "langdetect"},
+ {"name": "japanese_leaderboard", "dep": "emoji"},
+ {"name": "jsonschema_bench", "dep": "jsonschema"},
+ {"name": "longbench", "dep": "longbench"},
+ {"name": "mediqa_qa2019", "dep": "bert_score"},
+ {"name": "medtext", "dep": "bert_score"},
+ {"name": "meqsum", "dep": "bert_score"},
+ {"name": "mimic_repsum", "dep": "bert_score"},
+ {"name": "minerva_math", "dep": "minerva_math"},
+ {"name": "mts_dialog", "dep": "bert_score"},
+ {"name": "niah_single_1", "dep": "ruler"},
+ {"name": "olaph", "dep": "bert_score"},
+ {"name": "ruler", "dep": "ruler"},
+ {"name": "tinyArc", "dep": "tinyBenchmarks"},
+ {"name": "tinyBenchmarks", "dep": "tinyBenchmarks"},
+ {"name": "tinyGSM8k", "dep": "tinyBenchmarks"},
+ {"name": "tinyHellaswag", "dep": "tinyBenchmarks"},
+ {"name": "tinyMMLU", "dep": "tinyBenchmarks"},
+ {"name": "tinyTruthfulQA", "dep": "tinyBenchmarks"},
+ {"name": "tinyWinogrande", "dep": "tinyBenchmarks"},
+ {"name": "tinyarc", "dep": "tinyBenchmarks"},
+ {"name": "tinygsm8k", "dep": "tinyBenchmarks"},
+ {"name": "tinyhellaswag", "dep": "tinyBenchmarks"},
+ {"name": "tinymmlu", "dep": "tinyBenchmarks"},
+ {"name": "tinytruthfulqa", "dep": "tinyBenchmarks"},
+ {"name": "tinywinogrande", "dep": "tinyBenchmarks"}
+ ]
+ },
+ "other_errors": {
+ "description": "Various other errors requiring individual investigation",
+ "tasks": [
+ {"name": "aradice", "error": "HF dataset file not found"},
+ {"name": "basque_bench", "error": "HF dataset file not found"},
+ {"name": "hle", "error": "Fallback dataset loading not permitted"},
+ {"name": "mbpp", "error": "Multiprocessing bootstrap error"},
+ {"name": "noreval", "error": "Missing sacrebleu, bert_score, rouge_score"},
+ {"name": "norsumm_nno_p0", "error": "Missing sacrebleu, bert_score, rouge_score"},
+ {"name": "pile", "error": "Dataset disabled - the-eye.eu unavailable"},
+ {"name": "social_iqa", "error": "Dataset scripts no longer supported"},
+ {"name": "supergpqa", "error": "Fallback dataset loading not permitted"},
+ {"name": "tmlu", "error": "Extractor interface mismatch - needs lm_eval_task_data argument"}
+ ]
+ },
+ "working_benchmarks": {
+ "count": 179,
+ "examples": [
+ "ArabCulture", "aclue", "acp_bench", "advanced_ai_risk", "afrixnli",
+ "ai2_arc", "anli", "apps", "arabic_leaderboard_acva", "arabicmmlu",
+ "arithmetic", "asdiv", "babi", "bbh", "bbq", "belebele", "blimp",
+ "boolq", "cb", "ceval", "cmmlu_agronomy", "cola", "commonsense_qa",
+ "coqa", "drop", "glue", "gpqa", "gsm8k", "hellaswag", "hendrycks_ethics",
+ "humaneval", "lambada_openai", "logiqa", "math", "mathqa", "medmcqa",
+ "mmlu", "mrpc", "multirc", "mutual", "openbookqa", "piqa", "pubmedqa",
+ "qasper", "race", "sciq", "siqa", "squad_completion", "storycloze",
+ "swag", "triviaqa", "truthfulqa", "winogrande", "wsc273", "xcopa", "xnli"
+ ]
+ }
+ }
wisent/parameters/lm_eval/weak_contrastive_pairs.json
@@ -0,0 +1,38 @@
+ {
+ "description": "Benchmarks with weak or problematic contrastive pair generation",
+ "categories": {
+ "identical_pairs": {
+ "description": "Positive and negative responses are identical - extractor bug",
+ "benchmarks": [
+ "paloma"
+ ]
+ },
+ "lazy_math_negative": {
+ "description": "Negative is just 'correct_answer + 1' instead of meaningful wrong answer",
+ "benchmarks": [
+ "hendrycks_math",
+ "math500",
+ "livemathbench_cnmo_en",
+ "polymath_en_medium",
+ "polymath_zh_medium",
+ "polymath_en_high",
+ "polymath_zh_high"
+ ]
+ },
+ "hedging_negative": {
+ "description": "Negative is 'I believe the answer is not X' instead of actual wrong answer",
+ "benchmarks": [
+ "simpleqa",
+ "frames"
+ ]
+ },
+ "negation_pattern": {
+ "description": "Negative is 'not X' pattern - acceptable for some benchmarks but weak",
+ "benchmarks": [
+ "babi"
+ ]
+ }
+ },
+ "total_weak_benchmarks": 12,
+ "notes": "These benchmarks technically work but have suboptimal contrastive pair quality. Consider improving extractors to generate more meaningful negative examples."
+ }
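Of the new parameter files, wisent/parameters/lm_eval/repair_plan.json is the largest: it groups the 142 failing lm-eval tasks by failure category, each with a description, a suggested repair, and the affected tasks. Below is a minimal sketch of how such a file could be inspected after installing the wheel; it assumes the JSON ships as package data under the installed wisent package, and the snippet is illustrative rather than part of wisent's documented API.

```python
# Illustrative only, not part of wisent's documented API. Assumes the wheel is
# installed and repair_plan.json is shipped as package data under
# wisent/parameters/lm_eval/ (the path listed in this diff's file list).
import json
from importlib.resources import files

plan_path = files("wisent") / "parameters" / "lm_eval" / "repair_plan.json"
plan = json.loads(plan_path.read_text(encoding="utf-8"))

print("summary:", plan["summary"])  # e.g. {'total_tested': 321, 'ok': 179, ...}

# Every other top-level key is a failure category with a description and the
# tasks it covers (either plain task names or small dicts with extra details).
for category, info in plan.items():
    if category in ("summary", "working_benchmarks"):
        continue
    tasks = info.get("tasks", [])
    print(f"{category}: {len(tasks)} tasks - {info.get('description', '')}")
```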