wisent 0.7.701__py3-none-any.whl → 0.7.901__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between these versions as they appear in the public registry.
- wisent/__init__.py +1 -1
- wisent/core/activations/activation_cache.py +393 -0
- wisent/core/activations/activations.py +3 -3
- wisent/core/activations/activations_collector.py +9 -5
- wisent/core/activations/classifier_inference_strategy.py +12 -11
- wisent/core/activations/extraction_strategy.py +256 -84
- wisent/core/classifiers/classifiers/core/atoms.py +3 -2
- wisent/core/cli/__init__.py +2 -1
- wisent/core/cli/agent/apply_steering.py +5 -7
- wisent/core/cli/agent/train_classifier.py +19 -7
- wisent/core/cli/check_linearity.py +35 -3
- wisent/core/cli/cluster_benchmarks.py +4 -6
- wisent/core/cli/create_steering_vector.py +6 -4
- wisent/core/cli/diagnose_vectors.py +7 -4
- wisent/core/cli/estimate_unified_goodness_time.py +6 -4
- wisent/core/cli/generate_pairs_from_task.py +9 -56
- wisent/core/cli/geometry_search.py +137 -0
- wisent/core/cli/get_activations.py +1 -1
- wisent/core/cli/method_optimizer.py +4 -3
- wisent/core/cli/modify_weights.py +3 -2
- wisent/core/cli/optimize_sample_size.py +1 -1
- wisent/core/cli/optimize_steering.py +14 -16
- wisent/core/cli/optimize_weights.py +2 -1
- wisent/core/cli/preview_pairs.py +203 -0
- wisent/core/cli/steering_method_trainer.py +3 -3
- wisent/core/cli/tasks.py +19 -76
- wisent/core/cli/train_unified_goodness.py +3 -3
- wisent/core/contrastive_pairs/diagnostics/control_vectors.py +4 -4
- wisent/core/contrastive_pairs/diagnostics/linearity.py +7 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/agentic_search.py +37 -347
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/aider_polyglot.py +113 -136
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/codeforces.py +2 -12
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/coding_benchmarks.py +124 -504
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/faithbench.py +40 -63
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/flames.py +46 -89
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/flores.py +15 -4
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/frames.py +36 -20
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/hallucinations_leaderboard.py +3 -45
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/livemathbench.py +42 -4
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/longform_writing.py +2 -112
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/math500.py +39 -4
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/medium_priority_benchmarks.py +475 -525
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/mercury.py +65 -42
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/olympiadbench.py +2 -12
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/planbench.py +78 -219
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/polymath.py +37 -4
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/recode.py +84 -69
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/refusalbench.py +168 -160
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/simpleqa.py +44 -25
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/tau_bench.py +3 -103
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/toolbench.py +3 -97
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/toolemu.py +48 -182
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_extractor_manifest.py +3 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_extractor_registry.py +19 -1
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/aclue.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/acp_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/acp_bench_hard.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/advanced.py +2 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/aexams.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/afrimmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/afrixnli.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabculture.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic_exams.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic_leaderboard_complete.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic_leaderboard_light.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabicmmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/aradice.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arc.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arc_challenge.py +1 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arc_easy.py +1 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arithmetic.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/asdiv.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/babi.py +36 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/basque_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bbq.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/belebele.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/benchmarks.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bertaqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bhs.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bhtc.py +3 -5
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/blimp.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/blimp_nl.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/boolq.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/c4.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cabbq.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/careqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/catalan_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/catalanqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/catcola.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cb.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ceval.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ceval_valid.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/chain.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/chartqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/claim.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/click.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cmmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cnn.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cocoteros.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/coedit.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/commonsense.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/commonsense_qa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/copa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/copal_id.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/coqa.py +3 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/csatqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cycle.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/darija_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/darijahellaswag.py +2 -6
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/darijammlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/dbpedia.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/discrim_eval.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/doc.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/drop.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/epec.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq_bench_ca.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq_bench_es.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/esbbq.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ethics.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_exams.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_proficiency.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_reading.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_trivia.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/evalita_llm.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/financial.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/flan.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/french_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/galician_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/gaokao.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/glianorex.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/global_mmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/global_piqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/gpt3.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/groundcocoa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/haerae.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/headqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hellaswag.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hendrycks_ethics.py +5 -9
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hendrycks_math.py +63 -16
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/histoires_morales.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hrm8k.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/humaneval_infilling.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/icelandic_winogrande.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/inverse.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/inverse_scaling.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ja.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/japanese_leaderboard.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/japanese_leaderboard_mc.py +1 -1
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kmmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kobest.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kormedmcqa.py +5 -17
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lambada_cloze.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lambada_multilingual.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/law.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/leaderboard.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lingoly.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/llama3.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lm_syneval.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/logiqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/logiqa2.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/longbench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/longbenchv2.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mastermind.py +2 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mc-taco.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/med_concepts_qa.py +2 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/meddialog.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/medical.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/medmcqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/medqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mela.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/metabench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/minerva_math.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mmlusr.py +3 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mrpc.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/multiblimp.py +2 -5
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/multirc.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mutual.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/non.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_exact.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_gen_exact.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_mc.py +4 -8
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_mc_log_likelihoods.py +4 -8
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/nq_open.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_arc_multilingual.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_hellaswag_multilingual.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_mmlu_multilingual.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_truthfulqa_multilingual.py +2 -5
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/olaph.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/openbookqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/option.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/parafraseja.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/parafrases.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/paws.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/paws_x.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/pawsx.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/persona.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/phrases.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/pile.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/piqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/portuguese_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/prompt.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/prost.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/pubmedqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qa4mre.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qasper.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qasper_bool.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qnli.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qnlieu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qqp.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/race.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/random.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/record.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/reversed.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/rte.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ruler.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sciq.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/score.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/scrolls.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/scrolls_mc.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/self.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sglue.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sglue_rte.py +2 -1
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/siqa.py +4 -7
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/social_iqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/spanish_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/storycloze.py +2 -6
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/summarization.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/super.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/super_glue.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/swag.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/swde.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sycophancy.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/t0.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/teca.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinyarc.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinybenchmarks.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinygsm8k.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinyhellaswag.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinymmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinytruthfulqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinywinogrande.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tmmluplus.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/triviaqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/truthfulqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/truthfulqa_mc1.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/truthfulqa_mc2.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/turblimp_core.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/turkishmmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/turkishmmlu_mc.py +0 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/unscramble.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/vaxx.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/webqs.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wic.py +3 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/winogrande.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wmdp.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wnli.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wsc.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wsc273.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xcopa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xlsum.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xnli.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xquad.py +2 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xstorycloze.py +2 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xwinograd.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/zhoblimp.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_pairs_generation.py +173 -6
- wisent/core/data_loaders/loaders/lm_loader.py +12 -1
- wisent/core/geometry_runner.py +995 -0
- wisent/core/geometry_search_space.py +237 -0
- wisent/core/hyperparameter_optimizer.py +1 -1
- wisent/core/main.py +3 -0
- wisent/core/models/core/atoms.py +5 -3
- wisent/core/models/wisent_model.py +1 -1
- wisent/core/optuna/classifier/optuna_classifier_optimizer.py +2 -2
- wisent/core/parser_arguments/check_linearity_parser.py +12 -2
- wisent/core/parser_arguments/generate_vector_from_synthetic_parser.py +2 -2
- wisent/core/parser_arguments/generate_vector_from_task_parser.py +2 -2
- wisent/core/parser_arguments/geometry_search_parser.py +61 -0
- wisent/core/parser_arguments/main_parser.py +8 -0
- wisent/core/parser_arguments/train_unified_goodness_parser.py +2 -2
- wisent/core/steering.py +5 -3
- wisent/core/steering_methods/methods/hyperplane.py +2 -1
- wisent/core/synthetic/generators/nonsense_generator.py +30 -18
- wisent/core/trainers/steering_trainer.py +2 -2
- wisent/core/utils/device.py +27 -27
- wisent/core/utils/layer_combinations.py +70 -0
- wisent/examples/__init__.py +1 -0
- wisent/examples/scripts/__init__.py +1 -0
- wisent/examples/scripts/count_all_benchmarks.py +121 -0
- wisent/examples/scripts/discover_directions.py +469 -0
- wisent/examples/scripts/extract_benchmark_info.py +71 -0
- wisent/examples/scripts/generate_paper_data.py +384 -0
- wisent/examples/scripts/intervention_validation.py +626 -0
- wisent/examples/scripts/results/test_AraDiCE_ArabicMMLU_lev_evaluation.json +324 -0
- wisent/examples/scripts/results/test_AraDiCE_ArabicMMLU_lev_pairs.json +92 -0
- wisent/examples/scripts/results/test_aexams_IslamicStudies_evaluation.json +324 -0
- wisent/examples/scripts/results/test_aexams_IslamicStudies_pairs.json +92 -0
- wisent/examples/scripts/results/test_afrimgsm_pairs.json +92 -0
- wisent/examples/scripts/results/test_afrimmlu_evaluation.json +324 -0
- wisent/examples/scripts/results/test_afrimmlu_pairs.json +92 -0
- wisent/examples/scripts/search_all_short_names.py +31 -0
- wisent/examples/scripts/test_all_benchmarks.py +138 -0
- wisent/examples/scripts/test_all_benchmarks_new.py +28 -0
- wisent/examples/scripts/test_contrastive_pairs_all_supported.py +230 -0
- wisent/examples/scripts/test_nonsense_baseline.py +261 -0
- wisent/examples/scripts/test_one_benchmark.py +324 -0
- wisent/examples/scripts/test_one_coding_benchmark.py +293 -0
- wisent/examples/scripts/threshold_analysis.py +434 -0
- wisent/examples/scripts/visualization_gallery.py +582 -0
- wisent/parameters/lm_eval/broken_in_lm_eval.json +179 -2
- wisent/parameters/lm_eval/category_directions.json +137 -0
- wisent/parameters/lm_eval/repair_plan.json +282 -0
- wisent/parameters/lm_eval/weak_contrastive_pairs.json +38 -0
- wisent/parameters/lm_eval/working_benchmarks.json +206 -0
- wisent/parameters/lm_eval/working_benchmarks_categorized.json +236 -0
- wisent/tests/test_detector_accuracy.py +1 -1
- wisent/tests/visualize_geometry.py +1 -1
- {wisent-0.7.701.dist-info → wisent-0.7.901.dist-info}/METADATA +1 -1
- {wisent-0.7.701.dist-info → wisent-0.7.901.dist-info}/RECORD +329 -295
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/browsecomp.py +0 -245
- {wisent-0.7.701.dist-info → wisent-0.7.901.dist-info}/WHEEL +0 -0
- {wisent-0.7.701.dist-info → wisent-0.7.901.dist-info}/entry_points.txt +0 -0
- {wisent-0.7.701.dist-info → wisent-0.7.901.dist-info}/licenses/LICENSE +0 -0
- {wisent-0.7.701.dist-info → wisent-0.7.901.dist-info}/top_level.txt +0 -0
wisent/examples/scripts/generate_paper_data.py
@@ -0,0 +1,384 @@
+"""
+Generate data for RepScan paper.
+
+Produces:
+1. Main results table (LaTeX)
+2. Per-category summary table
+3. Benchmark list with contrastive definitions
+4. Data for figures (JSON)
+
+Usage:
+    python -m wisent.examples.scripts.generate_paper_data
+"""
+
+import json
+import subprocess
+from pathlib import Path
+from typing import Dict, List, Any
+from collections import defaultdict
+
+S3_BUCKET = "wisent-bucket"
+
+
+def download_all_results(output_dir: Path) -> Dict[str, Path]:
+    """Download all results from S3."""
+    output_dir.mkdir(parents=True, exist_ok=True)
+
+    subprocess.run(
+        ["aws", "s3", "sync",
+         f"s3://{S3_BUCKET}/direction_discovery/",
+         str(output_dir),
+         "--quiet"],
+        check=False,
+        capture_output=True,
+    )
+
+    models = {}
+    for d in output_dir.iterdir():
+        if d.is_dir():
+            models[d.name] = d
+
+    return models
+
+
+def load_model_results(model_dir: Path) -> Dict[str, Any]:
+    """Load all category results for a model."""
+    results = {}
+    for f in model_dir.glob("*.json"):
+        if "summary" in f.name:
+            continue
+        category = f.stem.split("_")[-1]
+        with open(f) as fp:
+            results[category] = json.load(fp)
+    return results
+
+
+def compute_diagnosis(signal: float, linear: float) -> str:
+    """Compute diagnosis from signal and linear probe accuracy."""
+    if signal < 0.6:
+        return "NO_SIGNAL"
+    elif linear > 0.6 and (signal - linear) < 0.15:
+        return "LINEAR"
+    else:
+        return "NONLINEAR"
+
+
+def generate_main_results_table(all_models: Dict[str, Dict]) -> str:
+    """Generate main results table in LaTeX."""
+    lines = [
+        r"\begin{table}[t]",
+        r"\centering",
+        r"\caption{RepScan diagnosis results across models and categories. Signal = MLP CV accuracy, Linear = Linear probe CV accuracy, kNN = k-NN CV accuracy (k=10). Diagnosis: LINEAR indicates CAA-viable representation, NONLINEAR indicates manifold structure, NO\_SIGNAL indicates no detectable representation.}",
+        r"\label{tab:main_results}",
+        r"\small",
+        r"\begin{tabular}{llccccl}",
+        r"\toprule",
+        r"\textbf{Model} & \textbf{Category} & \textbf{Signal} & \textbf{Linear} & \textbf{kNN} & \textbf{Gap} & \textbf{Diagnosis} \\",
+        r"\midrule",
+    ]
+
+    for model_name, categories in sorted(all_models.items()):
+        model_short = model_name.replace("meta-llama_", "").replace("Qwen_", "").replace("openai_", "")
+        first_row = True
+
+        for cat_name in sorted(categories.keys()):
+            data = categories[cat_name]
+            results = data.get("results", [])
+            if not results:
+                continue
+
+            n = len(results)
+            avg_signal = sum(r["signal_strength"] for r in results) / n
+            avg_linear = sum(r["linear_probe_accuracy"] for r in results) / n
+            avg_knn = sum(r["nonlinear_metrics"]["knn_accuracy_k10"] for r in results) / n
+            gap = avg_signal - avg_linear
+            diagnosis = compute_diagnosis(avg_signal, avg_linear)
+
+            # Color coding for diagnosis
+            if diagnosis == "LINEAR":
+                diag_str = r"\textcolor{green!60!black}{LINEAR}"
+            elif diagnosis == "NONLINEAR":
+                diag_str = r"\textcolor{blue}{NONLINEAR}"
+            else:
+                diag_str = r"\textcolor{gray}{NO\_SIGNAL}"
+
+            model_col = model_short if first_row else ""
+            first_row = False
+
+            lines.append(f"{model_col} & {cat_name} & {avg_signal:.2f} & {avg_linear:.2f} & {avg_knn:.2f} & {gap:+.2f} & {diag_str} \\\\")
+
+        lines.append(r"\midrule")
+
+    lines[-1] = r"\bottomrule" # Replace last midrule
+    lines.extend([
+        r"\end{tabular}",
+        r"\end{table}",
+    ])
+
+    return "\n".join(lines)
+
+
+def generate_benchmark_table(all_models: Dict[str, Dict]) -> str:
+    """Generate benchmark list with contrastive definitions."""
+    # Collect unique benchmarks
+    benchmarks = defaultdict(lambda: {"categories": set(), "signal": [], "linear": [], "knn": []})
+
+    for model_name, categories in all_models.items():
+        for cat_name, data in categories.items():
+            results = data.get("results", [])
+            seen = set()
+            for r in results:
+                bench = r["benchmark"]
+                if bench in seen:
+                    continue
+                seen.add(bench)
+
+                benchmarks[bench]["categories"].add(cat_name)
+                benchmarks[bench]["signal"].append(r["signal_strength"])
+                benchmarks[bench]["linear"].append(r["linear_probe_accuracy"])
+                benchmarks[bench]["knn"].append(r["nonlinear_metrics"]["knn_accuracy_k10"])
+
+    lines = [
+        r"\begin{longtable}{p{3cm}p{2.5cm}cccl}",
+        r"\caption{Per-benchmark RepScan results (averaged across models and strategies).} \label{tab:benchmarks} \\",
+        r"\toprule",
+        r"\textbf{Benchmark} & \textbf{Category} & \textbf{Signal} & \textbf{Linear} & \textbf{kNN} & \textbf{Diagnosis} \\",
+        r"\midrule",
+        r"\endfirsthead",
+        r"\multicolumn{6}{c}{\tablename\ \thetable{} -- continued} \\",
+        r"\toprule",
+        r"\textbf{Benchmark} & \textbf{Category} & \textbf{Signal} & \textbf{Linear} & \textbf{kNN} & \textbf{Diagnosis} \\",
+        r"\midrule",
+        r"\endhead",
+    ]
+
+    for bench, data in sorted(benchmarks.items(), key=lambda x: -max(x[1]["signal"]) if x[1]["signal"] else 0):
+        cats = ", ".join(sorted(data["categories"]))[:20]
+        avg_signal = sum(data["signal"]) / len(data["signal"]) if data["signal"] else 0
+        avg_linear = sum(data["linear"]) / len(data["linear"]) if data["linear"] else 0
+        avg_knn = sum(data["knn"]) / len(data["knn"]) if data["knn"] else 0
+        diagnosis = compute_diagnosis(avg_signal, avg_linear)
+
+        if diagnosis == "LINEAR":
+            diag_str = r"\textcolor{green!60!black}{LINEAR}"
+        elif diagnosis == "NONLINEAR":
+            diag_str = r"\textcolor{blue}{NONLINEAR}"
+        else:
+            diag_str = r"\textcolor{gray}{NO\_SIG}"
+
+        bench_escaped = bench.replace("_", r"\_")
+        lines.append(f"{bench_escaped} & {cats} & {avg_signal:.2f} & {avg_linear:.2f} & {avg_knn:.2f} & {diag_str} \\\\")
+
+    lines.extend([
+        r"\bottomrule",
+        r"\end{longtable}",
+    ])
+
+    return "\n".join(lines)
+
+
+def generate_figure_data(all_models: Dict[str, Dict]) -> Dict[str, Any]:
+    """Generate JSON data for figures."""
+    figure_data = {
+        "diagnosis_distribution": {"LINEAR": 0, "NONLINEAR": 0, "NO_SIGNAL": 0},
+        "per_category": {},
+        "top_benchmarks": {"linear": [], "nonlinear": [], "no_signal": []},
+        "metrics_by_diagnosis": {
+            "LINEAR": {"signal": [], "linear": [], "knn": [], "mmd": []},
+            "NONLINEAR": {"signal": [], "linear": [], "knn": [], "mmd": []},
+            "NO_SIGNAL": {"signal": [], "linear": [], "knn": [], "mmd": []},
+        },
+    }
+
+    all_results = []
+
+    for model_name, categories in all_models.items():
+        for cat_name, data in categories.items():
+            results = data.get("results", [])
+            all_results.extend(results)
+
+            if cat_name not in figure_data["per_category"]:
+                figure_data["per_category"][cat_name] = {
+                    "signal": [], "linear": [], "knn": []
+                }
+
+            for r in results:
+                signal = r["signal_strength"]
+                linear = r["linear_probe_accuracy"]
+                knn = r["nonlinear_metrics"]["knn_accuracy_k10"]
+                mmd = r["nonlinear_metrics"]["mmd_rbf"]
+                diagnosis = compute_diagnosis(signal, linear)
+
+                figure_data["diagnosis_distribution"][diagnosis] += 1
+                figure_data["metrics_by_diagnosis"][diagnosis]["signal"].append(signal)
+                figure_data["metrics_by_diagnosis"][diagnosis]["linear"].append(linear)
+                figure_data["metrics_by_diagnosis"][diagnosis]["knn"].append(knn)
+                figure_data["metrics_by_diagnosis"][diagnosis]["mmd"].append(mmd)
+
+                figure_data["per_category"][cat_name]["signal"].append(signal)
+                figure_data["per_category"][cat_name]["linear"].append(linear)
+                figure_data["per_category"][cat_name]["knn"].append(knn)
+
+    # Compute averages
+    for diag in figure_data["metrics_by_diagnosis"]:
+        for metric in list(figure_data["metrics_by_diagnosis"][diag].keys()):
+            values = figure_data["metrics_by_diagnosis"][diag][metric]
+            if values and isinstance(values, list):
+                figure_data["metrics_by_diagnosis"][diag][f"{metric}_mean"] = sum(values) / len(values)
+                figure_data["metrics_by_diagnosis"][diag][f"{metric}_std"] = (
+                    sum((v - sum(values)/len(values))**2 for v in values) / len(values)
+                ) ** 0.5
+
+    # Top benchmarks per diagnosis
+    benchmarks_by_diag = {"LINEAR": [], "NONLINEAR": [], "NO_SIGNAL": []}
+    seen = set()
+
+    for r in all_results:
+        bench = r["benchmark"]
+        if bench in seen:
+            continue
+        seen.add(bench)
+
+        signal = r["signal_strength"]
+        linear = r["linear_probe_accuracy"]
+        knn = r["nonlinear_metrics"]["knn_accuracy_k10"]
+        diagnosis = compute_diagnosis(signal, linear)
+
+        benchmarks_by_diag[diagnosis].append({
+            "benchmark": bench,
+            "signal": signal,
+            "linear": linear,
+            "knn": knn,
+            "gap": knn - linear,
+        })
+
+    # Sort and take top 5
+    benchmarks_by_diag["LINEAR"].sort(key=lambda x: x["linear"], reverse=True)
+    benchmarks_by_diag["NONLINEAR"].sort(key=lambda x: x["gap"], reverse=True)
+    benchmarks_by_diag["NO_SIGNAL"].sort(key=lambda x: x["signal"])
+
+    figure_data["top_benchmarks"] = {
+        diag: benches[:5] for diag, benches in benchmarks_by_diag.items()
+    }
+
+    return figure_data
+
+
+def generate_summary_statistics(all_models: Dict[str, Dict]) -> str:
+    """Generate summary statistics for paper text."""
+    total_results = 0
+    total_linear = 0
+    total_nonlinear = 0
+    total_no_signal = 0
+
+    categories = set()
+    benchmarks = set()
+
+    for model_name, model_categories in all_models.items():
+        for cat_name, data in model_categories.items():
+            categories.add(cat_name)
+            results = data.get("results", [])
+            total_results += len(results)
+
+            for r in results:
+                benchmarks.add(r["benchmark"])
+                signal = r["signal_strength"]
+                linear = r["linear_probe_accuracy"]
+                diagnosis = compute_diagnosis(signal, linear)
+
+                if diagnosis == "LINEAR":
+                    total_linear += 1
+                elif diagnosis == "NONLINEAR":
+                    total_nonlinear += 1
+                else:
+                    total_no_signal += 1
+
+    text = f"""
+## Summary Statistics for Paper
+
+- **Total tests**: {total_results:,}
+- **Models tested**: {len(all_models)}
+- **Categories**: {len(categories)} ({', '.join(sorted(categories))})
+- **Unique benchmarks**: {len(benchmarks)}
+
+### Diagnosis Distribution:
+- **LINEAR (CAA-viable)**: {total_linear:,} ({100*total_linear/total_results:.1f}%)
+- **NONLINEAR (manifold)**: {total_nonlinear:,} ({100*total_nonlinear/total_results:.1f}%)
+- **NO_SIGNAL**: {total_no_signal:,} ({100*total_no_signal/total_results:.1f}%)
+
+### Key Findings:
+1. {100*total_linear/total_results:.0f}% of benchmarks have LINEAR representations suitable for CAA
+2. {100*total_nonlinear/total_results:.0f}% have NONLINEAR representations requiring different methods
+3. {100*total_no_signal/total_results:.0f}% show no detectable signal
+"""
+    return text
+
+
+def main():
+    """Generate all paper data."""
+    print("=" * 70)
+    print("GENERATING PAPER DATA")
+    print("=" * 70)
+
+    output_dir = Path("/tmp/paper_data")
+    output_dir.mkdir(parents=True, exist_ok=True)
+
+    # Download results
+    print("\n1. Downloading results from S3...")
+    results_dir = output_dir / "results"
+    models = download_all_results(results_dir)
+    print(f" Found {len(models)} models: {list(models.keys())}")
+
+    # Load all results
+    print("\n2. Loading results...")
+    all_models = {}
+    for model_name, model_dir in models.items():
+        all_models[model_name] = load_model_results(model_dir)
+        print(f" {model_name}: {len(all_models[model_name])} categories")
+
+    # Generate main table
+    print("\n3. Generating main results table...")
+    main_table = generate_main_results_table(all_models)
+    with open(output_dir / "main_results_table.tex", "w") as f:
+        f.write(main_table)
+    print(f" Saved: {output_dir / 'main_results_table.tex'}")
+
+    # Generate benchmark table
+    print("\n4. Generating benchmark table...")
+    bench_table = generate_benchmark_table(all_models)
+    with open(output_dir / "benchmark_table.tex", "w") as f:
+        f.write(bench_table)
+    print(f" Saved: {output_dir / 'benchmark_table.tex'}")
+
+    # Generate figure data
+    print("\n5. Generating figure data...")
+    figure_data = generate_figure_data(all_models)
+    with open(output_dir / "figure_data.json", "w") as f:
+        json.dump(figure_data, f, indent=2)
+    print(f" Saved: {output_dir / 'figure_data.json'}")
+
+    # Generate summary statistics
+    print("\n6. Generating summary statistics...")
+    summary = generate_summary_statistics(all_models)
+    with open(output_dir / "summary_statistics.md", "w") as f:
+        f.write(summary)
+    print(f" Saved: {output_dir / 'summary_statistics.md'}")
+    print(summary)
+
+    # Upload to S3
+    print("\n7. Uploading to S3...")
+    for f in output_dir.glob("*"):
+        if f.is_file():
+            subprocess.run(
+                ["aws", "s3", "cp", str(f), f"s3://{S3_BUCKET}/paper_data/{f.name}", "--quiet"],
+                check=False,
+            )
+
+    print("\n" + "=" * 70)
+    print("PAPER DATA GENERATION COMPLETE")
+    print("=" * 70)
+    print(f"Output directory: {output_dir}")
+
+
+if __name__ == "__main__":
+    main()
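
The diagnosis logic in the new generate_paper_data.py is concentrated in compute_diagnosis: a signal strength below 0.6 yields NO_SIGNAL; otherwise a linear-probe accuracy above 0.6 that trails the signal by less than 0.15 yields LINEAR, and anything else NONLINEAR. As a minimal sketch of how those labels fall out, with hypothetical metric values (not taken from any shipped results file) and assuming the wisent.examples.scripts package added in this release is importable:

    from wisent.examples.scripts.generate_paper_data import compute_diagnosis

    # Hypothetical averages, chosen only to hit each branch of the thresholds above.
    print(compute_diagnosis(0.55, 0.80))  # NO_SIGNAL: signal under the 0.6 floor
    print(compute_diagnosis(0.85, 0.80))  # LINEAR: probe > 0.6 and gap 0.05 < 0.15
    print(compute_diagnosis(0.90, 0.55))  # NONLINEAR: signal present but the linear probe lags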