wisent 0.7.379 (wisent-0.7.379-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of wisent might be problematic.
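The listing below enumerates every file added by this wheel, with per-file added-line counts in the usual "+N -0" diff notation. As a minimal sketch, the same kind of listing can be reproduced locally with Python's standard library; the wheel filename and the `pip download` step below are assumptions about how you obtain the file, not part of this diff.

```python
# Sketch: list .py files in a locally downloaded wheel with approximate line counts.
# Assumes the wheel was fetched first, e.g.:
#   pip download wisent==0.7.379 --no-deps
from zipfile import ZipFile

WHEEL_PATH = "wisent-0.7.379-py3-none-any.whl"  # hypothetical local path

with ZipFile(WHEEL_PATH) as wheel:
    for info in wheel.infolist():
        if info.filename.endswith(".py"):
            # Approximate added-line count, mirroring the "+N -0" entries below.
            line_count = wheel.read(info.filename).count(b"\n")
            print(f"{info.filename} +{line_count} -0")
```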
- wisent/__init__.py +64 -0
- wisent/cli.py +114 -0
- wisent/core/__init__.py +40 -0
- wisent/core/activations/__init__.py +26 -0
- wisent/core/activations/activations.py +97 -0
- wisent/core/activations/activations_collector.py +506 -0
- wisent/core/activations/core/__init__.py +0 -0
- wisent/core/activations/core/atoms.py +219 -0
- wisent/core/activations/prompt_construction_strategy.py +47 -0
- wisent/core/adapters/__init__.py +22 -0
- wisent/core/adapters/audio.py +616 -0
- wisent/core/adapters/base.py +420 -0
- wisent/core/adapters/multimodal.py +738 -0
- wisent/core/adapters/robotics.py +643 -0
- wisent/core/adapters/text.py +441 -0
- wisent/core/adapters/video.py +555 -0
- wisent/core/agent/__init__.py +1 -0
- wisent/core/agent/budget.py +644 -0
- wisent/core/agent/device_benchmarks.py +691 -0
- wisent/core/agent/diagnose/__init__.py +1 -0
- wisent/core/agent/diagnose/agent_classifier_decision.py +641 -0
- wisent/core/agent/diagnose/classifier_marketplace.py +554 -0
- wisent/core/agent/diagnose/create_classifier.py +1155 -0
- wisent/core/agent/diagnose/response_diagnostics.py +273 -0
- wisent/core/agent/diagnose/select_classifiers.py +507 -0
- wisent/core/agent/diagnose/synthetic_classifier_option.py +755 -0
- wisent/core/agent/diagnose/tasks/__init__.py +33 -0
- wisent/core/agent/diagnose/tasks/task_manager.py +1453 -0
- wisent/core/agent/diagnose/tasks/task_relevance.py +94 -0
- wisent/core/agent/diagnose/tasks/task_selector.py +151 -0
- wisent/core/agent/diagnose.py +249 -0
- wisent/core/agent/steer.py +215 -0
- wisent/core/agent/timeout.py +134 -0
- wisent/core/autonomous_agent.py +1158 -0
- wisent/core/benchmark_extractors.py +372 -0
- wisent/core/benchmark_registry.py +151 -0
- wisent/core/bigcode_extractors.py +26 -0
- wisent/core/bigcode_integration.py +886 -0
- wisent/core/branding.py +108 -0
- wisent/core/classifier/__init__.py +1 -0
- wisent/core/classifier/models/__init__.py +1 -0
- wisent/core/classifiers/__init__.py +1 -0
- wisent/core/classifiers/classifiers/__init__.py +0 -0
- wisent/core/classifiers/classifiers/core/__init__.py +0 -0
- wisent/core/classifiers/classifiers/core/atoms.py +748 -0
- wisent/core/classifiers/classifiers/models/__init__.py +0 -0
- wisent/core/classifiers/classifiers/models/logistic.py +29 -0
- wisent/core/classifiers/classifiers/models/mlp.py +47 -0
- wisent/core/classifiers/classifiers/rotator.py +137 -0
- wisent/core/classifiers/core/__init__.py +1 -0
- wisent/core/classifiers/models/__init__.py +1 -0
- wisent/core/classifiers/pipeline_steps/__init__.py +1 -0
- wisent/core/cli/__init__.py +26 -0
- wisent/core/cli/agent/__init__.py +15 -0
- wisent/core/cli/agent/apply_steering.py +192 -0
- wisent/core/cli/agent/evaluate_response.py +128 -0
- wisent/core/cli/agent/generate_synthetic_pairs.py +123 -0
- wisent/core/cli/agent/main.py +139 -0
- wisent/core/cli/agent/train_classifier.py +173 -0
- wisent/core/cli/check_linearity.py +126 -0
- wisent/core/cli/create_steering_vector.py +304 -0
- wisent/core/cli/diagnose_pairs.py +153 -0
- wisent/core/cli/diagnose_vectors.py +404 -0
- wisent/core/cli/estimate_unified_goodness_time.py +428 -0
- wisent/core/cli/evaluate_refusal.py +241 -0
- wisent/core/cli/evaluate_responses.py +926 -0
- wisent/core/cli/generate_humanization_pairs.py +128 -0
- wisent/core/cli/generate_pairs.py +175 -0
- wisent/core/cli/generate_pairs_from_task.py +108 -0
- wisent/core/cli/generate_responses.py +160 -0
- wisent/core/cli/generate_vector_from_synthetic.py +217 -0
- wisent/core/cli/generate_vector_from_task.py +248 -0
- wisent/core/cli/get_activations.py +192 -0
- wisent/core/cli/inference_config.py +84 -0
- wisent/core/cli/inference_config_cli.py +54 -0
- wisent/core/cli/modify_weights.py +660 -0
- wisent/core/cli/multi_steer.py +112 -0
- wisent/core/cli/optimization_cache.py +298 -0
- wisent/core/cli/optimize.py +621 -0
- wisent/core/cli/optimize_classification.py +473 -0
- wisent/core/cli/optimize_sample_size.py +390 -0
- wisent/core/cli/optimize_steering.py +3421 -0
- wisent/core/cli/optimize_weights.py +1287 -0
- wisent/core/cli/steering_method_trainer.py +641 -0
- wisent/core/cli/steering_search_space.py +508 -0
- wisent/core/cli/tasks.py +940 -0
- wisent/core/cli/train_unified_goodness.py +681 -0
- wisent/core/cli_logger.py +22 -0
- wisent/core/config_manager.py +1731 -0
- wisent/core/contrastive_pairs/__init__.py +15 -0
- wisent/core/contrastive_pairs/core/__init__.py +0 -0
- wisent/core/contrastive_pairs/core/atoms.py +45 -0
- wisent/core/contrastive_pairs/core/buliders.py +59 -0
- wisent/core/contrastive_pairs/core/pair.py +183 -0
- wisent/core/contrastive_pairs/core/response.py +153 -0
- wisent/core/contrastive_pairs/core/serialization.py +306 -0
- wisent/core/contrastive_pairs/core/set.py +192 -0
- wisent/core/contrastive_pairs/diagnostics/__init__.py +79 -0
- wisent/core/contrastive_pairs/diagnostics/activations.py +53 -0
- wisent/core/contrastive_pairs/diagnostics/base.py +73 -0
- wisent/core/contrastive_pairs/diagnostics/control_vectors.py +1655 -0
- wisent/core/contrastive_pairs/diagnostics/coverage.py +79 -0
- wisent/core/contrastive_pairs/diagnostics/divergence.py +98 -0
- wisent/core/contrastive_pairs/diagnostics/duplicates.py +118 -0
- wisent/core/contrastive_pairs/diagnostics/linearity.py +325 -0
- wisent/core/contrastive_pairs/diagnostics/vector_quality.py +620 -0
- wisent/core/contrastive_pairs/huggingface_pairs/__init__.py +1 -0
- wisent/core/contrastive_pairs/huggingface_pairs/atoms.py +255 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_extractor_manifest.py +470 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_extractor_registry.py +136 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/__init__.py +44 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/agentbench.py +225 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/agentharm.py +267 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/agentic_search.py +444 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/aider_polyglot.py +225 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/aime.py +118 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/aime2024.py +74 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/aime2025.py +73 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/alpaca_eval.py +153 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/apps.py +182 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/arena_hard.py +179 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/atis.py +89 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/babilong.py +96 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/bangla_mmlu.py +108 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/basqueglue.py +217 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/bec2016eu.py +99 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/bfcl.py +283 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/bhtc_v2.py +87 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/browsecomp.py +245 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/chain_of_thought.py +89 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/chinese_simpleqa.py +209 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/cluewsc.py +177 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/cnn_dailymail.py +92 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/codeforces.py +378 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/codexglue.py +109 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/codexglue_code_to_text.py +15 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/codexglue_code_to_text_go.py +64 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/codexglue_code_to_text_java.py +65 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/codexglue_code_to_text_javascript.py +65 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/codexglue_code_to_text_php.py +65 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/codexglue_code_to_text_python.py +65 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/codexglue_code_to_text_ruby.py +65 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/coding_benchmarks.py +844 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/coedit_gec.py +79 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/conala.py +133 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/concode.py +111 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/dbpedia_14.py +91 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/doc_vqa.py +102 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/donotanswer.py +236 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/ds1000.py +129 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/ds_1000.py +155 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/epec_koref_bin.py +85 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/ethos_binary.py +82 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/evalita_mp.py +165 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/evalita_sp_sum_task_fp_small_p1.py +89 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/facts_grounding.py +181 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/faithbench.py +295 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/financial_tweets.py +100 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/flames.py +270 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/flan_held_in.py +98 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/flores.py +572 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/frames.py +143 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/freebase.py +99 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/get_negative_example_livecodebench.py +146 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/get_positive_example_livecodebench.py +140 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/gpt3_translation_benchmarks.py +98 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/hallucinations_leaderboard.py +389 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/halueval.py +246 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/harmbench.py +250 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/healthbench.py +181 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/hle.py +106 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/hmmt.py +117 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/humaneval.py +119 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/humanevalpack.py +102 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/instruct_humaneval.py +180 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/instructhumaneval.py +129 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/iwslt2017_ar_en.py +98 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/iwslt2017_en_ar.py +98 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/jailbreakbench.py +258 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/law_stack_exchange.py +101 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/ledgar.py +118 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/livecodebench.py +61 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/livecodebench_contrastive_pair_generator.py +491 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/livecodebench_v6.py +263 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/livemathbench.py +230 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/llama.py +96 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/longform_writing.py +285 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/m_mmlu.py +96 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/math.py +186 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/math500.py +146 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/mbpp.py +142 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/meddialog.py +79 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/medical_abstracts.py +101 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/medium_priority_benchmarks.py +787 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/mercury.py +111 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/mmlu_redux.py +194 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/mmlusr.py +108 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/multimedqa.py +99 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/multipl_e.py +109 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/multiple.py +96 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/multiple_choice.py +87 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/multiple_cpp.py +128 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/multiple_go.py +128 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/multiple_java.py +128 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/multiple_js.py +128 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/multiple_py.py +15 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/multiple_rs.py +128 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/non_greedy_robustness_agieval_aqua_rat.py +92 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/olympiadbench.py +287 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/openllm.py +99 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/option_order_robustness_agieval_aqua_rat.py +92 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/or_bench.py +300 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/penn_treebank.py +80 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/planbench.py +317 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/polymath.py +467 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/prompt_robustness_agieval_aqua_rat.py +92 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/pythia.py +99 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/recode.py +131 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/refusalbench.py +280 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/scicode.py +275 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/self_consistency.py +90 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/simpleqa.py +145 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/sorry_bench.py +211 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/stsb.py +79 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/super_glue_lm_eval_v1.py +99 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/super_glue_lm_eval_v1_seq2seq.py +98 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/super_glue_t5_prompt.py +123 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/super_gpqa.py +106 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/swe_bench.py +428 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/swe_bench_verified.py +158 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/sycophancy_eval.py +205 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/t0_eval.py +79 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/tag.py +98 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/tau_bench.py +305 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/tmlu.py +109 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/toolbench.py +360 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/toolemu.py +386 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/travelplanner.py +286 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/truthfulqa_generation.py +128 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/unfair_tos.py +83 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/vaxx_stance.py +86 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/wiceu.py +85 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/wikitext103.py +97 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/wildguard.py +280 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/wmt14_en_fr.py +97 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/wmt14_fr_en.py +97 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/wmt16_de_en.py +90 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/wmt16_en_de.py +90 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/wmt16_en_ro.py +90 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/wmt16_ro_en.py +90 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/wmt_ro_en_t5_prompt.py +90 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/xsum.py +81 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/__init__.py +0 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/atoms.py +265 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/__init__.py +472 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/aclue.py +24 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/acp.py +33 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/acpbench.py +39 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/advanced_ai_risk.py +59 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/aexams.py +14 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrimgsm.py +10 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrimmlu.py +10 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrixnli.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench.py +14 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_adr.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_afriqa.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_afrisenti.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_belebele.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_flores.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_injongointent.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_mafand.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_masakhaner.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_masakhanews.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_masakhapos.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_naijarc.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_nollysenti.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_ntrex.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_openai_mmlu.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_salt.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_sib.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_uhura_arc_easy.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/afrobench_xlsum.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/agieval.py +33 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/anli.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/arab_culture.py +24 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/arabic_leaderboard_acva.py +67 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/arabic_leaderboard_acva_light.py +67 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/arabic_leaderboard_complete.py +24 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/arabic_leaderboard_light.py +81 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/arabicmmlu.py +59 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/aradice.py +36 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/arc.py +61 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/arithmetic.py +19 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/basque_bench.py +37 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/bbh.py +121 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/bbq.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/belebele.py +293 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/bertaqa.py +25 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/bigbench.py +300 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/blimp.py +76 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/careqa.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/catalan_bench.py +43 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/ceval_valid.py +61 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/cmmlu.py +76 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/code_x_glue.py +16 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/copal_id.py +11 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/crows_pairs.py +31 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/csatqa.py +15 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/darija.py +29 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/darijammlu.py +57 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/egymmlu.py +62 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/eus.py +76 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/evalita_mp.py +93 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/fld.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/flores.py +466 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/freebase.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/french_bench.py +23 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/galician_bench.py +41 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/glianorex.py +11 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/global_mmlu.py +115 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/gpqa.py +27 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/gsm8k.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/gsm8k_platinum.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/haerae.py +14 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/headqa.py +11 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/hellaswag.py +39 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/hendrycks_ethics.py +14 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/hendrycks_math.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/hrm8k.py +20 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/inverse.py +22 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/japanese_leaderboard.py +20 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/jsonschema_bench.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/kbl.py +85 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/kmmlu.py +281 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/kobest.py +14 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/kormedmcqa.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/lambada.py +28 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/leaderboard.py +52 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/libra.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/lingoly.py +11 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/longbench.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/m.py +43 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/mastermind.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/mathqa.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/med.py +24 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/meddialog.py +12 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/medqa.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/mela.py +18 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/metabench.py +36 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/mgsm.py +44 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/minerva_math.py +16 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/mlqa.py +58 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/mmlu.py +70 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/mmlu_pro.py +23 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/mmlu_pro_plus.py +23 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/mmlu_prox.py +191 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/mmlusr.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/mmmu.py +46 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/model_written_evals.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/multiblimp.py +111 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/non.py +23 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/noreval.py +143 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/noridiom.py +20 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/nortruthfulqa.py +32 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/nrk.py +20 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/okapi.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/okapi_arc_multilingual.py +10 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/okapi_hellaswag_multilingual.py +24 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/okapi_mmlu_multilingual.py +24 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/okapi_truthfulqa_multilingual.py +34 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/paloma.py +25 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/pawsx.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/persona.py +144 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/pile.py +31 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/polemo2.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/portuguese_bench.py +31 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/prompt.py +23 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/qa4mre.py +12 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/qasper.py +11 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/ru.py +19 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/ruler.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/score.py +20 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/scrolls.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/self_consistency.py +11 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/spanish_bench.py +38 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/storycloze.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/super_glue_t5_prompt.py +17 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/tinyBenchmarks.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/tmlu.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/tmmluplus.py +80 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/translation.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/truthfulqa.py +76 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/truthfulqa_multi.py +24 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/turkishmmlu.py +30 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/unitxt.py +23 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/unscramble.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/winogender.py +16 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/wmdp.py +12 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/wmt14.py +16 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/wmt16.py +22 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/wsc273.py +9 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/xcopa.py +21 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/xnli.py +28 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/xnli_eu.py +12 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/xquad.py +22 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/xstorycloze.py +22 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/group_task_manifests/xwinograd.py +15 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_extractor_manifest.py +478 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_extractor_registry.py +140 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/__init__.py +125 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/aclue.py +171 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/acp_bench.py +207 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/acp_bench_hard.py +185 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/advanced.py +130 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/aexams.py +184 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/afrimgsm.py +98 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/afrimmlu.py +113 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/afrixnli.py +129 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/afrobench_cot.py +88 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/afrobench_mc.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ag.py +134 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/agieval.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ai2_arc.py +114 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/anagrams1.py +81 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/anagrams2.py +81 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/anli.py +140 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabculture.py +180 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic.py +98 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic_exams.py +104 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic_leaderboard_complete.py +168 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic_leaderboard_light.py +168 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabicmmlu.py +167 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/aradice.py +268 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arc.py +133 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arc_challenge.py +118 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arc_easy.py +118 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arc_gen.py +101 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arc_mc.py +106 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/argument.py +134 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arithmetic.py +114 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/asdiv.py +122 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/assin.py +103 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/babi.py +113 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/basque_bench.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/basque_bench_gen.py +168 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/basque_bench_mc.py +139 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bbh.py +133 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bbq.py +169 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/belebele.py +181 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/benchmarks.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bertaqa.py +165 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bhs.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bhtc.py +143 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bigbench.py +170 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/blimp.py +171 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/blimp_nl.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/boolq.py +117 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/boolq_seq2seq.py +117 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/c4.py +150 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cabbq.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cabreu.py +127 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/careqa.py +169 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/catalan_bench.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/catalan_bench_gen.py +119 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/catalan_bench_mc.py +113 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/catalanqa.py +171 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/catcola.py +139 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cb.py +117 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ceval.py +223 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ceval_valid.py +163 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/chain.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/chartqa.py +238 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/claim.py +151 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/click.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cmmlu.py +166 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cnn.py +144 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cocoteros.py +148 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/code2text.py +161 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/code_x_glue.py +114 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/codexglue.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/coedit.py +149 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cola.py +83 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/commonsense.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/commonsense_qa.py +127 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/copa.py +124 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/copal_id.py +169 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/coqa.py +162 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/coqcat.py +114 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/crows_pairs.py +158 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/csatqa.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cycle.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cycle_letters.py +81 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/darija_bench.py +221 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/darijahellaswag.py +174 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/darijammlu.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/dbpedia.py +157 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/discrim_eval.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/doc.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/drop.py +129 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/egyhellaswag.py +125 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/egymmlu.py +180 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/epec.py +142 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq_bench.py +194 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq_bench_ca.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq_bench_es.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/esbbq.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/escola.py +85 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ethics.py +135 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ethos.py +99 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_exams.py +225 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_proficiency.py +159 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_reading.py +159 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_trivia.py +159 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/evalita_llm.py +166 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/evalita_sp.py +109 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/fda.py +105 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/financial.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/flan.py +114 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/fld.py +143 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/french_bench.py +202 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/french_bench_mc.py +98 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/french_bench_perplexity.py +86 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/galcola.py +109 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/galician_bench.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/galician_bench_gen.py +118 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/galician_bench_mc.py +112 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/gaokao.py +141 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/glianorex.py +118 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/global_mmlu.py +171 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/global_piqa.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/glue.py +109 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/gpqa.py +161 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/gpt3.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/groundcocoa.py +184 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/gsm.py +108 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/gsm8k.py +134 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/haerae.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/headqa.py +112 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hellaswag.py +125 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hendrycks_ethics.py +225 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hendrycks_math.py +191 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/histoires_morales.py +179 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hle.py +111 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hrm8k.py +203 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/humaneval.py +124 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/humaneval_infilling.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/icelandic_winogrande.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ifeval.py +118 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/inverse.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/inverse_scaling.py +192 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/iwslt2017.py +117 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ja.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/japanese_leaderboard.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/japanese_leaderboard_gen.py +224 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/japanese_leaderboard_mc.py +120 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/jsonschema_bench.py +123 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kbl.py +140 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kmmlu.py +168 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kmmlu_cot.py +88 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kmmlu_mc.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kobest.py +165 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kormedmcqa.py +160 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lambada.py +147 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lambada_cloze.py +185 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lambada_multilingual.py +185 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lambada_multilingual_stablelm.py +141 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/law.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/leaderboard.py +194 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/libra.py +165 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lingoly.py +203 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/livemathbench.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/llama3.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lm_syneval.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/logieval.py +82 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/logiqa.py +115 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/logiqa2.py +114 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/longbench.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/longbenchv2.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mastermind.py +203 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mathqa.py +137 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mbpp.py +123 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mc-taco.py +115 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/med_concepts_qa.py +224 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/meddialog.py +180 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/medical.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mediqa_qa2019.py +123 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/medmcqa.py +169 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/medqa.py +118 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/medtext.py +108 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mela.py +96 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/meqsum.py +115 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/metabench.py +154 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mgsm.py +122 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mimic_repsum.py +140 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/minerva_math.py +172 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mlqa.py +143 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mmlu.py +144 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mmlu_cot.py +88 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mmlu_mc.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mmlu_pro.py +145 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mmlusr.py +189 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mmmu.py +150 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mnli.py +113 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/model_written_evals.py +115 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/moral_stories.py +151 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mrpc.py +111 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mts_dialog.py +118 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mts_dialog_perplexity.py +97 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/multiblimp.py +134 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/multilingual.py +106 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/multirc.py +114 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mutual.py +113 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/non.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval.py +173 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_exact.py +157 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_gen.py +277 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_gen_exact.py +165 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_mc.py +228 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_mc_log_likelihoods.py +223 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noticia.py +105 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/nq_open.py +135 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi.py +27 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_arc_multilingual.py +167 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_hellaswag_multilingual.py +174 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_mmlu_multilingual.py +162 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_truthfulqa_multilingual.py +209 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/olaph.py +186 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/olaph_perplexity.py +97 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/openbookqa.py +118 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/option.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/paloma.py +205 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/parafraseja.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/parafrases.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/paws.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/paws_x.py +154 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/pawsx.py +115 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/persona.py +246 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/phrases.py +144 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/phrases_ca_va.py +82 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/pile.py +161 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/pile_10k.py +140 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/piqa.py +116 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/polemo2.py +135 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/polymath.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/portuguese_bench.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/portuguese_bench_gen.py +121 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/portuguese_bench_mc.py +103 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/prompt.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/prost.py +115 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/pubmedqa.py +112 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qa4mre.py +119 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qasper.py +118 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qasper_bool.py +112 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qnli.py +111 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qnlieu.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qqp.py +111 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/quac.py +111 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/race.py +124 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/random.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/realtoxicityprompts.py +124 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/record.py +125 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/reversed.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/rte.py +111 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ruler.py +170 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sciq.py +113 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/score.py +177 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/scrolls.py +161 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/scrolls_mc.py +157 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/self.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sglue.py +131 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sglue_rte.py +119 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/simple_cooccurrence_bias.py +121 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/siqa.py +209 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/social_iqa.py +114 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/spanish_bench.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/spanish_bench_gen.py +117 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/spanish_bench_mc.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/squad2.py +129 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/squad_completion.py +121 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sst2.py +111 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/storycloze.py +250 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/summarization.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/super.py +107 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/super_glue.py +154 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/superglue.py +111 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/supergpqa.py +111 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/swag.py +115 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/swde.py +179 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sycophancy.py +117 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/t0.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/teca.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinyarc.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinybenchmarks.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinygsm8k.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinyhellaswag.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinymmlu.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinytruthfulqa.py +113 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinywinogrande.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tmmluplus.py +181 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/toxigen.py +91 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/translation.py +149 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/triviaqa.py +130 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/truthfulqa.py +112 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/truthfulqa_mc1.py +120 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/truthfulqa_mc2.py +140 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/truthfulqa_multi.py +142 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/turblimp_core.py +152 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/turkishmmlu.py +161 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/turkishmmlu_cot.py +104 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/turkishmmlu_mc.py +102 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/twenty_newsgroups.py +111 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/unitxt.py +131 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/unscramble.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/vaxx.py +95 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/webqs.py +130 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wic.py +122 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wikitext.py +146 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/winogender.py +139 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/winogrande.py +118 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wmdp.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wmt14.py +110 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wmt16.py +118 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wnli.py +114 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wsc.py +117 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wsc273.py +180 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xcopa.py +197 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xlsum.py +147 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xnli.py +131 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xquad.py +203 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xstorycloze.py +129 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xwinograd.py +124 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/yahoo.py +108 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/zhoblimp.py +155 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_pairs_generation.py +56 -0
- wisent/core/data_loaders/__init__.py +235 -0
- wisent/core/data_loaders/core/__init__.py +0 -0
- wisent/core/data_loaders/core/atoms.py +99 -0
- wisent/core/data_loaders/loaders/__init__.py +0 -0
- wisent/core/data_loaders/loaders/custom.py +120 -0
- wisent/core/data_loaders/loaders/huggingface_loader.py +153 -0
- wisent/core/data_loaders/loaders/lm_loader.py +494 -0
- wisent/core/data_loaders/loaders/lm_loader_special_cases.py +496 -0
- wisent/core/data_loaders/loaders/task_interface_loader.py +300 -0
- wisent/core/data_loaders/rotator.py +118 -0
- wisent/core/detection_handling.py +259 -0
- wisent/core/diversity_processors.py +193 -0
- wisent/core/download_full_benchmarks.py +1512 -0
- wisent/core/errors/__init__.py +203 -0
- wisent/core/errors/error_codes.py +763 -0
- wisent/core/errors/error_handler.py +134 -0
- wisent/core/evaluators/__init__.py +0 -0
- wisent/core/evaluators/benchmark_specific/__init__.py +42 -0
- wisent/core/evaluators/benchmark_specific/aime_evaluator.py +90 -0
- wisent/core/evaluators/benchmark_specific/coding/__init__.py +0 -0
- wisent/core/evaluators/benchmark_specific/coding/metrics/__init__.py +0 -0
- wisent/core/evaluators/benchmark_specific/coding/metrics/core/__init__.py +0 -0
- wisent/core/evaluators/benchmark_specific/coding/metrics/core/atoms.py +36 -0
- wisent/core/evaluators/benchmark_specific/coding/metrics/evaluator.py +363 -0
- wisent/core/evaluators/benchmark_specific/coding/metrics/passk.py +67 -0
- wisent/core/evaluators/benchmark_specific/coding/output_sanitizer/__init__.py +0 -0
- wisent/core/evaluators/benchmark_specific/coding/output_sanitizer/core/__init__.py +0 -0
- wisent/core/evaluators/benchmark_specific/coding/output_sanitizer/core/atoms.py +27 -0
- wisent/core/evaluators/benchmark_specific/coding/output_sanitizer/cpp_sanitizer.py +62 -0
- wisent/core/evaluators/benchmark_specific/coding/output_sanitizer/java_sanitizer.py +78 -0
- wisent/core/evaluators/benchmark_specific/coding/output_sanitizer/python_sanitizer.py +94 -0
- wisent/core/evaluators/benchmark_specific/coding/output_sanitizer/utils.py +126 -0
- wisent/core/evaluators/benchmark_specific/coding/providers/__init__.py +18 -0
- wisent/core/evaluators/benchmark_specific/coding/providers/core/__init__.py +0 -0
- wisent/core/evaluators/benchmark_specific/coding/providers/core/atoms.py +31 -0
- wisent/core/evaluators/benchmark_specific/coding/providers/livecodebench/__init__.py +3 -0
- wisent/core/evaluators/benchmark_specific/coding/providers/livecodebench/provider.py +305 -0
- wisent/core/evaluators/benchmark_specific/coding/safe_docker/Dockerfile +31 -0
- wisent/core/evaluators/benchmark_specific/coding/safe_docker/__init__.py +0 -0
- wisent/core/evaluators/benchmark_specific/coding/safe_docker/core/__init__.py +0 -0
- wisent/core/evaluators/benchmark_specific/coding/safe_docker/core/atoms.py +105 -0
- wisent/core/evaluators/benchmark_specific/coding/safe_docker/core/runtime.py +143 -0
- wisent/core/evaluators/benchmark_specific/coding/safe_docker/entrypoint.py +121 -0
- wisent/core/evaluators/benchmark_specific/coding/safe_docker/recipes.py +60 -0
- wisent/core/evaluators/benchmark_specific/coding/solution_generator.py +258 -0
- wisent/core/evaluators/benchmark_specific/conala_evaluator.py +332 -0
- wisent/core/evaluators/benchmark_specific/exact_match_evaluator.py +81 -0
- wisent/core/evaluators/benchmark_specific/f1_evaluator.py +173 -0
- wisent/core/evaluators/benchmark_specific/generation_evaluator.py +488 -0
- wisent/core/evaluators/benchmark_specific/livemathbench_evaluator.py +393 -0
- wisent/core/evaluators/benchmark_specific/log_likelihoods_evaluator.py +202 -0
- wisent/core/evaluators/benchmark_specific/math_evaluator.py +119 -0
- wisent/core/evaluators/benchmark_specific/math_parsing/__init__.py +1 -0
- wisent/core/evaluators/benchmark_specific/math_parsing/core.py +1640 -0
- wisent/core/evaluators/benchmark_specific/math_parsing/extract_boxed.py +48 -0
- wisent/core/evaluators/benchmark_specific/math_parsing/is_equiv.py +159 -0
- wisent/core/evaluators/benchmark_specific/math_parsing/scripts.py +919 -0
- wisent/core/evaluators/benchmark_specific/perplexity_evaluator.py +175 -0
- wisent/core/evaluators/benchmark_specific/polymath_evaluator.py +114 -0
- wisent/core/evaluators/core/__init__.py +5 -0
- wisent/core/evaluators/core/atoms.py +166 -0
- wisent/core/evaluators/custom/__init__.py +20 -0
- wisent/core/evaluators/custom/custom_evaluator.py +382 -0
- wisent/core/evaluators/custom/examples/__init__.py +37 -0
- wisent/core/evaluators/custom/examples/desklib_detector.py +166 -0
- wisent/core/evaluators/custom/examples/gptzero.py +185 -0
- wisent/core/evaluators/custom/examples/humanization.py +79 -0
- wisent/core/evaluators/custom/examples/humanization_coherent.py +127 -0
- wisent/core/evaluators/custom/examples/roberta_detector.py +173 -0
- wisent/core/evaluators/oracles/__init__.py +0 -0
- wisent/core/evaluators/oracles/interactive.py +73 -0
- wisent/core/evaluators/oracles/nlp_evaluator.py +440 -0
- wisent/core/evaluators/oracles/truthfulqa_gen_evaluator.py +168 -0
- wisent/core/evaluators/oracles/user_specified.py +67 -0
- wisent/core/evaluators/personalization/__init__.py +12 -0
- wisent/core/evaluators/personalization/alignment.py +166 -0
- wisent/core/evaluators/personalization/coherence.py +325 -0
- wisent/core/evaluators/personalization/difference.py +73 -0
- wisent/core/evaluators/rotator.py +217 -0
- wisent/core/evaluators/steering_evaluators.py +386 -0
- wisent/core/evaluators/synthetic_evaluator.py +377 -0
- wisent/core/hyperparameter_optimizer.py +547 -0
- wisent/core/layer.py +17 -0
- wisent/core/lm_eval_harness_ground_truth.py +1431 -0
- wisent/core/main.py +101 -0
- wisent/core/managed_cached_benchmarks.py +609 -0
- wisent/core/mixed_benchmark_sampler.py +366 -0
- wisent/core/modalities/__init__.py +545 -0
- wisent/core/model_persistence.py +302 -0
- wisent/core/models/__init__.py +23 -0
- wisent/core/models/core/__init__.py +0 -0
- wisent/core/models/core/atoms.py +465 -0
- wisent/core/models/inference_config.py +127 -0
- wisent/core/models/wisent_model.py +893 -0
- wisent/core/multi_steering.py +397 -0
- wisent/core/opti/__init__.py +0 -0
- wisent/core/opti/core/__init__.py +0 -0
- wisent/core/opti/core/atoms.py +177 -0
- wisent/core/opti/methods/__init__.py +10 -0
- wisent/core/opti/methods/opti_classificator.py +172 -0
- wisent/core/opti/methods/opti_steering.py +139 -0
- wisent/core/opti/methods/opti_weights.py +523 -0
- wisent/core/optuna/__init__.py +54 -0
- wisent/core/optuna/classifier/__init__.py +25 -0
- wisent/core/optuna/classifier/activation_generator.py +351 -0
- wisent/core/optuna/classifier/classifier_cache.py +509 -0
- wisent/core/optuna/classifier/optuna_classifier_optimizer.py +685 -0
- wisent/core/optuna/steering/__init__.py +20 -0
- wisent/core/optuna/steering/bigcode_evaluator_wrapper.py +200 -0
- wisent/core/optuna/steering/data_utils.py +342 -0
- wisent/core/optuna/steering/metrics.py +412 -0
- wisent/core/optuna/steering/steering_optimization.py +1096 -0
- wisent/core/parser.py +1662 -0
- wisent/core/parser_arguments/__init__.py +10 -0
- wisent/core/parser_arguments/agent_parser.py +122 -0
- wisent/core/parser_arguments/check_linearity_parser.py +82 -0
- wisent/core/parser_arguments/configure_model_parser.py +7 -0
- wisent/core/parser_arguments/create_steering_vector_parser.py +67 -0
- wisent/core/parser_arguments/diagnose_pairs_parser.py +25 -0
- wisent/core/parser_arguments/diagnose_vectors_parser.py +72 -0
- wisent/core/parser_arguments/evaluate_parser.py +40 -0
- wisent/core/parser_arguments/evaluate_refusal_parser.py +32 -0
- wisent/core/parser_arguments/evaluate_responses_parser.py +12 -0
- wisent/core/parser_arguments/full_optimize_parser.py +194 -0
- wisent/core/parser_arguments/generate_pairs_from_task_parser.py +33 -0
- wisent/core/parser_arguments/generate_pairs_parser.py +43 -0
- wisent/core/parser_arguments/generate_responses_parser.py +16 -0
- wisent/core/parser_arguments/generate_vector_from_synthetic_parser.py +148 -0
- wisent/core/parser_arguments/generate_vector_from_task_parser.py +149 -0
- wisent/core/parser_arguments/generate_vector_parser.py +89 -0
- wisent/core/parser_arguments/get_activations_parser.py +90 -0
- wisent/core/parser_arguments/inference_config_parser.py +65 -0
- wisent/core/parser_arguments/main_parser.py +220 -0
- wisent/core/parser_arguments/model_config_parser.py +59 -0
- wisent/core/parser_arguments/modify_weights_parser.py +309 -0
- wisent/core/parser_arguments/monitor_parser.py +17 -0
- wisent/core/parser_arguments/multi_steer_parser.py +48 -0
- wisent/core/parser_arguments/nonsense_parser.py +26 -0
- wisent/core/parser_arguments/optimization_cache_parser.py +64 -0
- wisent/core/parser_arguments/optimize_classification_parser.py +108 -0
- wisent/core/parser_arguments/optimize_parser.py +142 -0
- wisent/core/parser_arguments/optimize_sample_size_parser.py +58 -0
- wisent/core/parser_arguments/optimize_steering_parser.py +617 -0
- wisent/core/parser_arguments/optimize_weights_parser.py +403 -0
- wisent/core/parser_arguments/synthetic_parser.py +117 -0
- wisent/core/parser_arguments/tasks_parser.py +591 -0
- wisent/core/parser_arguments/train_unified_goodness_parser.py +172 -0
- wisent/core/parser_arguments/utils.py +107 -0
- wisent/core/prompts/__init__.py +0 -0
- wisent/core/prompts/core/__init__.py +0 -0
- wisent/core/prompts/core/atom.py +57 -0
- wisent/core/prompts/core/prompt_formater.py +148 -0
- wisent/core/prompts/prompt_stratiegies/__init__.py +0 -0
- wisent/core/prompts/prompt_stratiegies/direct_completion.py +26 -0
- wisent/core/prompts/prompt_stratiegies/instruction_following.py +26 -0
- wisent/core/prompts/prompt_stratiegies/multiple_choice.py +31 -0
- wisent/core/prompts/prompt_stratiegies/role_playing.py +33 -0
- wisent/core/representation.py +5 -0
- wisent/core/save_results.py +277 -0
- wisent/core/steering.py +660 -0
- wisent/core/steering_method.py +20 -0
- wisent/core/steering_methods/__init__.py +54 -0
- wisent/core/steering_methods/core/__init__.py +0 -0
- wisent/core/steering_methods/core/atoms.py +154 -0
- wisent/core/steering_methods/methods/__init__.py +0 -0
- wisent/core/steering_methods/methods/caa.py +45 -0
- wisent/core/steering_methods/methods/prism.py +588 -0
- wisent/core/steering_methods/methods/pulse.py +641 -0
- wisent/core/steering_methods/methods/titan.py +1005 -0
- wisent/core/steering_methods/preflight.py +322 -0
- wisent/core/steering_methods/registry.py +649 -0
- wisent/core/steering_methods/rotator.py +121 -0
- wisent/core/steering_optimizer.py +1503 -0
- wisent/core/synthetic/__init__.py +0 -0
- wisent/core/synthetic/cleaners/__init__.py +0 -0
- wisent/core/synthetic/cleaners/core/__init__.py +0 -0
- wisent/core/synthetic/cleaners/core/atoms.py +58 -0
- wisent/core/synthetic/cleaners/deduper_cleaner.py +53 -0
- wisent/core/synthetic/cleaners/methods/__init__.py +0 -0
- wisent/core/synthetic/cleaners/methods/base_dedupers.py +321 -0
- wisent/core/synthetic/cleaners/methods/base_refusalers.py +286 -0
- wisent/core/synthetic/cleaners/methods/core/__init__.py +0 -0
- wisent/core/synthetic/cleaners/methods/core/atoms.py +47 -0
- wisent/core/synthetic/cleaners/pairs_cleaner.py +90 -0
- wisent/core/synthetic/cleaners/refusaler_cleaner.py +133 -0
- wisent/core/synthetic/db_instructions/__init__.py +0 -0
- wisent/core/synthetic/db_instructions/core/__init__.py +0 -0
- wisent/core/synthetic/db_instructions/core/atoms.py +25 -0
- wisent/core/synthetic/db_instructions/mini_dp.py +115 -0
- wisent/core/synthetic/generators/__init__.py +0 -0
- wisent/core/synthetic/generators/core/__init__.py +0 -0
- wisent/core/synthetic/generators/core/atoms.py +73 -0
- wisent/core/synthetic/generators/diversities/__init__.py +0 -0
- wisent/core/synthetic/generators/diversities/core/__init__.py +0 -0
- wisent/core/synthetic/generators/diversities/core/core.py +68 -0
- wisent/core/synthetic/generators/diversities/methods/__init__.py +0 -0
- wisent/core/synthetic/generators/diversities/methods/fast_diversity.py +249 -0
- wisent/core/synthetic/generators/nonsense_generator.py +150 -0
- wisent/core/synthetic/generators/pairs_generator.py +313 -0
- wisent/core/task_interface.py +143 -0
- wisent/core/task_selector.py +232 -0
- wisent/core/tasks/__init__.py +218 -0
- wisent/core/tasks/aime_task.py +142 -0
- wisent/core/tasks/file_task.py +212 -0
- wisent/core/tasks/hle_task.py +180 -0
- wisent/core/tasks/hmmt_task.py +120 -0
- wisent/core/tasks/livecodebench_task.py +94 -0
- wisent/core/tasks/livemathbench_task.py +159 -0
- wisent/core/tasks/lm_eval_task.py +611 -0
- wisent/core/tasks/math500_task.py +84 -0
- wisent/core/tasks/polymath_task.py +147 -0
- wisent/core/tasks/supergpqa_task.py +220 -0
- wisent/core/time_estimator.py +155 -0
- wisent/core/timing_calibration.py +176 -0
- wisent/core/tracking/__init__.py +54 -0
- wisent/core/tracking/latency.py +620 -0
- wisent/core/tracking/memory.py +360 -0
- wisent/core/trainers/__init__.py +0 -0
- wisent/core/trainers/core/__init__.py +11 -0
- wisent/core/trainers/core/atoms.py +45 -0
- wisent/core/trainers/steering_trainer.py +365 -0
- wisent/core/universal_subspace.py +918 -0
- wisent/core/user_model_config.py +158 -0
- wisent/core/utils/__init__.py +64 -0
- wisent/core/utils/base_rotator.py +292 -0
- wisent/core/utils/dataset_splits.py +197 -0
- wisent/core/utils/device.py +279 -0
- wisent/core/weight_modification/__init__.py +134 -0
- wisent/core/weight_modification/additive.py +340 -0
- wisent/core/weight_modification/directional.py +1357 -0
- wisent/core/weight_modification/export.py +359 -0
- wisent/core/weight_modification/multi_direction.py +410 -0
- wisent/core/weight_modification/utils.py +236 -0
- wisent/core/wisent.py +660 -0
- wisent/examples/contrastive_pairs/humanization_human_vs_ai.json +2112 -0
- wisent/examples/scripts/1/test_basqueglue_evaluation.json +51 -0
- wisent/examples/scripts/1/test_basqueglue_pairs.json +14 -0
- wisent/examples/scripts/1/test_bec2016eu_evaluation.json +51 -0
- wisent/examples/scripts/1/test_bec2016eu_pairs.json +14 -0
- wisent/examples/scripts/1/test_belebele_evaluation.json +51 -0
- wisent/examples/scripts/1/test_belebele_pairs.json +14 -0
- wisent/examples/scripts/1/test_benchmarks_evaluation.json +51 -0
- wisent/examples/scripts/1/test_benchmarks_pairs.json +14 -0
- wisent/examples/scripts/1/test_bertaqa_evaluation.json +51 -0
- wisent/examples/scripts/1/test_bertaqa_pairs.json +14 -0
- wisent/examples/scripts/1/test_bhtc_v2_evaluation.json +30 -0
- wisent/examples/scripts/1/test_bhtc_v2_pairs.json +8 -0
- wisent/examples/scripts/1/test_boolq-seq2seq_evaluation.json +30 -0
- wisent/examples/scripts/1/test_boolq-seq2seq_pairs.json +8 -0
- wisent/examples/scripts/1/test_cabreu_evaluation.json +30 -0
- wisent/examples/scripts/1/test_cabreu_pairs.json +8 -0
- wisent/examples/scripts/1/test_careqa_en_evaluation.json +30 -0
- wisent/examples/scripts/1/test_careqa_en_pairs.json +8 -0
- wisent/examples/scripts/1/test_careqa_evaluation.json +30 -0
- wisent/examples/scripts/1/test_careqa_pairs.json +8 -0
- wisent/examples/scripts/1/test_catalanqa_evaluation.json +30 -0
- wisent/examples/scripts/1/test_catalanqa_pairs.json +8 -0
- wisent/examples/scripts/1/test_catcola_evaluation.json +30 -0
- wisent/examples/scripts/1/test_catcola_pairs.json +8 -0
- wisent/examples/scripts/1/test_chartqa_evaluation.json +30 -0
- wisent/examples/scripts/1/test_chartqa_pairs.json +8 -0
- wisent/examples/scripts/1/test_claim_stance_topic_evaluation.json +30 -0
- wisent/examples/scripts/1/test_claim_stance_topic_pairs.json +8 -0
- wisent/examples/scripts/1/test_cnn_dailymail_evaluation.json +30 -0
- wisent/examples/scripts/1/test_cnn_dailymail_pairs.json +8 -0
- wisent/examples/scripts/1/test_cocoteros_es_evaluation.json +30 -0
- wisent/examples/scripts/1/test_cocoteros_es_pairs.json +8 -0
- wisent/examples/scripts/1/test_coedit_gec_evaluation.json +30 -0
- wisent/examples/scripts/1/test_coedit_gec_pairs.json +8 -0
- wisent/examples/scripts/1/test_cola_evaluation.json +30 -0
- wisent/examples/scripts/1/test_cola_pairs.json +8 -0
- wisent/examples/scripts/1/test_coqcat_evaluation.json +30 -0
- wisent/examples/scripts/1/test_coqcat_pairs.json +8 -0
- wisent/examples/scripts/1/test_dbpedia_14_evaluation.json +30 -0
- wisent/examples/scripts/1/test_dbpedia_14_pairs.json +8 -0
- wisent/examples/scripts/1/test_epec_koref_bin_evaluation.json +30 -0
- wisent/examples/scripts/1/test_epec_koref_bin_pairs.json +8 -0
- wisent/examples/scripts/1/test_ethos_binary_evaluation.json +30 -0
- wisent/examples/scripts/1/test_ethos_binary_pairs.json +8 -0
- wisent/examples/scripts/2/test_afrimgsm_direct_amh_evaluation.json +30 -0
- wisent/examples/scripts/2/test_afrimgsm_direct_amh_pairs.json +8 -0
- wisent/examples/scripts/2/test_afrimmlu_direct_amh_evaluation.json +30 -0
- wisent/examples/scripts/2/test_afrimmlu_direct_amh_pairs.json +8 -0
- wisent/examples/scripts/2/test_afrixnli_en_direct_amh_evaluation.json +30 -0
- wisent/examples/scripts/2/test_afrixnli_en_direct_amh_pairs.json +8 -0
- wisent/examples/scripts/2/test_arc_ar_evaluation.json +30 -0
- wisent/examples/scripts/2/test_arc_ar_pairs.json +8 -0
- wisent/examples/scripts/2/test_atis_evaluation.json +30 -0
- wisent/examples/scripts/2/test_atis_pairs.json +8 -0
- wisent/examples/scripts/2/test_babi_evaluation.json +30 -0
- wisent/examples/scripts/2/test_babi_pairs.json +8 -0
- wisent/examples/scripts/2/test_babilong_evaluation.json +30 -0
- wisent/examples/scripts/2/test_babilong_pairs.json +8 -0
- wisent/examples/scripts/2/test_bangla_mmlu_evaluation.json +30 -0
- wisent/examples/scripts/2/test_bangla_mmlu_pairs.json +8 -0
- wisent/examples/scripts/2/test_basque-glue_pairs.json +14 -0
- wisent/examples/scripts/benchmark_tags.json +2140 -0
- wisent/examples/scripts/lm_eval_readme.json +4 -0
- wisent/examples/scripts/results/benchmark_descriptions.json +1244 -0
- wisent/examples/scripts/results/benchmark_evaluation_methods.json +66 -0
- wisent/examples/scripts/results/benchmark_evaluator_mapping.json +2781 -0
- wisent/examples/scripts/results/benchmark_evaluator_mapping_updated.json +30536 -0
- wisent/examples/scripts/results/benchmark_evaluators_clean.json +469 -0
- wisent/examples/scripts/results/benchmark_methods_summary.json +260 -0
- wisent/examples/scripts/results/benchmark_pair_creation_methods.json +66 -0
- wisent/examples/scripts/results/benchmark_pair_totals.json +269 -0
- wisent/examples/scripts/results/benchmark_tags.json +917 -0
- wisent/examples/scripts/results/benchmark_test_summary_nov4.json +71 -0
- wisent/examples/scripts/results/coding_benchmarks_test_code_status.json +150 -0
- wisent/examples/scripts/results/failing_benchmarks.json +946 -0
- wisent/examples/scripts/results/failing_benchmarks_list.json +41 -0
- wisent/examples/scripts/results/failing_benchmarks_test_results.json +945 -0
- wisent/examples/scripts/results/missing_benchmark_tags.json +341 -0
- wisent/examples/scripts/results/test_20_newsgroups_evaluation.json +30 -0
- wisent/examples/scripts/results/test_20_newsgroups_pairs.json +8 -0
- wisent/examples/scripts/results/test_AraDICE_evaluation.json +51 -0
- wisent/examples/scripts/results/test_AraDICE_pairs.json +14 -0
- wisent/examples/scripts/results/test_AraDiCE_boolq_egy/test_AraDiCE_boolq_egy_evaluation.json +30 -0
- wisent/examples/scripts/results/test_AraDiCE_boolq_egy/test_AraDiCE_boolq_egy_pairs.json +8 -0
- wisent/examples/scripts/results/test_ArabCulture_evaluation.json +51 -0
- wisent/examples/scripts/results/test_ArabCulture_pairs.json +14 -0
- wisent/examples/scripts/results/test_Tag_evaluation.json +30 -0
- wisent/examples/scripts/results/test_Tag_pairs.json +8 -0
- wisent/examples/scripts/results/test_aclue_evaluation.json +51 -0
- wisent/examples/scripts/results/test_aclue_pairs.json +14 -0
- wisent/examples/scripts/results/test_acp_bench_evaluation.json +51 -0
- wisent/examples/scripts/results/test_acp_bench_hard_evaluation.json +51 -0
- wisent/examples/scripts/results/test_acp_bench_hard_pairs.json +14 -0
- wisent/examples/scripts/results/test_acp_bench_pairs.json +14 -0
- wisent/examples/scripts/results/test_advanced_ai_risk_evaluation.json +51 -0
- wisent/examples/scripts/results/test_advanced_ai_risk_pairs.json +14 -0
- wisent/examples/scripts/results/test_aexams_evaluation.json +51 -0
- wisent/examples/scripts/results/test_aexams_pairs.json +14 -0
- wisent/examples/scripts/results/test_afrimgsm_direct_amh_evaluation.json +30 -0
- wisent/examples/scripts/results/test_afrimgsm_direct_amh_pairs.json +8 -0
- wisent/examples/scripts/results/test_afrimmlu_direct_amh_evaluation.json +30 -0
- wisent/examples/scripts/results/test_afrimmlu_direct_amh_pairs.json +8 -0
- wisent/examples/scripts/results/test_afrixnli_en_direct_amh_evaluation.json +30 -0
- wisent/examples/scripts/results/test_afrixnli_en_direct_amh_pairs.json +8 -0
- wisent/examples/scripts/results/test_ag_news_evaluation.json +30 -0
- wisent/examples/scripts/results/test_ag_news_pairs.json +8 -0
- wisent/examples/scripts/results/test_agieval_evaluation.json +51 -0
- wisent/examples/scripts/results/test_agieval_pairs.json +14 -0
- wisent/examples/scripts/results/test_aime2024_evaluation.json +30 -0
- wisent/examples/scripts/results/test_aime2024_pairs.json +8 -0
- wisent/examples/scripts/results/test_aime2025_evaluation.json +30 -0
- wisent/examples/scripts/results/test_aime2025_pairs.json +8 -0
- wisent/examples/scripts/results/test_aime_evaluation.json +30 -0
- wisent/examples/scripts/results/test_aime_pairs.json +8 -0
- wisent/examples/scripts/results/test_anagrams1_evaluation.json +30 -0
- wisent/examples/scripts/results/test_anagrams1_pairs.json +8 -0
- wisent/examples/scripts/results/test_anagrams2_evaluation.json +30 -0
- wisent/examples/scripts/results/test_anagrams2_pairs.json +8 -0
- wisent/examples/scripts/results/test_anli_evaluation.json +30 -0
- wisent/examples/scripts/results/test_anli_pairs.json +8 -0
- wisent/examples/scripts/results/test_apps_evaluation.json +30 -0
- wisent/examples/scripts/results/test_apps_pairs.json +8 -0
- wisent/examples/scripts/results/test_arabic_exams_evaluation.json +30 -0
- wisent/examples/scripts/results/test_arabic_exams_pairs.json +8 -0
- wisent/examples/scripts/results/test_arabic_leaderboard_complete_evaluation.json +51 -0
- wisent/examples/scripts/results/test_arabic_leaderboard_complete_pairs.json +14 -0
- wisent/examples/scripts/results/test_arabic_leaderboard_light_evaluation.json +51 -0
- wisent/examples/scripts/results/test_arabic_leaderboard_light_pairs.json +14 -0
- wisent/examples/scripts/results/test_arabicmmlu_evaluation.json +51 -0
- wisent/examples/scripts/results/test_arabicmmlu_pairs.json +14 -0
- wisent/examples/scripts/results/test_aradice/test_aradice_evaluation.json +51 -0
- wisent/examples/scripts/results/test_aradice/test_aradice_pairs.json +14 -0
- wisent/examples/scripts/results/test_aradice3/test_aradice_evaluation.json +51 -0
- wisent/examples/scripts/results/test_aradice3/test_aradice_pairs.json +14 -0
- wisent/examples/scripts/results/test_arc_ar_evaluation.json +30 -0
- wisent/examples/scripts/results/test_arc_ar_pairs.json +8 -0
- wisent/examples/scripts/results/test_arc_challenge_evaluation.json +30 -0
- wisent/examples/scripts/results/test_arc_challenge_pairs.json +8 -0
- wisent/examples/scripts/results/test_arc_easy_evaluation.json +30 -0
- wisent/examples/scripts/results/test_arc_easy_pairs.json +8 -0
- wisent/examples/scripts/results/test_argument_topic_evaluation.json +30 -0
- wisent/examples/scripts/results/test_argument_topic_pairs.json +8 -0
- wisent/examples/scripts/results/test_arithmetic_evaluation.json +51 -0
- wisent/examples/scripts/results/test_arithmetic_pairs.json +14 -0
- wisent/examples/scripts/results/test_asdiv_evaluation.json +30 -0
- wisent/examples/scripts/results/test_asdiv_pairs.json +8 -0
- wisent/examples/scripts/results/test_assin_entailment_evaluation.json +30 -0
- wisent/examples/scripts/results/test_assin_entailment_pairs.json +8 -0
- wisent/examples/scripts/results/test_atis_evaluation.json +30 -0
- wisent/examples/scripts/results/test_atis_pairs.json +8 -0
- wisent/examples/scripts/results/test_babi_evaluation.json +30 -0
- wisent/examples/scripts/results/test_babi_pairs.json +8 -0
- wisent/examples/scripts/results/test_babilong_evaluation.json +30 -0
- wisent/examples/scripts/results/test_babilong_pairs.json +8 -0
- wisent/examples/scripts/results/test_bangla_mmlu_evaluation.json +30 -0
- wisent/examples/scripts/results/test_bangla_mmlu_pairs.json +8 -0
- wisent/examples/scripts/results/test_banking77_evaluation.json +30 -0
- wisent/examples/scripts/results/test_banking77_pairs.json +8 -0
- wisent/examples/scripts/results/test_basque/test_basque-glue_pairs.json +14 -0
- wisent/examples/scripts/results/test_basque-glue_evaluation.json +51 -0
- wisent/examples/scripts/results/test_basque-glue_pairs.json +14 -0
- wisent/examples/scripts/results/test_basque2/test_basque-glue_evaluation.json +51 -0
- wisent/examples/scripts/results/test_basque2/test_basque-glue_pairs.json +14 -0
- wisent/examples/scripts/results/test_basque_bench_evaluation.json +51 -0
- wisent/examples/scripts/results/test_basque_bench_pairs.json +14 -0
- wisent/examples/scripts/results/test_basque_glue/test_basque-glue_evaluation.json +51 -0
- wisent/examples/scripts/results/test_basque_glue/test_basque-glue_pairs.json +14 -0
- wisent/examples/scripts/results/test_basqueglue_evaluation.json +51 -0
- wisent/examples/scripts/results/test_basqueglue_pairs.json +14 -0
- wisent/examples/scripts/results/test_bbh_evaluation.json +51 -0
- wisent/examples/scripts/results/test_bbh_pairs.json +14 -0
- wisent/examples/scripts/results/test_bbq_evaluation.json +30 -0
- wisent/examples/scripts/results/test_bbq_pairs.json +8 -0
- wisent/examples/scripts/results/test_bec2016eu_evaluation.json +51 -0
- wisent/examples/scripts/results/test_bec2016eu_pairs.json +14 -0
- wisent/examples/scripts/results/test_belebele_evaluation.json +51 -0
- wisent/examples/scripts/results/test_belebele_pairs.json +14 -0
- wisent/examples/scripts/results/test_benchmarks_evaluation.json +51 -0
- wisent/examples/scripts/results/test_benchmarks_pairs.json +14 -0
- wisent/examples/scripts/results/test_bertaqa_evaluation.json +51 -0
- wisent/examples/scripts/results/test_bertaqa_pairs.json +14 -0
- wisent/examples/scripts/results/test_bhtc_v2_evaluation.json +30 -0
- wisent/examples/scripts/results/test_bhtc_v2_pairs.json +8 -0
- wisent/examples/scripts/results/test_bigbench_evaluation.json +51 -0
- wisent/examples/scripts/results/test_bigbench_pairs.json +14 -0
- wisent/examples/scripts/results/test_blimp_evaluation.json +51 -0
- wisent/examples/scripts/results/test_blimp_pairs.json +14 -0
- wisent/examples/scripts/results/test_boolq/test_boolq_evaluation.json +30 -0
- wisent/examples/scripts/results/test_boolq/test_boolq_pairs.json +8 -0
- wisent/examples/scripts/results/test_boolq-seq2seq_evaluation.json +30 -0
- wisent/examples/scripts/results/test_boolq-seq2seq_pairs.json +8 -0
- wisent/examples/scripts/results/test_boolq_evaluation.json +30 -0
- wisent/examples/scripts/results/test_boolq_pairs.json +8 -0
- wisent/examples/scripts/results/test_c4_evaluation.json +30 -0
- wisent/examples/scripts/results/test_c4_pairs.json +8 -0
- wisent/examples/scripts/results/test_cabreu_evaluation.json +30 -0
- wisent/examples/scripts/results/test_cabreu_pairs.json +8 -0
- wisent/examples/scripts/results/test_careqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_careqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_catalan_bench_evaluation.json +51 -0
- wisent/examples/scripts/results/test_catalan_bench_pairs.json +14 -0
- wisent/examples/scripts/results/test_catalanqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_catalanqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_catcola_evaluation.json +30 -0
- wisent/examples/scripts/results/test_catcola_pairs.json +8 -0
- wisent/examples/scripts/results/test_cb_evaluation.json +30 -0
- wisent/examples/scripts/results/test_cb_pairs.json +8 -0
- wisent/examples/scripts/results/test_ceval/test_ceval_evaluation.json +51 -0
- wisent/examples/scripts/results/test_ceval/test_ceval_pairs.json +14 -0
- wisent/examples/scripts/results/test_ceval_accountant/test_ceval-valid_accountant_evaluation.json +30 -0
- wisent/examples/scripts/results/test_ceval_accountant/test_ceval-valid_accountant_pairs.json +8 -0
- wisent/examples/scripts/results/test_ceval_evaluation.json +51 -0
- wisent/examples/scripts/results/test_ceval_pairs.json +14 -0
- wisent/examples/scripts/results/test_ceval_valid/test_ceval_valid_evaluation.json +51 -0
- wisent/examples/scripts/results/test_ceval_valid/test_ceval_valid_pairs.json +14 -0
- wisent/examples/scripts/results/test_chain_of_thought_evaluation.json +51 -0
- wisent/examples/scripts/results/test_chain_of_thought_pairs.json +14 -0
- wisent/examples/scripts/results/test_chartqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_chartqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_claim_stance_topic_evaluation.json +30 -0
- wisent/examples/scripts/results/test_claim_stance_topic_pairs.json +8 -0
- wisent/examples/scripts/results/test_cmmlu_evaluation.json +51 -0
- wisent/examples/scripts/results/test_cmmlu_pairs.json +14 -0
- wisent/examples/scripts/results/test_cnn_dailymail_evaluation.json +30 -0
- wisent/examples/scripts/results/test_cnn_dailymail_pairs.json +8 -0
- wisent/examples/scripts/results/test_cocoteros_es_evaluation.json +30 -0
- wisent/examples/scripts/results/test_cocoteros_es_pairs.json +8 -0
- wisent/examples/scripts/results/test_codexglue_code_to_text_go_evaluation.json +30 -0
- wisent/examples/scripts/results/test_codexglue_code_to_text_go_pairs.json +8 -0
- wisent/examples/scripts/results/test_codexglue_code_to_text_java_evaluation.json +30 -0
- wisent/examples/scripts/results/test_codexglue_code_to_text_java_pairs.json +8 -0
- wisent/examples/scripts/results/test_codexglue_code_to_text_javascript_evaluation.json +30 -0
- wisent/examples/scripts/results/test_codexglue_code_to_text_javascript_pairs.json +8 -0
- wisent/examples/scripts/results/test_codexglue_code_to_text_php_evaluation.json +30 -0
- wisent/examples/scripts/results/test_codexglue_code_to_text_php_pairs.json +8 -0
- wisent/examples/scripts/results/test_codexglue_code_to_text_python_evaluation.json +30 -0
- wisent/examples/scripts/results/test_codexglue_code_to_text_python_pairs.json +8 -0
- wisent/examples/scripts/results/test_codexglue_code_to_text_ruby_evaluation.json +30 -0
- wisent/examples/scripts/results/test_codexglue_code_to_text_ruby_pairs.json +8 -0
- wisent/examples/scripts/results/test_coedit_gec_evaluation.json +30 -0
- wisent/examples/scripts/results/test_coedit_gec_pairs.json +8 -0
- wisent/examples/scripts/results/test_cola_evaluation.json +30 -0
- wisent/examples/scripts/results/test_cola_pairs.json +8 -0
- wisent/examples/scripts/results/test_commonsense_qa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_commonsense_qa_pairs.json +8 -0
- wisent/examples/scripts/results/test_conala_evaluation.json +30 -0
- wisent/examples/scripts/results/test_conala_pairs.json +8 -0
- wisent/examples/scripts/results/test_concode_evaluation.json +30 -0
- wisent/examples/scripts/results/test_concode_pairs.json +8 -0
- wisent/examples/scripts/results/test_copa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_copa_pairs.json +8 -0
- wisent/examples/scripts/results/test_copal_id_evaluation.json +30 -0
- wisent/examples/scripts/results/test_copal_id_pairs.json +8 -0
- wisent/examples/scripts/results/test_coqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_coqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_coqcat_evaluation.json +30 -0
- wisent/examples/scripts/results/test_coqcat_pairs.json +8 -0
- wisent/examples/scripts/results/test_crows_pairs_evaluation.json +51 -0
- wisent/examples/scripts/results/test_crows_pairs_pairs.json +14 -0
- wisent/examples/scripts/results/test_csatqa_evaluation.json +51 -0
- wisent/examples/scripts/results/test_csatqa_pairs.json +14 -0
- wisent/examples/scripts/results/test_cycle_letters_evaluation.json +30 -0
- wisent/examples/scripts/results/test_cycle_letters_pairs.json +8 -0
- wisent/examples/scripts/results/test_darija_bench/test_darija_bench_evaluation.json +51 -0
- wisent/examples/scripts/results/test_darija_bench/test_darija_bench_pairs.json +14 -0
- wisent/examples/scripts/results/test_darija_bench_evaluation.json +51 -0
- wisent/examples/scripts/results/test_darija_bench_pairs.json +14 -0
- wisent/examples/scripts/results/test_darijahellaswag_evaluation.json +30 -0
- wisent/examples/scripts/results/test_darijahellaswag_pairs.json +8 -0
- wisent/examples/scripts/results/test_darijammlu_evaluation.json +51 -0
- wisent/examples/scripts/results/test_darijammlu_pairs.json +14 -0
- wisent/examples/scripts/results/test_dbpedia_14_evaluation.json +30 -0
- wisent/examples/scripts/results/test_dbpedia_14_pairs.json +8 -0
- wisent/examples/scripts/results/test_drop_evaluation.json +30 -0
- wisent/examples/scripts/results/test_drop_pairs.json +8 -0
- wisent/examples/scripts/results/test_ds1000_evaluation.json +30 -0
- wisent/examples/scripts/results/test_ds1000_pairs.json +8 -0
- wisent/examples/scripts/results/test_egyhellaswag_evaluation.json +30 -0
- wisent/examples/scripts/results/test_egyhellaswag_pairs.json +8 -0
- wisent/examples/scripts/results/test_egymmlu_evaluation.json +51 -0
- wisent/examples/scripts/results/test_egymmlu_pairs.json +14 -0
- wisent/examples/scripts/results/test_epec_koref_bin_evaluation.json +30 -0
- wisent/examples/scripts/results/test_epec_koref_bin_pairs.json +8 -0
- wisent/examples/scripts/results/test_eq_bench_evaluation.json +30 -0
- wisent/examples/scripts/results/test_eq_bench_pairs.json +8 -0
- wisent/examples/scripts/results/test_escola_evaluation.json +30 -0
- wisent/examples/scripts/results/test_escola_pairs.json +8 -0
- wisent/examples/scripts/results/test_ethics_cm_evaluation.json +30 -0
- wisent/examples/scripts/results/test_ethics_cm_pairs.json +8 -0
- wisent/examples/scripts/results/test_ethos_binary_evaluation.json +30 -0
- wisent/examples/scripts/results/test_ethos_binary_pairs.json +8 -0
- wisent/examples/scripts/results/test_eus_exams/test_eus_exams_evaluation.json +51 -0
- wisent/examples/scripts/results/test_eus_exams/test_eus_exams_pairs.json +14 -0
- wisent/examples/scripts/results/test_eus_exams_es_evaluation.json +51 -0
- wisent/examples/scripts/results/test_eus_exams_es_pairs.json +14 -0
- wisent/examples/scripts/results/test_eus_exams_evaluation.json +51 -0
- wisent/examples/scripts/results/test_eus_exams_pairs.json +14 -0
- wisent/examples/scripts/results/test_eus_proficiency_evaluation.json +30 -0
- wisent/examples/scripts/results/test_eus_proficiency_pairs.json +8 -0
- wisent/examples/scripts/results/test_eus_reading_evaluation.json +30 -0
- wisent/examples/scripts/results/test_eus_reading_pairs.json +8 -0
- wisent/examples/scripts/results/test_eus_trivia_evaluation.json +30 -0
- wisent/examples/scripts/results/test_eus_trivia_pairs.json +8 -0
- wisent/examples/scripts/results/test_evalita-mp_evaluation.json +51 -0
- wisent/examples/scripts/results/test_evalita-mp_pairs.json +14 -0
- wisent/examples/scripts/results/test_evalita-sp_sum_task_fp-small_p1_evaluation.json +30 -0
- wisent/examples/scripts/results/test_evalita-sp_sum_task_fp-small_p1_pairs.json +8 -0
- wisent/examples/scripts/results/test_evalita_LLM_evaluation.json +51 -0
- wisent/examples/scripts/results/test_evalita_LLM_pairs.json +14 -0
- wisent/examples/scripts/results/test_evalita_llm/test_evalita_llm_evaluation.json +51 -0
- wisent/examples/scripts/results/test_evalita_llm/test_evalita_llm_pairs.json +14 -0
- wisent/examples/scripts/results/test_evalita_mp/test_evalita-mp_te_prompt-1_evaluation.json +30 -0
- wisent/examples/scripts/results/test_evalita_mp/test_evalita-mp_te_prompt-1_pairs.json +8 -0
- wisent/examples/scripts/results/test_evalita_mp2/test_evalita_mp_evaluation.json +51 -0
- wisent/examples/scripts/results/test_evalita_mp2/test_evalita_mp_pairs.json +14 -0
- wisent/examples/scripts/results/test_evalita_sp2/test_evalita-sp_sum_task_fp-small_p1_evaluation.json +30 -0
- wisent/examples/scripts/results/test_evalita_sp2/test_evalita-sp_sum_task_fp-small_p1_pairs.json +8 -0
- wisent/examples/scripts/results/test_fda_evaluation.json +30 -0
- wisent/examples/scripts/results/test_fda_pairs.json +8 -0
- wisent/examples/scripts/results/test_financial_tweets_evaluation.json +30 -0
- wisent/examples/scripts/results/test_financial_tweets_pairs.json +8 -0
- wisent/examples/scripts/results/test_fld/test_fld_evaluation.json +30 -0
- wisent/examples/scripts/results/test_fld/test_fld_pairs.json +8 -0
- wisent/examples/scripts/results/test_fld_evaluation.json +30 -0
- wisent/examples/scripts/results/test_fld_fixed/test_fld_evaluation.json +30 -0
- wisent/examples/scripts/results/test_fld_fixed/test_fld_pairs.json +8 -0
- wisent/examples/scripts/results/test_fld_pairs.json +8 -0
- wisent/examples/scripts/results/test_flores_evaluation.json +51 -0
- wisent/examples/scripts/results/test_flores_pairs.json +14 -0
- wisent/examples/scripts/results/test_freebase_evaluation.json +30 -0
- wisent/examples/scripts/results/test_freebase_pairs.json +8 -0
- wisent/examples/scripts/results/test_french_bench_evaluation.json +51 -0
- wisent/examples/scripts/results/test_french_bench_pairs.json +14 -0
- wisent/examples/scripts/results/test_galcola_evaluation.json +30 -0
- wisent/examples/scripts/results/test_galcola_pairs.json +8 -0
- wisent/examples/scripts/results/test_galician_bench_evaluation.json +51 -0
- wisent/examples/scripts/results/test_galician_bench_pairs.json +14 -0
- wisent/examples/scripts/results/test_glianorex_evaluation.json +30 -0
- wisent/examples/scripts/results/test_glianorex_pairs.json +8 -0
- wisent/examples/scripts/results/test_global_mmlu_evaluation.json +51 -0
- wisent/examples/scripts/results/test_global_mmlu_pairs.json +14 -0
- wisent/examples/scripts/results/test_glue_evaluation.json +51 -0
- wisent/examples/scripts/results/test_glue_pairs.json +14 -0
- wisent/examples/scripts/results/test_gpqa_evaluation.json +51 -0
- wisent/examples/scripts/results/test_gpqa_pairs.json +14 -0
- wisent/examples/scripts/results/test_gpt3_translation_benchmarks_evaluation.json +51 -0
- wisent/examples/scripts/results/test_gpt3_translation_benchmarks_pairs.json +14 -0
- wisent/examples/scripts/results/test_groundcocoa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_groundcocoa_pairs.json +8 -0
- wisent/examples/scripts/results/test_gsm8k_evaluation.json +30 -0
- wisent/examples/scripts/results/test_gsm8k_pairs.json +8 -0
- wisent/examples/scripts/results/test_haerae_evaluation.json +51 -0
- wisent/examples/scripts/results/test_haerae_pairs.json +14 -0
- wisent/examples/scripts/results/test_headqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_headqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_hellaswag_evaluation.json +30 -0
- wisent/examples/scripts/results/test_hellaswag_pairs.json +8 -0
- wisent/examples/scripts/results/test_hendrycks_ethics_evaluation.json +51 -0
- wisent/examples/scripts/results/test_hendrycks_ethics_pairs.json +14 -0
- wisent/examples/scripts/results/test_hendrycks_math_evaluation.json +51 -0
- wisent/examples/scripts/results/test_hendrycks_math_pairs.json +14 -0
- wisent/examples/scripts/results/test_histoires_morales_evaluation.json +30 -0
- wisent/examples/scripts/results/test_histoires_morales_pairs.json +8 -0
- wisent/examples/scripts/results/test_hmmt_evaluation.json +30 -0
- wisent/examples/scripts/results/test_hmmt_feb_2025_evaluation.json +30 -0
- wisent/examples/scripts/results/test_hmmt_feb_2025_pairs.json +8 -0
- wisent/examples/scripts/results/test_hmmt_pairs.json +8 -0
- wisent/examples/scripts/results/test_hrm8k_evaluation.json +51 -0
- wisent/examples/scripts/results/test_hrm8k_pairs.json +14 -0
- wisent/examples/scripts/results/test_humaneval_evaluation.json +30 -0
- wisent/examples/scripts/results/test_humaneval_pairs.json +8 -0
- wisent/examples/scripts/results/test_humaneval_plus_evaluation.json +30 -0
- wisent/examples/scripts/results/test_humaneval_plus_pairs.json +8 -0
- wisent/examples/scripts/results/test_ifeval_evaluation.json +30 -0
- wisent/examples/scripts/results/test_ifeval_pairs.json +8 -0
- wisent/examples/scripts/results/test_instruct_humaneval/test_instruct_humaneval_evaluation.json +30 -0
- wisent/examples/scripts/results/test_instruct_humaneval/test_instruct_humaneval_pairs.json +8 -0
- wisent/examples/scripts/results/test_instruct_humaneval_evaluation.json +30 -0
- wisent/examples/scripts/results/test_instruct_humaneval_pairs.json +8 -0
- wisent/examples/scripts/results/test_inverse_scaling_evaluation.json +51 -0
- wisent/examples/scripts/results/test_inverse_scaling_hindsight_neglect_10shot_evaluation.json +30 -0
- wisent/examples/scripts/results/test_inverse_scaling_hindsight_neglect_10shot_pairs.json +8 -0
- wisent/examples/scripts/results/test_inverse_scaling_mc/test_inverse_scaling_mc_evaluation.json +51 -0
- wisent/examples/scripts/results/test_inverse_scaling_mc/test_inverse_scaling_mc_pairs.json +14 -0
- wisent/examples/scripts/results/test_inverse_scaling_pairs.json +14 -0
- wisent/examples/scripts/results/test_iwslt2017-ar-en_evaluation.json +30 -0
- wisent/examples/scripts/results/test_iwslt2017-ar-en_pairs.json +8 -0
- wisent/examples/scripts/results/test_iwslt2017-en-ar_evaluation.json +30 -0
- wisent/examples/scripts/results/test_iwslt2017-en-ar_pairs.json +8 -0
- wisent/examples/scripts/results/test_iwslt2017_ar_en/test_iwslt2017-ar-en_evaluation.json +30 -0
- wisent/examples/scripts/results/test_iwslt2017_ar_en/test_iwslt2017-ar-en_pairs.json +8 -0
- wisent/examples/scripts/results/test_iwslt2017_en_ar/test_iwslt2017-en-ar_evaluation.json +30 -0
- wisent/examples/scripts/results/test_iwslt2017_en_ar/test_iwslt2017-en-ar_pairs.json +8 -0
- wisent/examples/scripts/results/test_iwslt2017_group/test_iwslt2017_evaluation.json +30 -0
- wisent/examples/scripts/results/test_iwslt2017_group/test_iwslt2017_pairs.json +8 -0
- wisent/examples/scripts/results/test_japanese_leaderboard_evaluation.json +51 -0
- wisent/examples/scripts/results/test_japanese_leaderboard_pairs.json +14 -0
- wisent/examples/scripts/results/test_jsonschema_bench/test_jsonschema_bench_evaluation.json +30 -0
- wisent/examples/scripts/results/test_jsonschema_bench/test_jsonschema_bench_pairs.json +8 -0
- wisent/examples/scripts/results/test_jsonschema_bench_evaluation.json +30 -0
- wisent/examples/scripts/results/test_jsonschema_bench_final/test_jsonschema_bench_evaluation.json +30 -0
- wisent/examples/scripts/results/test_jsonschema_bench_final/test_jsonschema_bench_pairs.json +8 -0
- wisent/examples/scripts/results/test_jsonschema_bench_pairs.json +8 -0
- wisent/examples/scripts/results/test_kbl_evaluation.json +51 -0
- wisent/examples/scripts/results/test_kbl_fixed/test_kbl_evaluation.json +51 -0
- wisent/examples/scripts/results/test_kbl_fixed/test_kbl_pairs.json +14 -0
- wisent/examples/scripts/results/test_kbl_pairs.json +14 -0
- wisent/examples/scripts/results/test_kmmlu_evaluation.json +51 -0
- wisent/examples/scripts/results/test_kmmlu_pairs.json +14 -0
- wisent/examples/scripts/results/test_kobest_evaluation.json +51 -0
- wisent/examples/scripts/results/test_kobest_pairs.json +14 -0
- wisent/examples/scripts/results/test_kormedmcqa/test_kormedmcqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_kormedmcqa/test_kormedmcqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_kormedmcqa_dentist/test_kormedmcqa_dentist_evaluation.json +30 -0
- wisent/examples/scripts/results/test_kormedmcqa_dentist/test_kormedmcqa_dentist_pairs.json +8 -0
- wisent/examples/scripts/results/test_kormedmcqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_kormedmcqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_lambada_cloze_evaluation.json +30 -0
- wisent/examples/scripts/results/test_lambada_cloze_pairs.json +8 -0
- wisent/examples/scripts/results/test_lambada_evaluation.json +30 -0
- wisent/examples/scripts/results/test_lambada_final/test_lambada_openai_mt_stablelm_en_evaluation.json +30 -0
- wisent/examples/scripts/results/test_lambada_final/test_lambada_openai_mt_stablelm_en_pairs.json +8 -0
- wisent/examples/scripts/results/test_lambada_multilingual/test_lambada_multilingual_evaluation.json +51 -0
- wisent/examples/scripts/results/test_lambada_multilingual/test_lambada_multilingual_pairs.json +14 -0
- wisent/examples/scripts/results/test_lambada_multilingual_evaluation.json +51 -0
- wisent/examples/scripts/results/test_lambada_multilingual_pairs.json +14 -0
- wisent/examples/scripts/results/test_lambada_multilingual_stablelm_evaluation.json +51 -0
- wisent/examples/scripts/results/test_lambada_multilingual_stablelm_pairs.json +14 -0
- wisent/examples/scripts/results/test_lambada_openai_evaluation.json +30 -0
- wisent/examples/scripts/results/test_lambada_openai_pairs.json +8 -0
- wisent/examples/scripts/results/test_lambada_pairs.json +8 -0
- wisent/examples/scripts/results/test_lambada_stablelm_en_fixed/test_lambada_openai_mt_stablelm_en_evaluation.json +30 -0
- wisent/examples/scripts/results/test_lambada_stablelm_en_fixed/test_lambada_openai_mt_stablelm_en_pairs.json +8 -0
- wisent/examples/scripts/results/test_lambada_stablelm_fixed/test_lambada_openai_mt_stablelm_en_evaluation.json +30 -0
- wisent/examples/scripts/results/test_lambada_stablelm_fixed/test_lambada_openai_mt_stablelm_en_pairs.json +8 -0
- wisent/examples/scripts/results/test_lambada_standard_evaluation.json +30 -0
- wisent/examples/scripts/results/test_lambada_standard_pairs.json +8 -0
- wisent/examples/scripts/results/test_leaderboard_evaluation.json +51 -0
- wisent/examples/scripts/results/test_leaderboard_pairs.json +14 -0
- wisent/examples/scripts/results/test_libra/test_libra_evaluation.json +51 -0
- wisent/examples/scripts/results/test_libra/test_libra_pairs.json +14 -0
- wisent/examples/scripts/results/test_libra_evaluation.json +51 -0
- wisent/examples/scripts/results/test_libra_pairs.json +14 -0
- wisent/examples/scripts/results/test_lingoly_evaluation.json +30 -0
- wisent/examples/scripts/results/test_lingoly_pairs.json +8 -0
- wisent/examples/scripts/results/test_livecodebench_evaluation.json +30 -0
- wisent/examples/scripts/results/test_livecodebench_pairs.json +8 -0
- wisent/examples/scripts/results/test_livemathbench_cnmo_en_evaluation.json +30 -0
- wisent/examples/scripts/results/test_livemathbench_cnmo_en_pairs.json +8 -0
- wisent/examples/scripts/results/test_livemathbench_cnmo_zh_evaluation.json +30 -0
- wisent/examples/scripts/results/test_livemathbench_cnmo_zh_pairs.json +8 -0
- wisent/examples/scripts/results/test_llama_evaluation.json +30 -0
- wisent/examples/scripts/results/test_llama_pairs.json +8 -0
- wisent/examples/scripts/results/test_logiqa2_evaluation.json +30 -0
- wisent/examples/scripts/results/test_logiqa2_pairs.json +8 -0
- wisent/examples/scripts/results/test_logiqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_logiqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_m_mmlu_evaluation.json +51 -0
- wisent/examples/scripts/results/test_m_mmlu_pairs.json +14 -0
- wisent/examples/scripts/results/test_mastermind/test_mastermind_evaluation.json +51 -0
- wisent/examples/scripts/results/test_mastermind/test_mastermind_pairs.json +14 -0
- wisent/examples/scripts/results/test_mastermind_24_easy/test_mastermind_24_easy_evaluation.json +30 -0
- wisent/examples/scripts/results/test_mastermind_24_easy/test_mastermind_24_easy_pairs.json +8 -0
- wisent/examples/scripts/results/test_mastermind_evaluation.json +51 -0
- wisent/examples/scripts/results/test_mastermind_pairs.json +14 -0
- wisent/examples/scripts/results/test_math500_evaluation.json +30 -0
- wisent/examples/scripts/results/test_math500_pairs.json +8 -0
- wisent/examples/scripts/results/test_math_evaluation.json +30 -0
- wisent/examples/scripts/results/test_math_pairs.json +8 -0
- wisent/examples/scripts/results/test_mathqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_mathqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_mbpp_evaluation.json +30 -0
- wisent/examples/scripts/results/test_mbpp_pairs.json +8 -0
- wisent/examples/scripts/results/test_mbpp_plus_evaluation.json +30 -0
- wisent/examples/scripts/results/test_mbpp_plus_pairs.json +8 -0
- wisent/examples/scripts/results/test_mc_taco_evaluation.json +30 -0
- wisent/examples/scripts/results/test_mc_taco_pairs.json +8 -0
- wisent/examples/scripts/results/test_med_concepts_qa/test_med_concepts_qa_evaluation.json +51 -0
- wisent/examples/scripts/results/test_med_concepts_qa/test_med_concepts_qa_pairs.json +14 -0
- wisent/examples/scripts/results/test_med_concepts_qa_atc_easy/test_med_concepts_qa_atc_easy_evaluation.json +30 -0
- wisent/examples/scripts/results/test_med_concepts_qa_atc_easy/test_med_concepts_qa_atc_easy_pairs.json +8 -0
- wisent/examples/scripts/results/test_med_concepts_qa_evaluation.json +51 -0
- wisent/examples/scripts/results/test_med_concepts_qa_pairs.json +14 -0
- wisent/examples/scripts/results/test_meddialog_evaluation.json +30 -0
- wisent/examples/scripts/results/test_meddialog_pairs.json +8 -0
- wisent/examples/scripts/results/test_meddialog_raw_perplexity/test_meddialog_raw_perplexity_evaluation.json +30 -0
- wisent/examples/scripts/results/test_meddialog_raw_perplexity/test_meddialog_raw_perplexity_pairs.json +8 -0
- wisent/examples/scripts/results/test_mediqa_qa2019_evaluation.json +30 -0
- wisent/examples/scripts/results/test_mediqa_qa2019_pairs.json +8 -0
- wisent/examples/scripts/results/test_medmcqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_medmcqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_medqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_medqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_medtext_evaluation.json +30 -0
- wisent/examples/scripts/results/test_medtext_pairs.json +8 -0
- wisent/examples/scripts/results/test_mela_evaluation.json +51 -0
- wisent/examples/scripts/results/test_mela_pairs.json +14 -0
- wisent/examples/scripts/results/test_meqsum_evaluation.json +30 -0
- wisent/examples/scripts/results/test_meqsum_pairs.json +8 -0
- wisent/examples/scripts/results/test_mercury_evaluation.json +30 -0
- wisent/examples/scripts/results/test_mercury_pairs.json +8 -0
- wisent/examples/scripts/results/test_metabench_evaluation.json +51 -0
- wisent/examples/scripts/results/test_metabench_pairs.json +14 -0
- wisent/examples/scripts/results/test_mgsm_evaluation.json +51 -0
- wisent/examples/scripts/results/test_mgsm_pairs.json +14 -0
- wisent/examples/scripts/results/test_mimic_repsum_evaluation.json +30 -0
- wisent/examples/scripts/results/test_mimic_repsum_pairs.json +8 -0
- wisent/examples/scripts/results/test_minerva_math_evaluation.json +51 -0
- wisent/examples/scripts/results/test_minerva_math_pairs.json +14 -0
- wisent/examples/scripts/results/test_mlqa_evaluation.json +51 -0
- wisent/examples/scripts/results/test_mlqa_pairs.json +14 -0
- wisent/examples/scripts/results/test_mmlu-pro-plus_evaluation.json +51 -0
- wisent/examples/scripts/results/test_mmlu-pro-plus_pairs.json +14 -0
- wisent/examples/scripts/results/test_mmlu_evaluation.json +51 -0
- wisent/examples/scripts/results/test_mmlu_pairs.json +14 -0
- wisent/examples/scripts/results/test_mmlu_pro_evaluation.json +51 -0
- wisent/examples/scripts/results/test_mmlu_pro_pairs.json +14 -0
- wisent/examples/scripts/results/test_mmlu_prox_evaluation.json +51 -0
- wisent/examples/scripts/results/test_mmlu_prox_pairs.json +14 -0
- wisent/examples/scripts/results/test_mmlusr_evaluation.json +30 -0
- wisent/examples/scripts/results/test_mmlusr_pairs.json +8 -0
- wisent/examples/scripts/results/test_mmmu_evaluation.json +51 -0
- wisent/examples/scripts/results/test_mmmu_pairs.json +14 -0
- wisent/examples/scripts/results/test_mnli_evaluation.json +30 -0
- wisent/examples/scripts/results/test_mnli_pairs.json +8 -0
- wisent/examples/scripts/results/test_model_written_evals_evaluation.json +51 -0
- wisent/examples/scripts/results/test_model_written_evals_pairs.json +14 -0
- wisent/examples/scripts/results/test_moral_stories_evaluation.json +30 -0
- wisent/examples/scripts/results/test_moral_stories_pairs.json +8 -0
- wisent/examples/scripts/results/test_mts_dialog_evaluation.json +30 -0
- wisent/examples/scripts/results/test_mts_dialog_pairs.json +8 -0
- wisent/examples/scripts/results/test_multiblimp_evaluation.json +51 -0
- wisent/examples/scripts/results/test_multiblimp_pairs.json +14 -0
- wisent/examples/scripts/results/test_multimedqa_evaluation.json +51 -0
- wisent/examples/scripts/results/test_multimedqa_pairs.json +14 -0
- wisent/examples/scripts/results/test_multipl_e_evaluation.json +30 -0
- wisent/examples/scripts/results/test_multipl_e_pairs.json +8 -0
- wisent/examples/scripts/results/test_mutual_evaluation.json +30 -0
- wisent/examples/scripts/results/test_mutual_pairs.json +8 -0
- wisent/examples/scripts/results/test_non_greedy_robustness_agieval_aqua_rat_evaluation.json +30 -0
- wisent/examples/scripts/results/test_non_greedy_robustness_agieval_aqua_rat_pairs.json +8 -0
- wisent/examples/scripts/results/test_noreval_evaluation.json +51 -0
- wisent/examples/scripts/results/test_noreval_pairs.json +14 -0
- wisent/examples/scripts/results/test_noticia_evaluation.json +30 -0
- wisent/examples/scripts/results/test_noticia_pairs.json +8 -0
- wisent/examples/scripts/results/test_nq_open_evaluation.json +30 -0
- wisent/examples/scripts/results/test_nq_open_pairs.json +8 -0
- wisent/examples/scripts/results/test_olaph_evaluation.json +30 -0
- wisent/examples/scripts/results/test_olaph_pairs.json +8 -0
- wisent/examples/scripts/results/test_openbookqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_openbookqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_openllm_evaluation.json +51 -0
- wisent/examples/scripts/results/test_openllm_pairs.json +14 -0
- wisent/examples/scripts/results/test_option_order_robustness_agieval_aqua_rat_evaluation.json +30 -0
- wisent/examples/scripts/results/test_option_order_robustness_agieval_aqua_rat_pairs.json +8 -0
- wisent/examples/scripts/results/test_paloma_evaluation.json +51 -0
- wisent/examples/scripts/results/test_paloma_pairs.json +14 -0
- wisent/examples/scripts/results/test_passkey/test_passkey_evaluation.json +30 -0
- wisent/examples/scripts/results/test_passkey/test_passkey_pairs.json +8 -0
- wisent/examples/scripts/results/test_paws-x_evaluation.json +51 -0
- wisent/examples/scripts/results/test_paws-x_pairs.json +14 -0
- wisent/examples/scripts/results/test_paws_en/test_paws_en_evaluation.json +30 -0
- wisent/examples/scripts/results/test_paws_en/test_paws_en_pairs.json +8 -0
- wisent/examples/scripts/results/test_penn_treebank_evaluation.json +30 -0
- wisent/examples/scripts/results/test_penn_treebank_pairs.json +8 -0
- wisent/examples/scripts/results/test_pile_10k/test_pile_10k_evaluation.json +30 -0
- wisent/examples/scripts/results/test_pile_10k/test_pile_10k_pairs.json +8 -0
- wisent/examples/scripts/results/test_piqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_piqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_polemo2_evaluation.json +30 -0
- wisent/examples/scripts/results/test_polemo2_pairs.json +8 -0
- wisent/examples/scripts/results/test_polymath_en_high_evaluation.json +30 -0
- wisent/examples/scripts/results/test_polymath_en_high_pairs.json +8 -0
- wisent/examples/scripts/results/test_polymath_en_medium_evaluation.json +30 -0
- wisent/examples/scripts/results/test_polymath_en_medium_pairs.json +8 -0
- wisent/examples/scripts/results/test_polymath_zh_high_evaluation.json +30 -0
- wisent/examples/scripts/results/test_polymath_zh_high_pairs.json +8 -0
- wisent/examples/scripts/results/test_polymath_zh_medium_evaluation.json +30 -0
- wisent/examples/scripts/results/test_polymath_zh_medium_pairs.json +8 -0
- wisent/examples/scripts/results/test_portuguese_bench_evaluation.json +51 -0
- wisent/examples/scripts/results/test_portuguese_bench_pairs.json +14 -0
- wisent/examples/scripts/results/test_prompt_robustness_agieval_aqua_rat/test_prompt_robustness_agieval_aqua_rat_evaluation.json +30 -0
- wisent/examples/scripts/results/test_prompt_robustness_agieval_aqua_rat/test_prompt_robustness_agieval_aqua_rat_pairs.json +8 -0
- wisent/examples/scripts/results/test_prompt_robustness_agieval_aqua_rat_evaluation.json +30 -0
- wisent/examples/scripts/results/test_prompt_robustness_agieval_aqua_rat_pairs.json +8 -0
- wisent/examples/scripts/results/test_prost_evaluation.json +30 -0
- wisent/examples/scripts/results/test_prost_pairs.json +8 -0
- wisent/examples/scripts/results/test_ptb_evaluation.json +30 -0
- wisent/examples/scripts/results/test_ptb_pairs.json +8 -0
- wisent/examples/scripts/results/test_pubmedqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_pubmedqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_pythia_evaluation.json +51 -0
- wisent/examples/scripts/results/test_pythia_pairs.json +14 -0
- wisent/examples/scripts/results/test_qa4mre_evaluation.json +30 -0
- wisent/examples/scripts/results/test_qa4mre_pairs.json +8 -0
- wisent/examples/scripts/results/test_qasper_evaluation.json +30 -0
- wisent/examples/scripts/results/test_qasper_pairs.json +8 -0
- wisent/examples/scripts/results/test_race_evaluation.json +30 -0
- wisent/examples/scripts/results/test_race_pairs.json +8 -0
- wisent/examples/scripts/results/test_realtoxicityprompts_evaluation.json +30 -0
- wisent/examples/scripts/results/test_realtoxicityprompts_pairs.json +8 -0
- wisent/examples/scripts/results/test_recode_evaluation.json +30 -0
- wisent/examples/scripts/results/test_recode_pairs.json +8 -0
- wisent/examples/scripts/results/test_record_evaluation.json +30 -0
- wisent/examples/scripts/results/test_record_pairs.json +8 -0
- wisent/examples/scripts/results/test_ruler_evaluation.json +51 -0
- wisent/examples/scripts/results/test_ruler_pairs.json +14 -0
- wisent/examples/scripts/results/test_sciq_evaluation.json +30 -0
- wisent/examples/scripts/results/test_sciq_pairs.json +8 -0
- wisent/examples/scripts/results/test_score_evaluation.json +51 -0
- wisent/examples/scripts/results/test_score_pairs.json +14 -0
- wisent/examples/scripts/results/test_self_consistency_evaluation.json +30 -0
- wisent/examples/scripts/results/test_self_consistency_pairs.json +8 -0
- wisent/examples/scripts/results/test_siqa/test_siqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_siqa/test_siqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_siqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_siqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_spanish_bench_evaluation.json +51 -0
- wisent/examples/scripts/results/test_spanish_bench_pairs.json +14 -0
- wisent/examples/scripts/results/test_squad2_evaluation.json +30 -0
- wisent/examples/scripts/results/test_squad2_pairs.json +8 -0
- wisent/examples/scripts/results/test_squadv2_evaluation.json +30 -0
- wisent/examples/scripts/results/test_squadv2_pairs.json +8 -0
- wisent/examples/scripts/results/test_super-glue-lm-eval-v1-seq2seq_evaluation.json +30 -0
- wisent/examples/scripts/results/test_super-glue-lm-eval-v1-seq2seq_pairs.json +8 -0
- wisent/examples/scripts/results/test_super-glue-lm-eval-v1_evaluation.json +51 -0
- wisent/examples/scripts/results/test_super-glue-lm-eval-v1_pairs.json +14 -0
- wisent/examples/scripts/results/test_swag_evaluation.json +30 -0
- wisent/examples/scripts/results/test_swag_pairs.json +8 -0
- wisent/examples/scripts/results/test_tinyBenchmarks_evaluation.json +51 -0
- wisent/examples/scripts/results/test_tinyBenchmarks_pairs.json +14 -0
- wisent/examples/scripts/results/test_tmmluplus_evaluation.json +51 -0
- wisent/examples/scripts/results/test_tmmluplus_pairs.json +14 -0
- wisent/examples/scripts/results/test_translation_evaluation.json +51 -0
- wisent/examples/scripts/results/test_translation_pairs.json +14 -0
- wisent/examples/scripts/results/test_triviaqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_triviaqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_truthfulqa-multi_evaluation.json +51 -0
- wisent/examples/scripts/results/test_truthfulqa-multi_pairs.json +14 -0
- wisent/examples/scripts/results/test_truthfulqa_evaluation.json +30 -0
- wisent/examples/scripts/results/test_truthfulqa_mc1_evaluation.json +30 -0
- wisent/examples/scripts/results/test_truthfulqa_mc1_pairs.json +8 -0
- wisent/examples/scripts/results/test_truthfulqa_mc2_evaluation.json +30 -0
- wisent/examples/scripts/results/test_truthfulqa_mc2_pairs.json +8 -0
- wisent/examples/scripts/results/test_truthfulqa_pairs.json +8 -0
- wisent/examples/scripts/results/test_turkishmmlu_evaluation.json +51 -0
- wisent/examples/scripts/results/test_turkishmmlu_pairs.json +14 -0
- wisent/examples/scripts/results/test_unfair_tos_evaluation.json +30 -0
- wisent/examples/scripts/results/test_unfair_tos_pairs.json +8 -0
- wisent/examples/scripts/results/test_unscramble_evaluation.json +51 -0
- wisent/examples/scripts/results/test_unscramble_pairs.json +14 -0
- wisent/examples/scripts/results/test_webqs_evaluation.json +30 -0
- wisent/examples/scripts/results/test_webqs_pairs.json +8 -0
- wisent/examples/scripts/results/test_wikitext103_evaluation.json +30 -0
- wisent/examples/scripts/results/test_wikitext103_pairs.json +8 -0
- wisent/examples/scripts/results/test_wikitext_evaluation.json +30 -0
- wisent/examples/scripts/results/test_wikitext_pairs.json +8 -0
- wisent/examples/scripts/results/test_winogender_evaluation.json +51 -0
- wisent/examples/scripts/results/test_winogender_pairs.json +14 -0
- wisent/examples/scripts/results/test_winogrande_evaluation.json +30 -0
- wisent/examples/scripts/results/test_winogrande_pairs.json +8 -0
- wisent/examples/scripts/results/test_wmdp_evaluation.json +30 -0
- wisent/examples/scripts/results/test_wmdp_pairs.json +8 -0
- wisent/examples/scripts/results/test_wmt-ro-en-t5-prompt_evaluation.json +30 -0
- wisent/examples/scripts/results/test_wmt-ro-en-t5-prompt_pairs.json +8 -0
- wisent/examples/scripts/results/test_wmt14_en_fr_evaluation.json +30 -0
- wisent/examples/scripts/results/test_wmt14_en_fr_pairs.json +8 -0
- wisent/examples/scripts/results/test_wmt16_en_de_evaluation.json +30 -0
- wisent/examples/scripts/results/test_wmt16_en_de_pairs.json +8 -0
- wisent/examples/scripts/results/test_wmt16_ro_en_evaluation.json +30 -0
- wisent/examples/scripts/results/test_wmt16_ro_en_pairs.json +8 -0
- wisent/examples/scripts/results/test_wsc273_evaluation.json +30 -0
- wisent/examples/scripts/results/test_wsc273_pairs.json +8 -0
- wisent/examples/scripts/results/test_xcopa_evaluation.json +51 -0
- wisent/examples/scripts/results/test_xcopa_pairs.json +14 -0
- wisent/examples/scripts/results/test_xnli_eu_evaluation.json +30 -0
- wisent/examples/scripts/results/test_xnli_eu_pairs.json +8 -0
- wisent/examples/scripts/results/test_xnli_evaluation.json +51 -0
- wisent/examples/scripts/results/test_xnli_pairs.json +14 -0
- wisent/examples/scripts/results/test_xquad_evaluation.json +51 -0
- wisent/examples/scripts/results/test_xquad_pairs.json +14 -0
- wisent/examples/scripts/results/test_xstorycloze_evaluation.json +51 -0
- wisent/examples/scripts/results/test_xstorycloze_pairs.json +14 -0
- wisent/examples/scripts/results/test_xsum_evaluation.json +30 -0
- wisent/examples/scripts/results/test_xsum_pairs.json +8 -0
- wisent/examples/scripts/results/test_xwinograd_evaluation.json +51 -0
- wisent/examples/scripts/results/test_xwinograd_pairs.json +14 -0
- wisent/examples/scripts/results/test_yahoo_answers_topics_evaluation.json +30 -0
- wisent/examples/scripts/results/test_yahoo_answers_topics_pairs.json +8 -0
- wisent/parameters/__init__.py +1 -0
- wisent/parameters/lm_eval/all_lm_eval_task_families.json +169 -0
- wisent/parameters/lm_eval/broken_in_lm_eval.json +10 -0
- wisent/parameters/lm_eval/evaluations_not_lm_eval_tasks.json +0 -0
- wisent/parameters/lm_eval/evaluator_check.json +3476 -0
- wisent/parameters/lm_eval/final_verification.json +24782 -0
- wisent/parameters/lm_eval/group_task_evaluators.json +1833 -0
- wisent/parameters/lm_eval/group_tasks.json +150 -0
- wisent/parameters/lm_eval/individual_tasks.json +402 -0
- wisent/parameters/lm_eval/no_readmes.json +1 -0
- wisent/parameters/lm_eval/not_lm_eval_tasks.json +110 -0
- wisent/parameters/lm_eval/read_tasks.json +208 -0
- wisent/parameters/lm_eval/readme_files.json +208 -0
- wisent/parameters/lm_eval/track_progress_not_lm_eval_tasks.json +128 -0
- wisent/parameters/tasks/missing_task_families.json +2963 -0
- wisent/parameters/tasks/remaining_tasks_to_implement.json +199 -0
- wisent/parameters/tasks/risks.json +10 -0
- wisent/parameters/tasks/skills.json +14 -0
- wisent/parameters/tasks/tasks.json +56031 -0
- wisent/scripts/run_quality_metrics_sweep.sh +315 -0
- wisent/tests/__init__.py +0 -0
- wisent/tests/examples/__init__.py +0 -0
- wisent/tests/examples/cli/__init__.py +0 -0
- wisent/tests/examples/cli/activations/__init__.py +0 -0
- wisent/tests/examples/cli/activations/test_get_activations.py +127 -0
- wisent/tests/examples/cli/classifier/__init__.py +0 -0
- wisent/tests/examples/cli/classifier/test_classifier_examples.py +141 -0
- wisent/tests/examples/cli/contrastive_pairs/__init__.py +0 -0
- wisent/tests/examples/cli/contrastive_pairs/test_generate_pairs.py +89 -0
- wisent/tests/examples/cli/evaluation/__init__.py +0 -0
- wisent/tests/examples/cli/evaluation/test_evaluation_examples.py +117 -0
- wisent/tests/examples/cli/generate/__init__.py +0 -0
- wisent/tests/examples/cli/generate/test_generate_with_classifier.py +146 -0
- wisent/tests/examples/cli/generate/test_generate_with_steering.py +149 -0
- wisent/tests/examples/cli/generate/test_only_generate.py +110 -0
- wisent/tests/examples/cli/multi_steering/__init__.py +0 -0
- wisent/tests/examples/cli/multi_steering/test_multi_steer_from_trained_vectors.py +210 -0
- wisent/tests/examples/cli/multi_steering/test_multi_steer_with_different_parameters.py +205 -0
- wisent/tests/examples/cli/multi_steering/test_train_and_multi_steer.py +174 -0
- wisent/tests/examples/cli/optimizer/__init__.py +0 -0
- wisent/tests/examples/cli/optimizer/test_optimize_sample_size.py +102 -0
- wisent/tests/examples/cli/optimizer/test_optimizer_examples.py +59 -0
- wisent/tests/examples/cli/steering/__init__.py +0 -0
- wisent/tests/examples/cli/steering/test_create_steering_vectors.py +135 -0
- wisent/tests/examples/cli/synthetic/__init__.py +0 -0
- wisent/tests/examples/cli/synthetic/test_synthetic_pairs.py +45 -0
- wisent/tests/nosense/__init__.py +6 -0
- wisent/tests/nosense/base_nosense.py +81 -0
- wisent/tests/nosense/math500_nosense.py +72 -0
- wisent/tests/nosense/test_robustness.py +336 -0
- wisent/tests/test_all_cli_commands.py +674 -0
- wisent/tests/test_geometry_comprehensive.py +327 -0
- wisent/tests/test_titan_geometry.py +257 -0
- wisent/tests/visualize_geometry.py +148 -0
- wisent-0.7.379.dist-info/METADATA +64 -0
- wisent-0.7.379.dist-info/RECORD +1720 -0
- wisent-0.7.379.dist-info/WHEEL +5 -0
- wisent-0.7.379.dist-info/entry_points.txt +2 -0
- wisent-0.7.379.dist-info/licenses/LICENSE +21 -0
- wisent-0.7.379.dist-info/top_level.txt +1 -0
wisent/core/parser.py
ADDED
|
@@ -0,0 +1,1662 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Command-line argument parser for wisent.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import argparse
|
|
6
|
+
from typing import List, Optional
|
|
7
|
+
|
|
8
|
+
from wisent.core.errors import ModelNotProvidedError, InvalidValueError
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def setup_parser() -> argparse.ArgumentParser:
|
|
12
|
+
"""Set up the main CLI parser with subcommands."""
|
|
13
|
+
parser = argparse.ArgumentParser(description="Wisent: Advanced AI Safety and Alignment Toolkit")
|
|
14
|
+
|
|
15
|
+
# Global arguments
|
|
16
|
+
parser.add_argument("--verbose", action="store_true", help="Enable verbose output")
|
|
17
|
+
|
|
18
|
+
# Create subparsers for different commands
|
|
19
|
+
subparsers = parser.add_subparsers(dest="command", help="Available commands")
|
|
20
|
+
|
|
21
|
+
# Tasks command (main evaluation pipeline)
|
|
22
|
+
tasks_parser = subparsers.add_parser("tasks", help="Run evaluation tasks")
|
|
23
|
+
setup_tasks_parser(tasks_parser)
|
|
24
|
+
|
|
25
|
+
# Generate pairs command
|
|
26
|
+
generate_parser = subparsers.add_parser("generate-pairs", help="Generate synthetic contrastive pairs")
|
|
27
|
+
setup_generate_pairs_parser(generate_parser)
|
|
28
|
+
|
|
29
|
+
# Synthetic command (generate + train + test)
|
|
30
|
+
synthetic_parser = subparsers.add_parser("synthetic", help="Run synthetic contrastive pair pipeline")
|
|
31
|
+
setup_synthetic_parser(synthetic_parser)
|
|
32
|
+
|
|
33
|
+
# Test nonsense detection command
|
|
34
|
+
test_nonsense_parser = subparsers.add_parser("test-nonsense", help="Test nonsense detection system")
|
|
35
|
+
setup_test_nonsense_parser(test_nonsense_parser)
|
|
36
|
+
|
|
37
|
+
# Monitor command for performance monitoring
|
|
38
|
+
monitor_parser = subparsers.add_parser("monitor", help="Performance monitoring and system information")
|
|
39
|
+
setup_monitor_parser(monitor_parser)
|
|
40
|
+
|
|
41
|
+
# Agent command for autonomous agent interaction
|
|
42
|
+
agent_parser = subparsers.add_parser("agent", help="Interact with autonomous agent")
|
|
43
|
+
setup_agent_parser(agent_parser)
|
|
44
|
+
|
|
45
|
+
# Model configuration command for managing optimal parameters
|
|
46
|
+
model_config_parser = subparsers.add_parser("model-config", help="Manage model-specific optimal parameters")
|
|
47
|
+
setup_model_config_parser(model_config_parser)
|
|
48
|
+
|
|
49
|
+
# Configure model command for setting up new/unsupported models
|
|
50
|
+
configure_model_parser = subparsers.add_parser(
|
|
51
|
+
"configure-model", help="Configure tokens and layer access for unsupported models"
|
|
52
|
+
)
|
|
53
|
+
setup_configure_model_parser(configure_model_parser)
|
|
54
|
+
|
|
55
|
+
# Classification optimization command for finding optimal classification parameters
|
|
56
|
+
classification_optimizer_parser = subparsers.add_parser(
|
|
57
|
+
"optimize-classification", help="Optimize classification parameters across all tasks"
|
|
58
|
+
)
|
|
59
|
+
setup_classification_optimizer_parser(classification_optimizer_parser)
|
|
60
|
+
|
|
61
|
+
# Steering optimization command for finding optimal steering parameters
|
|
62
|
+
steering_optimizer_parser = subparsers.add_parser(
|
|
63
|
+
"optimize-steering", help="Optimize steering parameters for different methods"
|
|
64
|
+
)
|
|
65
|
+
setup_steering_optimizer_parser(steering_optimizer_parser)
|
|
66
|
+
|
|
67
|
+
# Sample size optimization command for finding optimal training sample sizes
|
|
68
|
+
sample_size_optimizer_parser = subparsers.add_parser(
|
|
69
|
+
"optimize-sample-size", help="Find optimal training sample size for classifiers"
|
|
70
|
+
)
|
|
71
|
+
setup_sample_size_optimizer_parser(sample_size_optimizer_parser)
|
|
72
|
+
|
|
73
|
+
# Full optimization command that runs both classification and sample size optimization
|
|
74
|
+
full_optimizer_parser = subparsers.add_parser(
|
|
75
|
+
"full-optimize", help="Run full optimization: classification parameters then sample size"
|
|
76
|
+
)
|
|
77
|
+
setup_full_optimizer_parser(full_optimizer_parser)
|
|
78
|
+
|
|
79
|
+
# New unified optimize command
|
|
80
|
+
from wisent.core.parser_arguments.optimize_parser import setup_optimize_parser
|
|
81
|
+
optimize_parser = subparsers.add_parser(
|
|
82
|
+
"optimize", help="Full Optuna-based optimization: classification + steering (ALL methods) + weights"
|
|
83
|
+
)
|
|
84
|
+
setup_optimize_parser(optimize_parser)
|
|
85
|
+
|
|
86
|
+
# Generate vector command for creating steering vectors without tasks
|
|
87
|
+
generate_vector_parser = subparsers.add_parser(
|
|
88
|
+
"generate-vector", help="Generate steering vectors from contrastive pairs (file or description)"
|
|
89
|
+
)
|
|
90
|
+
setup_generate_vector_parser(generate_vector_parser)
|
|
91
|
+
|
|
92
|
+
# Multi-vector steering command for combining multiple vectors at inference time
|
|
93
|
+
multi_steer_parser = subparsers.add_parser(
|
|
94
|
+
"multi-steer", help="Combine multiple steering vectors dynamically at inference time"
|
|
95
|
+
)
|
|
96
|
+
setup_multi_steer_parser(multi_steer_parser)
|
|
97
|
+
|
|
98
|
+
# Single-prompt evaluation command for real-time steering assessment
|
|
99
|
+
evaluate_parser = subparsers.add_parser(
|
|
100
|
+
"evaluate", help="Evaluate single prompt with steering vector and return quality scores"
|
|
101
|
+
)
|
|
102
|
+
setup_evaluate_parser(evaluate_parser)
|
|
103
|
+
|
|
104
|
+
# Unified goodness training command - train single vector from ALL benchmarks
|
|
105
|
+
unified_goodness_parser = subparsers.add_parser(
|
|
106
|
+
"train-unified-goodness",
|
|
107
|
+
help="Train a single 'goodness' steering vector from pooled multi-benchmark data"
|
|
108
|
+
)
|
|
109
|
+
setup_train_unified_goodness_parser(unified_goodness_parser)
|
|
110
|
+
|
|
111
|
+
return parser
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def setup_tasks_parser(parser):
|
|
115
|
+
"""Set up the tasks subcommand parser."""
|
|
116
|
+
|
|
117
|
+
# Task listing options (mutually exclusive with task execution)
|
|
118
|
+
list_group = parser.add_mutually_exclusive_group()
|
|
119
|
+
list_group.add_argument(
|
|
120
|
+
"--list-tasks",
|
|
121
|
+
action="store_true",
|
|
122
|
+
help="List all 37 available benchmark tasks organized by priority (excludes 28 known problematic benchmarks)",
|
|
123
|
+
)
|
|
124
|
+
list_group.add_argument(
|
|
125
|
+
"--task-info", type=str, metavar="TASK_NAME", help="Show detailed information about a specific task"
|
|
126
|
+
)
|
|
127
|
+
list_group.add_argument("--all", action="store_true", help="Run all 37 available benchmarks automatically")
|
|
128
|
+
|
|
129
|
+
# Task execution argument (optional when using listing commands or --all)
|
|
130
|
+
parser.add_argument(
|
|
131
|
+
"task_names",
|
|
132
|
+
nargs="?",
|
|
133
|
+
help="Comma-separated list of available task names (37 working benchmarks), or path to CSV/JSON file with --from-csv/--from-json (not needed with --all)",
|
|
134
|
+
)
|
|
135
|
+
|
|
136
|
+
# Skills/risks based task selection
|
|
137
|
+
parser.add_argument(
|
|
138
|
+
"--skills", type=str, nargs="+", help="Select tasks by skill categories (e.g., coding, mathematics, reasoning)"
|
|
139
|
+
)
|
|
140
|
+
parser.add_argument(
|
|
141
|
+
"--risks",
|
|
142
|
+
type=str,
|
|
143
|
+
nargs="+",
|
|
144
|
+
help="Select tasks by risk categories (e.g., harmfulness, toxicity, hallucination)",
|
|
145
|
+
)
|
|
146
|
+
parser.add_argument(
|
|
147
|
+
"--num-tasks",
|
|
148
|
+
type=int,
|
|
149
|
+
default=None,
|
|
150
|
+
help="Number of tasks to randomly select from matched tasks (default: all)",
|
|
151
|
+
)
|
|
152
|
+
parser.add_argument(
|
|
153
|
+
"--min-quality-score",
|
|
154
|
+
type=int,
|
|
155
|
+
default=2,
|
|
156
|
+
choices=[1, 2, 3, 4, 5],
|
|
157
|
+
help="Minimum quality score for tasks when using --skills/--risks (default: 2)",
|
|
158
|
+
)
|
|
159
|
+
parser.add_argument(
|
|
160
|
+
"--task-seed", type=int, default=None, help="Random seed for task selection (for reproducibility)"
|
|
161
|
+
)
|
|
162
|
+
|
|
163
|
+
# Mixed sampling from multiple benchmarks
|
|
164
|
+
parser.add_argument(
|
|
165
|
+
"--tag",
|
|
166
|
+
type=str,
|
|
167
|
+
nargs="+",
|
|
168
|
+
help="Sample randomly from all benchmarks with these tags (e.g., --tag coding). Creates a mixed dataset from multiple benchmarks.",
|
|
169
|
+
)
|
|
170
|
+
parser.add_argument(
|
|
171
|
+
"--mixed-samples",
|
|
172
|
+
type=int,
|
|
173
|
+
default=1000,
|
|
174
|
+
help="Total number of samples to collect when using --tag (default: 1000)",
|
|
175
|
+
)
|
|
176
|
+
parser.add_argument(
|
|
177
|
+
"--tag-mode",
|
|
178
|
+
type=str,
|
|
179
|
+
choices=["any", "all"],
|
|
180
|
+
default="any",
|
|
181
|
+
help="Whether benchmarks must have ANY or ALL specified tags (default: any)",
|
|
182
|
+
)
|
|
183
|
+
|
|
184
|
+
# Cross-benchmark evaluation
|
|
185
|
+
parser.add_argument(
|
|
186
|
+
"--train-task", type=str, help="Task/benchmark to train on (can be a task name or --tag for mixed)"
|
|
187
|
+
)
|
|
188
|
+
parser.add_argument(
|
|
189
|
+
"--eval-task", type=str, help="Task/benchmark to evaluate on (can be a task name or --tag for mixed)"
|
|
190
|
+
)
|
|
191
|
+
parser.add_argument(
|
|
192
|
+
"--train-tag", type=str, nargs="+", help="Tags for training data when using cross-benchmark evaluation"
|
|
193
|
+
)
|
|
194
|
+
parser.add_argument(
|
|
195
|
+
"--eval-tag", type=str, nargs="+", help="Tags for evaluation data when using cross-benchmark evaluation"
|
|
196
|
+
)
|
|
197
|
+
parser.add_argument(
|
|
198
|
+
"--cross-benchmark",
|
|
199
|
+
action="store_true",
|
|
200
|
+
help="Enable cross-benchmark evaluation mode (train on one, eval on another)",
|
|
201
|
+
)
|
|
202
|
+
|
|
203
|
+
# Synthetic pair generation
|
|
204
|
+
parser.add_argument(
|
|
205
|
+
"--synthetic", action="store_true", help="Generate synthetic contrastive pairs from a trait description"
|
|
206
|
+
)
|
|
207
|
+
parser.add_argument(
|
|
208
|
+
"--trait",
|
|
209
|
+
type=str,
|
|
210
|
+
help="Natural language description of desired model behavior (e.g., 'hallucinates less', 'more factual', 'less verbose')",
|
|
211
|
+
)
|
|
212
|
+
parser.add_argument(
|
|
213
|
+
"--num-synthetic-pairs", type=int, default=30, help="Number of synthetic pairs to generate (default: 30)"
|
|
214
|
+
)
|
|
215
|
+
parser.add_argument("--save-synthetic", type=str, help="Path to save generated synthetic pairs as JSON")
|
|
216
|
+
parser.add_argument(
|
|
217
|
+
"--load-synthetic", type=str, help="Path to load previously generated synthetic pairs from JSON"
|
|
218
|
+
)
|
|
219
|
+
|
|
220
|
+
parser.add_argument("--model", type=str, default="meta-llama/Llama-3.1-8B-Instruct", help="Model name or path")
|
|
221
|
+
parser.add_argument(
|
|
222
|
+
"--layer",
|
|
223
|
+
type=str,
|
|
224
|
+
default="15",
|
|
225
|
+
help="Layer(s) to extract activations from. Can be a single layer (15), range (14-16), or comma-separated list (14,15,16)",
|
|
226
|
+
)
|
|
227
|
+
parser.add_argument("--shots", type=int, default=0, help="Number of few-shot examples")
|
|
228
|
+
parser.add_argument("--split-ratio", type=float, default=0.8, help="Train/test split ratio")
|
|
229
|
+
parser.add_argument("--limit", type=int, default=None, help="Limit number of documents per task")
|
|
230
|
+
parser.add_argument(
|
|
231
|
+
"--training-limit",
|
|
232
|
+
type=int,
|
|
233
|
+
default=None,
|
|
234
|
+
help="Limit number of training documents (overrides limit for training)",
|
|
235
|
+
)
|
|
236
|
+
parser.add_argument(
|
|
237
|
+
"--testing-limit",
|
|
238
|
+
type=int,
|
|
239
|
+
default=None,
|
|
240
|
+
help="Limit number of testing documents (overrides limit for testing)",
|
|
241
|
+
)
|
|
242
|
+
parser.add_argument("--output", type=str, default="./results", help="Output directory for results")
|
|
243
|
+
parser.add_argument(
|
|
244
|
+
"--classifier-type", type=str, choices=["logistic", "mlp"], default="logistic", help="Type of classifier"
|
|
245
|
+
)
|
|
246
|
+
parser.add_argument("--max-new-tokens", type=int, default=300, help="Maximum new tokens for generation")
|
|
247
|
+
parser.add_argument("--device", type=str, default=None, help="Device to run on")
|
|
248
|
+
parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
|
|
249
|
+
parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
|
|
250
|
+
parser.add_argument(
|
|
251
|
+
"--token-aggregation",
|
|
252
|
+
type=str,
|
|
253
|
+
choices=["average", "final", "first", "max", "min", "max_score"],
|
|
254
|
+
default="average",
|
|
255
|
+
help="How to aggregate token scores for classification. 'max_score' uses the highest individual token hallucination score as the response score.",
|
|
256
|
+
)
|
|
257
|
+
parser.add_argument(
|
|
258
|
+
"--ground-truth-method",
|
|
259
|
+
type=str,
|
|
260
|
+
choices=[
|
|
261
|
+
"none",
|
|
262
|
+
"exact_match",
|
|
263
|
+
"substring_match",
|
|
264
|
+
"user_specified",
|
|
265
|
+
"interactive",
|
|
266
|
+
"manual_review",
|
|
267
|
+
"good",
|
|
268
|
+
"lm-eval-harness",
|
|
269
|
+
],
|
|
270
|
+
default="lm-eval-harness",
|
|
271
|
+
help="Method for ground truth evaluation. 'lm-eval-harness' uses lm-eval-harness tasks for evaluation (default for most tasks), 'none' skips evaluation, 'exact_match' and 'substring_match' are problematic for free-form generation, 'user_specified' allows manual labeling, 'interactive' prompts for y/n labeling, 'manual_review' marks for review, 'good' marks everything as truthful (for debugging)",
|
|
272
|
+
)
|
|
273
|
+
parser.add_argument(
|
|
274
|
+
"--user-labels",
|
|
275
|
+
type=str,
|
|
276
|
+
nargs="*",
|
|
277
|
+
default=None,
|
|
278
|
+
help="User-specified ground truth labels for responses ('truthful' or 'hallucination'). Used with --ground-truth-method user_specified",
|
|
279
|
+
)
|
|
280
|
+
|
|
281
|
+
# File input arguments
|
|
282
|
+
parser.add_argument(
|
|
283
|
+
"--from-csv",
|
|
284
|
+
action="store_true",
|
|
285
|
+
help="Load task data from CSV file. Requires columns: question, correct_answer, incorrect_answer",
|
|
286
|
+
)
|
|
287
|
+
parser.add_argument(
|
|
288
|
+
"--from-json",
|
|
289
|
+
action="store_true",
|
|
290
|
+
help="Load task data from JSON file. Expected format: list of objects with question, correct_answer, incorrect_answer",
|
|
291
|
+
)
|
|
292
|
+
parser.add_argument(
|
|
293
|
+
"--question-col", type=str, default="question", help="Column name for questions in CSV file (default: question)"
|
|
294
|
+
)
|
|
295
|
+
parser.add_argument(
|
|
296
|
+
"--correct-col",
|
|
297
|
+
type=str,
|
|
298
|
+
default="correct_answer",
|
|
299
|
+
help="Column name for correct answers in CSV file (default: correct_answer)",
|
|
300
|
+
)
|
|
301
|
+
parser.add_argument(
|
|
302
|
+
"--incorrect-col",
|
|
303
|
+
type=str,
|
|
304
|
+
default="incorrect_answer",
|
|
305
|
+
help="Column name for incorrect answers in CSV file (default: incorrect_answer)",
|
|
306
|
+
)
|
|
307
|
+
|
|
308
|
+
# Optimization arguments
|
|
309
|
+
parser.add_argument(
|
|
310
|
+
"--optimize",
|
|
311
|
+
action="store_true",
|
|
312
|
+
help="Enable hyperparameter optimization. When enabled, will find optimal layer, threshold, and aggregation method",
|
|
313
|
+
)
|
|
314
|
+
parser.add_argument(
|
|
315
|
+
"--optimize-layers",
|
|
316
|
+
type=str,
|
|
317
|
+
default="all",
|
|
318
|
+
help="Layer range for optimization (e.g., '8-24' or '10,15,20' or 'all'). Default: all (uses all model layers)",
|
|
319
|
+
)
|
|
320
|
+
parser.add_argument(
|
|
321
|
+
"--optimize-metric",
|
|
322
|
+
type=str,
|
|
323
|
+
choices=["accuracy", "f1", "precision", "recall", "auc"],
|
|
324
|
+
default="f1",
|
|
325
|
+
help="Metric to optimize for. Default: f1",
|
|
326
|
+
)
|
|
327
|
+
parser.add_argument(
|
|
328
|
+
"--optimize-max-combinations",
|
|
329
|
+
type=int,
|
|
330
|
+
default=100,
|
|
331
|
+
help="Maximum number of hyperparameter combinations to test. Default: 100",
|
|
332
|
+
)
|
|
333
|
+
parser.add_argument(
|
|
334
|
+
"--auto-optimize",
|
|
335
|
+
action="store_true",
|
|
336
|
+
help="Automatically enable optimization when layer is not specified or is -1",
|
|
337
|
+
)
|
|
338
|
+
|
|
339
|
+
# Dataset validation arguments
|
|
340
|
+
parser.add_argument(
|
|
341
|
+
"--allow-small-dataset",
|
|
342
|
+
action="store_true",
|
|
343
|
+
help="Allow training with datasets smaller than 4 samples (may cause training issues)",
|
|
344
|
+
)
|
|
345
|
+
|
|
346
|
+
# Detection handling arguments
|
|
347
|
+
parser.add_argument(
|
|
348
|
+
"--detection-action",
|
|
349
|
+
type=str,
|
|
350
|
+
choices=["pass_through", "replace_with_placeholder", "regenerate_until_safe"],
|
|
351
|
+
default="pass_through",
|
|
352
|
+
help="Action to take when problematic content is detected (default: pass_through)",
|
|
353
|
+
)
|
|
354
|
+
parser.add_argument(
|
|
355
|
+
"--placeholder-message",
|
|
356
|
+
type=str,
|
|
357
|
+
default=None,
|
|
358
|
+
help="Custom placeholder message for detected content (if not specified, uses default)",
|
|
359
|
+
)
|
|
360
|
+
parser.add_argument(
|
|
361
|
+
"--max-regeneration-attempts",
|
|
362
|
+
type=int,
|
|
363
|
+
default=3,
|
|
364
|
+
help="Maximum attempts to regenerate safe content (default: 3)",
|
|
365
|
+
)
|
|
366
|
+
parser.add_argument(
|
|
367
|
+
"--detection-threshold",
|
|
368
|
+
type=float,
|
|
369
|
+
default=0.6,
|
|
370
|
+
help="Threshold for classification (higher = more strict detection) (default: 0.6)",
|
|
371
|
+
)
|
|
372
|
+
parser.add_argument("--log-detections", action="store_true", help="Enable logging of detection events")
|
|
373
|
+
|
|
374
|
+
# Code execution security arguments
|
|
375
|
+
parser.add_argument(
|
|
376
|
+
"--trust-code-execution",
|
|
377
|
+
action="store_true",
|
|
378
|
+
help="⚠️ UNSAFE: Allow code execution without Docker in trusted sandbox environments (e.g., RunPod containers). Use only in secure, isolated environments!",
|
|
379
|
+
)
|
|
380
|
+
|
|
381
|
+
# Steering mode arguments
|
|
382
|
+
parser.add_argument(
|
|
383
|
+
"--steering-mode", action="store_true", help="Enable steering mode (uses CAA vectors instead of classification)"
|
|
384
|
+
)
|
|
385
|
+
parser.add_argument(
|
|
386
|
+
"--steering-strength", type=float, default=1.0, help="Strength of steering vector application (default: 1.0)"
|
|
387
|
+
)
|
|
388
|
+
|
|
389
|
+
# Steering method selection
|
|
390
|
+
parser.add_argument(
|
|
391
|
+
"--steering-method",
|
|
392
|
+
type=str,
|
|
393
|
+
default="CAA",
|
|
394
|
+
choices=["CAA"],
|
|
395
|
+
help="Steering method to use",
|
|
396
|
+
)
|
|
397
|
+
|
|
398
|
+
# Steering output mode selection
|
|
399
|
+
parser.add_argument(
|
|
400
|
+
"--output-mode",
|
|
401
|
+
type=str,
|
|
402
|
+
default="both",
|
|
403
|
+
choices=["likelihoods", "responses", "both"],
|
|
404
|
+
help="Type of comparison to show: 'likelihoods' for log-likelihood comparison only, 'responses' for response generation only, 'both' for both (default: both)",
|
|
405
|
+
)
|
|
406
|
+
|
|
407
|
+
# Token steering arguments
|
|
408
|
+
parser.add_argument("--enable-token-steering", action="store_true", help="Enable token-level steering control")
|
|
409
|
+
parser.add_argument(
|
|
410
|
+
"--token-steering-strategy",
|
|
411
|
+
type=str,
|
|
412
|
+
default="last_only",
|
|
413
|
+
choices=[
|
|
414
|
+
"last_only",
|
|
415
|
+
"first_only",
|
|
416
|
+
"all_equal",
|
|
417
|
+
"exponential_decay",
|
|
418
|
+
"exponential_growth",
|
|
419
|
+
"linear_decay",
|
|
420
|
+
"linear_growth",
|
|
421
|
+
"custom",
|
|
422
|
+
],
|
|
423
|
+
help="Token steering strategy (default: last_only)",
|
|
424
|
+
)
|
|
425
|
+
parser.add_argument(
|
|
426
|
+
"--token-decay-rate",
|
|
427
|
+
type=float,
|
|
428
|
+
default=0.5,
|
|
429
|
+
help="Decay rate for exponential token steering strategies (0-1, default: 0.5)",
|
|
430
|
+
)
|
|
431
|
+
parser.add_argument(
|
|
432
|
+
"--token-min-strength",
|
|
433
|
+
type=float,
|
|
434
|
+
default=0.1,
|
|
435
|
+
help="Minimum steering strength for token strategies (default: 0.1)",
|
|
436
|
+
)
|
|
437
|
+
parser.add_argument(
|
|
438
|
+
"--token-max-strength",
|
|
439
|
+
type=float,
|
|
440
|
+
default=1.0,
|
|
441
|
+
help="Maximum steering strength for token strategies (default: 1.0)",
|
|
442
|
+
)
|
|
443
|
+
parser.add_argument(
|
|
444
|
+
"--token-apply-to-prompt",
|
|
445
|
+
action="store_true",
|
|
446
|
+
help="Apply steering to prompt tokens as well as generated tokens",
|
|
447
|
+
)
|
|
448
|
+
parser.add_argument(
|
|
449
|
+
"--token-prompt-strength-multiplier",
|
|
450
|
+
type=float,
|
|
451
|
+
default=0.1,
|
|
452
|
+
help="Strength multiplier for prompt tokens (default: 0.1)",
|
|
453
|
+
)
|
|
454
|
+
|
|
455
|
+
# Training/Inference mode arguments
|
|
456
|
+
parser.add_argument(
|
|
457
|
+
"--train-only",
|
|
458
|
+
action="store_true",
|
|
459
|
+
help="Training-only mode: train classifiers/vectors and save them, skip inference",
|
|
460
|
+
)
|
|
461
|
+
parser.add_argument(
|
|
462
|
+
"--inference-only",
|
|
463
|
+
action="store_true",
|
|
464
|
+
help="Inference-only mode: load pre-trained classifiers/vectors and use for monitoring/steering",
|
|
465
|
+
)
|
|
466
|
+
parser.add_argument(
|
|
467
|
+
"--save-classifier",
|
|
468
|
+
type=str,
|
|
469
|
+
default=None,
|
|
470
|
+
help="Path to save trained classifier(s). In multi-layer mode, saves one file per layer with layer suffix",
|
|
471
|
+
)
|
|
472
|
+
parser.add_argument(
|
|
473
|
+
"--load-classifier",
|
|
474
|
+
type=str,
|
|
475
|
+
default=None,
|
|
476
|
+
help="Path to load pre-trained classifier(s). In multi-layer mode, expects files with layer suffix",
|
|
477
|
+
)
|
|
478
|
+
parser.add_argument(
|
|
479
|
+
"--classifier-dir",
|
|
480
|
+
type=str,
|
|
481
|
+
default="./models",
|
|
482
|
+
help="Directory for saving/loading classifiers and vectors (default: ./models)",
|
|
483
|
+
)
|
|
484
|
+
|
|
485
|
+
# Prompt construction and token targeting strategy arguments
|
|
486
|
+
parser.add_argument(
|
|
487
|
+
"--prompt-construction-strategy",
|
|
488
|
+
type=str,
|
|
489
|
+
choices=["multiple_choice", "role_playing", "direct_completion", "instruction_following"],
|
|
490
|
+
default="multiple_choice",
|
|
491
|
+
help="Strategy for constructing prompts from question-answer pairs (default: multiple_choice)",
|
|
492
|
+
)
|
|
493
|
+
parser.add_argument(
|
|
494
|
+
"--token-targeting-strategy",
|
|
495
|
+
type=str,
|
|
496
|
+
choices=["choice_token", "continuation_token", "last_token", "first_token", "mean_pooling", "max_pooling"],
|
|
497
|
+
default="choice_token",
|
|
498
|
+
help="Strategy for targeting tokens during activation extraction (default: choice_token)",
|
|
499
|
+
)
|
|
500
|
+
|
|
501
|
+
# Normalization options
|
|
502
|
+
parser.add_argument("--normalize-mode", action="store_true", help="Enable normalization mode (legacy flag)")
|
|
503
|
+
parser.add_argument(
|
|
504
|
+
"--normalization-method",
|
|
505
|
+
type=str,
|
|
506
|
+
default="none",
|
|
507
|
+
choices=["none", "l2_unit", "cross_behavior", "layer_wise_mean"],
|
|
508
|
+
help="Vector normalization method to apply",
|
|
509
|
+
)
|
|
510
|
+
parser.add_argument("--target-norm", type=float, default=None, help="Target norm for certain normalization methods")
|
|
511
|
+
|
|
512
|
+
# Nonsense detection options
|
|
513
|
+
parser.add_argument(
|
|
514
|
+
"--enable-nonsense-detection",
|
|
515
|
+
action="store_true",
|
|
516
|
+
help="Enable nonsense detection to stop lobotomized responses",
|
|
517
|
+
)
|
|
518
|
+
parser.add_argument(
|
|
519
|
+
"--max-word-length",
|
|
520
|
+
type=int,
|
|
521
|
+
default=20,
|
|
522
|
+
help="Maximum reasonable word length for nonsense detection (default: 20)",
|
|
523
|
+
)
|
|
524
|
+
parser.add_argument(
|
|
525
|
+
"--repetition-threshold",
|
|
526
|
+
type=float,
|
|
527
|
+
default=0.7,
|
|
528
|
+
help="Threshold for repetitive content detection (0-1, default: 0.7)",
|
|
529
|
+
)
|
|
530
|
+
parser.add_argument(
|
|
531
|
+
"--gibberish-threshold",
|
|
532
|
+
type=float,
|
|
533
|
+
default=0.3,
|
|
534
|
+
help="Threshold for gibberish word detection (0-1, default: 0.3)",
|
|
535
|
+
)
|
|
536
|
+
parser.add_argument(
|
|
537
|
+
"--disable-dictionary-check",
|
|
538
|
+
action="store_true",
|
|
539
|
+
help="Disable dictionary-based word validation (faster but less accurate)",
|
|
540
|
+
)
|
|
541
|
+
parser.add_argument(
|
|
542
|
+
"--nonsense-action",
|
|
543
|
+
type=str,
|
|
544
|
+
default="regenerate",
|
|
545
|
+
choices=["regenerate", "stop", "flag"],
|
|
546
|
+
help="Action when nonsense is detected: regenerate, stop generation, or flag for review",
|
|
547
|
+
)
|
|
548
|
+
|
|
549
|
+
# Performance monitoring options
|
|
550
|
+
parser.add_argument(
|
|
551
|
+
"--enable-memory-tracking", action="store_true", help="Enable memory usage tracking and reporting"
|
|
552
|
+
)
|
|
553
|
+
parser.add_argument(
|
|
554
|
+
"--enable-latency-tracking", action="store_true", help="Enable latency/timing tracking and reporting"
|
|
555
|
+
)
|
|
556
|
+
parser.add_argument(
|
|
557
|
+
"--memory-sampling-interval", type=float, default=0.1, help="Memory sampling interval in seconds (default: 0.1)"
|
|
558
|
+
)
|
|
559
|
+
parser.add_argument("--track-gpu-memory", action="store_true", help="Track GPU memory usage (requires CUDA)")
|
|
560
|
+
parser.add_argument(
|
|
561
|
+
"--detailed-performance-report",
|
|
562
|
+
action="store_true",
|
|
563
|
+
help="Generate detailed performance report with all metrics",
|
|
564
|
+
)
|
|
565
|
+
parser.add_argument("--export-performance-csv", type=str, default=None, help="Export performance data to CSV file")
|
|
566
|
+
parser.add_argument(
|
|
567
|
+
"--show-memory-usage", action="store_true", help="Show current memory usage without full tracking"
|
|
568
|
+
)
|
|
569
|
+
parser.add_argument("--show-timing-summary", action="store_true", help="Show timing summary after evaluation")
|
|
570
|
+
|
|
571
|
+
# Test-time activation saving/loading options
|
|
572
|
+
parser.add_argument(
|
|
573
|
+
"--save-test-activations", type=str, default=None, help="Save test activations to file for future use"
|
|
574
|
+
)
|
|
575
|
+
parser.add_argument(
|
|
576
|
+
"--load-test-activations", type=str, default=None, help="Load test activations from file instead of computing"
|
|
577
|
+
)
|
|
578
|
+
|
|
579
|
+
# Priority-aware benchmark selection options
|
|
580
|
+
parser.add_argument(
|
|
581
|
+
"--priority",
|
|
582
|
+
type=str,
|
|
583
|
+
default="all",
|
|
584
|
+
choices=["all", "high", "medium", "low"],
|
|
585
|
+
help="Priority level for benchmark selection (default: all)",
|
|
586
|
+
)
|
|
587
|
+
parser.add_argument(
|
|
588
|
+
"--fast-only", action="store_true", help="Only use fast benchmarks (high priority, < 13.5s loading time)"
|
|
589
|
+
)
|
|
590
|
+
parser.add_argument(
|
|
591
|
+
"--time-budget",
|
|
592
|
+
type=float,
|
|
593
|
+
default=None,
|
|
594
|
+
help="Time budget in minutes for benchmark selection (auto-selects fast benchmarks)",
|
|
595
|
+
)
|
|
596
|
+
parser.add_argument(
|
|
597
|
+
"--max-benchmarks",
|
|
598
|
+
type=int,
|
|
599
|
+
default=None,
|
|
600
|
+
help="Maximum number of benchmarks to select (combines with priority filtering)",
|
|
601
|
+
)
|
|
602
|
+
parser.add_argument(
|
|
603
|
+
"--smart-selection", action="store_true", help="Use smart benchmark selection based on relevance and priority"
|
|
604
|
+
)
|
|
605
|
+
parser.add_argument(
|
|
606
|
+
"--prefer-fast",
|
|
607
|
+
action="store_true",
|
|
608
|
+
help="Prefer fast benchmarks in selection when multiple options are available",
|
|
609
|
+
)
|
|
610
|
+
|
|
611
|
+
parser.add_argument(
|
|
612
|
+
"--save-steering-vector", type=str, default=None, help="Path to save the computed steering vector"
|
|
613
|
+
)
|
|
614
|
+
parser.add_argument(
|
|
615
|
+
"--load-steering-vector", type=str, default=None, help="Path to load a pre-computed steering vector"
|
|
616
|
+
)
|
|
617
|
+
|
|
618
|
+
# Additional output options
|
|
619
|
+
parser.add_argument("--csv-output", type=str, default=None, help="Path to save results in CSV format")
|
|
620
|
+
parser.add_argument("--evaluation-report", type=str, default=None, help="Path to save evaluation report")
|
|
621
|
+
parser.add_argument("--continue-on-error", action="store_true", help="Continue processing other tasks if one fails")
|
|
622
|
+
|
|
623
|
+
# Benchmark caching arguments
|
|
624
|
+
parser.add_argument(
|
|
625
|
+
"--cache-benchmark",
|
|
626
|
+
action="store_true",
|
|
627
|
+
default=True,
|
|
628
|
+
help="Cache the benchmark data locally for faster future access (default: True)",
|
|
629
|
+
)
|
|
630
|
+
parser.add_argument("--no-cache", dest="cache_benchmark", action="store_false", help="Disable benchmark caching")
|
|
631
|
+
parser.add_argument(
|
|
632
|
+
"--use-cached", action="store_true", default=True, help="Use cached benchmark data if available (default: True)"
|
|
633
|
+
)
|
|
634
|
+
parser.add_argument(
|
|
635
|
+
"--force-download", action="store_true", help="Force fresh download even if cached version exists"
|
|
636
|
+
)
|
|
637
|
+
parser.add_argument(
|
|
638
|
+
"--cache-dir",
|
|
639
|
+
type=str,
|
|
640
|
+
default="./benchmark_cache",
|
|
641
|
+
help="Directory to store cached benchmark data (default: ./benchmark_cache)",
|
|
642
|
+
)
|
|
643
|
+
parser.add_argument("--cache-status", action="store_true", help="Show cache status and exit")
|
|
644
|
+
parser.add_argument("--cleanup-cache", type=int, metavar="DAYS", help="Clean up cache entries older than DAYS days")
|
|
645
|
+
|
|
646
|
+
|
|
647
|
+
def parse_layers_from_arg(layer_arg: str, model=None) -> List[int]:
|
|
648
|
+
"""
|
|
649
|
+
Parse layer argument into list of integers.
|
|
650
|
+
|
|
651
|
+
Args:
|
|
652
|
+
layer_arg: String like "15", "14-16", "14,15,16", or "-1" (for auto-optimization)
|
|
653
|
+
model: Model object (needed for determining available layers)
|
|
654
|
+
|
|
655
|
+
Returns:
|
|
656
|
+
List of layer indices
|
|
657
|
+
"""
|
|
658
|
+
# Handle special cases
|
|
659
|
+
if layer_arg == "-1":
|
|
660
|
+
# Signal for auto-optimization - return single layer list
|
|
661
|
+
return [-1]
|
|
662
|
+
|
|
663
|
+
# Use existing parse_layer_range logic
|
|
664
|
+
layers = parse_layer_range(layer_arg, model)
|
|
665
|
+
if layers is None:
|
|
666
|
+
# "all" case - auto-detect model layers
|
|
667
|
+
if model is not None:
|
|
668
|
+
from .hyperparameter_optimizer import detect_model_layers
|
|
669
|
+
|
|
670
|
+
total_layers = detect_model_layers(model)
|
|
671
|
+
return list(range(total_layers))
|
|
672
|
+
# If no model provided, we cannot determine layers - this should not happen
|
|
673
|
+
raise ModelNotProvidedError()
|
|
674
|
+
|
|
675
|
+
return layers
|
|
676
|
+
|
|
677
|
+
|
|
678
|
+
def parse_layer_range(layer_range_str: str, model=None) -> Optional[List[int]]:
|
|
679
|
+
"""
|
|
680
|
+
Parse layer range string into list of integers.
|
|
681
|
+
|
|
682
|
+
Args:
|
|
683
|
+
layer_range_str: String like "8-24", "10,15,20", or "all"
|
|
684
|
+
model: Model object (needed for "all" option)
|
|
685
|
+
|
|
686
|
+
Returns:
|
|
687
|
+
List of layer indices, or None if "all" (will be auto-detected later)
|
|
688
|
+
"""
|
|
689
|
+
if layer_range_str.lower() == "all":
|
|
690
|
+
# Return None to signal auto-detection
|
|
691
|
+
return None
|
|
692
|
+
if "-" in layer_range_str:
|
|
693
|
+
# Range format: "8-24"
|
|
694
|
+
start, end = map(int, layer_range_str.split("-"))
|
|
695
|
+
return list(range(start, end + 1))
|
|
696
|
+
if "," in layer_range_str:
|
|
697
|
+
# Comma-separated format: "10,15,20"
|
|
698
|
+
return [int(x.strip()) for x in layer_range_str.split(",")]
|
|
699
|
+
# Single layer
|
|
700
|
+
return [int(layer_range_str)]
|
|
701
|
+
|
|
702
|
+
|
|
703
|
+
def aggregate_token_scores(token_scores: List[float], method: str) -> float:
|
|
704
|
+
"""
|
|
705
|
+
Aggregate token scores using the specified method.
|
|
706
|
+
|
|
707
|
+
Args:
|
|
708
|
+
token_scores: List of token scores (probabilities)
|
|
709
|
+
method: Aggregation method ("average", "final", "first", "max", "min")
|
|
710
|
+
|
|
711
|
+
Returns:
|
|
712
|
+
Aggregated score
|
|
713
|
+
"""
|
|
714
|
+
if not token_scores:
|
|
715
|
+
return 0.5
|
|
716
|
+
|
|
717
|
+
# Convert any tensor values to floats and filter out None values
|
|
718
|
+
clean_scores = []
|
|
719
|
+
for i, score in enumerate(token_scores):
|
|
720
|
+
if score is None:
|
|
721
|
+
raise InvalidValueError(
|
|
722
|
+
param_name=f"token_score[{i}]",
|
|
723
|
+
actual=None,
|
|
724
|
+
expected="float value"
|
|
725
|
+
)
|
|
726
|
+
if hasattr(score, "item"): # Handle tensors
|
|
727
|
+
raise InvalidValueError(
|
|
728
|
+
param_name=f"token_score[{i}]",
|
|
729
|
+
actual=str(type(score)),
|
|
730
|
+
expected="float, got tensor"
|
|
731
|
+
)
|
|
732
|
+
if not isinstance(score, (int, float)):
|
|
733
|
+
raise InvalidValueError(
|
|
734
|
+
param_name=f"token_score[{i}]",
|
|
735
|
+
actual=type(score).__name__,
|
|
736
|
+
expected="float"
|
|
737
|
+
)
|
|
738
|
+
clean_scores.append(float(score))
|
|
739
|
+
|
|
740
|
+
if not clean_scores:
|
|
741
|
+
return 0.5
|
|
742
|
+
|
|
743
|
+
if method == "average":
|
|
744
|
+
return sum(clean_scores) / len(clean_scores)
|
|
745
|
+
if method == "final":
|
|
746
|
+
return clean_scores[-1]
|
|
747
|
+
if method == "first":
|
|
748
|
+
return clean_scores[0]
|
|
749
|
+
if method == "max":
|
|
750
|
+
return max(clean_scores)
|
|
751
|
+
if method == "min":
|
|
752
|
+
return min(clean_scores)
|
|
753
|
+
# Default to average if unknown method
|
|
754
|
+
return sum(clean_scores) / len(clean_scores)
|
|
755
|
+
|
|
756
|
+
|
|
757
|
+
def setup_generate_pairs_parser(parser):
|
|
758
|
+
"""Set up the generate-pairs subcommand parser."""
|
|
759
|
+
parser.add_argument(
|
|
760
|
+
"--trait", type=str, required=True, help="Natural language description of the desired trait or behavior"
|
|
761
|
+
)
|
|
762
|
+
parser.add_argument(
|
|
763
|
+
"--num-pairs", type=int, default=30, help="Number of contrastive pairs to generate (default: 30)"
|
|
764
|
+
)
|
|
765
|
+
parser.add_argument(
|
|
766
|
+
"--output", type=str, required=True, help="Output file path for the generated pairs (JSON format)"
|
|
767
|
+
)
|
|
768
|
+
parser.add_argument(
|
|
769
|
+
"--model", type=str, default="meta-llama/Llama-3.1-8B-Instruct", help="Model name or path to use for generation"
|
|
770
|
+
)
|
|
771
|
+
parser.add_argument("--device", type=str, default=None, help="Device to run on")
|
|
772
|
+
parser.add_argument("--verbose", action="store_true", help="Enable verbose output")
|
|
773
|
+
parser.add_argument(
|
|
774
|
+
"--similarity-threshold",
|
|
775
|
+
type=float,
|
|
776
|
+
default=0.8,
|
|
777
|
+
help="Similarity threshold for deduplication (0-1, higher = more strict)",
|
|
778
|
+
)
|
|
779
|
+
parser.add_argument("--timing", action="store_true", help="Show detailed timing for each generation step")
|
|
780
|
+
parser.add_argument(
|
|
781
|
+
"--max-workers", type=int, default=4, help="Number of parallel workers for generation (default: 4)"
|
|
782
|
+
)
|
|
783
|
+
|
|
784
|
+
|
|
785
|
+
def setup_synthetic_parser(parser):
|
|
786
|
+
"""Set up the synthetic subcommand parser."""
|
|
787
|
+
# Either generate new pairs or load existing ones
|
|
788
|
+
group = parser.add_mutually_exclusive_group(required=True)
|
|
789
|
+
group.add_argument(
|
|
790
|
+
"--trait", type=str, help="Natural language description of the desired trait or behavior (generates new pairs)"
|
|
791
|
+
)
|
|
792
|
+
group.add_argument("--pairs-file", type=str, help="Path to existing JSON file with contrastive pairs")
|
|
793
|
+
|
|
794
|
+
# Generation parameters (only used if --trait is specified)
|
|
795
|
+
parser.add_argument(
|
|
796
|
+
"--num-pairs",
|
|
797
|
+
type=int,
|
|
798
|
+
default=30,
|
|
799
|
+
help="Number of contrastive pairs to generate (default: 30, only used with --trait)",
|
|
800
|
+
)
|
|
801
|
+
parser.add_argument(
|
|
802
|
+
"--save-pairs",
|
|
803
|
+
type=str,
|
|
804
|
+
default=None,
|
|
805
|
+
help="Save generated pairs to this file (optional, only used with --trait)",
|
|
806
|
+
)
|
|
807
|
+
|
|
808
|
+
# Model and device
|
|
809
|
+
parser.add_argument("--model", type=str, default="meta-llama/Llama-3.1-8B-Instruct", help="Model name or path")
|
|
810
|
+
parser.add_argument("--device", type=str, default=None, help="Device to run on")
|
|
811
|
+
|
|
812
|
+
# Training/evaluation parameters
|
|
813
|
+
parser.add_argument("--layer", type=str, default="15", help="Layer(s) to extract activations from")
|
|
814
|
+
parser.add_argument(
|
|
815
|
+
"--steering-method",
|
|
816
|
+
type=str,
|
|
817
|
+
default="CAA",
|
|
818
|
+
choices=["CAA"],
|
|
819
|
+
help="Steering method to use",
|
|
820
|
+
)
|
|
821
|
+
parser.add_argument("--steering-strength", type=float, default=1.0, help="Strength of steering vector application")
|
|
822
|
+
parser.add_argument(
|
|
823
|
+
"--test-questions", type=int, default=5, help="Number of test questions to generate for evaluation"
|
|
824
|
+
)
|
|
825
|
+
|
|
826
|
+
# Output
|
|
827
|
+
parser.add_argument("--output", type=str, default="./results", help="Output directory for results")
|
|
828
|
+
parser.add_argument("--verbose", action="store_true", help="Enable verbose output")
|
|
829
|
+
|
|
830
|
+
# Nonsense detection options
|
|
831
|
+
parser.add_argument(
|
|
832
|
+
"--enable-nonsense-detection",
|
|
833
|
+
action="store_true",
|
|
834
|
+
help="Enable nonsense detection to stop lobotomized responses",
|
|
835
|
+
)
|
|
836
|
+
parser.add_argument(
|
|
837
|
+
"--max-word-length",
|
|
838
|
+
type=int,
|
|
839
|
+
default=20,
|
|
840
|
+
help="Maximum reasonable word length for nonsense detection (default: 20)",
|
|
841
|
+
)
|
|
842
|
+
parser.add_argument(
|
|
843
|
+
"--repetition-threshold",
|
|
844
|
+
type=float,
|
|
845
|
+
default=0.7,
|
|
846
|
+
help="Threshold for repetitive content detection (0-1, default: 0.7)",
|
|
847
|
+
)
|
|
848
|
+
parser.add_argument(
|
|
849
|
+
"--gibberish-threshold",
|
|
850
|
+
type=float,
|
|
851
|
+
default=0.3,
|
|
852
|
+
help="Threshold for gibberish word detection (0-1, default: 0.3)",
|
|
853
|
+
)
|
|
854
|
+
parser.add_argument(
|
|
855
|
+
"--disable-dictionary-check",
|
|
856
|
+
action="store_true",
|
|
857
|
+
help="Disable dictionary-based word validation (faster but less accurate)",
|
|
858
|
+
)
|
|
859
|
+
parser.add_argument(
|
|
860
|
+
"--nonsense-action",
|
|
861
|
+
type=str,
|
|
862
|
+
default="regenerate",
|
|
863
|
+
choices=["regenerate", "stop", "flag"],
|
|
864
|
+
help="Action when nonsense is detected: regenerate, stop generation, or flag for review",
|
|
865
|
+
)
|
|
866
|
+
|
|
867
|
+
|
|
868
|
+
def setup_test_nonsense_parser(parser):
|
|
869
|
+
"""Set up the test-nonsense subcommand parser."""
|
|
870
|
+
parser.add_argument(
|
|
871
|
+
"text", type=str, nargs="?", help="Text to analyze (if not provided, will use interactive mode)"
|
|
872
|
+
)
|
|
873
|
+
parser.add_argument("--max-word-length", type=int, default=20, help="Maximum reasonable word length (default: 20)")
|
|
874
|
+
parser.add_argument(
|
|
875
|
+
"--repetition-threshold",
|
|
876
|
+
type=float,
|
|
877
|
+
default=0.7,
|
|
878
|
+
help="Threshold for repetitive content detection (0-1, default: 0.7)",
|
|
879
|
+
)
|
|
880
|
+
parser.add_argument(
|
|
881
|
+
"--gibberish-threshold",
|
|
882
|
+
type=float,
|
|
883
|
+
default=0.3,
|
|
884
|
+
help="Threshold for gibberish word detection (0-1, default: 0.3)",
|
|
885
|
+
)
|
|
886
|
+
parser.add_argument(
|
|
887
|
+
"--disable-dictionary-check", action="store_true", help="Disable dictionary-based word validation"
|
|
888
|
+
)
|
|
889
|
+
parser.add_argument("--verbose", action="store_true", help="Show detailed analysis")
|
|
890
|
+
parser.add_argument("--examples", action="store_true", help="Test with built-in example texts")
|
|
891
|
+
|
|
892
|
+
|
|
893
|
+
def setup_monitor_parser(parser):
|
|
894
|
+
"""Set up the monitor subcommand parser."""
|
|
895
|
+
parser.add_argument("--memory-info", action="store_true", help="Show current memory usage information")
|
|
896
|
+
parser.add_argument("--system-info", action="store_true", help="Show system information and capabilities")
|
|
897
|
+
parser.add_argument("--benchmark", action="store_true", help="Run performance benchmarks")
|
|
898
|
+
parser.add_argument("--test-gpu", action="store_true", help="Test GPU availability and memory")
|
|
899
|
+
parser.add_argument("--continuous", action="store_true", help="Continuous monitoring mode (Ctrl+C to stop)")
|
|
900
|
+
parser.add_argument("--interval", type=float, default=1.0, help="Monitoring interval in seconds (default: 1.0)")
|
|
901
|
+
parser.add_argument("--export-csv", type=str, default=None, help="Export monitoring data to CSV file")
|
|
902
|
+
parser.add_argument(
|
|
903
|
+
"--duration", type=int, default=60, help="Duration for continuous monitoring in seconds (default: 60)"
|
|
904
|
+
)
|
|
905
|
+
parser.add_argument("--track-gpu", action="store_true", help="Include GPU monitoring (requires CUDA)")
|
|
906
|
+
parser.add_argument("--detailed", action="store_true", help="Show detailed monitoring information")
|
|
907
|
+
|
|
908
|
+
|
|
909
|
+
def setup_agent_parser(parser):
|
|
910
|
+
"""Set up the agent subcommand parser."""
|
|
911
|
+
parser.add_argument("prompt", type=str, help="Prompt to send to the autonomous agent")
|
|
912
|
+
parser.add_argument("--model", type=str, default="meta-llama/Llama-3.1-8B-Instruct", help="Model to use")
|
|
913
|
+
parser.add_argument("--layer", type=int, help="Layer to use (overrides parameter file)")
|
|
914
|
+
parser.add_argument(
|
|
915
|
+
"--quality-threshold", type=float, default=0.3, help="Quality threshold for classifiers (default: 0.3)"
|
|
916
|
+
)
|
|
917
|
+
parser.add_argument(
|
|
918
|
+
"--time-budget",
|
|
919
|
+
type=float,
|
|
920
|
+
default=10.0,
|
|
921
|
+
help="Time budget in minutes for creating classifiers (default: 10.0)",
|
|
922
|
+
)
|
|
923
|
+
parser.add_argument("--max-attempts", type=int, default=3, help="Maximum improvement attempts (default: 3)")
|
|
924
|
+
parser.add_argument(
|
|
925
|
+
"--max-classifiers", type=int, default=None, help="Maximum classifiers to use (default: no limit)"
|
|
926
|
+
)
|
|
927
|
+
parser.add_argument("--verbose", action="store_true", help="Enable verbose output")
|
|
928
|
+
|
|
929
|
+
# Steering method arguments
|
|
930
|
+
parser.add_argument(
|
|
931
|
+
"--steering-method",
|
|
932
|
+
type=str,
|
|
933
|
+
default="CAA",
|
|
934
|
+
choices=["CAA"],
|
|
935
|
+
help="Steering method to use (default: CAA)",
|
|
936
|
+
)
|
|
937
|
+
parser.add_argument(
|
|
938
|
+
"--steering-strength", type=float, default=1.0, help="Strength of steering vector application (default: 1.0)"
|
|
939
|
+
)
|
|
940
|
+
parser.add_argument("--steering-mode", action="store_true", help="Enable steering mode")
|
|
941
|
+
|
|
942
|
+
# Normalization parameters
|
|
943
|
+
parser.add_argument("--normalize-mode", action="store_true", help="Enable normalization of steering vectors")
|
|
944
|
+
parser.add_argument(
|
|
945
|
+
"--normalization-method",
|
|
946
|
+
type=str,
|
|
947
|
+
default="none",
|
|
948
|
+
choices=["none", "l2_unit", "l2_norm", "max_norm"],
|
|
949
|
+
help="Normalization method for steering vectors (default: none)",
|
|
950
|
+
)
|
|
951
|
+
parser.add_argument("--target-norm", type=float, default=None, help="Target norm for steering vectors")
|
|
952
|
+
|
|
953
|
+
# Quality Control System parameters
|
|
954
|
+
parser.add_argument(
|
|
955
|
+
"--enable-quality-control",
|
|
956
|
+
action="store_true",
|
|
957
|
+
default=True,
|
|
958
|
+
help="Enable new quality control system (default: True)",
|
|
959
|
+
)
|
|
960
|
+
parser.add_argument(
|
|
961
|
+
"--max-quality-attempts",
|
|
962
|
+
type=int,
|
|
963
|
+
default=5,
|
|
964
|
+
help="Maximum attempts to achieve acceptable quality (default: 5)",
|
|
965
|
+
)
|
|
966
|
+
parser.add_argument(
|
|
967
|
+
"--show-parameter-reasoning", action="store_true", help="Display model's reasoning for parameter choices"
|
|
968
|
+
)
|
|
969
|
+
|
|
970
|
+
|
|
971
|
+
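Note on the agent options above: "--enable-quality-control" pairs action="store_true" with default=True, so the quality control system is effectively always on and there is no flag to switch it off. The normalization choices ("none", "l2_unit", "l2_norm", "max_norm") are only declared here, not implemented in this file; the snippet below is a minimal sketch of what such modes commonly mean for a steering vector. The exact semantics inside wisent are an assumption, and normalize_steering_vector is a hypothetical helper, not part of the package.

import torch

def normalize_steering_vector(vec, method="none", target_norm=None):
    # Sketch only: plausible readings of the --normalization-method choices.
    if method == "l2_unit":                       # rescale to unit L2 norm
        return vec / vec.norm()
    if method == "l2_norm":                       # rescale to an explicit norm (--target-norm)
        return vec / vec.norm() * (target_norm or 1.0)
    if method == "max_norm":                      # cap the norm, leave smaller vectors untouched
        cap = target_norm or 1.0
        return vec if vec.norm() <= cap else vec / vec.norm() * cap
    return vec                                    # "none": pass through unchanged

vec = normalize_steering_vector(torch.randn(4096), method="l2_unit")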
+def setup_classification_optimizer_parser(parser):
+    """Set up the classification-optimizer subcommand parser."""
+    parser.add_argument("model", type=str, help="Model name or path to optimize")
+    parser.add_argument("--limit", type=int, default=1000, help="Maximum samples per task (default: 1000)")
+    parser.add_argument(
+        "--optimization-metric",
+        type=str,
+        default="f1",
+        choices=["f1", "accuracy", "precision", "recall"],
+        help="Metric to optimize (default: f1)",
+    )
+    parser.add_argument(
+        "--max-time-per-task", type=float, default=15.0, help="Maximum time per task in minutes (default: 15.0)"
+    )
+    parser.add_argument(
+        "--layer-range", type=str, default=None, help="Layer range to test (e.g., '10-20', if None uses all layers)"
+    )
+    parser.add_argument(
+        "--aggregation-methods",
+        type=str,
+        nargs="+",
+        default=["average", "final", "first", "max", "min"],
+        help="Token aggregation methods to test",
+    )
+    parser.add_argument(
+        "--threshold-range",
+        type=float,
+        nargs="+",
+        default=[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
+        help="Detection thresholds to test",
+    )
+    parser.add_argument("--device", type=str, default=None, help="Device to run on")
+    parser.add_argument("--results-file", type=str, default=None, help="Custom file path for saving results")
+    parser.add_argument("--no-save", action="store_true", help="Don't save results to model config")
+    parser.add_argument("--save-logs-json", type=str, default=None, help="Save detailed optimization logs to JSON file")
+    parser.add_argument(
+        "--save-classifiers",
+        action="store_true",
+        default=True,
+        help="Save best classifiers for each task (default: True)",
+    )
+    parser.add_argument(
+        "--no-save-classifiers",
+        dest="save_classifiers",
+        action="store_false",
+        help="Don't save classifiers (overrides --save-classifiers)",
+    )
+    parser.add_argument(
+        "--classifiers-dir",
+        type=str,
+        default=None,
+        help="Directory to save classifiers (default: ./optimized_classifiers/model_name/)",
+    )
+
+    # Timing calibration options
+    parser.add_argument(
+        "--skip-timing-estimation", action="store_true", help="Skip timing estimation and proceed without time warnings"
+    )
+    parser.add_argument("--calibration-file", type=str, default=None, help="File to save/load calibration data")
+    parser.add_argument(
+        "--calibrate-only",
+        action="store_true",
+        help="Only run calibration and exit (saves to --calibration-file if provided)",
+    )
+
+
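The "--layer-range" option takes a string such as '10-20'; how the optimizer expands it is handled elsewhere in the package. The helper below is a hypothetical sketch of the obvious reading (an inclusive range of layer indices), given only for orientation.

def parse_layer_range(spec):
    # Hypothetical helper, not from the package: "10-20" -> [10, 11, ..., 20].
    start, end = (int(part) for part in spec.split("-"))
    return list(range(start, end + 1))

assert parse_layer_range("10-20") == list(range(10, 21))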
+def setup_configure_model_parser(parser):
+    """Set up the configure-model subcommand parser."""
+    parser.add_argument("model", type=str, help="Model name to configure")
+    parser.add_argument("--force", action="store_true", help="Force reconfiguration even if model already has a config")
+
+
+def setup_steering_optimizer_parser(parser):
+    """Set up the steering-optimizer subcommand parser."""
+    # Create subparsers for different steering optimization types
+    steering_subparsers = parser.add_subparsers(dest="steering_action", help="Steering optimization actions")
+
+    # Auto optimization subcommand (NEW - runs after classification optimization)
+    auto_parser = steering_subparsers.add_parser(
+        "auto", help="Automatically optimize steering based on classification config"
+    )
+    auto_parser.add_argument("model", type=str, help="Model name or path")
+    auto_parser.add_argument(
+        "--task",
+        type=str,
+        default=None,
+        help="Specific task to optimize (defaults to all classification-optimized tasks)",
+    )
+    auto_parser.add_argument(
+        "--methods",
+        type=str,
+        nargs="+",
+        choices=["CAA"],
+        default=["CAA"],
+        help="Steering methods to test (default: CAA)",
+    )
+    auto_parser.add_argument("--limit", type=int, default=100, help="Maximum samples for testing (default: 100)")
+    auto_parser.add_argument("--max-time", type=float, default=60.0, help="Maximum time in minutes (default: 60)")
+    auto_parser.add_argument(
+        "--strength-range",
+        type=float,
+        nargs="+",
+        default=[0.5, 1.0, 1.5, 2.0],
+        help="Steering strengths to test (default: 0.5 1.0 1.5 2.0)",
+    )
+    auto_parser.add_argument(
+        "--layer-range",
+        type=str,
+        default=None,
+        help="Explicit layer range to search (e.g., '0-5' or '0,2,4'). If not specified, uses classification layer or defaults to 0-5",
+    )
+
+    # Method comparison subcommand
+    method_parser = steering_subparsers.add_parser(
+        "compare-methods", help="Compare different steering methods for a task"
+    )
+    method_parser.add_argument("model", type=str, help="Model name or path")
+    method_parser.add_argument(
+        "--task", type=str, default="truthfulqa_mc1", help="Task to optimize steering for (default: truthfulqa_mc1)"
+    )
+    method_parser.add_argument(
+        "--methods",
+        type=str,
+        nargs="+",
+        choices=["CAA"],
+        default=["CAA"],
+        help="Steering methods to compare",
+    )
+    method_parser.add_argument("--limit", type=int, default=100, help="Maximum samples for testing (default: 100)")
+    method_parser.add_argument(
+        "--max-time", type=float, default=30.0, help="Maximum optimization time in minutes (default: 30.0)"
+    )
+
+    # Layer optimization subcommand
+    layer_parser = steering_subparsers.add_parser("optimize-layer", help="Find optimal steering layer for a method")
+    layer_parser.add_argument("model", type=str, help="Model name or path")
+    layer_parser.add_argument(
+        "--task", type=str, default="truthfulqa_mc1", help="Task to optimize for (default: truthfulqa_mc1)"
+    )
+    layer_parser.add_argument(
+        "--method",
+        type=str,
+        default="CAA",
+        choices=["CAA"],
+        help="Steering method to use (default: CAA)",
+    )
+    layer_parser.add_argument("--layer-range", type=str, default=None, help="Layer range to search (e.g., '10-20')")
+    layer_parser.add_argument(
+        "--strength", type=float, default=1.0, help="Fixed steering strength during layer search (default: 1.0)"
+    )
+    layer_parser.add_argument("--limit", type=int, default=100, help="Maximum samples for testing (default: 100)")
+
+    # Strength optimization subcommand
+    strength_parser = steering_subparsers.add_parser("optimize-strength", help="Find optimal steering strength")
+    strength_parser.add_argument("model", type=str, help="Model name or path")
+    strength_parser.add_argument(
+        "--task", type=str, default="truthfulqa_mc1", help="Task to optimize for (default: truthfulqa_mc1)"
+    )
+    strength_parser.add_argument(
+        "--method",
+        type=str,
+        default="CAA",
+        choices=["CAA"],
+        help="Steering method to use (default: CAA)",
+    )
+    strength_parser.add_argument(
+        "--layer", type=int, default=None, help="Steering layer to use (defaults to classification layer)"
+    )
+    strength_parser.add_argument(
+        "--strength-range",
+        type=float,
+        nargs=2,
+        default=[0.1, 2.0],
+        help="Min and max strength to test (default: 0.1 2.0)",
+    )
+    strength_parser.add_argument(
+        "--strength-steps", type=int, default=10, help="Number of strength values to test (default: 10)"
+    )
+    strength_parser.add_argument("--limit", type=int, default=100, help="Maximum samples for testing (default: 100)")
+
+    # Comprehensive optimization subcommand
+    comprehensive_parser = steering_subparsers.add_parser(
+        "comprehensive", help="Run comprehensive steering optimization"
+    )
+    comprehensive_parser.add_argument("model", type=str, help="Model name or path")
+    comprehensive_parser.add_argument(
+        "--tasks",
+        type=str,
+        nargs="+",
+        default=None,
+        help="Tasks to optimize (defaults to classification-optimized tasks)",
+    )
+    comprehensive_parser.add_argument(
+        "--methods",
+        type=str,
+        nargs="+",
+        choices=["CAA"],
+        default=["CAA"],
+        help="Steering methods to test",
+    )
+    comprehensive_parser.add_argument("--limit", type=int, default=100, help="Sample limit per task (default: 100)")
+    comprehensive_parser.add_argument(
+        "--max-time-per-task", type=float, default=20.0, help="Time limit per task in minutes (default: 20.0)"
+    )
+    comprehensive_parser.add_argument("--no-save", action="store_true", help="Don't save results to model config")
+
+    # Personalization optimization subcommand
+    personalization_parser = steering_subparsers.add_parser(
+        "personalization", help="Optimize steering parameters for personality/trait steering"
+    )
+    personalization_parser.add_argument("model", type=str, help="Model name or path")
+    personalization_parser.add_argument(
+        "--trait", type=str, required=True, help="Trait description to steer towards (e.g., 'evil villain personality')"
+    )
+    personalization_parser.add_argument(
+        "--trait-name", type=str, default=None, help="Short name for the trait (e.g., 'evil'). Defaults to first word of trait."
+    )
+    personalization_parser.add_argument(
+        "--num-pairs", type=int, default=20, help="Number of synthetic pairs to generate (default: 20)"
+    )
+    personalization_parser.add_argument(
+        "--num-test-prompts", type=int, default=5, help="Number of test prompts for evaluation (default: 5)"
+    )
+    personalization_parser.add_argument(
+        "--layers", type=int, nargs="+", default=None,
+        help="Specific layers to test (default: ALL layers)"
+    )
+    personalization_parser.add_argument(
+        "--strength-range", type=float, nargs=2, default=[0.5, 5.0],
+        help="Min and max steering strength to test (default: 0.5 5.0)"
+    )
+    personalization_parser.add_argument(
+        "--num-strength-steps", type=int, default=5, help="Number of strength values to test (default: 5)"
+    )
+    personalization_parser.add_argument(
+        "--output-dir", type=str, default="./personalization_optimization",
+        help="Directory to save results and best vectors (default: ./personalization_optimization)"
+    )
+    personalization_parser.add_argument(
+        "--max-new-tokens", type=int, default=150, help="Max tokens to generate for evaluation (default: 150)"
+    )
+    personalization_parser.add_argument("--device", type=str, default=None, help="Device to run on")
+    personalization_parser.add_argument("--verbose", action="store_true", help="Enable verbose output")
+
+    # Multi-trait personalization optimization subcommand
+    multi_personalization_parser = steering_subparsers.add_parser(
+        "multi-personalization", help="Joint optimization for multiple traits with shared parameters"
+    )
+    multi_personalization_parser.add_argument("model", type=str, help="Model name or path")
+    multi_personalization_parser.add_argument(
+        "--trait", type=str, action="append", required=True, dest="traits",
+        help="Trait description (can be specified multiple times, e.g., --trait 'evil' --trait 'italian')"
+    )
+    multi_personalization_parser.add_argument(
+        "--trait-name", type=str, action="append", dest="trait_names",
+        help="Short name for each trait (must match number of --trait args)"
+    )
+    multi_personalization_parser.add_argument(
+        "--num-pairs", type=int, default=10, help="Number of synthetic pairs per trait (default: 10)"
+    )
+    multi_personalization_parser.add_argument(
+        "--num-test-prompts", type=int, default=5, help="Number of test prompts for evaluation (default: 5)"
+    )
+    multi_personalization_parser.add_argument(
+        "--layers", type=int, nargs="+", default=None,
+        help="Specific layers to test (default: ALL layers)"
+    )
+    multi_personalization_parser.add_argument(
+        "--strength-range", type=float, nargs=2, default=[0.5, 5.0],
+        help="Min and max steering strength to test per trait (default: 0.5 5.0)"
+    )
+    multi_personalization_parser.add_argument(
+        "--num-strength-steps", type=int, default=5, help="Number of strength values to test (default: 5)"
+    )
+    multi_personalization_parser.add_argument(
+        "--output-dir", type=str, default="./multi_personalization_optimization",
+        help="Directory to save results and vectors (default: ./multi_personalization_optimization)"
+    )
+    multi_personalization_parser.add_argument(
+        "--max-new-tokens", type=int, default=150, help="Max tokens to generate for evaluation (default: 150)"
+    )
+    multi_personalization_parser.add_argument("--device", type=str, default=None, help="Device to run on")
+    multi_personalization_parser.add_argument("--verbose", action="store_true", help="Enable verbose output")
+
+    # Common arguments for all steering optimization subcommands
+    parser.add_argument("--device", type=str, default=None, help="Device to run on")
+    parser.add_argument("--verbose", action="store_true", help="Enable verbose output")
+
+
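Two argparse details above are easy to miss: options registered on the parent parser after add_subparsers (the common --device/--verbose at the end) are only recognized before the subcommand name, which is why the personalization subcommands also declare their own --device and --verbose; and multi-personalization collects repeated --trait flags via action="append" into args.traits. The self-contained sketch below recreates just that pattern (it does not import the function above) to show the resulting namespace.

import argparse

# Recreates only the repeated --trait pattern from the multi-personalization subcommand.
parser = argparse.ArgumentParser(prog="multi-personalization")
parser.add_argument("model", type=str)
parser.add_argument("--trait", type=str, action="append", required=True, dest="traits")
parser.add_argument("--trait-name", type=str, action="append", dest="trait_names")

args = parser.parse_args(
    ["meta-llama/Llama-3.1-8B-Instruct", "--trait", "evil", "--trait", "italian"]
)
print(args.traits)       # ['evil', 'italian']
print(args.trait_names)  # None, because --trait-name was not supplied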
+def setup_model_config_parser(parser):
+    """Set up the model-config subcommand parser."""
+    # Create subparsers for different model config actions
+    config_subparsers = parser.add_subparsers(dest="config_action", help="Model configuration actions")
+
+    # Save configuration subcommand
+    save_parser = config_subparsers.add_parser("save", help="Save optimal parameters for a model")
+    save_parser.add_argument("model", type=str, help="Model name or path")
+    save_parser.add_argument("--classification-layer", type=int, required=True, help="Optimal layer for classification")
+    save_parser.add_argument(
+        "--steering-layer", type=int, default=None, help="Optimal layer for steering (defaults to classification layer)"
+    )
+    save_parser.add_argument(
+        "--token-aggregation",
+        type=str,
+        default="average",
+        choices=["average", "final", "first", "max", "min", "max_score"],
+        help="Token aggregation method. 'max_score' uses highest token score.",
+    )
+    save_parser.add_argument("--detection-threshold", type=float, default=0.6, help="Detection threshold")
+    save_parser.add_argument(
+        "--optimization-method", type=str, default="manual", help="How these parameters were determined"
+    )
+    save_parser.add_argument("--metrics", type=str, default=None, help="JSON string with optimization metrics")
+
+    # List configurations subcommand
+    list_parser = config_subparsers.add_parser("list", help="List all saved model configurations")
+    list_parser.add_argument("--detailed", action="store_true", help="Show detailed configuration information")
+
+    # Show configuration subcommand
+    show_parser = config_subparsers.add_parser("show", help="Show configuration for a specific model")
+    show_parser.add_argument("model", type=str, help="Model name or path")
+    show_parser.add_argument("--task", type=str, default=None, help="Show task-specific overrides if available")
+
+    # Remove configuration subcommand
+    remove_parser = config_subparsers.add_parser("remove", help="Remove configuration for a model")
+    remove_parser.add_argument("model", type=str, help="Model name or path")
+    remove_parser.add_argument("--confirm", action="store_true", help="Confirm removal without prompting")
+
+    # Test configuration subcommand
+    test_parser = config_subparsers.add_parser("test", help="Test if saved configuration works")
+    test_parser.add_argument("model", type=str, help="Model name or path")
+    test_parser.add_argument(
+        "--task", type=str, default="truthfulqa_mc1", help="Task to test with (default: truthfulqa_mc1)"
+    )
+    test_parser.add_argument("--limit", type=int, default=5, help="Number of samples to test with (default: 5)")
+    test_parser.add_argument("--device", type=str, default=None, help="Device to run on")
+
+    # Common arguments for all subcommands
+    parser.add_argument(
+        "--config-dir",
+        type=str,
+        default=None,
+        help="Custom directory for configuration files (default: ~/.wisent/model_configs/)",
+    )
+    parser.add_argument("--verbose", action="store_true", help="Enable verbose output")
+
+
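A self-contained sketch of the config_action subparser layout defined above, recreating only the "save" action and a couple of its flags to show how the parsed namespace looks (the real parser adds more options and actions):

import argparse

# Minimal recreation of the model-config "save" subcommand for illustration.
parser = argparse.ArgumentParser(prog="model-config")
config_subparsers = parser.add_subparsers(dest="config_action")
save_parser = config_subparsers.add_parser("save")
save_parser.add_argument("model", type=str)
save_parser.add_argument("--classification-layer", type=int, required=True)
save_parser.add_argument("--detection-threshold", type=float, default=0.6)

args = parser.parse_args(["save", "meta-llama/Llama-3.1-8B-Instruct", "--classification-layer", "15"])
print(args.config_action, args.classification_layer, args.detection_threshold)  # save 15 0.6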
+def setup_sample_size_optimizer_parser(parser):
+    """Set up the sample-size-optimizer subcommand parser."""
+    parser.add_argument("model", type=str, help="Model name or path to optimize")
+    parser.add_argument("--task", type=str, required=True, help="Task to optimize for (REQUIRED)")
+    parser.add_argument("--layer", type=int, required=True, help="Layer index to use (REQUIRED)")
+    parser.add_argument(
+        "--token-aggregation",
+        type=str,
+        required=True,
+        choices=["average", "final", "first", "max", "min", "max_score"],
+        help="Token aggregation method (REQUIRED). 'max_score' uses highest token score.",
+    )
+
+    # Classification-specific arguments
+    parser.add_argument(
+        "--threshold", type=float, default=0.5, help="Detection threshold for classification (default: 0.5)"
+    )
+
+    # Steering mode
+    parser.add_argument("--steering-mode", action="store_true", help="Optimize for steering instead of classification")
+    parser.add_argument(
+        "--steering-method",
+        type=str,
+        default="CAA",
+        choices=["CAA"],
+        help="Steering method to use (default: CAA)",
+    )
+    parser.add_argument("--steering-strength", type=float, default=1.0, help="Steering strength to use (default: 1.0)")
+    parser.add_argument(
+        "--token-targeting-strategy",
+        type=str,
+        default="LAST_TOKEN",
+        choices=["CHOICE_TOKEN", "LAST_TOKEN", "FIRST_TOKEN", "ALL_TOKENS"],
+        help="Token targeting strategy for steering (default: LAST_TOKEN)",
+    )
+
+    # Common optimization parameters
+    parser.add_argument(
+        "--sample-sizes",
+        type=int,
+        nargs="+",
+        default=[5, 10, 20, 50, 100, 200, 500],
+        help="Sample sizes to test (default: 5 10 20 50 100 200 500)",
+    )
+    parser.add_argument("--test-size", type=int, default=200, help="Fixed test set size (default: 200)")
+    parser.add_argument("--test-split", type=float, default=0.2, help="DEPRECATED: Use --test-size instead")
+    parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility (default: 42)")
+    parser.add_argument("--limit", type=int, default=None, help="Maximum number of samples to load from dataset")
+    parser.add_argument("--save-plot", action="store_true", help="Save performance plot")
+    parser.add_argument("--no-save-config", action="store_true", help="Don't save optimal sample size to model config")
+    parser.add_argument("--device", type=str, default=None, help="Device to run on")
+    parser.add_argument("--verbose", action="store_true", help="Enable verbose output")
+    parser.add_argument(
+        "--force", action="store_true", help="Force optimization even without matching classifier parameters"
+    )
+
+
+def setup_full_optimizer_parser(parser):
+    """Set up the full-optimize subcommand parser."""
+    parser.add_argument("model", type=str, help="Model name or path to optimize")
+
+    # Task selection - mutually exclusive options
+    task_group = parser.add_mutually_exclusive_group()
+    task_group.add_argument("--tasks", type=str, nargs="+", help="Specific tasks to optimize")
+    task_group.add_argument(
+        "--skills", type=str, nargs="+", help="Select tasks by skill categories (e.g., coding, mathematics, reasoning)"
+    )
+    task_group.add_argument(
+        "--risks",
+        type=str,
+        nargs="+",
+        help="Select tasks by risk categories (e.g., harmfulness, toxicity, hallucination)",
+    )
+
+    # General limit that applies to all optimizations unless overridden
+    parser.add_argument(
+        "--limit",
+        type=int,
+        default=100,
+        help="Sample limit for all optimizations (default: 100). Can be overridden by specific limits below",
+    )
+
+    # Specific limits (override general limit if provided)
+    parser.add_argument(
+        "--classification-limit",
+        type=int,
+        default=None,
+        help="Sample limit for classification optimization (overrides --limit)",
+    )
+    parser.add_argument(
+        "--sample-size-limit",
+        type=int,
+        default=None,
+        help="Sample limit for sample size optimization (overrides --limit)",
+    )
+    parser.add_argument(
+        "--steering-limit", type=int, default=None, help="Sample limit for steering optimization (overrides --limit)"
+    )
+
+    parser.add_argument(
+        "--sample-sizes",
+        type=int,
+        nargs="+",
+        default=[5, 10, 20, 50, 100, 200, 500],
+        help="Sample sizes to test (default: 5 10 20 50 100 200 500)",
+    )
+    parser.add_argument(
+        "--skip-classification", action="store_true", help="Skip classification optimization and use existing config"
+    )
+    parser.add_argument("--skip-sample-size", action="store_true", help="Skip sample size optimization")
+    parser.add_argument("--skip-classifier-training", action="store_true", help="Skip final classifier training step")
+    parser.add_argument("--skip-control-vectors", action="store_true", help="Skip control vector training step")
+
+    # Steering optimization options
+    parser.add_argument("--skip-steering", action="store_true", help="Skip steering optimization")
+    parser.add_argument(
+        "--steering-methods",
+        type=str,
+        nargs="+",
+        choices=["CAA"],
+        default=["CAA"],
+        help="Steering methods to test (default: CAA)",
+    )
+    parser.add_argument(
+        "--steering-layer-range", type=str, default=None, help="Layer range for steering optimization (e.g., '0-5')"
+    )
+    parser.add_argument(
+        "--steering-strength-range",
+        type=float,
+        nargs="+",
+        default=[0.5, 1.0, 1.5, 2.0],
+        help="Steering strengths to test (default: 0.5 1.0 1.5 2.0)",
+    )
+    # Task selection options
+    parser.add_argument(
+        "--num-tasks",
+        type=int,
+        default=None,
+        help="Number of tasks to randomly select from matched tasks (default: all)",
+    )
+    parser.add_argument(
+        "--min-quality-score",
+        type=int,
+        default=2,
+        choices=[1, 2, 3, 4, 5],
+        help="Minimum quality score for tasks (default: 2)",
+    )
+    parser.add_argument(
+        "--task-seed", type=int, default=None, help="Random seed for task selection (for reproducibility)"
+    )
+
+    parser.add_argument(
+        "--max-time-per-task", type=float, default=20.0, help="Maximum time per task in minutes (default: 20.0)"
+    )
+
+    parser.add_argument("--device", type=str, default=None, help="Device to run on")
+    parser.add_argument("--verbose", action="store_true", help="Enable verbose output")
+    parser.add_argument("--save-plots", action="store_true", help="Save plots for both optimizations")
+
+    # Timing calibration options
+    parser.add_argument(
+        "--skip-timing-estimation", action="store_true", help="Skip timing estimation and proceed without time warnings"
+    )
+    parser.add_argument("--calibration-file", type=str, default=None, help="File to save/load calibration data")
+    parser.add_argument(
+        "--calibrate-only",
+        action="store_true",
+        help="Only run calibration and exit (saves to --calibration-file if provided)",
+    )
+
+
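Task selection in full-optimize goes through add_mutually_exclusive_group(), so --tasks, --skills, and --risks cannot be combined in one invocation. A self-contained sketch of that behaviour (only the three group members are recreated):

import argparse

parser = argparse.ArgumentParser(prog="full-optimize")
task_group = parser.add_mutually_exclusive_group()
task_group.add_argument("--tasks", type=str, nargs="+")
task_group.add_argument("--skills", type=str, nargs="+")
task_group.add_argument("--risks", type=str, nargs="+")

print(parser.parse_args(["--skills", "coding"]))                 # accepted
# parser.parse_args(["--tasks", "gsm8k", "--skills", "coding"])  # exits with:
# error: argument --skills: not allowed with argument --tasks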
+def setup_generate_vector_parser(parser):
+    """Set up the generate-vector subcommand parser."""
+    # Source of contrastive pairs - mutually exclusive for single property
+    source_group = parser.add_mutually_exclusive_group(required=False)
+    source_group.add_argument(
+        "--from-pairs",
+        type=str,
+        metavar="FILE",
+        help="Path to JSON file containing contrastive pairs (single property)",
+    )
+    source_group.add_argument(
+        "--from-description",
+        type=str,
+        metavar="TRAIT",
+        help="Natural language description of the trait (single property)",
+    )
+
+    # Multi-property support
+    parser.add_argument("--multi-property", action="store_true", help="Enable multi-property steering")
+    parser.add_argument(
+        "--property-files",
+        type=str,
+        nargs="+",
+        metavar="NAME:FILE:LAYER",
+        help="Property definitions from files (format: property_name:pairs_file:layer)",
+    )
+    parser.add_argument(
+        "--property-descriptions",
+        type=str,
+        nargs="+",
+        metavar="NAME:DESC:LAYER",
+        help="Property definitions from descriptions (format: property_name:description:layer)",
+    )
+
+    # Model configuration
+    parser.add_argument("--model", type=str, default="distilgpt2", help="Model name or path (default: distilgpt2)")
+    parser.add_argument("--device", type=str, default=None, help="Device to run on (default: auto-detect)")
+
+    # Steering method configuration
+    parser.add_argument(
+        "--method",
+        type=str,
+        default="CAA",
+        choices=["CAA"],
+        help="Steering method to use (default: CAA)",
+    )
+    parser.add_argument("--layer", type=int, default=0, help="Layer index to apply steering (default: 0)")
+
+    # Output configuration
+    parser.add_argument("--output", type=str, required=True, help="Output path for the generated steering vector")
+
+    # Pair generation options (only used with --from-description)
+    parser.add_argument(
+        "--num-pairs",
+        type=int,
+        default=30,
+        help="Number of pairs to generate when using --from-description (default: 30)",
+    )
+    parser.add_argument(
+        "--save-pairs", type=str, default=None, help="Save generated pairs to this file when using --from-description"
+    )
+
+
+
+    # Activation extraction configuration
+    parser.add_argument(
+        "--prompt-construction",
+        type=str,
+        default="multiple_choice",
+        choices=["multiple_choice", "role_playing", "direct_completion", "instruction_following"],
+        help="Strategy for constructing prompts from question-answer pairs (default: multiple_choice)",
+    )
+    parser.add_argument(
+        "--token-targeting",
+        type=str,
+        default="choice_token",
+        choices=["choice_token", "continuation_token", "last_token", "first_token", "mean_pooling", "max_pooling"],
+        help="Strategy for targeting tokens in activation extraction (default: choice_token)",
+    )
+
+    # General options
+    parser.add_argument("--verbose", action="store_true", help="Enable verbose output")
+
+
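CAA is the only --method choice exposed here; the vector computation itself lives elsewhere in the package. For orientation only: CAA (Contrastive Activation Addition) is commonly described as the difference between the mean activations of the positive and negative sides of the contrastive pairs at the chosen layer. The sketch below illustrates that common formulation and is an assumption, not the package's implementation; caa_vector and the 768-dimensional tensors are illustrative.

import torch

def caa_vector(pos_activations, neg_activations):
    # Each argument: a list of [hidden_dim] tensors collected at the steering layer.
    return torch.stack(pos_activations).mean(dim=0) - torch.stack(neg_activations).mean(dim=0)

vec = caa_vector([torch.randn(768) for _ in range(30)], [torch.randn(768) for _ in range(30)])
torch.save(vec, "trait_vector.pt")  # analogous to the --output path above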
+def setup_multi_steer_parser(parser):
+    """Set up the multi-steer subcommand parser for dynamic vector combination."""
+    # Vector inputs - can specify multiple vector-weight pairs
+    parser.add_argument(
+        "--vector",
+        type=str,
+        action="append",
+        required=False,
+        default=None,
+        metavar="PATH:WEIGHT",
+        help="Path to steering vector and its weight (format: path/to/vector.pt:0.5). Can be specified multiple times. If omitted, generates unsteered baseline.",
+    )
+
+    # Model configuration
+    parser.add_argument("--model", type=str, required=True, help="Model name or path")
+    parser.add_argument("--layer", type=int, required=False, default=None, help="Layer index to apply combined steering (required when using vectors)")
+    parser.add_argument("--device", type=str, default=None, help="Device to run on (default: auto-detect)")
+
+    # Steering method configuration
+    parser.add_argument(
+        "--method",
+        type=str,
+        default="CAA",
+        choices=["CAA"],
+        help="Steering method to use for combination (default: CAA)",
+    )
+
+    # Generation configuration
+    parser.add_argument("--prompt", type=str, required=True, help="Prompt to generate with combined steering")
+    parser.add_argument("--max-new-tokens", type=int, default=100, help="Maximum new tokens to generate (default: 100)")
+
+    # Weight normalization
+    parser.add_argument("--normalize-weights", action="store_true", help="Normalize weights to sum to 1.0")
+    parser.add_argument(
+        "--allow-unnormalized", action="store_true", help="Allow weights that don't sum to 1.0 (for stronger effects)"
+    )
+    parser.add_argument(
+        "--target-norm", type=float, default=None, help="Scale the combined vector to have this norm (e.g., 10.0)"
+    )
+
+    # Output options
+    parser.add_argument(
+        "--save-combined", type=str, default=None, help="Save the combined steering vector to this path"
+    )
+    parser.add_argument("--verbose", action="store_true", help="Enable verbose output showing weight calculations")
+
+
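Each --vector value packs a path and a weight into a single PATH:WEIGHT string; the combination logic is implemented elsewhere in the package. The sketch below shows the straightforward reading of the flags (split the spec, weight-sum the loaded vectors, optionally normalize weights or rescale to --target-norm). It is an assumption for illustration, and combine() is a hypothetical helper, not the shipped code.

import torch

def combine(vector_specs, normalize_weights=False, target_norm=None):
    # Hypothetical: parse "path.pt:0.5"-style specs and form the weighted sum.
    paths, weights = zip(*(spec.rsplit(":", 1) for spec in vector_specs))
    weights = [float(w) for w in weights]
    if normalize_weights:                     # --normalize-weights: make weights sum to 1.0
        total = sum(weights)
        weights = [w / total for w in weights]
    combined = sum(w * torch.load(p) for w, p in zip(weights, paths))
    if target_norm is not None:               # --target-norm: rescale the combined vector
        combined = combined / combined.norm() * target_norm
    return combined

# combined = combine(["evil_vector.pt:0.7", "italian_vector.pt:0.3"], normalize_weights=True)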
+def setup_evaluate_parser(parser):
+    """Set up the evaluate subcommand parser for single-prompt evaluation."""
+
+    # Required arguments
+    parser.add_argument("--vector", type=str, required=True, help="Path to steering vector file (.pt)")
+    parser.add_argument("--prompt", type=str, required=True, help="Prompt to evaluate")
+    parser.add_argument(
+        "--model", type=str, required=True, help="Model name or path (used for both generation and evaluation)"
+    )
+    parser.add_argument("--trait", type=str, required=True, help="Trait name (e.g., 'catholic', 'cynical')")
+
+    # Optional model configuration
+    parser.add_argument("--device", type=str, default=None, help="Device to run on (default: auto-detect)")
+
+    # Optional steering parameters
+    parser.add_argument(
+        "--steering-strength", type=float, default=2.0, help="Steering strength to apply (default: 2.0)"
+    )
+    parser.add_argument("--max-new-tokens", type=int, default=100, help="Maximum new tokens to generate (default: 100)")
+    parser.add_argument(
+        "--trait-description",
+        type=str,
+        default=None,
+        help="Optional description of the trait (default: use trait name)",
+    )
+
+    # Optional threshold parameters
+    parser.add_argument(
+        "--trait-threshold", type=float, default=None, help="Minimum trait quality threshold (-1 to 1 scale)"
+    )
+    parser.add_argument(
+        "--answer-threshold", type=float, default=None, help="Minimum answer quality threshold (0 to 1 scale)"
+    )
+
+    # Output options
+    parser.add_argument("--verbose", action="store_true", help="Enable verbose output")
+    parser.add_argument("--json", action="store_true", help="Output results as JSON")
+
+
+def setup_train_unified_goodness_parser(parser):
+    """Set up the train-unified-goodness subcommand parser."""
+    from wisent.core.parser_arguments.train_unified_goodness_parser import setup_train_unified_goodness_parser as _setup
+    _setup(parser)