wisent 0.7.701__py3-none-any.whl → 0.7.1045__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- wisent/__init__.py +1 -1
- wisent/comparison/__init__.py +1 -0
- wisent/comparison/detect_bos_features.py +275 -0
- wisent/comparison/fgaa.py +465 -0
- wisent/comparison/lora.py +669 -0
- wisent/comparison/lora_dpo.py +592 -0
- wisent/comparison/main.py +444 -0
- wisent/comparison/ours.py +76 -0
- wisent/comparison/sae.py +304 -0
- wisent/comparison/utils.py +381 -0
- wisent/core/activations/activation_cache.py +393 -0
- wisent/core/activations/activations.py +3 -3
- wisent/core/activations/activations_collector.py +12 -7
- wisent/core/activations/classifier_inference_strategy.py +12 -11
- wisent/core/activations/extraction_strategy.py +260 -84
- wisent/core/classifiers/classifiers/core/atoms.py +3 -2
- wisent/core/cli/__init__.py +2 -1
- wisent/core/cli/agent/train_classifier.py +16 -3
- wisent/core/cli/check_linearity.py +35 -3
- wisent/core/cli/cluster_benchmarks.py +4 -6
- wisent/core/cli/create_steering_vector.py +6 -4
- wisent/core/cli/diagnose_vectors.py +7 -4
- wisent/core/cli/estimate_unified_goodness_time.py +6 -4
- wisent/core/cli/generate_pairs_from_task.py +9 -56
- wisent/core/cli/generate_vector_from_task.py +11 -20
- wisent/core/cli/geometry_search.py +137 -0
- wisent/core/cli/get_activations.py +2 -2
- wisent/core/cli/method_optimizer.py +4 -3
- wisent/core/cli/modify_weights.py +3 -2
- wisent/core/cli/optimize_sample_size.py +1 -1
- wisent/core/cli/optimize_steering.py +14 -16
- wisent/core/cli/optimize_weights.py +2 -1
- wisent/core/cli/preview_pairs.py +203 -0
- wisent/core/cli/steering_method_trainer.py +3 -3
- wisent/core/cli/tasks.py +19 -76
- wisent/core/cli/train_unified_goodness.py +3 -3
- wisent/core/contrastive_pairs/diagnostics/control_vectors.py +4 -4
- wisent/core/contrastive_pairs/diagnostics/linearity.py +7 -0
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/agentic_search.py +37 -347
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/aider_polyglot.py +113 -136
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/codeforces.py +2 -12
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/coding_benchmarks.py +124 -504
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/faithbench.py +40 -63
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/flames.py +46 -89
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/flores.py +15 -4
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/frames.py +36 -20
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/hallucinations_leaderboard.py +3 -45
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/livemathbench.py +42 -4
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/longform_writing.py +2 -112
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/math500.py +39 -4
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/medium_priority_benchmarks.py +475 -525
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/mercury.py +65 -42
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/olympiadbench.py +2 -12
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/planbench.py +78 -219
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/polymath.py +37 -4
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/recode.py +84 -69
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/refusalbench.py +168 -160
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/simpleqa.py +44 -25
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/tau_bench.py +3 -103
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/toolbench.py +3 -97
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/toolemu.py +48 -182
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_extractor_manifest.py +3 -0
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_extractor_registry.py +19 -1
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/aclue.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/acp_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/acp_bench_hard.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/advanced.py +2 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/aexams.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/afrimmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/afrixnli.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabculture.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic_exams.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic_leaderboard_complete.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabic_leaderboard_light.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arabicmmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/aradice.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arc.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arc_challenge.py +1 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arc_easy.py +1 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/arithmetic.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/asdiv.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/babi.py +36 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/basque_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bbq.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/belebele.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/benchmarks.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bertaqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bhs.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/bhtc.py +3 -5
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/blimp.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/blimp_nl.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/boolq.py +22 -5
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/c4.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cabbq.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/careqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/catalan_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/catalanqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/catcola.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cb.py +10 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ceval.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ceval_valid.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/chain.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/chartqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/claim.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/click.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cmmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cnn.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cocoteros.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/coedit.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/commonsense.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/commonsense_qa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/copa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/copal_id.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/coqa.py +3 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/csatqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/cycle.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/darija_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/darijahellaswag.py +2 -6
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/darijammlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/dbpedia.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/discrim_eval.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/doc.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/drop.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/epec.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq_bench_ca.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eq_bench_es.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/esbbq.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ethics.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_exams.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_proficiency.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_reading.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/eus_trivia.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/evalita_llm.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/financial.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/flan.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/french_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/galician_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/gaokao.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/glianorex.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/global_mmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/global_piqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/gpt3.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/groundcocoa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/haerae.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/headqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hellaswag.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hendrycks_ethics.py +5 -9
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hendrycks_math.py +63 -16
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/histoires_morales.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/hrm8k.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/humaneval_infilling.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/icelandic_winogrande.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/inverse.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/inverse_scaling.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ja.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/japanese_leaderboard.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/japanese_leaderboard_mc.py +1 -1
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kmmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kobest.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/kormedmcqa.py +5 -17
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lambada_cloze.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lambada_multilingual.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/law.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/leaderboard.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lingoly.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/llama3.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/lm_syneval.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/logiqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/logiqa2.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/longbench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/longbenchv2.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mastermind.py +2 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mc-taco.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/med_concepts_qa.py +2 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/meddialog.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/medical.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/medmcqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/medqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mela.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/metabench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/minerva_math.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mmlusr.py +3 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mrpc.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/multiblimp.py +2 -5
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/multirc.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/mutual.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/non.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_exact.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_gen_exact.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_mc.py +4 -8
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/noreval_mc_log_likelihoods.py +4 -8
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/nq_open.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_arc_multilingual.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_hellaswag_multilingual.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_mmlu_multilingual.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/okapi_truthfulqa_multilingual.py +2 -5
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/olaph.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/openbookqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/option.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/parafraseja.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/parafrases.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/paws.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/paws_x.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/pawsx.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/persona.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/phrases.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/pile.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/piqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/portuguese_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/prompt.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/prost.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/pubmedqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qa4mre.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qasper.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qasper_bool.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qnli.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qnlieu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/qqp.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/race.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/random.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/record.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/reversed.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/rte.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/ruler.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sciq.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/score.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/scrolls.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/scrolls_mc.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/self.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sglue.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sglue_rte.py +2 -1
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/siqa.py +4 -7
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/social_iqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/spanish_bench.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/storycloze.py +2 -6
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/summarization.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/super.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/super_glue.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/swag.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/swde.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/sycophancy.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/t0.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/teca.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinyarc.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinybenchmarks.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinygsm8k.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinyhellaswag.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinymmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinytruthfulqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tinywinogrande.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/tmmluplus.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/triviaqa.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/truthfulqa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/truthfulqa_mc1.py +9 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/truthfulqa_mc2.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/turblimp_core.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/turkishmmlu.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/turkishmmlu_mc.py +0 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/unscramble.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/vaxx.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/webqs.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wic.py +3 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/winogrande.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wmdp.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wnli.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wsc.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/wsc273.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xcopa.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xlsum.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xnli.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xquad.py +2 -4
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xstorycloze.py +2 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/xwinograd.py +2 -2
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_extractors/zhoblimp.py +1 -3
- wisent/core/contrastive_pairs/lm_eval_pairs/lm_task_pairs_generation.py +173 -6
- wisent/core/data_loaders/loaders/lm_loader.py +12 -1
- wisent/core/geometry_runner.py +995 -0
- wisent/core/geometry_search_space.py +237 -0
- wisent/core/hyperparameter_optimizer.py +1 -1
- wisent/core/main.py +3 -0
- wisent/core/models/core/atoms.py +5 -3
- wisent/core/models/wisent_model.py +1 -1
- wisent/core/optuna/classifier/optuna_classifier_optimizer.py +2 -2
- wisent/core/parser_arguments/check_linearity_parser.py +12 -2
- wisent/core/parser_arguments/generate_vector_from_synthetic_parser.py +2 -2
- wisent/core/parser_arguments/generate_vector_from_task_parser.py +6 -13
- wisent/core/parser_arguments/geometry_search_parser.py +61 -0
- wisent/core/parser_arguments/get_activations_parser.py +5 -14
- wisent/core/parser_arguments/main_parser.py +8 -0
- wisent/core/parser_arguments/train_unified_goodness_parser.py +2 -2
- wisent/core/steering.py +5 -3
- wisent/core/steering_methods/methods/hyperplane.py +2 -1
- wisent/core/synthetic/generators/nonsense_generator.py +30 -18
- wisent/core/trainers/steering_trainer.py +2 -2
- wisent/core/utils/device.py +27 -27
- wisent/core/utils/layer_combinations.py +70 -0
- wisent/examples/__init__.py +1 -0
- wisent/examples/scripts/__init__.py +1 -0
- wisent/examples/scripts/count_all_benchmarks.py +121 -0
- wisent/examples/scripts/discover_directions.py +469 -0
- wisent/examples/scripts/extract_benchmark_info.py +71 -0
- wisent/examples/scripts/search_all_short_names.py +31 -0
- wisent/examples/scripts/test_all_benchmarks.py +138 -0
- wisent/examples/scripts/test_all_benchmarks_new.py +28 -0
- wisent/examples/scripts/test_contrastive_pairs_all_supported.py +230 -0
- wisent/examples/scripts/test_nonsense_baseline.py +261 -0
- wisent/examples/scripts/test_one_benchmark.py +324 -0
- wisent/examples/scripts/test_one_coding_benchmark.py +293 -0
- wisent/parameters/lm_eval/broken_in_lm_eval.json +179 -2
- wisent/parameters/lm_eval/category_directions.json +137 -0
- wisent/parameters/lm_eval/repair_plan.json +282 -0
- wisent/parameters/lm_eval/weak_contrastive_pairs.json +38 -0
- wisent/parameters/lm_eval/working_benchmarks.json +206 -0
- wisent/parameters/lm_eval/working_benchmarks_categorized.json +236 -0
- wisent/tests/test_detector_accuracy.py +1 -1
- wisent/tests/visualize_geometry.py +1 -1
- {wisent-0.7.701.dist-info → wisent-0.7.1045.dist-info}/METADATA +5 -1
- {wisent-0.7.701.dist-info → wisent-0.7.1045.dist-info}/RECORD +328 -358
- wisent/core/contrastive_pairs/huggingface_pairs/hf_task_extractors/browsecomp.py +0 -245
- wisent/examples/contrastive_pairs/humanization_human_vs_ai.json +0 -2112
- wisent/examples/scripts/1/test_basqueglue_evaluation.json +0 -51
- wisent/examples/scripts/1/test_basqueglue_pairs.json +0 -14
- wisent/examples/scripts/1/test_bec2016eu_evaluation.json +0 -51
- wisent/examples/scripts/1/test_bec2016eu_pairs.json +0 -14
- wisent/examples/scripts/1/test_belebele_evaluation.json +0 -51
- wisent/examples/scripts/1/test_belebele_pairs.json +0 -14
- wisent/examples/scripts/1/test_benchmarks_evaluation.json +0 -51
- wisent/examples/scripts/1/test_benchmarks_pairs.json +0 -14
- wisent/examples/scripts/1/test_bertaqa_evaluation.json +0 -51
- wisent/examples/scripts/1/test_bertaqa_pairs.json +0 -14
- wisent/examples/scripts/1/test_bhtc_v2_evaluation.json +0 -30
- wisent/examples/scripts/1/test_bhtc_v2_pairs.json +0 -8
- wisent/examples/scripts/1/test_boolq-seq2seq_evaluation.json +0 -30
- wisent/examples/scripts/1/test_boolq-seq2seq_pairs.json +0 -8
- wisent/examples/scripts/1/test_cabreu_evaluation.json +0 -30
- wisent/examples/scripts/1/test_cabreu_pairs.json +0 -8
- wisent/examples/scripts/1/test_careqa_en_evaluation.json +0 -30
- wisent/examples/scripts/1/test_careqa_en_pairs.json +0 -8
- wisent/examples/scripts/1/test_careqa_evaluation.json +0 -30
- wisent/examples/scripts/1/test_careqa_pairs.json +0 -8
- wisent/examples/scripts/1/test_catalanqa_evaluation.json +0 -30
- wisent/examples/scripts/1/test_catalanqa_pairs.json +0 -8
- wisent/examples/scripts/1/test_catcola_evaluation.json +0 -30
- wisent/examples/scripts/1/test_catcola_pairs.json +0 -8
- wisent/examples/scripts/1/test_chartqa_evaluation.json +0 -30
- wisent/examples/scripts/1/test_chartqa_pairs.json +0 -8
- wisent/examples/scripts/1/test_claim_stance_topic_evaluation.json +0 -30
- wisent/examples/scripts/1/test_claim_stance_topic_pairs.json +0 -8
- wisent/examples/scripts/1/test_cnn_dailymail_evaluation.json +0 -30
- wisent/examples/scripts/1/test_cnn_dailymail_pairs.json +0 -8
- wisent/examples/scripts/1/test_cocoteros_es_evaluation.json +0 -30
- wisent/examples/scripts/1/test_cocoteros_es_pairs.json +0 -8
- wisent/examples/scripts/1/test_coedit_gec_evaluation.json +0 -30
- wisent/examples/scripts/1/test_coedit_gec_pairs.json +0 -8
- wisent/examples/scripts/1/test_cola_evaluation.json +0 -30
- wisent/examples/scripts/1/test_cola_pairs.json +0 -8
- wisent/examples/scripts/1/test_coqcat_evaluation.json +0 -30
- wisent/examples/scripts/1/test_coqcat_pairs.json +0 -8
- wisent/examples/scripts/1/test_dbpedia_14_evaluation.json +0 -30
- wisent/examples/scripts/1/test_dbpedia_14_pairs.json +0 -8
- wisent/examples/scripts/1/test_epec_koref_bin_evaluation.json +0 -30
- wisent/examples/scripts/1/test_epec_koref_bin_pairs.json +0 -8
- wisent/examples/scripts/1/test_ethos_binary_evaluation.json +0 -30
- wisent/examples/scripts/1/test_ethos_binary_pairs.json +0 -8
- wisent/examples/scripts/2/test_afrimgsm_direct_amh_evaluation.json +0 -30
- wisent/examples/scripts/2/test_afrimgsm_direct_amh_pairs.json +0 -8
- wisent/examples/scripts/2/test_afrimmlu_direct_amh_evaluation.json +0 -30
- wisent/examples/scripts/2/test_afrimmlu_direct_amh_pairs.json +0 -8
- wisent/examples/scripts/2/test_afrixnli_en_direct_amh_evaluation.json +0 -30
- wisent/examples/scripts/2/test_afrixnli_en_direct_amh_pairs.json +0 -8
- wisent/examples/scripts/2/test_arc_ar_evaluation.json +0 -30
- wisent/examples/scripts/2/test_arc_ar_pairs.json +0 -8
- wisent/examples/scripts/2/test_atis_evaluation.json +0 -30
- wisent/examples/scripts/2/test_atis_pairs.json +0 -8
- wisent/examples/scripts/2/test_babi_evaluation.json +0 -30
- wisent/examples/scripts/2/test_babi_pairs.json +0 -8
- wisent/examples/scripts/2/test_babilong_evaluation.json +0 -30
- wisent/examples/scripts/2/test_babilong_pairs.json +0 -8
- wisent/examples/scripts/2/test_bangla_mmlu_evaluation.json +0 -30
- wisent/examples/scripts/2/test_bangla_mmlu_pairs.json +0 -8
- wisent/examples/scripts/2/test_basque-glue_pairs.json +0 -14
- {wisent-0.7.701.dist-info → wisent-0.7.1045.dist-info}/WHEEL +0 -0
- {wisent-0.7.701.dist-info → wisent-0.7.1045.dist-info}/entry_points.txt +0 -0
- {wisent-0.7.701.dist-info → wisent-0.7.1045.dist-info}/licenses/LICENSE +0 -0
- {wisent-0.7.701.dist-info → wisent-0.7.1045.dist-info}/top_level.txt +0 -0
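The deleted example artifacts below all share one contrastive-pair layout: the *_pairs.json files hold a list of records, each carrying a prompt plus a positive_response/negative_response pair, and the *_evaluation.json files wrap each record with per-side TRUTHFUL/UNTRUTHFUL evaluation results. As a rough sketch only (this loader is hypothetical and not part of the wisent package), reading one of these files could look like:

import json
from pathlib import Path

# Hypothetical helper, not part of wisent: loads one of the removed
# *_pairs.json artifacts, whose schema is visible in the hunks below.
def load_pairs(path: str) -> list[dict]:
    records = json.loads(Path(path).read_text(encoding="utf-8"))
    for record in records:
        # Each record pairs one prompt with a correct and an incorrect response.
        print(record["pair_id"], "+", record["positive_response"],
              "/ -", record["negative_response"])
    return records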
wisent/examples/scripts/1/test_catcola_evaluation.json
@@ -1,30 +0,0 @@
-{
-  "task_name": "catcola",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "Sentence: T'ho ha explicat tant a tu com a ella.\n\nIs this sentence grammatically acceptable in Catal...",
-      "positive_response": "acceptable",
-      "negative_response": "unacceptable",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'acceptable' (log_prob=-0.500), Expected: 'acceptable'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'acceptable' (log_prob=-0.500), Expected: 'unacceptable'"
-      },
-      "both_correct": true
-    }
-  ]
-}
wisent/examples/scripts/1/test_chartqa_evaluation.json
@@ -1,30 +0,0 @@
-{
-  "task_name": "chartqa",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "Question: How many food item is shown in the bar graph?...",
-      "positive_response": "14",
-      "negative_response": "15",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: '14' (log_prob=-0.500), Expected: '14'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: '14' (log_prob=-0.500), Expected: '15'"
-      },
-      "both_correct": true
-    }
-  ]
-}
wisent/examples/scripts/1/test_claim_stance_topic_evaluation.json
@@ -1,30 +0,0 @@
-{
-  "task_name": "claim_stance_topic",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "Classify the Topic of the following Argument to one of these options: advertising, all nations a rig...",
-      "positive_response": "gambling",
-      "negative_response": "advertising",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'gambling' (log_prob=-0.500), Expected: 'gambling'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'gambling' (log_prob=-0.500), Expected: 'advertising'"
-      },
-      "both_correct": true
-    }
-  ]
-}
wisent/examples/scripts/1/test_claim_stance_topic_pairs.json
@@ -1,8 +0,0 @@
-[
-  {
-    "pair_id": 0,
-    "prompt": "Classify the Topic of the following Argument to one of these options: advertising, all nations a right to nuclear weapons, a mandatory retirement age, american jobs act, asean, atheism, austerity measures, barrier methods of contraception, blasphemy, boxing, bribery, burning the stars and stripes, children, collective bargaining rights claimed by trades unions, congressional earmarks, democratic governments should require voters to present photo identification at the polling station, democratization, endangered species, enforce term limits on the legislative branch of government, freedom of speech, fund education using a voucher scheme, gambling, governments should choose open source software, high rises for housing, holocaust denial, housewives should be paid for their work, hydroelectric dams, implement playoffs in collegiate level american football, intellectual property rights, israel's 2008-2009 military operations against gaza, leaking of military documents, multiculturalism, national service, only teach abstinence for sex education in schools, open primaries, partial birth abortions, physical education, poor communities, raising the school leaving age to 18, re-engage with myanmar, the blockade of gaza, the creation of private universities in the uk, the free market, the growing of tobacco, the keystone xl pipeline, the monarchy, the one-child policy of the republic of china, the right to asylum, the right to bear arms, the sale of violent video games to minors, the use of affirmative action, the use of performance enhancing drugs in professional sports, the use of truth and reconciliation commissions, wind power, year round schooling.\nArgument:\nmany people participate in gambling as a form of recreation or even as a means to gain an income\nTopic:",
-    "positive_response": "gambling",
-    "negative_response": "advertising"
-  }
-]
wisent/examples/scripts/1/test_cnn_dailymail_evaluation.json
@@ -1,30 +0,0 @@
-{
-  "task_name": "cnn_dailymail",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "Summarize the following article: (CNN)Share, and your gift will be multiplied. That may sound like a...",
-      "positive_response": "Zully Broussard decided to give a kidney to a stranger .\nA new computer program helped her donation spur transplants for six kidney patients .",
-      "negative_response": "A new computer program helped her donation spur transplants for six kidney patients. Zully Broussard decided to give a kidney to a stranger.",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'Zully Broussard decided to give a kidney to a stranger .\nA new computer program helped her donation spur transplants for six kidney patients .' (log_prob=-0.500), Expected: 'Zully Broussard decided to give a kidney to a stranger .\nA new computer program helped her donation spur transplants for six kidney patients .'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'Zully Broussard decided to give a kidney to a stranger .\nA new computer program helped her donation spur transplants for six kidney patients .' (log_prob=-0.500), Expected: 'A new computer program helped her donation spur transplants for six kidney patients. Zully Broussard decided to give a kidney to a stranger.'"
-      },
-      "both_correct": true
-    }
-  ]
-}
wisent/examples/scripts/1/test_cnn_dailymail_pairs.json
@@ -1,8 +0,0 @@
-[
-  {
-    "pair_id": 0,
-    "prompt": "Summarize the following article: (CNN)Share, and your gift will be multiplied. That may sound like an esoteric adage, but when Zully Broussard selflessly decided to give one of her kidneys to a stranger, her generosity paired up with big data. It resulted in six patients receiving transplants. That surprised and wowed her. \"I thought I was going to help this one person who I don't know, but the fact that so many people can have a life extension, that's pretty big,\" Broussard told CNN affiliate KGO. She may feel guided in her generosity by a higher power. \"Thanks for all the support and prayers,\" a comment on a Facebook page in her name read. \"I know this entire journey is much bigger than all of us. I also know I'm just the messenger.\" CNN cannot verify the authenticity of the page. But the power that multiplied Broussard's gift was data processing of genetic profiles from donor-recipient pairs. It works on a simple swapping principle but takes it to a much higher level, according to California Pacific Medical Center in San Francisco. So high, that it is taking five surgeons, a covey of physician assistants, nurses and anesthesiologists, and more than 40 support staff to perform surgeries on 12 people. They are extracting six kidneys from donors and implanting them into six recipients. \"The ages of the donors and recipients range from 26 to 70 and include three parent and child pairs, one sibling pair and one brother and sister-in-law pair,\" the medical center said in a statement. The chain of surgeries is to be wrapped up Friday. In late March, the medical center is planning to hold a reception for all 12 patients. Here's how the super swap works, according to California Pacific Medical Center. Say, your brother needs a kidney to save his life, or at least get off of dialysis, and you're willing to give him one of yours. But then it turns out that your kidney is not a match for him, and it's certain his body would reject it. Your brother can then get on a years-long waiting list for a kidney coming from an organ donor who died. Maybe that will work out -- or not, and time could run out for him. Alternatively, you and your brother could look for another recipient-living donor couple like yourselves -- say, two more siblings, where the donor's kidney isn't suited for his sister, the recipient. But maybe your kidney is a match for his sister, and his kidney is a match for your brother. So, you'd do a swap. That's called a paired donation. It's a bit of a surgical square dance, where four people cross over partners temporarily and everybody goes home smiling. But instead of a square dance, Broussard's generous move set off a chain reaction, like dominoes falling. Her kidney, which was removed Thursday, went to a recipient, who was paired with a donor. That donor's kidney went to the next recipient, who was also paired with a donor, and so on. On Friday, the last donor will give a kidney to someone who has been biding time on one of those deceased donor lists to complete the chain. Such long-chain transplanting is rare. It's been done before, California Pacific Medical Center said in a statement, but matching up the people in the chain has been laborious and taken a long time. That changed when a computer programmer named David Jacobs received a kidney transplant. He had been waiting on a deceased donor list, when a live donor came along -- someone nice enough to give away a kidney to a stranger. Jacobs paid it forward with his programming skills, creating MatchGrid, a program that genetically matches up donor pairs or chains quickly. \"When we did a five-way swap a few years ago, which was one of the largest, it took about three to four months. We did this in about three weeks,\" Jacobs said. But this chain wouldn't have worked so quickly without Broussard's generosity -- or may not have worked at all. \"The significance of the altruistic donor is that it opens up possibilities for pairing compatible donors and recipients,\" said Dr. Steven Katznelson. \"Where there had been only three or four options, with the inclusion of the altruistic donor, we had 140 options to consider for matching donors and recipients.\" And that's divine, Broussard's friend Shirley Williams wrote in a comment her on Broussard's Facebook page. \"You are a true angel my friend.\".",
-    "positive_response": "Zully Broussard decided to give a kidney to a stranger .\nA new computer program helped her donation spur transplants for six kidney patients .",
-    "negative_response": "A new computer program helped her donation spur transplants for six kidney patients. Zully Broussard decided to give a kidney to a stranger."
-  }
-]
wisent/examples/scripts/1/test_cocoteros_es_evaluation.json
@@ -1,30 +0,0 @@
-{
-  "task_name": "cocoteros_es",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "Genera una frase corta con estas palabras: ['cepillo', 'diente', 'comida']. El contexto es: La higie...",
-      "positive_response": "Yo me cepillo los dientes despu\u00e9s de cada comida.",
-      "negative_response": "de comida. dientes cada los Yo me cepillo despu\u00e9s",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'Yo me cepillo los dientes despu\u00e9s de cada comida.' (log_prob=-0.500), Expected: 'Yo me cepillo los dientes despu\u00e9s de cada comida.'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'Yo me cepillo los dientes despu\u00e9s de cada comida.' (log_prob=-0.500), Expected: 'de comida. dientes cada los Yo me cepillo despu\u00e9s'"
-      },
-      "both_correct": true
-    }
-  ]
-}
wisent/examples/scripts/1/test_cocoteros_es_pairs.json
@@ -1,8 +0,0 @@
-[
-  {
-    "pair_id": 0,
-    "prompt": "Genera una frase corta con estas palabras: ['cepillo', 'diente', 'comida']. El contexto es: La higiene bucodental es clave para prevenir enfermedades dentales.\n\nRespuesta:",
-    "positive_response": "Yo me cepillo los dientes despu\u00e9s de cada comida.",
-    "negative_response": "de comida. dientes cada los Yo me cepillo despu\u00e9s"
-  }
-]
wisent/examples/scripts/1/test_coedit_gec_evaluation.json
@@ -1,30 +0,0 @@
-{
-  "task_name": "coedit_gec",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "Make the minimal amount of changes to correct grammar and spelling errors in the following text.\nOri...",
-      "positive_response": "To partly offset this gigantic demand for energy, China has launched an ambitious program to produce 16% of its electricity from nuclear power by 2030.",
-      "negative_response": "To partly offset this gigantic demand for energy, China had launched an ambitious program to produce 16% of its electricity from nuclear power by 2030.",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'To partly offset this gigantic demand for energy, China has launched an ambitious program to produce 16% of its electricity from nuclear power by 2030.' (log_prob=-0.500), Expected: 'To partly offset this gigantic demand for energy, China has launched an ambitious program to produce 16% of its electricity from nuclear power by 2030.'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'To partly offset this gigantic demand for energy, China has launched an ambitious program to produce 16% of its electricity from nuclear power by 2030.' (log_prob=-0.500), Expected: 'To partly offset this gigantic demand for energy, China had launched an ambitious program to produce 16% of its electricity from nuclear power by 2030.'"
-      },
-      "both_correct": true
-    }
-  ]
-}
wisent/examples/scripts/1/test_coedit_gec_pairs.json
@@ -1,8 +0,0 @@
-[
-  {
-    "pair_id": 0,
-    "prompt": "Make the minimal amount of changes to correct grammar and spelling errors in the following text.\nOriginal text: To partly offset this gigantic demand for energy, China had launched an ambitious program to produce 16% of its electricity from nuclear power by 2030.\nCorrected text:",
-    "positive_response": "To partly offset this gigantic demand for energy, China has launched an ambitious program to produce 16% of its electricity from nuclear power by 2030.",
-    "negative_response": "To partly offset this gigantic demand for energy, China had launched an ambitious program to produce 16% of its electricity from nuclear power by 2030."
-  }
-]
wisent/examples/scripts/1/test_cola_evaluation.json
@@ -1,30 +0,0 @@
-{
-  "task_name": "cola",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "Sentence: The sailors rode the breeze clear of the rocks.\n\nIs this sentence grammatically acceptable...",
-      "positive_response": "acceptable",
-      "negative_response": "unacceptable",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'acceptable' (log_prob=-0.500), Expected: 'acceptable'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'acceptable' (log_prob=-0.500), Expected: 'unacceptable'"
-      },
-      "both_correct": true
-    }
-  ]
-}
wisent/examples/scripts/1/test_coqcat_evaluation.json
@@ -1,30 +0,0 @@
-{
-  "task_name": "coqcat",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "Story: Segons la mitologia grega, Tespi (en grec antic \u0398\u03ad\u03c3\u03c0\u03b9\u03bf\u03c2), va ser un heroi beoci, fill d'Erect...",
-      "positive_response": "Un heroi beoci.",
-      "negative_response": "De l'\u00c0tica.",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'Un heroi beoci.' (log_prob=-0.500), Expected: 'Un heroi beoci.'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'Un heroi beoci.' (log_prob=-0.500), Expected: 'De l'\u00c0tica.'"
-      },
-      "both_correct": true
-    }
-  ]
-}
wisent/examples/scripts/1/test_coqcat_pairs.json
@@ -1,8 +0,0 @@
-[
-  {
-    "pair_id": 0,
-    "prompt": "Story: Segons la mitologia grega, Tespi (en grec antic \u0398\u03ad\u03c3\u03c0\u03b9\u03bf\u03c2), va ser un heroi beoci, fill d'Erecteu, rei de l'\u00c0tica, i ep\u00f2nim de la ciutat de T\u00e8spies., Va abandonar l'\u00c0tica i va fundar un regne a Be\u00f2cia. Tespi t\u00e9 a veure amb el cicle de llegendes sobre H\u00e8racles. L'heroi va comen\u00e7ar al costat de Tespi les seves proeses, quan tenia divuit anys, matant el lle\u00f3 de Citer\u00f3, que feia estralls entre els ramats de Tespi i els d'Amfitri\u00f3, a la ve\u00efna Tebes. Mentre va durar la cacera, es va instal\u00b7lar al palau de Tespi, ca\u00e7ant durant el dia i tornant a dormir al palau a la nit. Tespi tenia cinquanta filles, les Tesp\u00edades, o b\u00e9 d'una mateixa esposa, Megamede, o b\u00e9 d'esposes i concubines diferents. El rei, que desitjava tenir nets d'un heroi tan important, posava al seu llit cada nit una de les seves filles. L'heroi, esgotat per la cacera, no s'adonava del canvi, i creia que tenia sempre la mateixa companya de llit. Totes elles van concebre un fill d'H\u00e8racles., Tespi va ser tamb\u00e9 l'amic que va acollir H\u00e8racles i el purific\u00e0, despr\u00e9s que l'heroi mat\u00e9s els fills que havia tingut amb M\u00e8gara.\n\nQuestion: Qui era Tespi?",
-    "positive_response": "Un heroi beoci.",
-    "negative_response": "De l'\u00c0tica."
-  }
-]
wisent/examples/scripts/1/test_dbpedia_14_evaluation.json
@@ -1,30 +0,0 @@
-{
-  "task_name": "dbpedia_14",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "Classify the Topic of the following Paragraph to one of these options: Company, Educational Institut...",
-      "positive_response": "Album",
-      "negative_response": "Company",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'Album' (log_prob=-0.500), Expected: 'Album'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'Album' (log_prob=-0.500), Expected: 'Company'"
-      },
-      "both_correct": true
-    }
-  ]
-}
wisent/examples/scripts/1/test_dbpedia_14_pairs.json
@@ -1,8 +0,0 @@
-[
-  {
-    "pair_id": 0,
-    "prompt": "Classify the Topic of the following Paragraph to one of these options: Company, Educational Institution, Artist, Athlete, Office Holder, Mean Of Transportation, Building, Natural Place, Village, Animal, Plant, Album, Film, Written Work.\nParagraph:\n Time Again is a 2003 smooth jazz album by saxophonist David Sanborn.\nTopic:",
-    "positive_response": "Album",
-    "negative_response": "Company"
-  }
-]
wisent/examples/scripts/1/test_epec_koref_bin_evaluation.json
@@ -1,30 +0,0 @@
-{
-  "task_name": "epec_koref_bin",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "Text: Orain , finalaren atarian , Eurokopako atezain onena bilakatu da Toldo , eta Italian heroi naz...",
-      "positive_response": "Yes",
-      "negative_response": "No",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'Yes' (log_prob=-0.500), Expected: 'Yes'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'Yes' (log_prob=-0.500), Expected: 'No'"
-      },
-      "both_correct": true
-    }
-  ]
-}
wisent/examples/scripts/1/test_epec_koref_bin_pairs.json
@@ -1,8 +0,0 @@
-[
-  {
-    "pair_id": 0,
-    "prompt": "Text: Orain , finalaren atarian , Eurokopako atezain onena bilakatu da Toldo , eta Italian heroi nazionala da . Merezita lortu du Fiorentinako atezainak gailurrera iristea , baina izugarri kostata .\n\nDo 'heroi nazionala' and 'Fiorentinako atezainak' refer to the same entity?\nAnswer:",
-    "positive_response": "Yes",
-    "negative_response": "No"
-  }
-]
wisent/examples/scripts/1/test_ethos_binary_evaluation.json
@@ -1,30 +0,0 @@
-{
-  "task_name": "ethos_binary",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "Classify the Hate Speech of the following Sentence to one of these options: not hate speech, hate sp...",
-      "positive_response": "not hate speech",
-      "negative_response": "hate speech",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'not hate speech' (log_prob=-0.500), Expected: 'not hate speech'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'not hate speech' (log_prob=-0.500), Expected: 'hate speech'"
-      },
-      "both_correct": true
-    }
-  ]
-}
wisent/examples/scripts/1/test_ethos_binary_pairs.json
@@ -1,8 +0,0 @@
-[
-  {
-    "pair_id": 0,
-    "prompt": "Classify the Hate Speech of the following Sentence to one of these options: not hate speech, hate speech.\nSentence:\nEvery publicity is good publicity ha ? I'\u0099m here after watching leaving neverland i don'\u0099t know he is guilty or not but in music he is a legend and no one can take that from him\nHate Speech:",
-    "positive_response": "not hate speech",
-    "negative_response": "hate speech"
-  }
-]
wisent/examples/scripts/2/test_afrimgsm_direct_amh_evaluation.json
@@ -1,30 +0,0 @@
-{
-  "task_name": "afrimgsm_direct_amh",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "\u12e8\u1303\u1294\u1275 \u12f3\u12ad\u12ec\u12ce\u127d \u1260\u1240\u1295 16 \u12a5\u1295\u1241\u120b\u120e\u127d\u1295 \u12ed\u1325\u120b\u1209\u1362 \u1260\u12e8\u1240\u1291 \u1226\u1235\u1275 \u1208\u1241\u122d\u1235 \u1275\u1260\u120b\u1208\u127d \u12a5\u1293 \u1260\u12e8\u1240\u1291 \u1208\u1313\u12f0\u129e\u1279\u12cb \u1260\u12a0\u122b\u1271 \u121b\u134b\u1295 \u1275\u130b\u130d\u122b\u1208\u127d\u1362 \u1240\u122a\u12cd\u1295 \u1260\u12e8\u1240\u1291 \u1260\u12a0\u122d\u1236 \u12a0...",
-      "positive_response": "18",
-      "negative_response": "19",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: '18' (log_prob=-0.500), Expected: '18'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: '18' (log_prob=-0.500), Expected: '19'"
-      },
-      "both_correct": true
-    }
-  ]
-}
wisent/examples/scripts/2/test_afrimgsm_direct_amh_pairs.json
@@ -1,8 +0,0 @@
-[
-  {
-    "pair_id": 0,
-    "prompt": "\u12e8\u1303\u1294\u1275 \u12f3\u12ad\u12ec\u12ce\u127d \u1260\u1240\u1295 16 \u12a5\u1295\u1241\u120b\u120e\u127d\u1295 \u12ed\u1325\u120b\u1209\u1362 \u1260\u12e8\u1240\u1291 \u1226\u1235\u1275 \u1208\u1241\u122d\u1235 \u1275\u1260\u120b\u1208\u127d \u12a5\u1293 \u1260\u12e8\u1240\u1291 \u1208\u1313\u12f0\u129e\u1279\u12cb \u1260\u12a0\u122b\u1271 \u121b\u134b\u1295 \u1275\u130b\u130d\u122b\u1208\u127d\u1362 \u1240\u122a\u12cd\u1295 \u1260\u12e8\u1240\u1291 \u1260\u12a0\u122d\u1236 \u12a0\u12f0\u122e\u127d \u1308\u1260\u12eb \u1260 2 \u12f6\u120b\u122d \u1208\u12a5\u12eb\u1295\u12f3\u1295\u12f1 \u1275\u12a9\u1235 \u12f3\u12ad\u12ec \u12a5\u1295\u1241\u120b\u120d \u1275\u1238\u1323\u1208\u127d\u1362 \u1260\u12a0\u122d\u1236 \u12a0\u12f0\u122e\u127d \u1308\u1260\u12eb \u1260\u12e8\u1240\u1291 \u1260\u12f6\u120b\u122d \u121d\u1295 \u12eb\u1205\u120d \u1273\u1308\u129b\u1208\u127d?",
-    "positive_response": "18",
-    "negative_response": "19"
-  }
-]
@@ -1,30 +0,0 @@
-{
-  "task_name": "afrimmlu_direct_amh",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "Question: \u12ae\u120d\u1270\u1295 \u12a5\u1293 \u12a0\u1263\u1271 $13 \u12e8\u121a\u12eb\u12c8\u1323 \u12a0\u1295\u12f5 \u130b\u120e\u1295 \u1240\u1208\u121d \u1308\u12d9\u1362 \u12a5\u12eb\u1295\u12f3\u1295\u12f3\u1278\u12cd $9 \u12e8\u121a\u12eb\u12c8\u1321 2 \u1265\u1229\u123e\u127d\u1295\u121d \u1308\u12d9 \u1362\u12e8\u1308\u12d9\u1275\u1295 \u12e8\u1265\u1229\u123d \u12a5\u1293 \u1240\u1208\u121d \u130d\u1265...",
-      "positive_response": "$",
-      "negative_response": "2",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: '$' (log_prob=-0.500), Expected: '$'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: '$' (log_prob=-0.500), Expected: '2'"
-      },
-      "both_correct": true
-    }
-  ]
-}
@@ -1,8 +0,0 @@
-[
-  {
-    "pair_id": 0,
-    "prompt": "Question: \u12ae\u120d\u1270\u1295 \u12a5\u1293 \u12a0\u1263\u1271 $13 \u12e8\u121a\u12eb\u12c8\u1323 \u12a0\u1295\u12f5 \u130b\u120e\u1295 \u1240\u1208\u121d \u1308\u12d9\u1362 \u12a5\u12eb\u1295\u12f3\u1295\u12f3\u1278\u12cd $9 \u12e8\u121a\u12eb\u12c8\u1321 2 \u1265\u1229\u123e\u127d\u1295\u121d \u1308\u12d9 \u1362\u12e8\u1308\u12d9\u1275\u1295 \u12e8\u1265\u1229\u123d \u12a5\u1293 \u1240\u1208\u121d \u130d\u1265\u122d\u1295 \u1233\u12ed\u1328\u121d\u122d \u12a0\u1320\u1243\u120b\u12ed \u12c8\u132a\u12cd \u1235\u1295\u1275 \u1290\u1260\u122d\nA. 2\nB. $",
-    "positive_response": "$",
-    "negative_response": "2"
-  }
-]
@@ -1,30 +0,0 @@
-{
-  "task_name": "afrixnli_en_direct_amh",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "Premise: \u12a5\u1293\u1274 \u1264\u1275 \u1218\u1325\u127b\u1208\u1201 \u12a0\u1208\nHypothesis: \u12e8\u1275\u121d\u1205\u122d\u1275 \u1264\u1271 \u12a0\u12cd\u1276\u1265\u1235 \u12a5\u1295\u12f2\u12eb\u12c8\u122d\u12f0\u12cd \u12a5\u1293\u1271\u1295 \u1320\u122b...",
-      "positive_response": "neutral",
-      "negative_response": "entailment",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'neutral' (log_prob=-0.500), Expected: 'neutral'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: 'neutral' (log_prob=-0.500), Expected: 'entailment'"
-      },
-      "both_correct": true
-    }
-  ]
-}
@@ -1,8 +0,0 @@
-[
-  {
-    "pair_id": 0,
-    "prompt": "Premise: \u12a5\u1293\u1274 \u1264\u1275 \u1218\u1325\u127b\u1208\u1201 \u12a0\u1208\nHypothesis: \u12e8\u1275\u121d\u1205\u122d\u1275 \u1264\u1271 \u12a0\u12cd\u1276\u1265\u1235 \u12a5\u1295\u12f2\u12eb\u12c8\u122d\u12f0\u12cd \u12a5\u1293\u1271\u1295 \u1320\u122b",
-    "positive_response": "neutral",
-    "negative_response": "entailment"
-  }
-]
@@ -1,30 +0,0 @@
-{
-  "task_name": "arc_ar",
-  "model_name": "mock",
-  "evaluator_name": "log_likelihoods",
-  "num_pairs": 1,
-  "all_correct": true,
-  "pairs": [
-    {
-      "pair_id": 0,
-      "prompt": "Question: Question: \u0622\u0646\u0627 \u062a\u062d\u0645\u0644 \u0645\u0643\u0639\u0628 \u062b\u0644\u062c. \u0644\u0645\u0627\u0630\u0627 \u064a\u0630\u0648\u0628 \u0627\u0644\u0645\u0643\u0639\u0628 \u0641\u064a \u064a\u062f\u0647\u0627\u061f\nAnswer:\nA. \u0627\u0644\u0628\u0631\u0648\u062f\u0629 \u062a\u062a\u062d\u0631\u0643 \u0645\u0646 \u064a\u062f\u0647\u0627 \u0625...",
-      "positive_response": "\u0627\u0644\u062d\u0631\u0627\u0631\u0629 \u062a\u062a\u062d\u0631\u0643 \u0645\u0646 \u064a\u062f\u0647\u0627 \u0625\u0644\u0649 \u0627\u0644\u0645\u0643\u0639\u0628 \u0627\u0644\u062b\u0644\u062c\u064a.",
-      "negative_response": "\u0627\u0644\u0628\u0631\u0648\u062f\u0629 \u062a\u062a\u062d\u0631\u0643 \u0645\u0646 \u064a\u062f\u0647\u0627 \u0625\u0644\u0649 \u0627\u0644\u0645\u0643\u0639\u0628 \u0627\u0644\u062b\u0644\u062c\u064a.",
-      "positive_evaluation": {
-        "ground_truth": "TRUTHFUL",
-        "confidence": 1.0,
-        "expected": "TRUTHFUL",
-        "correct": true,
-        "details": "Predicted: '\u0627\u0644\u062d\u0631\u0627\u0631\u0629 \u062a\u062a\u062d\u0631\u0643 \u0645\u0646 \u064a\u062f\u0647\u0627 \u0625\u0644\u0649 \u0627\u0644\u0645\u0643\u0639\u0628 \u0627\u0644\u062b\u0644\u062c\u064a.' (log_prob=-0.500), Expected: '\u0627\u0644\u062d\u0631\u0627\u0631\u0629 \u062a\u062a\u062d\u0631\u0643 \u0645\u0646 \u064a\u062f\u0647\u0627 \u0625\u0644\u0649 \u0627\u0644\u0645\u0643\u0639\u0628 \u0627\u0644\u062b\u0644\u062c\u064a.'"
-      },
-      "negative_evaluation": {
-        "ground_truth": "UNTRUTHFUL",
-        "confidence": 0.0,
-        "expected": "UNTRUTHFUL",
-        "correct": true,
-        "details": "Predicted: '\u0627\u0644\u062d\u0631\u0627\u0631\u0629 \u062a\u062a\u062d\u0631\u0643 \u0645\u0646 \u064a\u062f\u0647\u0627 \u0625\u0644\u0649 \u0627\u0644\u0645\u0643\u0639\u0628 \u0627\u0644\u062b\u0644\u062c\u064a.' (log_prob=-0.500), Expected: '\u0627\u0644\u0628\u0631\u0648\u062f\u0629 \u062a\u062a\u062d\u0631\u0643 \u0645\u0646 \u064a\u062f\u0647\u0627 \u0625\u0644\u0649 \u0627\u0644\u0645\u0643\u0639\u0628 \u0627\u0644\u062b\u0644\u062c\u064a.'"
-      },
-      "both_correct": true
-    }
-  ]
-}
@@ -1,8 +0,0 @@
-[
-  {
-    "pair_id": 0,
-    "prompt": "Question: Question: \u0622\u0646\u0627 \u062a\u062d\u0645\u0644 \u0645\u0643\u0639\u0628 \u062b\u0644\u062c. \u0644\u0645\u0627\u0630\u0627 \u064a\u0630\u0648\u0628 \u0627\u0644\u0645\u0643\u0639\u0628 \u0641\u064a \u064a\u062f\u0647\u0627\u061f\nAnswer:\nA. \u0627\u0644\u0628\u0631\u0648\u062f\u0629 \u062a\u062a\u062d\u0631\u0643 \u0645\u0646 \u064a\u062f\u0647\u0627 \u0625\u0644\u0649 \u0627\u0644\u0645\u0643\u0639\u0628 \u0627\u0644\u062b\u0644\u062c\u064a.\nB. \u0627\u0644\u062d\u0631\u0627\u0631\u0629 \u062a\u062a\u062d\u0631\u0643 \u0645\u0646 \u064a\u062f\u0647\u0627 \u0625\u0644\u0649 \u0627\u0644\u0645\u0643\u0639\u0628 \u0627\u0644\u062b\u0644\u062c\u064a.",
-    "positive_response": "\u0627\u0644\u062d\u0631\u0627\u0631\u0629 \u062a\u062a\u062d\u0631\u0643 \u0645\u0646 \u064a\u062f\u0647\u0627 \u0625\u0644\u0649 \u0627\u0644\u0645\u0643\u0639\u0628 \u0627\u0644\u062b\u0644\u062c\u064a.",
-    "negative_response": "\u0627\u0644\u0628\u0631\u0648\u062f\u0629 \u062a\u062a\u062d\u0631\u0643 \u0645\u0646 \u064a\u062f\u0647\u0627 \u0625\u0644\u0649 \u0627\u0644\u0645\u0643\u0639\u0628 \u0627\u0644\u062b\u0644\u062c\u064a."
-  }
-]