liger-kernel-nightly 0.5.3.dev20250212203051__tar.gz → 0.5.3.dev20250214061904__tar.gz
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/Makefile +7 -3
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/PKG-INFO +1 -1
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/pyproject.toml +1 -1
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel_nightly.egg-info/PKG-INFO +1 -1
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel_nightly.egg-info/SOURCES.txt +8 -2
- liger_kernel_nightly-0.5.3.dev20250214061904/test/convergence/bf16/__init__.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051/test/convergence → liger_kernel_nightly-0.5.3.dev20250214061904/test/convergence/bf16}/test_mini_models.py +0 -40
- liger_kernel_nightly-0.5.3.dev20250214061904/test/convergence/bf16/test_mini_models_multimodal.py +421 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051/test/convergence → liger_kernel_nightly-0.5.3.dev20250214061904/test/convergence/bf16}/test_mini_models_with_logits.py +0 -39
- liger_kernel_nightly-0.5.3.dev20250214061904/test/convergence/fp32/__init__.py +0 -0
- liger_kernel_nightly-0.5.3.dev20250214061904/test/convergence/fp32/test_mini_models.py +546 -0
- liger_kernel_nightly-0.5.3.dev20250214061904/test/convergence/fp32/test_mini_models_multimodal.py +416 -0
- liger_kernel_nightly-0.5.3.dev20250214061904/test/convergence/fp32/test_mini_models_with_logits.py +545 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/.github/ISSUE_TEMPLATE/bug_report.yaml +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/.github/ISSUE_TEMPLATE/feature_request.yaml +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/.github/pull_request_template.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/.github/workflows/amd-ci.yml +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/.github/workflows/docs.yml +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/.github/workflows/intel-ci.yml +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/.github/workflows/nvi-ci.yml +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/.github/workflows/publish-nightly.yml +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/.github/workflows/publish-release.yml +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/.gitignore +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/LICENSE +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/NOTICE +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/README.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/README.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/__init__.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/benchmarks_visualizer.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/data/all_benchmark_data.csv +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/__init__.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_cpo_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_cross_entropy.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_distill_jsd_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_dpo_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_embedding.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_fused_linear_cross_entropy.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_fused_linear_jsd.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_geglu.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_group_norm.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_jsd.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_kl_div.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_kto_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_layer_norm.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_orpo_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_qwen2vl_mrope.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_rms_norm.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_rope.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_simpo_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/benchmark_swiglu.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/benchmark/scripts/utils.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/dev/fmt-requirements.txt +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/dev/modal/tests.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/dev/modal/tests_bwd.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/Examples.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/Getting-Started.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/High-Level-APIs.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/Low-Level-APIs.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/acknowledgement.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/contributing.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/images/banner.GIF +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/images/compose.gif +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/images/e2e-memory.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/images/e2e-tps.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/images/logo-banner.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/images/patch.gif +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/images/post-training.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/index.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/docs/license.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/alignment/accelerate_config.yaml +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/alignment/run_orpo.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/README.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/callback.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/config/fsdp_config.json +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/img/gemma_7b_mem.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/img/gemma_7b_tp.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/img/llama_mem_alloc.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/img/llama_tps.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/img/qwen_mem_alloc.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/img/qwen_tps.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/launch_on_modal.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/requirements.txt +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/run_benchmarks.sh +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/run_gemma.sh +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/run_llama.sh +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/run_qwen.sh +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/run_qwen2_vl.sh +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/training.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/huggingface/training_multimodal.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/lightning/README.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/lightning/requirements.txt +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/lightning/training.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/README.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/callback.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/docs/images/Memory_Stage1_num_head_3.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/docs/images/Memory_Stage1_num_head_5.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/docs/images/Memory_Stage2_num_head_3.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/docs/images/Memory_Stage2_num_head_5.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/docs/images/Throughput_Stage1_num_head_3.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/docs/images/Throughput_Stage1_num_head_5.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/docs/images/Throughput_Stage2_num_head_3.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/docs/images/Throughput_Stage2_num_head_5.png +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/fsdp/acc-fsdp.conf +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/medusa_util.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/requirements.txt +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/scripts/llama3_8b_medusa.sh +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/examples/medusa/train.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/licenses/LICENSE-Apache-2.0 +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/licenses/LICENSE-MIT-AutoAWQ +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/licenses/LICENSE-MIT-Efficient-Cross-Entropy +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/licenses/LICENSE-MIT-llmc +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/licenses/LICENSE-MIT-triton +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/mkdocs.yml +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/setup.cfg +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/setup.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/__init__.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/chunked_loss/README.md +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/chunked_loss/__init__.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/chunked_loss/cpo_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/chunked_loss/dpo_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/chunked_loss/functional.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/chunked_loss/fused_linear_distillation.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/chunked_loss/fused_linear_preference.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/chunked_loss/fused_linear_unpaired_preference.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/chunked_loss/jsd_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/chunked_loss/kto_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/chunked_loss/orpo_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/chunked_loss/simpo_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/env_report.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/__init__.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/cross_entropy.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/experimental/embedding.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/experimental/mm_int8int2.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/fused_linear_cross_entropy.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/fused_linear_jsd.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/geglu.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/group_norm.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/jsd.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/kl_div.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/layer_norm.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/qwen2vl_mrope.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/rms_norm.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/rope.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/swiglu.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/ops/utils.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/__init__.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/auto_model.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/cross_entropy.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/experimental/embedding.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/functional.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/fused_linear_cross_entropy.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/fused_linear_jsd.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/geglu.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/group_norm.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/jsd.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/kl_div.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/layer_norm.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/model/__init__.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/model/gemma.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/model/gemma2.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/model/llama.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/model/mistral.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/model/mixtral.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/model/mllama.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/model/phi3.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/model/qwen2.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/model/qwen2_vl.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/monkey_patch.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/qwen2vl_mrope.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/rms_norm.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/rope.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/swiglu.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/trainer/__init__.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/trainer/orpo_trainer.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/transformers/trainer_integration.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/triton/__init__.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/triton/monkey_patch.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel/utils.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel_nightly.egg-info/dependency_links.txt +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel_nightly.egg-info/requires.txt +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/src/liger_kernel_nightly.egg-info/top_level.txt +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/__init__.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/chunked_loss/__init__.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/chunked_loss/test_cpo_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/chunked_loss/test_dpo_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/chunked_loss/test_jsd_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/chunked_loss/test_kto_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/chunked_loss/test_orpo_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/chunked_loss/test_simpo_loss.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/conftest.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/convergence/__init__.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/convergence/test_mini_models_multimodal.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/resources/fake_configs/Qwen/Qwen2-VL-7B-Instruct/tokenizer_config.json +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/resources/fake_configs/meta-llama/Llama-3.2-11B-Vision-Instruct/tokenizer_config.json +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/resources/scripts/generate_tokenized_dataset.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/resources/tiny_shakespeare.txt +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/resources/tiny_shakespeare_tokenized/data-00000-of-00001.arrow +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/resources/tiny_shakespeare_tokenized/dataset_info.json +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/resources/tiny_shakespeare_tokenized/state.json +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_auto_model.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_cross_entropy.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_embedding.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_fused_linear_cross_entropy.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_fused_linear_jsd.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_geglu.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_group_norm.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_jsd.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_kl_div.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_layer_norm.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_mm_int8int2.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_monkey_patch.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_qwen2vl_mrope.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_rms_norm.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_rope.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_swiglu.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_trainer_integration.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/transformers/test_transformers.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/triton/test_triton_monkey_patch.py +0 -0
- {liger_kernel_nightly-0.5.3.dev20250212203051 → liger_kernel_nightly-0.5.3.dev20250214061904}/test/utils.py +0 -0
Makefile
@@ -18,9 +18,13 @@ checkstyle:
 # Command to run pytest for convergence tests
 # We have to explicitly set HF_DATASETS_OFFLINE=1, or dataset will silently try to send metrics and timeout (80s) https://github.com/huggingface/datasets/blob/37a603679f451826cfafd8aae00738b01dcb9d58/src/datasets/load.py#L286
 test-convergence:
-	HF_DATASETS_OFFLINE=1 python -m pytest --disable-warnings test/convergence/test_mini_models.py
-	HF_DATASETS_OFFLINE=1 python -m pytest --disable-warnings test/convergence/test_mini_models_multimodal.py
-	HF_DATASETS_OFFLINE=1 python -m pytest --disable-warnings test/convergence/test_mini_models_with_logits.py
+	HF_DATASETS_OFFLINE=1 python -m pytest --disable-warnings test/convergence/fp32/test_mini_models.py
+	HF_DATASETS_OFFLINE=1 python -m pytest --disable-warnings test/convergence/fp32/test_mini_models_multimodal.py
+	HF_DATASETS_OFFLINE=1 python -m pytest --disable-warnings test/convergence/fp32/test_mini_models_with_logits.py
+
+	HF_DATASETS_OFFLINE=1 python -m pytest --disable-warnings test/convergence/bf16/test_mini_models.py
+	HF_DATASETS_OFFLINE=1 python -m pytest --disable-warnings test/convergence/bf16/test_mini_models_multimodal.py
+	HF_DATASETS_OFFLINE=1 python -m pytest --disable-warnings test/convergence/bf16/test_mini_models_with_logits.py
 
 # Command to run all benchmark scripts and update benchmarking data file
 # By default this doesn't overwrite existing data for the same benchmark experiment
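The target above now runs the convergence suite twice, once per precision directory. As a hedged sketch (not part of this diff), the same sequence can be driven from Python via pytest's API; the paths are the ones introduced above:

    import os

    import pytest

    # Keep HF datasets offline (see the Makefile comment), then run one suite per precision.
    os.environ["HF_DATASETS_OFFLINE"] = "1"
    for precision in ("fp32", "bf16"):
        pytest.main(["--disable-warnings", f"test/convergence/{precision}/test_mini_models.py"])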
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "liger_kernel_nightly"
-version = "0.5.3.dev20250212203051"
+version = "0.5.3.dev20250214061904"
 description = "Efficient Triton kernels for LLM Training"
 urls = { "Homepage" = "https://github.com/linkedin/Liger-Kernel" }
 readme = { file = "README.md", content-type = "text/markdown" }
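The only change to pyproject.toml is the nightly version stamp. A minimal sketch of how a timestamped dev version of this shape can be generated (an assumption for illustration; the project's actual publish workflow is not shown in this diff):

    from datetime import datetime, timezone

    # "0.5.3.dev20250214061904" is the base version plus ".dev" and a UTC timestamp.
    base_version = "0.5.3"
    stamp = datetime.now(timezone.utc).strftime("%Y%m%d%H%M%S")
    print(f"{base_version}.dev{stamp}")  # e.g. 0.5.3.dev20250214061904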
src/liger_kernel_nightly.egg-info/SOURCES.txt
@@ -180,9 +180,15 @@ test/chunked_loss/test_kto_loss.py
 test/chunked_loss/test_orpo_loss.py
 test/chunked_loss/test_simpo_loss.py
 test/convergence/__init__.py
-test/convergence/test_mini_models.py
 test/convergence/test_mini_models_multimodal.py
-test/convergence/test_mini_models_with_logits.py
+test/convergence/bf16/__init__.py
+test/convergence/bf16/test_mini_models.py
+test/convergence/bf16/test_mini_models_multimodal.py
+test/convergence/bf16/test_mini_models_with_logits.py
+test/convergence/fp32/__init__.py
+test/convergence/fp32/test_mini_models.py
+test/convergence/fp32/test_mini_models_multimodal.py
+test/convergence/fp32/test_mini_models_with_logits.py
 test/resources/tiny_shakespeare.txt
 test/resources/fake_configs/Qwen/Qwen2-VL-7B-Instruct/tokenizer_config.json
 test/resources/fake_configs/meta-llama/Llama-3.2-11B-Vision-Instruct/tokenizer_config.json
test/convergence/bf16/test_mini_models.py (renamed from test/convergence/test_mini_models.py)
@@ -457,7 +457,6 @@ def run_mini_model(
 @pytest.mark.parametrize(
     "model_name, num_steps, lr, dtype, loss_atol, loss_rtol, logits_atol, logits_rtol, param_atol, param_rtol",
     [
-        ("mini_llama3", 32, 1e-4, torch.float32, 1e-8, 2e-5, 1e-4, 1e-5, 5e-3, 1e-5),
         pytest.param(
             "mini_llama3",
             32,
@@ -471,22 +470,6 @@ def run_mini_model(
             1e-2,
             marks=pytest.mark.skipif(not supports_bfloat16(), reason="bfloat16 not supported on this GPU"),
         ),
-        pytest.param(
-            "mini_mllama",
-            32,
-            1e-4,
-            torch.float32,
-            1e-8,
-            1e-5,
-            5e-3,
-            1e-5,
-            5e-3,
-            1e-5,
-            marks=pytest.mark.skipif(
-                not MLLAMA_AVAILABLE,
-                reason="Mllama not available in this version of transformers",
-            ),
-        ),
         pytest.param(
             "mini_mllama",
             32,
@@ -506,7 +489,6 @@ def run_mini_model(
                 ),
             ],
         ),
-        ("mini_qwen2", 32, 1e-4, torch.float32, 1e-8, 1e-5, 5e-3, 1e-5, 5e-3, 1e-5),
         pytest.param(
             "mini_qwen2",
             32,
@@ -520,22 +502,6 @@ def run_mini_model(
             1e-2,
             marks=pytest.mark.skipif(not supports_bfloat16(), reason="bfloat16 not supported on this GPU"),
         ),
-        pytest.param(  # qwen2_vl requires slightly larger tolerances to pass this test after bug fix to qwen2_vl in transformers v4.47.0
-            "mini_qwen2_vl",
-            32,
-            1e-4,
-            torch.float32,
-            1e-5,  # 1e-8,
-            1e-1,  # 1e-5,
-            5e-3,
-            1e-5,
-            5e-3,
-            1e-5,
-            marks=pytest.mark.skipif(
-                not QWEN2_VL_AVAILABLE,
-                reason="Qwen2-VL not available in this version of transformers",
-            ),
-        ),
         pytest.param(
             "mini_qwen2_vl",
             32,
@@ -555,7 +521,6 @@ def run_mini_model(
                 ),
             ],
         ),
-        ("mini_phi3", 32, 1e-4, torch.float32, 1e-8, 1e-5, 5e-3, 1e-5, 5e-3, 1e-5),
         pytest.param(
             "mini_phi3",
             32,
@@ -569,7 +534,6 @@ def run_mini_model(
             1e-2,
             marks=pytest.mark.skipif(not supports_bfloat16(), reason="bfloat16 not supported on this GPU"),
         ),
-        ("mini_mistral", 32, 1e-4, torch.float32, 1e-8, 1e-5, 5e-3, 1e-5, 5e-3, 1e-5),
         pytest.param(
             "mini_mistral",
             32,
@@ -584,7 +548,6 @@ def run_mini_model(
             marks=pytest.mark.skipif(not supports_bfloat16(), reason="bfloat16 not supported on this GPU"),
         ),
         # TODO: mixtral is flaky so disable the test for now
-        # ("mini_mixtral", 32, 1e-4, torch.float32, 5e-4, 1e-4, 5e-3, 1e-5, 1e-2, 1e-5),
         # pytest.param(
         #     "mini_mixtral",
         #     32,
@@ -601,7 +564,6 @@ def run_mini_model(
         #     ),
         # ),
         # Gemma 1.1 and 2 has more tolerance because currently, the kernel is not a perfect match (casts are not done the same way)
-        ("mini_gemma1", 32, 1e-4, torch.float32, 1e-8, 1e-4, 5e-3, 1e-5, 5e-3, 1e-5),
         pytest.param(
             "mini_gemma1",
             32,
@@ -615,7 +577,6 @@ def run_mini_model(
             1e-2,
             marks=pytest.mark.skipif(not supports_bfloat16(), reason="bfloat16 not supported on this GPU"),
         ),
-        ("mini_gemma1.1", 32, 1e-4, torch.float32, 1e-8, 1e-4, 5e-3, 1e-5, 5e-3, 1e-5),
         pytest.param(
             "mini_gemma1.1",
             32,
@@ -629,7 +590,6 @@ def run_mini_model(
             1e-2,
             marks=pytest.mark.skipif(not supports_bfloat16(), reason="bfloat16 not supported on this GPU"),
         ),
-        ("mini_gemma2", 32, 1e-4, torch.float32, 1e-8, 1e-4, 5e-3, 1e-5, 5e-3, 1e-5),
         # TODO: Gemma2 test for bf16 is not passing within the tolerance range, might be casting issue, need to investigate
         # pytest.param(
         #     "mini_gemma2",
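All remaining cases in this file are bf16 and carry the supports_bfloat16() skip mark from test/utils. That helper's implementation is not part of this diff; a minimal sketch of such a capability check, assuming CUDA bf16 requires compute capability 8.0 (Ampere) or newer:

    import torch

    def supports_bfloat16() -> bool:
        # Sketch only: bf16 is generally usable on NVIDIA GPUs with compute capability >= 8.0.
        if not torch.cuda.is_available():
            return False
        major, _minor = torch.cuda.get_device_capability()
        return major >= 8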
liger_kernel_nightly-0.5.3.dev20250214061904/test/convergence/bf16/test_mini_models_multimodal.py ADDED
@@ -0,0 +1,421 @@
+import functools
+import os
+
+import pytest
+import torch
+
+from datasets import load_dataset
+from torch.utils.data import DataLoader
+from transformers import PreTrainedTokenizerFast
+
+from liger_kernel.transformers import apply_liger_kernel_to_mllama
+from liger_kernel.transformers import apply_liger_kernel_to_qwen2_vl
+from test.utils import FAKE_CONFIGS_PATH
+from test.utils import UNTOKENIZED_DATASET_PATH
+from test.utils import MiniModelConfig
+from test.utils import assert_verbose_allclose
+from test.utils import load_tokenizer_config
+from test.utils import multimodal_collate_fn
+from test.utils import revert_liger_kernel_to_mllama
+from test.utils import revert_liger_kernel_to_qwen2_vl
+from test.utils import set_seed
+from test.utils import supports_bfloat16
+from test.utils import train_bpe_tokenizer
+
+try:
+    # Qwen2-VL is only available in transformers>=4.45.0
+    from transformers.models.qwen2.tokenization_qwen2_fast import Qwen2TokenizerFast
+    from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLConfig
+    from transformers.models.qwen2_vl.image_processing_qwen2_vl import Qwen2VLImageProcessor
+    from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLForConditionalGeneration
+    from transformers.models.qwen2_vl.processing_qwen2_vl import Qwen2VLProcessor
+
+    QWEN2_VL_AVAILABLE = True
+except ImportError:
+    QWEN2_VL_AVAILABLE = False
+
+try:
+    # Mllama is only available in transformers>=4.45.0
+    from transformers.models.mllama.configuration_mllama import MllamaConfig
+    from transformers.models.mllama.configuration_mllama import MllamaTextConfig
+    from transformers.models.mllama.configuration_mllama import MllamaVisionConfig
+    from transformers.models.mllama.image_processing_mllama import MllamaImageProcessor
+    from transformers.models.mllama.modeling_mllama import MllamaForConditionalGeneration
+    from transformers.models.mllama.processing_mllama import MllamaProcessor
+
+    MLLAMA_AVAILABLE = True
+except ImportError:
+    MLLAMA_AVAILABLE = False
+
+from liger_kernel.utils import infer_device
+
+device = infer_device()
+
+torch.use_deterministic_algorithms(True)
+
+# Only setting torch.use_deterministic_algorithms(True) throws the following error:
+# RuntimeError: Deterministic behavior was enabled with either `torch.use_deterministic_algorithms(True)` or `at::Context::setDeterministicAlgorithms(true)`,
+# but this operation is not deterministic because it uses CuBLAS and you have CUDA >= 10.2. To enable deterministic behavior in this case, you must set an
+# environment variable before running your PyTorch application: CUBLAS_WORKSPACE_CONFIG=:4096:8 or CUBLAS_WORKSPACE_CONFIG=:16:8. For more information,
+# go to https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility
+
+os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
+
+TEST_IMAGE_DIM = 64
+
+MINI_MODEL_SETUPS = {}
+
+
+if MLLAMA_AVAILABLE:
+    MINI_MODEL_SETUPS["mini_mllama"] = MiniModelConfig(
+        liger_kernel_patch_func=functools.partial(apply_liger_kernel_to_mllama, fused_linear_cross_entropy=False),
+        liger_kernel_patch_revert_func=revert_liger_kernel_to_mllama,
+        model_class=MllamaForConditionalGeneration,
+        mini_model_config=MllamaConfig(
+            vision_config=MllamaVisionConfig(
+                hidden_act="gelu",
+                hidden_size=512,  # 1280
+                image_size=560,  # 560
+                initializer_range=0.02,
+                intermediate_layers_indices=[2],  # [3, 7, 15, etc...]
+                intermediate_size=2048,  # 5120
+                max_num_tiles=1,  # 4
+                norm_eps=1e-5,
+                num_attention_heads=4,  # 16
+                num_channels=3,
+                num_global_layers=2,  # 8
+                num_hidden_layers=8,  # 32
+                patch_size=140,  # 14
+                supported_aspect_ratios=[[1, 1]],  # [[1, 1], [1, 2], etc... ]
+                vision_output_dim=1024,  # 7680
+            ),
+            text_config=MllamaTextConfig(
+                bos_token_id=0,
+                eos_token_id=0,
+                pad_token_id=0,
+                cross_attention_layers=[2],  # [3, 8, 13, 18, etc...]
+                dropout=0,
+                hidden_act="silu",
+                hidden_size=1024,  # 4096
+                initializer_range=0.02,
+                intermediate_size=2048,  # 14336
+                max_position_embeddings=131_072,
+                num_attention_heads=8,  # 32
+                num_hidden_layers=4,  # 40
+                num_key_value_heads=2,  # 8
+                rms_norm_eps=1e-5,
+                rope_scaling=dict(
+                    factor=8.0,
+                    high_freq_factor=4.0,
+                    low_freq_factor=1.0,
+                    original_max_position_embeddings=8192,
+                    rope_type="llama3",
+                ),
+                rope_theta=500_000,
+                tie_word_embeddings=False,
+                use_cache=True,
+                vocab_size=32000,  # 128256,
+            ),
+            image_token_index=1,  # NOTE: outside the vocab size
+            attn_implementation="sdpa",
+        ),
+    )
+
+if QWEN2_VL_AVAILABLE:
+    MINI_MODEL_SETUPS["mini_qwen2_vl"] = MiniModelConfig(
+        liger_kernel_patch_func=functools.partial(apply_liger_kernel_to_qwen2_vl, fused_linear_cross_entropy=False),
+        liger_kernel_patch_revert_func=revert_liger_kernel_to_qwen2_vl,
+        model_class=Qwen2VLForConditionalGeneration,
+        mini_model_config=Qwen2VLConfig(
+            attention_dropout=0.0,
+            # Token Ids and vocab size must match those in the tokenizer/processor
+            # test/resources/fake_configs/Qwen/Qwen2-VL-7B-Instruct/tokenizer_config.json
+            bos_token_id=0,
+            eos_token_id=0,
+            vision_start_token_id=1,
+            vision_end_token_id=2,
+            vision_token_id=3,
+            image_token_id=4,
+            video_token_id=5,
+            hidden_act="silu",
+            hidden_size=1024,  # 8192
+            initializer_range=0.02,
+            intermediate_size=1024,  # 29568
+            max_position_embeddings=32768,
+            max_window_layers=4,  # 80
+            num_attention_heads=8,  # 64
+            num_hidden_layers=4,  # 80
+            num_key_value_heads=2,  # 8
+            rms_norm_eps=1e-6,  # 1e-5
+            rope_theta=1000000.0,
+            rope_scaling=dict(
+                type="mrope",
+                mrope_section=[16, 24, 24],  # (temporal, height, width)
+            ),
+            sliding_window=4096,
+            tie_word_embeddings=True,
+            use_cache=False,  # True
+            vocab_size=32000,  # 152064,
+            use_sliding_window=False,
+            vision_config={
+                "depth": 4,  # 32
+                "embed_dim": 128,  # 1280
+                "mlp_ratio": 1,
+                "num_heads": 8,  # 16
+                "in_chans": 3,
+                "hidden_size": 1024,  # 1536
+            },
+            attn_implementation="sdpa",
+        ),
+    )
+
+
+def create_processor(model_name):
+    if model_name == "mini_qwen2_vl":
+        tokenizer_config = load_tokenizer_config(
+            os.path.join(FAKE_CONFIGS_PATH, "Qwen/Qwen2-VL-7B-Instruct/tokenizer_config.json")
+        )
+        tokenizer_base = train_bpe_tokenizer(
+            [
+                token.content
+                for key, token in sorted(
+                    tokenizer_config["added_tokens_decoder"].items(),
+                    key=lambda x: int(x[0]),
+                )
+            ]
+        )
+        qwen_tokenizer = Qwen2TokenizerFast(tokenizer_object=tokenizer_base, **tokenizer_config)
+        image_processor = Qwen2VLImageProcessor()
+        return Qwen2VLProcessor(image_processor=image_processor, tokenizer=qwen_tokenizer)
+
+    elif model_name == "mini_mllama":
+        tokenizer_config = load_tokenizer_config(
+            os.path.join(
+                FAKE_CONFIGS_PATH,
+                "meta-llama/Llama-3.2-11B-Vision-Instruct/tokenizer_config.json",
+            )
+        )
+        tokenizer_base = train_bpe_tokenizer(
+            [
+                token.content
+                for key, token in sorted(
+                    tokenizer_config["added_tokens_decoder"].items(),
+                    key=lambda x: int(x[0]),
+                )
+            ]
+        )
+        fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer_base, **tokenizer_config)
+        image_processor = MllamaImageProcessor(size={"height": 560, "width": 560})
+        return MllamaProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)
+    else:
+        raise ValueError(f"Processor not available for model {model_name}")
+
+
+def create_multimodal_dataset(model_name: str):
+    processor = create_processor(model_name)
+
+    def generate_procedural_image(example, index):
+        """Generate an image with a single row of white pixels at the index specified"""
+        image = torch.zeros(3, TEST_IMAGE_DIM, TEST_IMAGE_DIM)
+        image[:, index % TEST_IMAGE_DIM, :] = 255
+        example["image"] = image
+        return example
+
+    def apply_chat_template(example):
+        """
+        Under the hood, this inserts the correct image placeholder token into the text.
+        More or less this conversation format is used by HF's mllms. The fact that it is
+        formatting as for IFT is not in-and-of-itself important here.
+        """
+        conversation = [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "image"},
+                    {"type": "text", "text": "Describe this image."},
+                ],
+            },
+            {
+                "role": "assistant",
+                "content": [{"type": "text", "text": example["text"]}],
+            },
+        ]
+        example["text"] = processor.tokenizer.apply_chat_template(conversation, tokenize=False)
+        return example
+
+    def preprocess_function(examples):
+        """Tokenize text, preprocess images, and generate other relevant inputs for the model."""
+        return processor(
+            text=examples["text"],
+            images=examples["image"],
+            padding="max_length",
+            truncation=True,
+            max_length=1024,  # longer than for text-only b/c images require quite a few tokens
+            return_tensors="pt",
+        )
+
+    train_dataset = (
+        load_dataset("text", data_files={"train": UNTOKENIZED_DATASET_PATH}, split="train")
+        .to_iterable_dataset()  # only map examples as-needed and on-demand
+        .map(generate_procedural_image, with_indices=True)
+        .map(apply_chat_template)
+        .map(preprocess_function, remove_columns=["text", "image"])
+    )
+    return train_dataset
+
+
+def create_model(model_name):
+    """
+    Create a mini version model
+    The commented values are the original values
+    """
+    model_config = MINI_MODEL_SETUPS[model_name].mini_model_config
+    model_class = MINI_MODEL_SETUPS[model_name].model_class
+    return model_class(model_config)
+
+
+def run_mini_model_multimodal(
+    model_name="mini_qwen2_vl",
+    num_steps=100,
+    dtype=torch.bfloat16,
+    lr=1e-5,
+    with_liger=False,
+):
+    # If we move it to the beginning of test_mini_model, the two runs are initialized with different weights.
+    # This is due to RNG (Random Number Generator). The formula of RNG progression is x_(n+1) = (a * x_n + c) % m
+    # Everytime RNG is used, like randomly initialzing weight, the RNG progresses to the next state.
+    # Therefore, we have to reset RNG before we create the model to ensure the weight initialization started from the same RNG state.
+
+    set_seed(42)
+
+    revert_kwargs = {"model_config": MINI_MODEL_SETUPS[model_name]}
+    if "mllama" in model_name:
+        revert_kwargs["model_type"] = "conditional_generation"
+
+    if with_liger is True:
+        kwargs = {
+            "rope": True,
+            "rms_norm": True,
+            "cross_entropy": True,
+            "layer_norm": True,
+        }
+
+        if "gemma" in model_name:
+            kwargs["geglu"] = True
+        else:
+            kwargs["swiglu"] = True
+        MINI_MODEL_SETUPS[model_name].liger_kernel_patch_func(**kwargs)
+    else:
+        MINI_MODEL_SETUPS[model_name].liger_kernel_patch_revert_func(**revert_kwargs)
+
+    model = create_model(model_name).to(dtype).to(device)
+    model.gradient_checkpointing_enable()
+
+    train_dataset = create_multimodal_dataset(model_name)
+    loader = DataLoader(train_dataset, batch_size=2, shuffle=False, collate_fn=multimodal_collate_fn)
+    loader_iter = iter(loader)
+    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
+
+    loss_list = []
+
+    for i in range(num_steps):
+        batch = next(loader_iter).to(model.device)
+        optimizer.zero_grad()
+        output = model(**batch)
+        output.loss.backward()
+        optimizer.step()
+
+        print(f"Step {i}, Loss: {output.loss.item()}")
+        loss_list.append(output.loss.item())
+
+    MINI_MODEL_SETUPS[model_name].liger_kernel_patch_revert_func(**revert_kwargs)
+    return {"loss": loss_list, "logits": output.logits, "model": model}
+
+
+@pytest.mark.parametrize(
+    "model_name, num_steps, lr, dtype, loss_atol, loss_rtol, logits_atol, logits_rtol, param_atol, param_rtol",
+    [
+        pytest.param(
+            "mini_qwen2_vl",
+            32,
+            1e-4,
+            torch.bfloat16,
+            1e-3,
+            1e-2,
+            1e-1,
+            1e-2,
+            1e-2,
+            1e-2,
+            marks=[
+                pytest.mark.skipif(not supports_bfloat16(), reason="bfloat16 not supported on this GPU"),
+                pytest.mark.skipif(
+                    not QWEN2_VL_AVAILABLE,
+                    reason="Qwen2-VL not available in this version of transformers",
+                ),
+                pytest.mark.skipif(device == "xpu", reason="skip for XPU"),
+            ],
+        ),
+        pytest.param(
+            "mini_mllama",
+            32,
+            1e-4,
+            torch.bfloat16,
+            1e-3,
+            1e-2,
+            1e-1,
+            1e-2,
+            1e-2,
+            1e-2,
+            marks=[
+                pytest.mark.skipif(not supports_bfloat16(), reason="bfloat16 not supported on this GPU"),
+                pytest.mark.skipif(
+                    not MLLAMA_AVAILABLE,
+                    reason="Mllama not available in this version of transformers",
+                ),
+            ],
+        ),
+    ],
+)
+def test_mini_model_multimodal(
+    model_name,
+    num_steps,
+    lr,
+    dtype,
+    loss_atol,
+    loss_rtol,
+    logits_atol,
+    logits_rtol,
+    param_atol,
+    param_rtol,
+):
+    # Non-liger models should be initialized and tested first to avoid the module being overridden
+    expected_output = run_mini_model_multimodal(model_name=model_name, num_steps=num_steps, dtype=dtype, lr=lr)
+
+    actual_output = run_mini_model_multimodal(
+        model_name=model_name, num_steps=num_steps, dtype=dtype, lr=lr, with_liger=True
+    )
+
+    # Compare the loss of every step
+    assert_verbose_allclose(
+        torch.tensor([expected_output["loss"]]),
+        torch.tensor([actual_output["loss"]]),
+        atol=loss_atol,
+        rtol=loss_rtol,
+    )
+
+    # Compare the logits from the last step
+    assert_verbose_allclose(
+        expected_output["logits"],
+        actual_output["logits"],
+        atol=logits_atol,
+        rtol=logits_rtol,
+    )
+
+    # Compare the params from the last step
+    # Iterate over the model's parameters and compare them
+    for expected_param, actual_param in zip(
+        expected_output["model"].named_parameters(),
+        actual_output["model"].named_parameters(),
+        strict=False,
+    ):
+        assert_verbose_allclose(expected_param[1], actual_param[1], atol=param_atol, rtol=param_rtol)