megatron-core 0.16.0rc0.dev127378__tar.gz → 0.16.0rc0.dev127802__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of megatron-core might be problematic; consult the advisory on the package's registry page for more details.
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/PKG-INFO +1 -1
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/megatron_tokenizer.py +9 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fp8_utils.py +49 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/async_stream.py +8 -2
- megatron_core-0.16.0rc0.dev127802/megatron/core/inference/contexts/attention_context/mamba_metadata.py +106 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/contexts/dynamic_context.py +188 -32
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/contexts/fused_kv_append_kernel.py +2 -2
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/data_parallel_inference_coordinator.py +7 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/engines/dynamic_engine.py +27 -13
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/engines/static_engine.py +3 -7
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/inference_client.py +3 -1
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/text_generation_controllers/text_generation_controller.py +4 -2
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/utils.py +28 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/gpt/gpt_model.py +1 -3
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/mamba/mamba_model.py +30 -1
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/package_info.py +1 -1
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/safe_globals.py +2 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/ssm/mamba_block.py +16 -25
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/ssm/mamba_hybrid_layer_allocation.py +29 -2
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/ssm/mamba_layer.py +5 -5
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/ssm/mamba_mixer.py +301 -57
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/dot_product_attention.py +2 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/moe/router.py +2 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/utils.py +143 -1
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron_core.egg-info/PKG-INFO +1 -1
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron_core.egg-info/SOURCES.txt +1 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/MANIFEST.in +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/README.md +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/README.md +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/activations.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/config_logger.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/bert_dataset.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/blended_dataset.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/blended_megatron_dataset_builder.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/blended_megatron_dataset_config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/gpt_dataset.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/helpers.cpp +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/helpers.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/indexed_dataset.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/masked_dataset.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/megatron_dataset.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/multimodal_dataset.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/object_storage_utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/config/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/config/bert_embedders.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/config/config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/config/gpt_chunk_datasets.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/config/tokenizers.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/db/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/db/build.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/db/dataset.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/db/utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/external_libs.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/index/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/index/build.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/index/factory.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/index/index.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/index/indexes/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/index/indexes/faiss_base.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/index/indexes/faiss_par_add.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/index/utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/index/validate.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/query/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/query/gpt_chunk_dataset.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/query/multi_split_gpt_dataset.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/query/query.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/query/retro_dataset.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/query/utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/retro/utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/t5_dataset.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/datasets/utils_s3.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/core.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/dict_utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/exchange_utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/mapping.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/optimizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/serialization.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/state_dict_utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/strategies/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/strategies/async_utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/strategies/base.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/strategies/cached_metadata_filesystem_reader.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/strategies/checkpointable.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/strategies/common.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/strategies/filesystem_async.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/strategies/fully_parallel.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/strategies/resharding.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/strategies/state_dict_saver.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/strategies/tensorstore.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/strategies/torch.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/strategies/two_stage.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/strategies/zarr.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/tensor_aware_state_dict.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/dist_checkpointing/validation.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/data_parallel_base.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/distributed_data_parallel.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/distributed_data_parallel_config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/finalize_model_grads.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/fsdp/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/fsdp/mcore_fsdp_adapter.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/fsdp/src/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/fsdp/src/megatron_fsdp/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/fsdp/src/megatron_fsdp/distributed_data_parallel_config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/fsdp/src/megatron_fsdp/fully_shard.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/fsdp/src/megatron_fsdp/megatron_fsdp.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/fsdp/src/megatron_fsdp/package_info.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/fsdp/src/megatron_fsdp/param_and_grad_buffer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/fsdp/src/megatron_fsdp/uneven_dtensor.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/fsdp/src/megatron_fsdp/utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/param_and_grad_buffer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/reduce_scatter_with_fp32_accumulation.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/torch_fully_sharded_data_parallel.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/distributed/torch_fully_sharded_data_parallel_config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/energy_monitor.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/enums.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/data_type.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/export_config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/model_type.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/trtllm/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/trtllm/engine_builder/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/trtllm/engine_builder/trtllm_engine_builder.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/trtllm/model_to_trllm_mapping/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/trtllm/model_to_trllm_mapping/default_conversion_dict.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/trtllm/trt_model_config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/trtllm/trt_model_type.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/trtllm/trtllm_helper.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/trtllm/trtllm_layers.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/trtllm/trtllm_weights_converter/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/trtllm/trtllm_weights_converter/distributed_trtllm_model_weights_converter.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/trtllm/trtllm_weights_converter/single_device_trtllm_model_weights_converter.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/export/trtllm/trtllm_weights_converter/utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/extensions/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/extensions/kitchen.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/extensions/transformer_engine.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/extensions/transformer_engine_spec_provider.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fp4_utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/full_cuda_graph.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fusions/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fusions/fused_bias_dropout.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fusions/fused_bias_geglu.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fusions/fused_bias_gelu.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fusions/fused_bias_swiglu.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fusions/fused_cross_entropy.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fusions/fused_indices_converter.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fusions/fused_layer_norm.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fusions/fused_mla_yarn_rope_apply.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fusions/fused_pad_routing_map.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fusions/fused_softmax.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fusions/fused_weighted_squared_relu.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/hyper_comm_grid.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/common_inference_params.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/communication_utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/contexts/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/contexts/attention_context/metadata_base.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/contexts/attention_context/mha_metadata.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/contexts/base_context.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/contexts/dynamic_block_allocator.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/contexts/static_context.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/engines/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/engines/abstract_engine.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/engines/mcore_engine.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/headers.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/inference_request.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/model_inference_wrappers/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/model_inference_wrappers/abstract_model_inference_wrapper.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/model_inference_wrappers/gpt/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/model_inference_wrappers/gpt/gpt_inference_wrapper.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/model_inference_wrappers/inference_wrapper_config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/model_inference_wrappers/multimodal/vlm_inference_wrapper.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/model_inference_wrappers/t5/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/model_inference_wrappers/t5/t5_inference_wrapper.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/sampling_params.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/scheduler.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/text_generation_controllers/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/text_generation_controllers/encoder_decoder_text_generation_controller.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/text_generation_controllers/simple_text_generation_controller.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/text_generation_controllers/vlm_text_generation_controller.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/text_generation_server/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/text_generation_server/endpoints/common.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/text_generation_server/endpoints/completions.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/text_generation_server/run_mcore_engine.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/text_generation_server/text_generation_server.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/text_generation_server/tokenization.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference/unified_memory.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/inference_params.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/jit.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/model_parallel_config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/T5/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/T5/t5_model.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/T5/t5_spec.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/backends.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/bert/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/bert/bert_layer_specs.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/bert/bert_lm_head.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/bert/bert_model.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/bert/pooler.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/common/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/common/embeddings/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/common/embeddings/language_model_embedding.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/common/embeddings/relative_pos_embedding.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/common/embeddings/rope_utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/common/embeddings/rotary_pos_embedding.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/common/embeddings/yarn_rotary_pos_embedding.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/common/language_module/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/common/language_module/language_module.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/common/model_chunk_schedule_plan.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/common/vision_module/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/common/vision_module/vision_module.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/gpt/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/gpt/fine_grained_callables.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/gpt/gpt_layer_specs.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/gpt/heterogeneous/heterogeneous_layer_specs.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/gpt/moe_module_specs.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/huggingface/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/huggingface/clip_model.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/huggingface/module.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/huggingface/qwen_model.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/mamba/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/mamba/mamba_layer_specs.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/mimo/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/mimo/config/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/mimo/config/base_configs.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/mimo/model/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/mimo/model/base.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/mimo/submodules/audio.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/mimo/submodules/base.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/mimo/submodules/vision.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/multimodal/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/multimodal/context_parallel.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/multimodal/llava_model.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/multimodal/llava_spec.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/retro/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/retro/base_attention.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/retro/config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/retro/decoder_attention.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/retro/decoder_spec.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/retro/encoder_attention.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/retro/encoder_spec.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/retro/model.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/retro/utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/vision/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/vision/clip_vit_model.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/vision/multimodal_projector.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/vision/radio.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/models/vision/vit_layer_specs.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/msc_utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/nccl_allocator.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/num_microbatches_calculator.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/optimizer/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/optimizer/clip_grads.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/optimizer/cpu_offloading/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/optimizer/cpu_offloading/hybrid_optimizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/optimizer/distrib_optimizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/optimizer/grad_scaler.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/optimizer/optimizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/optimizer/optimizer_config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/optimizer_param_scheduler.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/packed_seq_params.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/parallel_state.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/pipeline_parallel/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/pipeline_parallel/bridge_communicator.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/pipeline_parallel/combined_1f1b.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/pipeline_parallel/p2p_communication.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/pipeline_parallel/schedules.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/pipeline_parallel/utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/post_training/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/post_training/modelopt/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/post_training/modelopt/gpt/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/post_training/modelopt/gpt/model_specs.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/post_training/modelopt/gpt/state_dict_hooks.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/post_training/modelopt/layers.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/post_training/modelopt/mamba/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/post_training/modelopt/mamba/model_specs.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/process_groups_config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/quantization/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/quantization/quant_config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/quantization/utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/requirements.txt +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/rerun_state_machine.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/ssm/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/ssm/mamba_context_parallel.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/ssm/mlp_layer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/ssm/triton_cache_manager.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tensor_parallel/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tensor_parallel/cross_entropy.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tensor_parallel/data.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tensor_parallel/layers.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tensor_parallel/mappings.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tensor_parallel/random.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tensor_parallel/utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/timers.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/base_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/megatron_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/libraries/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/libraries/abstract_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/libraries/bytelevel_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/libraries/chat_template.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/libraries/huggingface_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/libraries/megatron_hf_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/libraries/null_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/libraries/sentencepiece_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/libraries/tiktoken_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/models/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/models/bert_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/models/default_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/models/gpt_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/models/mamba_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/models/retro_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/models/t5_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/text_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/tokenizers/text/utils/build_tokenizer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/attention.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/cuda_graphs.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/custom_layers/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/custom_layers/transformer_engine.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/enums.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/fsdp_dtensor_checkpoint.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/heterogeneous/heterogeneous_config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/heterogeneous/linear_replacements.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/identity_op.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/mlp.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/module.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/moe/__init__.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/moe/experts.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/moe/fused_a2a.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/moe/grouped_gemm_util.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/moe/moe_layer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/moe/moe_utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/moe/shared_experts.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/moe/token_dispatcher.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/moe/upcycling_utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/multi_latent_attention.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/multi_token_prediction.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/pipeline_parallel_layer_layout.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/spec_utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/torch_layer_norm.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/torch_norm.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/transformer_block.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/transformer_config.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/transformer_layer.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/transformer/utils.py +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron_core.egg-info/dependency_links.txt +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron_core.egg-info/requires.txt +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron_core.egg-info/top_level.txt +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/pyproject.toml +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/setup.cfg +0 -0
- {megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/setup.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: megatron-core
|
|
3
|
-
Version: 0.16.0rc0.
|
|
3
|
+
Version: 0.16.0rc0.dev127802
|
|
4
4
|
Summary: Megatron Core - a library for efficient and scalable training of transformer based models
|
|
5
5
|
Author-email: NVIDIA <nemo-toolkit@nvidia.com>
|
|
6
6
|
Maintainer-email: NVIDIA <nemo-toolkit@nvidia.com>
|
|
@@ -1,11 +1,14 @@
|
|
|
1
1
|
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
|
|
2
2
|
import json
|
|
3
|
+
import logging
|
|
3
4
|
from abc import ABC, abstractmethod
|
|
4
5
|
from collections import OrderedDict
|
|
5
6
|
from typing import Any
|
|
6
7
|
|
|
7
8
|
import numpy
|
|
8
9
|
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
9
12
|
|
|
10
13
|
class MegatronLegacyTokenizer(ABC):
|
|
11
14
|
"""Abstract class for tokenizer
|
|
@@ -20,6 +23,12 @@ class MegatronLegacyTokenizer(ABC):
|
|
|
20
23
|
"""
|
|
21
24
|
|
|
22
25
|
def __init__(self, *tokenizer_paths: str, **tokenizer_options: Any):
|
|
26
|
+
# Deprecation warning
|
|
27
|
+
logger.warning(
|
|
28
|
+
"You’re using the legacy tokenizer system, which is deprecated "
|
|
29
|
+
"and will be removed in a future release. Please migrate to the new tokenizer system "
|
|
30
|
+
"(`megatron.core.tokenizers.MegatronTokenizer`)."
|
|
31
|
+
)
|
|
23
32
|
self.unique_identifiers = OrderedDict()
|
|
24
33
|
self.unique_identifiers["class"] = type(self).__name__
|
|
25
34
|
self.unique_identifiers["tokenizer_path"] = list(tokenizer_paths)
|
{megatron_core-0.16.0rc0.dev127378 → megatron_core-0.16.0rc0.dev127802}/megatron/core/fp8_utils.py
RENAMED
|
@@ -10,6 +10,12 @@ from typing import List, Optional
|
|
|
10
10
|
import torch
|
|
11
11
|
|
|
12
12
|
from megatron.core.enums import Fp8Recipe
|
|
13
|
+
from megatron.core.tensor_parallel import (
|
|
14
|
+
ColumnParallelLinear,
|
|
15
|
+
RowParallelLinear,
|
|
16
|
+
gather_from_sequence_parallel_region,
|
|
17
|
+
reduce_scatter_to_sequence_parallel_region,
|
|
18
|
+
)
|
|
13
19
|
from megatron.core.transformer.transformer_config import TransformerConfig
|
|
14
20
|
from megatron.core.utils import get_te_version, is_te_min_version
|
|
15
21
|
|
|
@@ -112,6 +118,27 @@ def get_fp8_align_size(fp8_recipe: Fp8Recipe) -> int:
|
|
|
112
118
|
return 16
|
|
113
119
|
|
|
114
120
|
|
|
121
|
+
def is_column_parallel_linear(module):
|
|
122
|
+
"""Returns whether the given module is a ColumnParallelLinear layer."""
|
|
123
|
+
if HAVE_TE and (
|
|
124
|
+
isinstance(module, TEColumnParallelLinear)
|
|
125
|
+
or isinstance(module, TELayerNormColumnParallelLinear)
|
|
126
|
+
):
|
|
127
|
+
return True
|
|
128
|
+
elif isinstance(module, ColumnParallelLinear):
|
|
129
|
+
return True
|
|
130
|
+
return False
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def is_row_parallel_linear(module):
|
|
134
|
+
"""Returns whether the given module is a RowParallelLinear layer."""
|
|
135
|
+
if HAVE_TE and isinstance(module, TERowParallelLinear):
|
|
136
|
+
return True
|
|
137
|
+
elif isinstance(module, RowParallelLinear):
|
|
138
|
+
return True
|
|
139
|
+
return False
|
|
140
|
+
|
|
141
|
+
|
|
115
142
|
"""
|
|
116
143
|
The code below abstracts the functionalities needed for implementing "--fp8-param-gather" into
|
|
117
144
|
several functions. It provides different implementations for each function based on different
|
|
@@ -587,6 +614,18 @@ if HAVE_TE:
|
|
|
587
614
|
if not FP8GlobalStateManager.is_fp8_enabled():
|
|
588
615
|
return original_forward(input_tensor, *args, **kwargs)
|
|
589
616
|
|
|
617
|
+
# With sequence parallelism we need to all-gather before padding
|
|
618
|
+
# and reduce-scatter after unpadding
|
|
619
|
+
if is_sequence_parallel := getattr(module, "sequence_parallel", False):
|
|
620
|
+
if is_column_parallel_linear(module):
|
|
621
|
+
input_tensor = gather_from_sequence_parallel_region(
|
|
622
|
+
input_tensor, group=module.tp_group
|
|
623
|
+
)
|
|
624
|
+
|
|
625
|
+
# Disable sequence parallelism on the module because we are handling the
|
|
626
|
+
# all-gather and reduce-scatter externally
|
|
627
|
+
module.sequence_parallel = False
|
|
628
|
+
|
|
590
629
|
seq_len, batch_size, hidden_size = input_tensor.shape
|
|
591
630
|
# Reshape to (S, B*H) to pad sequence dimension
|
|
592
631
|
input_2d = input_tensor.reshape(seq_len, -1)
|
|
@@ -612,6 +651,16 @@ if HAVE_TE:
|
|
|
612
651
|
unpadded_output_2d = _unpad_func(output_2d, [seq_len])
|
|
613
652
|
unpadded_output = unpadded_output_2d.reshape(seq_len, batch_size, output_hidden_size)
|
|
614
653
|
|
|
654
|
+
if is_sequence_parallel:
|
|
655
|
+
# Reduce-scatter after unpadding
|
|
656
|
+
if is_row_parallel_linear(module):
|
|
657
|
+
unpadded_output = reduce_scatter_to_sequence_parallel_region(
|
|
658
|
+
unpadded_output, group=module.tp_group
|
|
659
|
+
)
|
|
660
|
+
|
|
661
|
+
# Reset sequence parallelism flag on the module
|
|
662
|
+
module.sequence_parallel = True
|
|
663
|
+
|
|
615
664
|
if other_outputs:
|
|
616
665
|
return (unpadded_output,) + other_outputs
|
|
617
666
|
else:
|
|
@@ -9,6 +9,7 @@ import asyncio
|
|
|
9
9
|
from typing import Any, AsyncGenerator, Callable, Optional, Type, Union
|
|
10
10
|
|
|
11
11
|
from megatron.core.inference.inference_request import InferenceRequest
|
|
12
|
+
from megatron.core.utils import get_asyncio_loop
|
|
12
13
|
|
|
13
14
|
STOP_ITERATION = Exception()
|
|
14
15
|
|
|
@@ -20,12 +21,17 @@ class AsyncStream:
|
|
|
20
21
|
Adopted from https://github.com/vllm-project/vllm/blob/eb881ed006ca458b052905e33f0d16dbb428063a/vllm/v1/engine/async_stream.py # pylint: disable=line-too-long
|
|
21
22
|
"""
|
|
22
23
|
|
|
23
|
-
def __init__(
|
|
24
|
+
def __init__(
|
|
25
|
+
self,
|
|
26
|
+
request_id: int,
|
|
27
|
+
cancel: Callable[[str], None],
|
|
28
|
+
loop: Optional[asyncio.AbstractEventLoop] = None,
|
|
29
|
+
) -> None:
|
|
24
30
|
self._request_id = request_id
|
|
25
31
|
self._cancel = cancel
|
|
26
32
|
self._queue: asyncio.Queue = asyncio.Queue()
|
|
27
33
|
self._finished = False
|
|
28
|
-
self._loop =
|
|
34
|
+
self._loop = get_asyncio_loop(loop)
|
|
29
35
|
|
|
30
36
|
def put(self, item: Union[InferenceRequest, Exception]) -> None:
|
|
31
37
|
"""Adds a new value to the stream"""
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
# Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
|
|
3
|
+
import torch
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class MambaMetadata:
|
|
7
|
+
"""Manages the metadata tensors required for Mamba layers during inference."""
|
|
8
|
+
|
|
9
|
+
def __init__(self, max_requests: int):
|
|
10
|
+
"""
|
|
11
|
+
Initializes the Mamba slot allocator.
|
|
12
|
+
|
|
13
|
+
Args:
|
|
14
|
+
max_requests (int): The maximum number of concurrent requests.
|
|
15
|
+
"""
|
|
16
|
+
self.max_requests = max_requests
|
|
17
|
+
|
|
18
|
+
# Metadata for mapping requests to slots in the static Mamba state buffer
|
|
19
|
+
self.request_to_mamba_state_idx = torch.full(
|
|
20
|
+
(self.max_requests,), -1, dtype=torch.int32, device=torch.cuda.current_device()
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
# Separate mapping used only for CUDA graph compatibility
|
|
24
|
+
self.request_to_mamba_state_idx_cudagraph_only = torch.full(
|
|
25
|
+
(self.max_requests,), -1, dtype=torch.int32, device=torch.cuda.current_device()
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
# Allocator for Mamba state slots
|
|
29
|
+
self.mamba_state_free_slots = torch.arange(
|
|
30
|
+
self.max_requests, dtype=torch.int32, device=torch.cuda.current_device()
|
|
31
|
+
)
|
|
32
|
+
self.mamba_state_free_slot_count = self.max_requests
|
|
33
|
+
|
|
34
|
+
def reset(self) -> None:
|
|
35
|
+
"""
|
|
36
|
+
Resets all Mamba states and frees all allocated slots.
|
|
37
|
+
"""
|
|
38
|
+
self.request_to_mamba_state_idx.fill_(-1)
|
|
39
|
+
self.request_to_mamba_state_idx_cudagraph_only.fill_(-1)
|
|
40
|
+
|
|
41
|
+
# Re-initialize the free slot pool
|
|
42
|
+
self.mamba_state_free_slots = torch.arange(
|
|
43
|
+
self.max_requests, dtype=torch.int32, device=torch.cuda.current_device()
|
|
44
|
+
)
|
|
45
|
+
self.mamba_state_free_slot_count = self.max_requests
|
|
46
|
+
|
|
47
|
+
def reset_cudagraph_mapping(self) -> None:
|
|
48
|
+
"""
|
|
49
|
+
Resets only the CUDA graph mapping tensor.
|
|
50
|
+
"""
|
|
51
|
+
self.request_to_mamba_state_idx_cudagraph_only.fill_(-1)
|
|
52
|
+
|
|
53
|
+
def update_cudagraph_mapping(
|
|
54
|
+
self, active_mamba_indices: torch.Tensor, num_active_requests: int
|
|
55
|
+
) -> None:
|
|
56
|
+
"""
|
|
57
|
+
Updates the dedicated CUDA graph mapping tensor with the indices
|
|
58
|
+
of currently active requests.
|
|
59
|
+
|
|
60
|
+
Args:
|
|
61
|
+
active_mamba_indices (Tensor): Tensor containing the Mamba slot indices
|
|
62
|
+
for active requests.
|
|
63
|
+
num_active_requests (int): The number of active requests.
|
|
64
|
+
"""
|
|
65
|
+
self.request_to_mamba_state_idx_cudagraph_only[0:num_active_requests] = active_mamba_indices
|
|
66
|
+
|
|
67
|
+
def allocate_slot(self) -> int:
|
|
68
|
+
"""
|
|
69
|
+
Allocates a new slot for a request in the Mamba state buffers.
|
|
70
|
+
|
|
71
|
+
Returns:
|
|
72
|
+
int: The index of the allocated slot.
|
|
73
|
+
Returns None if no slots are available.
|
|
74
|
+
"""
|
|
75
|
+
if self.mamba_state_free_slot_count == 0:
|
|
76
|
+
return None
|
|
77
|
+
|
|
78
|
+
# Get a free slot
|
|
79
|
+
self.mamba_state_free_slot_count -= 1
|
|
80
|
+
mamba_idx = self.mamba_state_free_slots[self.mamba_state_free_slot_count]
|
|
81
|
+
|
|
82
|
+
return mamba_idx
|
|
83
|
+
|
|
84
|
+
def free_slots(self, request_indices: torch.Tensor) -> None:
|
|
85
|
+
"""
|
|
86
|
+
Frees the Mamba state slots associated with the given request indices.
|
|
87
|
+
|
|
88
|
+
Args:
|
|
89
|
+
request_indices (Tensor): A 1D tensor of request indices to free.
|
|
90
|
+
"""
|
|
91
|
+
# Get the Mamba state indices for finished requests
|
|
92
|
+
mamba_indices_to_free = self.request_to_mamba_state_idx[request_indices]
|
|
93
|
+
|
|
94
|
+
# Filter out any invalid indices (e.g., -1)
|
|
95
|
+
mamba_indices_to_free = mamba_indices_to_free[mamba_indices_to_free != -1]
|
|
96
|
+
num_to_free = len(mamba_indices_to_free)
|
|
97
|
+
|
|
98
|
+
if num_to_free > 0:
|
|
99
|
+
# Add the freed indices back to the free slot pool
|
|
100
|
+
start_idx = self.mamba_state_free_slot_count
|
|
101
|
+
end_idx = start_idx + num_to_free
|
|
102
|
+
self.mamba_state_free_slots[start_idx:end_idx] = mamba_indices_to_free
|
|
103
|
+
self.mamba_state_free_slot_count = end_idx
|
|
104
|
+
|
|
105
|
+
# Invalidate the Mamba state index for the finished requests
|
|
106
|
+
self.request_to_mamba_state_idx[request_indices] = -1
|