lalamo 0.2.5__tar.gz → 0.2.7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lalamo-0.2.5 → lalamo-0.2.7}/PKG-INFO +1 -1
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/__init__.py +1 -1
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/model_specs/common.py +3 -2
- lalamo-0.2.7/lalamo/modules/torch_interop.py +29 -0
- lalamo-0.2.7/lalamo/utils.py +27 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo.egg-info/PKG-INFO +1 -1
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo.egg-info/SOURCES.txt +1 -0
- lalamo-0.2.5/lalamo/utils.py +0 -55
- {lalamo-0.2.5 → lalamo-0.2.7}/LICENSE +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/README.md +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/common.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/language_model.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/main.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/__init__.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/common.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/configs/__init__.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/configs/common.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/configs/executorch.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/configs/huggingface/__init__.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/configs/huggingface/common.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/configs/huggingface/gemma2.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/configs/huggingface/gemma3.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/configs/huggingface/llama.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/configs/huggingface/mistral.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/configs/huggingface/qwen2.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/configs/huggingface/qwen3.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/loaders/__init__.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/loaders/common.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/loaders/executorch.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/loaders/huggingface.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/model_specs/__init__.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/model_specs/deepseek.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/model_specs/gemma.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/model_specs/huggingface.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/model_specs/llama.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/model_specs/mistral.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/model_specs/pleias.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/model_specs/polaris.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/model_specs/qwen.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/model_specs/reka.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/modules/__init__.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/modules/activations.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/modules/attention.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/modules/common.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/modules/decoder.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/modules/decoder_layer.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/modules/embedding.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/modules/kv_cache.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/modules/linear.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/modules/mlp.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/modules/normalization.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/modules/rope.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/modules/utils.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo/quantization.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo.egg-info/dependency_links.txt +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo.egg-info/entry_points.txt +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo.egg-info/requires.txt +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/lalamo.egg-info/top_level.txt +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/pyproject.toml +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/setup.cfg +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/tests/test_generation.py +0 -0
- {lalamo-0.2.5 → lalamo-0.2.7}/tests/test_huggingface_models.py +0 -0
{lalamo-0.2.5 → lalamo-0.2.7}/lalamo/model_import/model_specs/common.py

@@ -8,7 +8,6 @@ from safetensors.flax import load_file as load_safetensors

 from lalamo.model_import.configs import ForeignConfig
 from lalamo.quantization import QuantizationMode
-from lalamo.utils import torch_to_jax

 __all__ = [
     "HUGGINFACE_GENERATION_CONFIG_FILE",
@@ -38,6 +37,8 @@ class WeightsType(Enum):

         import torch

+        from lalamo.modules.torch_interop import torch_to_jax
+
         torch_weights = torch.load(filename, map_location="cpu", weights_only=True)
         return {k: cast_if_float(torch_to_jax(v), float_dtype) for k, v in torch_weights.items()}

@@ -95,7 +96,7 @@ def awq_model_spec(
     )


-def build_quantized_models(model_specs: list[ModelSpec]):
+def build_quantized_models(model_specs: list[ModelSpec]) -> list[ModelSpec]:
     quantization_compatible_repos: list[str] = [
         "Qwen/Qwen2.5-3B-Instruct",
         "Qwen/Qwen2.5-7B-Instruct",
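The second hunk above moves the torch_to_jax import from module level into the same local scope that already imports torch, so importing the model-spec module no longer requires the converter (or torch) unless PyTorch weights are actually loaded. A minimal sketch of the resulting deferred-import pattern; the enclosing function name and signature here are assumptions for illustration, not taken from the package:

# Sketch only: deferred-import pattern illustrated by the hunk above.
# The function name and arguments are hypothetical.
def _load_torch_checkpoint(filename: str) -> dict:
    import torch  # heavy dependency, only needed for PyTorch checkpoints

    from lalamo.modules.torch_interop import torch_to_jax

    torch_weights = torch.load(filename, map_location="cpu", weights_only=True)
    return {name: torch_to_jax(tensor) for name, tensor in torch_weights.items()}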
lalamo-0.2.7/lalamo/modules/torch_interop.py

@@ -0,0 +1,29 @@
+import jax.numpy as jnp
+import torch
+from jaxtyping import Array
+
+__all__ = ["jax_to_torch", "torch_to_jax"]
+
+
+@torch.no_grad()
+def _torch_to_jax_bfloat16(tensor: torch.Tensor) -> Array:
+    if tensor.dtype != torch.bfloat16:
+        raise ValueError("Trying to convert non-bfloat16 tensor to bfloat16")
+    intermediate_tensor = tensor.view(torch.uint16)
+    return jnp.array(intermediate_tensor).view("bfloat16")
+
+
+def torch_to_jax(array: torch.Tensor) -> Array:
+    array = array.detach().cpu()
+    if array.dtype == torch.bfloat16:
+        return _torch_to_jax_bfloat16(array)
+    return jnp.array(array.numpy())
+
+
+def jax_to_torch(array: Array) -> torch.Tensor:
+    from torch.utils import dlpack as _dlpack
+
+    if array.dtype == jnp.bfloat16:
+        intermediate_array = array.view(jnp.uint16)
+        return _dlpack.from_dlpack(intermediate_array).view(torch.bfloat16)
+    return _dlpack.from_dlpack(array)
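torch_to_jax routes bfloat16 tensors through a uint16 view (jnp.array cannot ingest torch.bfloat16 directly), while jax_to_torch goes through DLPack, again via a uint16 view for bfloat16. A usage sketch of a bfloat16 round trip, assuming torch and JAX are installed alongside lalamo 0.2.7:

# Round-trip sketch for the new torch_interop helpers.
import torch
import jax.numpy as jnp
from lalamo.modules.torch_interop import jax_to_torch, torch_to_jax

t = torch.randn(2, 3, dtype=torch.bfloat16)

a = torch_to_jax(t)                  # bfloat16 path: view as uint16, re-view as bfloat16 in JAX
assert a.dtype == jnp.bfloat16 and a.shape == (2, 3)

t_back = jax_to_torch(a)             # DLPack import back into torch, bfloat16 restored via view
assert t_back.dtype == torch.bfloat16
assert torch.equal(t_back.cpu(), t)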
lalamo-0.2.7/lalamo/utils.py

@@ -0,0 +1,27 @@
+import einops
+import jax.numpy as jnp
+from jaxtyping import Array
+
+__all__ = ["jax_uint4_to_packed_uint8"]
+
+
+def jax_uint4_to_packed_uint8(array: Array) -> Array:
+    if array.dtype != jnp.uint4:
+        raise ValueError(f"Input array must have dtype jnp.uint4, but got {array.dtype}")
+
+    if not array.shape:
+        raise ValueError("Input array cannot be a scalar and must have at least one dimension.")
+
+    *_, last_dim = array.shape
+    if last_dim % 2 != 0:
+        raise ValueError(f"The last dimension of the input array must be even, but got shape {array.shape}")
+
+    low_nibbles, high_nibbles = einops.rearrange(
+        array.astype(jnp.uint8),
+        "... (dim_half two) -> two ... dim_half",
+        two=2,
+    )
+
+    packed = (high_nibbles << 4) | low_nibbles
+
+    return packed.astype(jnp.uint8)
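jax_uint4_to_packed_uint8 packs consecutive pairs along the last axis, with the even-indexed element in the low nibble and the odd-indexed element in the high nibble, halving the last dimension. A small worked sketch, assuming a JAX version new enough to expose jnp.uint4:

# Worked sketch: two uint4 values per output byte.
import jax.numpy as jnp
from lalamo.utils import jax_uint4_to_packed_uint8

nibbles = jnp.array([1, 2, 3, 4], dtype=jnp.uint4)    # shape (4,)
packed = jax_uint4_to_packed_uint8(nibbles)           # shape (2,)

# pair (1, 2) -> (2 << 4) | 1 == 0x21; pair (3, 4) -> (4 << 4) | 3 == 0x43
assert packed.dtype == jnp.uint8
assert packed.tolist() == [0x21, 0x43]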
lalamo-0.2.5/lalamo/utils.py
DELETED
@@ -1,55 +0,0 @@
-import einops
-import jax.numpy as jnp
-import torch.utils.dlpack
-from jaxtyping import Array
-
-__all__ = [
-    "jax_to_torch",
-    "jax_uint4_to_packed_uint8",
-    "torch_to_jax",
-]
-
-
-@torch.no_grad()
-def _torch_to_jax_bfloat16(tensor: torch.Tensor) -> Array:
-    # Credit: https://github.com/jax-ml/ml_dtypes/issues/81#issuecomment-2399636232
-    if tensor.dtype != torch.bfloat16:
-        raise ValueError("Trying to convert non-bfloat16 tensor to bfloat16")
-    intermediate_tensor = tensor.view(torch.uint16)
-    return jnp.array(intermediate_tensor).view("bfloat16")
-
-
-def torch_to_jax(array: torch.Tensor) -> Array:
-    array = array.detach().cpu()
-    if array.dtype == torch.bfloat16:
-        return _torch_to_jax_bfloat16(array)
-    return jnp.array(array.numpy())
-
-
-def jax_to_torch(array: Array) -> torch.Tensor:
-    if array.dtype == jnp.bfloat16:
-        intermediate_array = array.view(jnp.uint16)
-        return torch.utils.dlpack.from_dlpack(intermediate_array).view(torch.bfloat16)
-    return torch.utils.dlpack.from_dlpack(array)
-
-
-def jax_uint4_to_packed_uint8(array: Array) -> Array:
-    if array.dtype != jnp.uint4:
-        raise ValueError(f"Input array must have dtype jnp.uint4, but got {array.dtype}")
-
-    if not array.shape:
-        raise ValueError("Input array cannot be a scalar and must have at least one dimension.")
-
-    *_, last_dim = array.shape
-    if last_dim % 2 != 0:
-        raise ValueError(f"The last dimension of the input array must be even, but got shape {array.shape}")
-
-    low_nibbles, high_nibbles = einops.rearrange(
-        array.astype(jnp.uint8),
-        "... (dim_half two) -> two ... dim_half",
-        two=2,
-    )
-
-    packed = (high_nibbles << 4) | low_nibbles
-
-    return packed.astype(jnp.uint8)