ipex-llm 2.2.0b20250106.post1__py3-none-win_amd64.whl → 2.2.0b20250108__py3-none-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ipex_llm/libs/bloom-api.dll +0 -0
- ipex_llm/libs/bloom.dll +0 -0
- ipex_llm/libs/gptneox-api.dll +0 -0
- ipex_llm/libs/gptneox.dll +0 -0
- ipex_llm/libs/libbloom_avx.dll +0 -0
- ipex_llm/libs/libbloom_vnni.dll +0 -0
- ipex_llm/libs/libgptneox_avx.dll +0 -0
- ipex_llm/libs/libgptneox_vnni.dll +0 -0
- ipex_llm/libs/libllama_avx.dll +0 -0
- ipex_llm/libs/libllama_vnni.dll +0 -0
- ipex_llm/libs/libstarcoder_avx.dll +0 -0
- ipex_llm/libs/libstarcoder_vnni.dll +0 -0
- ipex_llm/libs/llama-api.dll +0 -0
- ipex_llm/libs/llama.dll +0 -0
- ipex_llm/libs/main-bloom.exe +0 -0
- ipex_llm/libs/main-gptneox.exe +0 -0
- ipex_llm/libs/main-llama.exe +0 -0
- ipex_llm/libs/main-starcoder.exe +0 -0
- ipex_llm/libs/pipeline.dll +0 -0
- ipex_llm/libs/quantize-bloom.exe +0 -0
- ipex_llm/libs/quantize-bloom_vnni.exe +0 -0
- ipex_llm/libs/quantize-gptneox.exe +0 -0
- ipex_llm/libs/quantize-gptneox_vnni.exe +0 -0
- ipex_llm/libs/quantize-llama.exe +0 -0
- ipex_llm/libs/quantize-llama_vnni.exe +0 -0
- ipex_llm/libs/quantize-starcoder.exe +0 -0
- ipex_llm/libs/quantize-starcoder_vnni.exe +0 -0
- ipex_llm/libs/starcoder-api.dll +0 -0
- ipex_llm/libs/starcoder.dll +0 -0
- ipex_llm/transformers/convert.py +13 -11
- ipex_llm/transformers/low_bit_linear.py +1 -2
- ipex_llm/transformers/models/chatglm4v.py +1 -0
- ipex_llm/transformers/models/glm.py +3 -1
- ipex_llm/transformers/models/llama.py +1 -1
- ipex_llm/transformers/models/minicpm.py +2 -1
- ipex_llm/transformers/models/minicpmv.py +1 -0
- ipex_llm/transformers/models/utils.py +1 -1
- ipex_llm/transformers/utils.py +0 -14
- ipex_llm/transformers/xpu_ops.py +25 -19
- {ipex_llm-2.2.0b20250106.post1.dist-info → ipex_llm-2.2.0b20250108.dist-info}/METADATA +20 -20
- {ipex_llm-2.2.0b20250106.post1.dist-info → ipex_llm-2.2.0b20250108.dist-info}/RECORD +47 -47
- {ipex_llm-2.2.0b20250106.post1.data → ipex_llm-2.2.0b20250108.data}/scripts/ipex-llm-init.bat +0 -0
- {ipex_llm-2.2.0b20250106.post1.data → ipex_llm-2.2.0b20250108.data}/scripts/llm-chat.ps1 +0 -0
- {ipex_llm-2.2.0b20250106.post1.data → ipex_llm-2.2.0b20250108.data}/scripts/llm-cli.ps1 +0 -0
- {ipex_llm-2.2.0b20250106.post1.dist-info → ipex_llm-2.2.0b20250108.dist-info}/WHEEL +0 -0
- {ipex_llm-2.2.0b20250106.post1.dist-info → ipex_llm-2.2.0b20250108.dist-info}/entry_points.txt +0 -0
- {ipex_llm-2.2.0b20250106.post1.dist-info → ipex_llm-2.2.0b20250108.dist-info}/top_level.txt +0 -0
ipex_llm/libs/bloom-api.dll
CHANGED
Binary file
ipex_llm/libs/bloom.dll
CHANGED
Binary file
ipex_llm/libs/gptneox-api.dll
CHANGED
Binary file
ipex_llm/libs/gptneox.dll
CHANGED
Binary file
ipex_llm/libs/libbloom_avx.dll
CHANGED
Binary file
ipex_llm/libs/libbloom_vnni.dll
CHANGED
Binary file
ipex_llm/libs/libgptneox_avx.dll
CHANGED
Binary file
ipex_llm/libs/libgptneox_vnni.dll
CHANGED
Binary file
ipex_llm/libs/libllama_avx.dll
CHANGED
Binary file
ipex_llm/libs/libllama_vnni.dll
CHANGED
Binary file
ipex_llm/libs/libstarcoder_avx.dll
CHANGED
Binary file
ipex_llm/libs/libstarcoder_vnni.dll
CHANGED
Binary file
ipex_llm/libs/llama-api.dll
CHANGED
Binary file
ipex_llm/libs/llama.dll
CHANGED
Binary file
ipex_llm/libs/main-bloom.exe
CHANGED
Binary file
ipex_llm/libs/main-gptneox.exe
CHANGED
Binary file
ipex_llm/libs/main-llama.exe
CHANGED
Binary file
ipex_llm/libs/main-starcoder.exe
CHANGED
Binary file
ipex_llm/libs/pipeline.dll
CHANGED
Binary file
ipex_llm/libs/quantize-bloom.exe
CHANGED
Binary file
ipex_llm/libs/quantize-bloom_vnni.exe
CHANGED
Binary file
ipex_llm/libs/quantize-gptneox.exe
CHANGED
Binary file
ipex_llm/libs/quantize-gptneox_vnni.exe
CHANGED
Binary file
ipex_llm/libs/quantize-llama.exe
CHANGED
Binary file
ipex_llm/libs/quantize-llama_vnni.exe
CHANGED
Binary file
ipex_llm/libs/quantize-starcoder.exe
CHANGED
Binary file
ipex_llm/libs/quantize-starcoder_vnni.exe
CHANGED
Binary file
ipex_llm/libs/starcoder-api.dll
CHANGED
Binary file
ipex_llm/libs/starcoder.dll
CHANGED
Binary file
ipex_llm/transformers/convert.py
CHANGED
@@ -847,18 +847,9 @@ def replace_with_low_bit_linear_for_module(model, qtype, module_name=None,
                 mp_group=mp_group,
             )
             device = module.weight.data.device
-
-            if get_ipex_version() < "2.1.10+xpu":
-                new_linear._parameters['weight'] = nn.Parameter(module.weight)
-            else:
-                # only from 2.1, ipex provides matmul_bias_out
-                # so we need to transpose weight
-                new_weight = module.weight.transpose(0, 1).contiguous()
-                new_linear._parameters['weight'] = nn.Parameter(new_weight)
-                new_linear.weight_type = 2
+            new_linear._parameters['weight'] = nn.Parameter(module.weight)
             if module.bias is not None:
-                new_linear._parameters['bias'] = nn.Parameter(module.bias.data)\
-                    .to(device)
+                new_linear._parameters['bias'] = nn.Parameter(module.bias.data).to(device)
         elif qtype == ggml_tensor_qtype["bf16"]:
             module.to(torch.bfloat16)
             new_linear = BF16Linear(
@@ -1420,6 +1411,7 @@ def _optimize_post(model):
         convert_forward(model, module.GlmRMSNorm, rms_norm_forward)
         convert_forward(model, module.GlmMLP, mlp_silu_forward)
         convert_forward(model, module.GlmAttention, glm_attention_forward)
+        convert_forward(model, module.GlmSdpaAttention, glm_attention_forward)
         glm_model_forward = glm_model_forward_wrapper(module.GlmModel.forward)
         convert_forward(model, module.GlmModel, glm_model_forward)
 
@@ -1428,10 +1420,12 @@ def _optimize_post(model):
         vision_module_name = model.model.vision.__class__.__module__
         vision_module = importlib.import_module(vision_module_name)
         from transformers.models.siglip.modeling_siglip import SiglipAttention
+        from transformers.models.siglip.modeling_siglip import SiglipSdpaAttention
         from ipex_llm.transformers.models.chatglm4v import vision_model_forward
         from ipex_llm.transformers.models.minicpmv import siglip_attention_forward
         convert_forward(model, vision_module.VisionModel, vision_model_forward)
         convert_forward(model, SiglipAttention, siglip_attention_forward)
+        convert_forward(model, SiglipSdpaAttention, siglip_attention_forward)
 
     elif "mpt" in model.config.model_type:
         if model.config.architectures is not None:
@@ -1667,8 +1661,10 @@ def _optimize_post(model):
         convert_forward(model, module.Qwen2MLP, qwen2_mlp_forward)
         model.visual.get_dtype = MethodType(qwen2_vision_get_dtype, model.visual)
         convert_forward(model, module.VisionAttention, qwen2_vision_attention_forward)
+        convert_forward(model, module.VisionSdpaAttention, qwen2_vision_attention_forward)
         convert_forward(model, module.Qwen2VLModel, qwen2_vl_model_forward)
         convert_forward(model, module.Qwen2VLAttention, qwen2_vl_attention_forward)
+        convert_forward(model, module.Qwen2VLSdpaAttention, qwen2_vl_attention_forward)
     elif model.config.model_type == "aquila":
         modeling_module_name = model.__class__.__module__
         module = importlib.import_module(modeling_module_name)
@@ -1814,6 +1810,7 @@ def _optimize_post(model):
         from ipex_llm.transformers.models.starcoder2 import attention_forward
         from ipex_llm.transformers.models.starcoder2 import model_forward
         convert_forward(model, module.Starcoder2Attention, attention_forward)
+        convert_forward(model, module.Starcoder2SdpaAttention, attention_forward)
         convert_forward(model, module.Starcoder2Model, model_forward)
     elif model.config.model_type == "phi":
         # for phi-2
@@ -1829,6 +1826,7 @@ def _optimize_post(model):
         module = importlib.import_module(modeling_module_name)
         from ipex_llm.transformers.models.phi3 import attention_forward
         convert_forward(model, module.Phi3Attention, attention_forward)
+        convert_forward(model, module.Phi3SdpaAttention, attention_forward)
         from ipex_llm.transformers.models.phi3 import mlp_forward
         convert_forward(model, module.Phi3MLP, mlp_forward)
         from ipex_llm.transformers.models.common import rms_norm_forward
@@ -1872,6 +1870,8 @@ def _optimize_post(model):
                         module.StableLmAttention,
                         stablelm_attention_forward
                         )
+        if hasattr(module, "StableLmSdpaAttention"):
+            convert_forward(model, module.StableLmSdpaAttention, stablelm_attention_forward)
         convert_forward(model,
                         module.StableLmMLP,
                         mlp_silu_forward)
@@ -1886,6 +1886,7 @@ def _optimize_post(model):
         from ipex_llm.transformers.models.minicpm import minicpm_model_forward_wrapper
         from ipex_llm.transformers.models.minicpm import minicpm_decoder_layer_forward
         convert_forward(model, module.MiniCPMAttention, minicpm_attention_forward)
+        convert_forward(model, module.MiniCPMSdpaAttention, minicpm_attention_forward)
         convert_forward(model, module.MiniCPMMLP, mlp_silu_forward)
         convert_forward(model, module.MiniCPMRMSNorm, rms_norm_forward)
         convert_forward(model, module.MiniCPMDecoderLayer, minicpm_decoder_layer_forward)
@@ -1901,6 +1902,7 @@ def _optimize_post(model):
         convert_forward(model, module.MiniCPMRMSNorm, rms_norm_forward)
         convert_forward(model, module.MiniCPMMLP, mlp_silu_forward)
         convert_forward(model, module.MiniCPMAttention, minicpm3_attention_forward)
+        convert_forward(model, module.MiniCPMSdpaAttention, minicpm3_attention_forward)
         minicpm3_model_forward = minicpm3_model_forward_wrapper(module.MiniCPM3Model.forward)
         convert_forward(model, module.MiniCPM3Model, minicpm3_model_forward)
     elif model.config.model_type == "minicpmv":
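All of the _optimize_post hunks above follow one pattern: recent transformers releases route attention through separate eager `*Attention` and `*SdpaAttention` subclasses, so every optimized forward is now registered for both. A minimal sketch of what a convert_forward-style helper does (assumed shape only, not ipex-llm's exact code):

import torch

# Rebind `forward` on every submodule of the target class so that, e.g.,
# Phi3Attention and Phi3SdpaAttention can share one optimized implementation.
def convert_forward_sketch(model: torch.nn.Module, target_cls: type, new_forward):
    for module in model.modules():
        if isinstance(module, target_cls):
            # Functions are descriptors; __get__ binds new_forward to `module`.
            module.forward = new_forward.__get__(module, module.__class__)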
ipex_llm/transformers/low_bit_linear.py
CHANGED
@@ -51,8 +51,7 @@ from torch import Tensor, device, dtype, nn
 from operator import mul
 from functools import reduce
 from ipex_llm.transformers.xpu_customize_fwd import custom_fwd, custom_bwd
-from ipex_llm.transformers.utils import get_autocast_dtype, get_xpu_device_name, \
-    get_ipex_version
+from ipex_llm.transformers.utils import get_autocast_dtype, get_xpu_device_name
 from ipex_llm.transformers.convert import is_deepspeed_available, get_use_vllm
 
 T = TypeVar("T", bound="torch.nn.Module")
ipex_llm/transformers/models/chatglm4v.py
CHANGED
@@ -301,6 +301,7 @@ def patch_embedding_forward(self, images: "tensor(B, C, H, W)") -> "tensor(B, L,
 
 def merge_qkv(module: torch.nn.Module):
     merge_qkv_base(module, "SiglipAttention")
+    merge_qkv_base(module, "SiglipSdpaAttention")
 
 
 def vision_model_forward(self: torch.nn.Module, image: torch.Tensor):
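merge_qkv_base takes either a class or a class-name string; the string form covers trust_remote_code models (like ChatGLM's bundled SigLIP) whose classes cannot be imported ahead of time. The idea behind the helper, as a rough sketch under assumed attribute names (q_proj/k_proj/v_proj are not necessarily ipex-llm's), is to fuse the three projections into one Linear so a single GEMM produces Q, K and V:

import torch

def merge_qkv_sketch(attn: torch.nn.Module):
    # Concatenate the three projection matrices row-wise into one weight.
    q, k, v = attn.q_proj, attn.k_proj, attn.v_proj
    qkv = torch.nn.Linear(q.in_features,
                          q.out_features + k.out_features + v.out_features,
                          bias=q.bias is not None)
    qkv.weight.data = torch.cat([q.weight.data, k.weight.data, v.weight.data], dim=0)
    if q.bias is not None:
        qkv.bias.data = torch.cat([q.bias.data, k.bias.data, v.bias.data], dim=0)
    attn.qkv_proj = qkv  # the patched attention forward splits the output
    del attn.q_proj, attn.k_proj, attn.v_proj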
ipex_llm/transformers/models/glm.py
CHANGED
@@ -37,6 +37,7 @@ import torch
 
 from typing import Optional, Tuple
 from transformers.cache_utils import Cache
+from transformers.models.glm.modeling_glm import GlmAttention
 from transformers.models.glm.modeling_glm import apply_rotary_pos_emb
 from ipex_llm.transformers.kv import DynamicNormalCache, DynamicFp8Cache
 from ipex_llm.transformers.models.common import merge_qkv_base
@@ -46,8 +47,9 @@ from ipex_llm.transformers.models.utils import use_quantize_kv_cache
 
 
 def merge_qkv(module: torch.nn.Module):
-    merge_qkv_base(module, "GlmAttention")
+    merge_qkv_base(module, GlmAttention)
     merge_qkv_base(module, "SiglipAttention")
+    merge_qkv_base(module, "SiglipSdpaAttention")
 
 
 def split_mlp(module: torch.nn.Module):
ipex_llm/transformers/models/minicpm.py
CHANGED
@@ -51,7 +51,8 @@ from transformers.cache_utils import Cache
 
 
 def merge_qkv(module: torch.nn.Module):
-    merge_qkv_base(module, "MiniCPMAttention")
+    merge_qkv_base(module, "MiniCPMAttention")
+    merge_qkv_base(module, "MiniCPMSdpaAttention")
 
 
 def apply_residual_scale(module: torch.nn.Module):
ipex_llm/transformers/models/minicpmv.py
CHANGED
@@ -36,6 +36,7 @@ from transformers.generation.logits_process import RepetitionPenaltyLogitsProcessor
 # MiniCPM-V-2_5 and MiniCPM-V-2_6
 def merge_qkv(module: torch.nn.Module):
     merge_qkv_base(module, "SiglipAttention")
+    merge_qkv_base(module, "SiglipSdpaAttention")
     merge_qkv_base(module, "Idefics2VisionAttention")
 
 
ipex_llm/transformers/models/utils.py
CHANGED
@@ -19,7 +19,7 @@ import torch
 import warnings
 from ipex_llm.utils.common import invalidInputError
 from ipex_llm.ggml.quantize import ggml_tensor_qtype
-from ipex_llm.transformers.utils import get_ipex_version, get_xpu_device_name
+from ipex_llm.transformers.utils import get_xpu_device_name
 from ipex_llm.transformers.low_bit_linear import SYM_INT4, SYM_INT8, FP8E5, IQ2_XXS, FP4, FP8E4,\
     FP6, ASYM_INT4
 
ipex_llm/transformers/utils.py
CHANGED
@@ -154,20 +154,6 @@ def get_autocast_dtype(x):
                           f"Device {x.device} is not supported.")
 
 
-_ipex_version = None
-
-
-def get_ipex_version():
-
-    global _ipex_version
-    if _ipex_version is not None:
-        return _ipex_version
-
-    import intel_extension_for_pytorch as ipex
-    _ipex_version = ipex.__version__
-    return _ipex_version
-
-
 def get_xpu_device_name(device: torch.device):
     if device.type != "xpu":
         return device.type
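get_ipex_version() and its cache disappear here because the last caller, the version branch removed from convert.py above, is gone. Code that still needs the version can read it directly from the package (assuming intel_extension_for_pytorch is installed):

import intel_extension_for_pytorch as ipex

print(ipex.__version__)  # e.g. "2.1.10+xpu"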
ipex_llm/transformers/xpu_ops.py
CHANGED
@@ -20,9 +20,9 @@ import xe_batch
 import xe_addons
 
 
-@torch.library.register_fake("ipex_llm::forward_new")
-def _(x, weight, qtype, input_size):
-    …
+# @torch.library.register_fake("ipex_llm::forward_new")
+# def _(x, weight, qtype, input_size):
+#     return ???
 
 
 # @torch.library.register_fake("ipex_llm::dequant")
@@ -32,32 +32,38 @@ def _(x, weight, qtype, input_size):
 
 @torch.library.register_fake("ipex_llm::mlp_forward_xpu")
 def _(x, weight1, weight2, batch_size, state_size, output_size, act_type, qtype):
-    return torch.…
+    return torch.empty([batch_size, output_size],
+                       dtype=x.dtype, device=x.device)
 
 
-
-
-
+@torch.library.register_fake("ipex_llm::rwkv_linear_attention_v4")
+def _(time_decay, time_first, key, value, num_state, den_state, max_state):
+    return torch.empty_like(key)
 
 
-
-
-
+@torch.library.register_fake("ipex_llm::rwkv_linear_attention_v5")
+def _(time_decay, time_first, receptance, key, value, state):
+    bsz, n_heads, seq_len, head_dim = key.shape
+    return torch.empty([bsz, seq_len, n_heads, head_dim],
+                       dtype=key.dtype, device=key.device)
 
 
-
-
-
+@torch.library.register_fake("ipex_llm::rwkv_time_shift")
+def _(hidden, shifted, mix):
+    bsz, seq_len, hidden_size = hidden.shape
+    return torch.empty([mix.size(0), bsz, seq_len, hidden_size],
+                       dtype=hidden.dtype, device=hidden.device)
 
 
-
-
-
+@torch.library.register_fake("ipex_llm::dequantize_rows")
+def _(x, weight, qtype, state_size, output_size):
+    return torch.empty([x.size(0), x.size(1), state_size],
+                       dtype=torch.float, device=weight.device)
 
 
-@torch.library.register_fake("ipex_llm::batch_forward")
-def _(x, weight, qtype):
-    …
+# @torch.library.register_fake("ipex_llm::batch_forward")
+# def _(x, weight, qtype):
+#     return ???
 
 
 @torch.library.register_fake("ipex_llm::sdp")
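A register_fake entry gives torch.compile a meta kernel for an opaque custom op: it must produce a tensor with the right shape, dtype and device without touching real data, which is why forward_new and batch_forward (whose output sizes cannot be derived from their inputs alone) stay commented out. A self-contained illustration of the same API on PyTorch 2.4+, using an invented op name (mylib::scale_rows is not part of ipex-llm):

import torch

@torch.library.custom_op("mylib::scale_rows", mutates_args=())
def scale_rows(x: torch.Tensor, factor: float) -> torch.Tensor:
    return x * factor  # the real kernel

@torch.library.register_fake("mylib::scale_rows")
def _(x, factor):
    # Shape/dtype/device reasoning only; no data is computed here.
    return torch.empty_like(x)

compiled = torch.compile(lambda t: torch.ops.mylib.scale_rows(t, 2.0))
print(compiled(torch.ones(2, 3)))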
{ipex_llm-2.2.0b20250106.post1.dist-info → ipex_llm-2.2.0b20250108.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ipex-llm
-Version: 2.2.0b20250106.post1
+Version: 2.2.0b20250108
 Summary: Large Language Model Develop Toolkit
 Home-page: https://github.com/intel-analytics/ipex-llm
 Author: BigDL Authors
@@ -27,10 +27,10 @@ Requires-Dist: intel-openmp ; (platform_machine == "x86_64" or platform_machine
 Requires-Dist: torch ==2.1.2+cpu ; (platform_system == "Linux") and extra == 'all'
 Requires-Dist: torch ==2.1.2 ; (platform_system == "Windows") and extra == 'all'
 Provides-Extra: cpp
-Requires-Dist: bigdl-core-cpp ==2.6.… ; extra == 'cpp'
+Requires-Dist: bigdl-core-cpp ==2.6.0b20250108 ; extra == 'cpp'
 Requires-Dist: setuptools ; extra == 'cpp'
 Provides-Extra: cpp-arl
-Requires-Dist: bigdl-core-cpp ==2.6.… ; extra == 'cpp-arl'
+Requires-Dist: bigdl-core-cpp ==2.6.0b20250108 ; extra == 'cpp-arl'
 Requires-Dist: setuptools ; extra == 'cpp-arl'
 Requires-Dist: onednn-devel ==2024.1.1 ; (platform_system == "Windows") and extra == 'cpp-arl'
 Requires-Dist: onednn ==2024.1.1 ; (platform_system == "Windows") and extra == 'cpp-arl'
@@ -67,7 +67,7 @@ Requires-Dist: transformers ==4.40.0 ; extra == 'npu'
 Requires-Dist: intel-openmp ; (platform_machine == "x86_64" or platform_machine == "AMD64") and extra == 'npu'
 Requires-Dist: torch ==2.1.2+cpu ; (platform_system == "Linux") and extra == 'npu'
 Requires-Dist: torch ==2.1.2 ; (platform_system == "Windows") and extra == 'npu'
-Requires-Dist: bigdl-core-npu ==2.6.… ; (platform_system == "Windows") and extra == 'npu'
+Requires-Dist: bigdl-core-npu ==2.6.0b20250108 ; (platform_system == "Windows") and extra == 'npu'
 Provides-Extra: serving
 Requires-Dist: py-cpuinfo ; extra == 'serving'
 Requires-Dist: fschat[model_worker,webui] ==0.2.36 ; extra == 'serving'
@@ -87,9 +87,9 @@ Requires-Dist: setuptools <70.0.0 ; extra == 'xpu'
 Requires-Dist: torch ==2.1.0a0 ; extra == 'xpu'
 Requires-Dist: torchvision ==0.16.0a0 ; extra == 'xpu'
 Requires-Dist: intel-extension-for-pytorch ==2.1.10+xpu ; extra == 'xpu'
-Requires-Dist: bigdl-core-xe-21 ==2.6.… ; extra == 'xpu'
-Requires-Dist: bigdl-core-xe-batch-21 ==2.6.… ; extra == 'xpu'
-Requires-Dist: bigdl-core-xe-addons-21 ==2.6.… ; extra == 'xpu'
+Requires-Dist: bigdl-core-xe-21 ==2.6.0b20250108 ; extra == 'xpu'
+Requires-Dist: bigdl-core-xe-batch-21 ==2.6.0b20250108 ; extra == 'xpu'
+Requires-Dist: bigdl-core-xe-addons-21 ==2.6.0b20250108 ; extra == 'xpu'
 Provides-Extra: xpu-2-1
 Requires-Dist: py-cpuinfo ; extra == 'xpu-2-1'
 Requires-Dist: protobuf ; extra == 'xpu-2-1'
@@ -104,9 +104,9 @@ Requires-Dist: setuptools <70.0.0 ; extra == 'xpu-2-1'
 Requires-Dist: torch ==2.1.0a0 ; extra == 'xpu-2-1'
 Requires-Dist: torchvision ==0.16.0a0 ; extra == 'xpu-2-1'
 Requires-Dist: intel-extension-for-pytorch ==2.1.10+xpu ; extra == 'xpu-2-1'
-Requires-Dist: bigdl-core-xe-21 ==2.6.… ; extra == 'xpu-2-1'
-Requires-Dist: bigdl-core-xe-batch-21 ==2.6.… ; extra == 'xpu-2-1'
-Requires-Dist: bigdl-core-xe-addons-21 ==2.6.… ; extra == 'xpu-2-1'
+Requires-Dist: bigdl-core-xe-21 ==2.6.0b20250108 ; extra == 'xpu-2-1'
+Requires-Dist: bigdl-core-xe-batch-21 ==2.6.0b20250108 ; extra == 'xpu-2-1'
+Requires-Dist: bigdl-core-xe-addons-21 ==2.6.0b20250108 ; extra == 'xpu-2-1'
 Requires-Dist: intel-openmp ; (platform_machine == "x86_64" or platform_machine == "AMD64") and extra == 'xpu-2-1'
 Requires-Dist: dpcpp-cpp-rt ==2024.0.2 ; (platform_system == "Windows") and extra == 'xpu-2-1'
 Requires-Dist: mkl-dpcpp ==2024.0.0 ; (platform_system == "Windows") and extra == 'xpu-2-1'
@@ -124,7 +124,7 @@ Requires-Dist: setuptools ; extra == 'xpu-2-6'
 Requires-Dist: torch ==2.6.0+xpu ; extra == 'xpu-2-6'
 Requires-Dist: torchvision ==0.21.0+xpu ; extra == 'xpu-2-6'
 Requires-Dist: torchaudio ==2.6.0+xpu ; extra == 'xpu-2-6'
-Requires-Dist: bigdl-core-xe-all ==2.6.… ; extra == 'xpu-2-6'
+Requires-Dist: bigdl-core-xe-all ==2.6.0b20250108 ; extra == 'xpu-2-6'
 Requires-Dist: intel-openmp ; (platform_machine == "x86_64" or platform_machine == "AMD64") and extra == 'xpu-2-6'
 Provides-Extra: xpu-arc
 Requires-Dist: py-cpuinfo ; extra == 'xpu-arc'
@@ -137,9 +137,9 @@ Requires-Dist: tokenizers ==0.15.2 ; extra == 'xpu-arc'
 Requires-Dist: accelerate ==0.23.0 ; extra == 'xpu-arc'
 Requires-Dist: tabulate ; extra == 'xpu-arc'
 Requires-Dist: setuptools ; extra == 'xpu-arc'
-Requires-Dist: bigdl-core-xe-23 ==2.6.… ; extra == 'xpu-arc'
-Requires-Dist: bigdl-core-xe-batch-23 ==2.6.… ; extra == 'xpu-arc'
-Requires-Dist: bigdl-core-xe-addons-23 ==2.6.… ; extra == 'xpu-arc'
+Requires-Dist: bigdl-core-xe-23 ==2.6.0b20250108 ; extra == 'xpu-arc'
+Requires-Dist: bigdl-core-xe-batch-23 ==2.6.0b20250108 ; extra == 'xpu-arc'
+Requires-Dist: bigdl-core-xe-addons-23 ==2.6.0b20250108 ; extra == 'xpu-arc'
 Requires-Dist: intel-openmp ; (platform_machine == "x86_64" or platform_machine == "AMD64") and extra == 'xpu-arc'
 Requires-Dist: torch ==2.3.1+cxx11.abi ; (platform_system == "Linux") and extra == 'xpu-arc'
 Requires-Dist: torchvision ==0.18.1+cxx11.abi ; (platform_system == "Linux") and extra == 'xpu-arc'
@@ -160,9 +160,9 @@ Requires-Dist: tokenizers ==0.15.2 ; extra == 'xpu-arl'
 Requires-Dist: accelerate ==0.23.0 ; extra == 'xpu-arl'
 Requires-Dist: tabulate ; extra == 'xpu-arl'
 Requires-Dist: setuptools ; extra == 'xpu-arl'
-Requires-Dist: bigdl-core-xe-23 ==2.6.… ; extra == 'xpu-arl'
-Requires-Dist: bigdl-core-xe-batch-23 ==2.6.… ; extra == 'xpu-arl'
-Requires-Dist: bigdl-core-xe-addons-23 ==2.6.… ; extra == 'xpu-arl'
+Requires-Dist: bigdl-core-xe-23 ==2.6.0b20250108 ; extra == 'xpu-arl'
+Requires-Dist: bigdl-core-xe-batch-23 ==2.6.0b20250108 ; extra == 'xpu-arl'
+Requires-Dist: bigdl-core-xe-addons-23 ==2.6.0b20250108 ; extra == 'xpu-arl'
 Requires-Dist: intel-openmp ; (platform_machine == "x86_64" or platform_machine == "AMD64") and extra == 'xpu-arl'
 Requires-Dist: torch ==2.3.1+cxx11.abi ; (platform_system == "Linux") and extra == 'xpu-arl'
 Requires-Dist: torchvision ==0.18.1+cxx11.abi ; (platform_system == "Linux") and extra == 'xpu-arl'
@@ -183,9 +183,9 @@ Requires-Dist: tokenizers ==0.15.2 ; extra == 'xpu-lnl'
 Requires-Dist: accelerate ==0.23.0 ; extra == 'xpu-lnl'
 Requires-Dist: tabulate ; extra == 'xpu-lnl'
 Requires-Dist: setuptools ; extra == 'xpu-lnl'
-Requires-Dist: bigdl-core-xe-23 ==2.6.… ; extra == 'xpu-lnl'
-Requires-Dist: bigdl-core-xe-batch-23 ==2.6.… ; extra == 'xpu-lnl'
-Requires-Dist: bigdl-core-xe-addons-23 ==2.6.… ; extra == 'xpu-lnl'
+Requires-Dist: bigdl-core-xe-23 ==2.6.0b20250108 ; extra == 'xpu-lnl'
+Requires-Dist: bigdl-core-xe-batch-23 ==2.6.0b20250108 ; extra == 'xpu-lnl'
+Requires-Dist: bigdl-core-xe-addons-23 ==2.6.0b20250108 ; extra == 'xpu-lnl'
 Requires-Dist: intel-openmp ; (platform_machine == "x86_64" or platform_machine == "AMD64") and extra == 'xpu-lnl'
 Requires-Dist: torch ==2.3.1+cxx11.abi ; (platform_system == "Linux") and extra == 'xpu-lnl'
 Requires-Dist: torchvision ==0.18.1+cxx11.abi ; (platform_system == "Linux") and extra == 'xpu-lnl'
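Apart from the version bump, the METADATA change is a lockstep update of every bigdl-core-* pin to the matching 0b20250108 nightly. Each pin is gated by an environment marker, so one METADATA serves all platforms and extras; the installer evaluates the marker with `extra` set to whatever was requested (e.g. pip install ipex-llm[xpu-arc]). A marker can be checked by hand with the packaging library (sketch):

from packaging.markers import Marker

marker = Marker('(platform_system == "Linux") and extra == "xpu-arc"')
print(marker.evaluate({"extra": "xpu-arc"}))  # True on a Linux host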
{ipex_llm-2.2.0b20250106.post1.dist-info → ipex_llm-2.2.0b20250108.dist-info}/RECORD
CHANGED
@@ -41,35 +41,35 @@ ipex_llm/langchain/llms/transformerspipelinellm.py,sha256=vm522YPPwWxxAPVvQBtxRf
 ipex_llm/langchain/vllm/__init__.py,sha256=T-EbRT6GJ_8RCu-iLmSzcftOimXSPQf2d5X72AUAy2Y,874
 ipex_llm/langchain/vllm/vllm.py,sha256=6dxc-ZISZQrJilEa_HA827l75Dv9rcHpY_G6FdJ8BVs,7793
 ipex_llm/libs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ipex_llm/libs/bloom-api.dll,sha256=…
-ipex_llm/libs/bloom.dll,sha256=…
-ipex_llm/libs/gptneox-api.dll,sha256=…
-ipex_llm/libs/gptneox.dll,sha256=…
-ipex_llm/libs/libbloom_avx.dll,sha256=…
-ipex_llm/libs/libbloom_vnni.dll,sha256=…
-ipex_llm/libs/libgptneox_avx.dll,sha256=…
-ipex_llm/libs/libgptneox_vnni.dll,sha256=…
-ipex_llm/libs/libllama_avx.dll,sha256=…
-ipex_llm/libs/libllama_vnni.dll,sha256=…
-ipex_llm/libs/libstarcoder_avx.dll,sha256=…
-ipex_llm/libs/libstarcoder_vnni.dll,sha256=…
-ipex_llm/libs/llama-api.dll,sha256=…
-ipex_llm/libs/llama.dll,sha256=…
-ipex_llm/libs/main-bloom.exe,sha256=…
-ipex_llm/libs/main-gptneox.exe,sha256=…
-ipex_llm/libs/main-llama.exe,sha256=…
-ipex_llm/libs/main-starcoder.exe,sha256=…
-ipex_llm/libs/pipeline.dll,sha256=…
-ipex_llm/libs/quantize-bloom.exe,sha256=…
-ipex_llm/libs/quantize-bloom_vnni.exe,sha256=…
-ipex_llm/libs/quantize-gptneox.exe,sha256=…
-ipex_llm/libs/quantize-gptneox_vnni.exe,sha256=…
-ipex_llm/libs/quantize-llama.exe,sha256=…
-ipex_llm/libs/quantize-llama_vnni.exe,sha256=…
-ipex_llm/libs/quantize-starcoder.exe,sha256=…
-ipex_llm/libs/quantize-starcoder_vnni.exe,sha256=…
-ipex_llm/libs/starcoder-api.dll,sha256=…
-ipex_llm/libs/starcoder.dll,sha256=…
+ipex_llm/libs/bloom-api.dll,sha256=t7Nyzwaw_n5nvLlhgJiwSqNg1c7YBfenefXc183BRyk,36352
+ipex_llm/libs/bloom.dll,sha256=ovvFDBSr66rJ8qI0k3q80OhM_L_RdhkdFIyMdhuNN60,506880
+ipex_llm/libs/gptneox-api.dll,sha256=G6BNZuxVROD4tACkc6wKvB8E0Nxxih5EMOtN8DQ2sdc,24576
+ipex_llm/libs/gptneox.dll,sha256=0jy7OHIcGS0R73wJQWN9B4gPyzep3hFRA9In9GNDUJE,567296
+ipex_llm/libs/libbloom_avx.dll,sha256=DlJmmwkqg8pfKwml32BcLF6XpXScL-YN92DOVFV_miI,535040
+ipex_llm/libs/libbloom_vnni.dll,sha256=rdLpFY-J6HLoed0qePhUTX-XJ0YVNX_R4sBZP95MOWI,506880
+ipex_llm/libs/libgptneox_avx.dll,sha256=-Jl7ni0hPESfPU9lA8ce_NkGkwpm0bSm7XFJNwfLMRc,595456
+ipex_llm/libs/libgptneox_vnni.dll,sha256=1Z7j6wAopq1o8uuTK7M6MWiHAHV_hWk-t--ZY-pWRIQ,567808
+ipex_llm/libs/libllama_avx.dll,sha256=mri2vXRQsWDaRXxVF3A6p6xZhv8MIpL13bmYcc210yk,589824
+ipex_llm/libs/libllama_vnni.dll,sha256=XBYn0-ITZ5HsbzS4bRjzepYGAm5-beVdO1euHjSCpjA,561664
+ipex_llm/libs/libstarcoder_avx.dll,sha256=-BCtiz3zd3b6MKD-CwHtzFsPt18aixsl92kHFIqxips,626688
+ipex_llm/libs/libstarcoder_vnni.dll,sha256=tXI6hrDAt5a3mrdtFrhdwl0HSc7pknd_LwpsIYzk4gY,598528
+ipex_llm/libs/llama-api.dll,sha256=_9ho1AFFsg44Zw79bQ7bzo55tgG35wLHUr_EPEBIgwA,25600
+ipex_llm/libs/llama.dll,sha256=53Ox4JEoucwN7GZwfp5DcQEQ_sGFFG5Ltf8yf7HRmjo,561152
+ipex_llm/libs/main-bloom.exe,sha256=KQWdkGczuRmIzaXw_706jkh2Y_SdVxEBpQyzeaBG_uA,103424
+ipex_llm/libs/main-gptneox.exe,sha256=oLK8czzwTAtSI6oC6fS5CndD6tMHsMZzJe1p-zOxN0Y,98816
+ipex_llm/libs/main-llama.exe,sha256=4keLFggPPNguBG1sbTwLmM_DExpiIwlKdXdF3IcQYKI,99840
+ipex_llm/libs/main-starcoder.exe,sha256=Re5Tv8QjdmWSEzQb5m72DJnRAW6sLKxDBFLkvcU33Eg,157696
+ipex_llm/libs/pipeline.dll,sha256=8MYtxJgWAt_meXvNz6W14uu-_w7o-DDBTkRajcToS9A,72704
+ipex_llm/libs/quantize-bloom.exe,sha256=vW97INEmsF-Krnqu7yVYMcNGrBcyQrmHSe9tAqQGGVg,126464
+ipex_llm/libs/quantize-bloom_vnni.exe,sha256=jyBpPHZa5qL757oigXZiGDTFpOtAYm8c8DS-DNXvlew,127488
+ipex_llm/libs/quantize-gptneox.exe,sha256=WR-2syRXF0yTgVmGkzwfXBWj7F9eSKVRLqaF05cGJco,104448
+ipex_llm/libs/quantize-gptneox_vnni.exe,sha256=TwxQOINAsmkLbjhRpGxr_vyRFDtW_qXkWmfXQX75O9o,104960
+ipex_llm/libs/quantize-llama.exe,sha256=__XzuraAfAofURLra-jjo4XEgGFmfqZFEJoiwfNaQEo,109568
+ipex_llm/libs/quantize-llama_vnni.exe,sha256=MeWkIB0C5nJIM3czBltbFjhKeP3-THdR2Yezze6q1R4,110592
+ipex_llm/libs/quantize-starcoder.exe,sha256=hnVDigU8QyKDplAaZwe1BAraTHkBw-seGLs8qjHHRDc,127488
+ipex_llm/libs/quantize-starcoder_vnni.exe,sha256=bcYLxXFJ_CHjXbbzCm5jpPQO13IDZePKoZf9vD6CUCc,128512
+ipex_llm/libs/starcoder-api.dll,sha256=fec8YDBzBoRQrnNbNsZZ7TljcBAUyk7fx19hpSiaxfY,21504
+ipex_llm/libs/starcoder.dll,sha256=zlVWa_5DzHEfCMGCIIOfy6POODnAcKFv1jp9BlcZgw4,598016
 ipex_llm/llamaindex/__init__.py,sha256=T-EbRT6GJ_8RCu-iLmSzcftOimXSPQf2d5X72AUAy2Y,874
 ipex_llm/llamaindex/llms/__init__.py,sha256=KP1lEdGqDuxPoxL1ZSH25Pm2kKMPJBWUTLR0ckSLMIU,1139
 ipex_llm/llamaindex/llms/bigdlllm.py,sha256=FQBzq1KOjfc6uofTXAha3O7TqpJkNfOFepXQmOVlbnI,26314
@@ -87,14 +87,14 @@ ipex_llm/serving/fastchat/tgi_api_protocol.py,sha256=brT3k3-V0NJrU4fRqUwWjC0O3iO
 ipex_llm/serving/fastchat/tgi_api_server.py,sha256=agNTAEiZPSuj3dEdIdYKwkoY0cXOUDX06DiM9VP2knQ,24418
 ipex_llm/serving/fastchat/vllm_worker.py,sha256=ZLz2Q9GxJO6r_LOiP6epgCRjBGk-K4EB1SNEWSJp5DA,11091
 ipex_llm/transformers/__init__.py,sha256=l4KkMkLe-pRC7b_kj6LCfeifgE-Uo33_Av_FwN9HnFA,1074
-ipex_llm/transformers/convert.py,sha256=…
+ipex_llm/transformers/convert.py,sha256=APf2uHMgEeiAhsKm9dPgPWlyO0ADq2yHtZgovv9oczU,99101
 ipex_llm/transformers/convert_ipex.py,sha256=iKXo0n8fVFTOA2fNYYrByMFK0dovL-kLd2sVDk88AlQ,14334
 ipex_llm/transformers/embedding.py,sha256=bdgk59DvD4ZZyxRzewXOR7g56nThgO6uhIwk8QL7f-s,9299
 ipex_llm/transformers/kv.py,sha256=k4TU18LlA-Sbq9WNNQnfuzu3RSFBwFhmaV3BcGN5bAo,19191
 ipex_llm/transformers/lisa.py,sha256=F5WxbtXQ7RdKulj83h_2DnEIgKiKGZf7zvOmg6QBl2s,3289
 ipex_llm/transformers/loader.py,sha256=AwjV5RpI2t2bedlv7ZhLm8cfd-QJZm5hny-XyjIvdnk,6876
 ipex_llm/transformers/lookup.py,sha256=b6OlZ9OV10R9qeWw8mVryVpDxszkjwLkldvi7GPMJY8,19614
-ipex_llm/transformers/low_bit_linear.py,sha256=…
+ipex_llm/transformers/low_bit_linear.py,sha256=Obdd08D9dvuroS_6XWo4DXO_DrNRsbAqjz-mQAHmfxY,40845
 ipex_llm/transformers/model.py,sha256=fj7LBjrWtWwDJJYXnWiXsLGS4ayqqHfnh0p51dSDssE,40908
 ipex_llm/transformers/modelling_bigdl.py,sha256=7JpNVMuyq_OmtNUaMFMXdxPWZp2q0QHC02QeA-VTPOw,6709
 ipex_llm/transformers/npu_model.py,sha256=YW02GeVz-9ZGqxAeSz0AOvciS-17bo9eK5ZOBrICwSQ,39508
@@ -105,9 +105,9 @@ ipex_llm/transformers/relora.py,sha256=-dYzUV0P-IhO2jFdnzN9-v_sFzJpRj3ZwN9eCJzOo
 ipex_llm/transformers/speculative.py,sha256=0XNLgc9dGswJHVPrXo4iM7pPxkWwfFfJMECcivJSnIc,63368
 ipex_llm/transformers/streamer.py,sha256=RrVlLblzCOtABRUpaMXAyaMnCGgLUtAi_YesLumRbww,4842
 ipex_llm/transformers/training_patch.py,sha256=oxMkUtqyvqJiprw6dE3skkYfD1HOmUlH9N0hBkbn0G0,10799
-ipex_llm/transformers/utils.py,sha256=…
+ipex_llm/transformers/utils.py,sha256=9IRSqfDokf8QFW9T47R--i3RL1E-_O31HO7IJf7H6pg,16748
 ipex_llm/transformers/xpu_customize_fwd.py,sha256=wFpIhs5F6tkNs8gBOrLxWdhLzO3EDHovVkERPIAoAvg,7611
-ipex_llm/transformers/xpu_ops.py,sha256=…
+ipex_llm/transformers/xpu_ops.py,sha256=vw4cUwvqUqDr45d-WMIkCpM2oiHfjN-VjF0bjMSF4kY,4830
 ipex_llm/transformers/awq/__init__.py,sha256=Du5gu3-eeAkeDO_dEMBTzrDBA66DSN3uL3-rn8WGXQw,875
 ipex_llm/transformers/awq/act.py,sha256=YwomJzOOKwkKtzGrm4L4kwBstBLO1Z8SK4CKi8PSYVQ,2172
 ipex_llm/transformers/awq/awq.py,sha256=cGyRQJWwAEJtOtdSbsBoQ33KX_Ie0pv5OJHC0ACEELE,8861
@@ -143,21 +143,21 @@ ipex_llm/transformers/models/bloom.py,sha256=PxfzyYT-nFn3K5rZhTQjmcEjUUzAhUFzxIN
 ipex_llm/transformers/models/chatglm.py,sha256=UHai1t2AUtGmF765_eHF8LUMVQzp_oCBx8TJB21WrHk,12597
 ipex_llm/transformers/models/chatglm2.py,sha256=SGCABJdYQLW0zDarEoWrEQLuWlbq9iQhYU8ZeR1-ptQ,15957
 ipex_llm/transformers/models/chatglm4.py,sha256=AAhAFFDDas5DBQPfh2Mwl7a2v7taKf6xphoeeNNFaBI,16593
-ipex_llm/transformers/models/chatglm4v.py,sha256=…
+ipex_llm/transformers/models/chatglm4v.py,sha256=tyjDDyF6FEgLAT24EG3i4-auxZvkwmeLIy0Hds4K5Yo,14105
 ipex_llm/transformers/models/common.py,sha256=4obQMGF02FCiXrHnFle9Fsx7C33b1FDt37qJJ4YgxRc,11578
 ipex_llm/transformers/models/decilm.py,sha256=P-PBuDPf07GvKggLwJx_wPwIn6esN3rX8ai2JxRuZmE,5246
 ipex_llm/transformers/models/gemma.py,sha256=_E3Yw8Y45xyNVeLqyVKcpr8kjuICtETeL82cJ-bWJuU,9424
 ipex_llm/transformers/models/gemma2.py,sha256=2WZuv-FLzJyTJFaYxOuzJt47QE64M0lHnzAiO5T6ozI,8049
-ipex_llm/transformers/models/glm.py,sha256=…
+ipex_llm/transformers/models/glm.py,sha256=PE43uks9lojndBBHFVXK1VWisHhbY-kuCmhq0CwmD4s,7204
 ipex_llm/transformers/models/gpt2.py,sha256=YSaNgK1uLCFDuIFqnKO0Mi-AsOZsYav-7pNf_NpKGdM,3445
 ipex_llm/transformers/models/gptbigcode.py,sha256=cP1_qGWoa43R2WacAMblShjku4QupcCZiLaPPAoOUs4,9101
 ipex_llm/transformers/models/gptneox.py,sha256=loRh1x_5S6BCeOr_s5xr-N_1SQHL3Y5IiUBAEyoMUqQ,6172
 ipex_llm/transformers/models/internlm.py,sha256=ZbIUMDwNRcrCeduXfbA_uq1AUEWawEt6CJRvQl3LkAg,17832
 ipex_llm/transformers/models/internvl.py,sha256=Vx0vENIEQLX2M6P398mw5TOhpks0U8xf8rtRQvy94go,8154
-ipex_llm/transformers/models/llama.py,sha256=…
-ipex_llm/transformers/models/minicpm.py,sha256=…
+ipex_llm/transformers/models/llama.py,sha256=n1JG1uElMB8t3Hpae94S6YTO_5q2N5BUAhb7mncvA6E,8560
+ipex_llm/transformers/models/minicpm.py,sha256=_eYBYafQxnuqKo9ENNkua73KU5goU2z-dkaLlF5uHnA,10147
 ipex_llm/transformers/models/minicpm3.py,sha256=FhNS6mi2rg7dSdF_QQGrao3g9EC6XLn1MTKd-kd0wF0,9191
-ipex_llm/transformers/models/minicpmv.py,sha256=…
+ipex_llm/transformers/models/minicpmv.py,sha256=PP05b5iTnrMpiseCn8iJcxKJDnfq7WqXp9Mrch0kKZ0,9876
 ipex_llm/transformers/models/mistral.py,sha256=rE1GWQxXvF6aG-buPHDR13zeynDZEDIubPF4PiVhZbM,7451
 ipex_llm/transformers/models/mllama.py,sha256=ogpLmmN_OwcFUyjYB-oDC-l3uw8urFvUEc5edkjWHAk,10939
 ipex_llm/transformers/models/mpt.py,sha256=z02NwHogJZVh-Mk4sYoIzR90SFIKhoNN_-ifsD907TQ,9540
@@ -174,7 +174,7 @@ ipex_llm/transformers/models/rwkv5.py,sha256=OkRNj1pCAZg1z2Fw-I0DEnxLEdZyPeRSQ6m
 ipex_llm/transformers/models/sd.py,sha256=VvHV5u-0k2MgHu3NL9113hPj7DgfxqctuKzEEeNfRDU,5981
 ipex_llm/transformers/models/stablelm.py,sha256=RGQCYuQhYqtZ1j3RZkYi0_QvCRnUgUIPYxfBcLnElzg,6885
 ipex_llm/transformers/models/starcoder2.py,sha256=4P3mhRYf2Kreb1ESjrQGfy1puLMmZXgV35zf-Tksvao,6462
-ipex_llm/transformers/models/utils.py,sha256=…
+ipex_llm/transformers/models/utils.py,sha256=isBCMMQP3j_opmda9XzD_dPk1ejvEXTztggbu1yIMSc,15439
 ipex_llm/transformers/models/yuan.py,sha256=1jRPebwAK2ENbyYokOmb4LSVo-szucWiygz9zTv-scs,7656
 ipex_llm/transformers/npu_models/__init__.py,sha256=ulEUGLjaP48LCrVeury3UxLjXxKzRi0UpSG4bYu-7f8,585
 ipex_llm/transformers/npu_models/baichuan.py,sha256=fJtd7fBrttySghRUgfZTAdxLjsSNC-XL08HISsXigLE,4685
@@ -243,11 +243,11 @@ ipex_llm/vllm/xpu/engine/__init__.py,sha256=pY_CpyuZd72fr6s32ejeKHKFW0K4vUU2rzZj
 ipex_llm/vllm/xpu/engine/engine.py,sha256=k4-D27WS_Gk3mA--w3HWAjPjb4Aiu043MVPi0ZoAUBc,5984
 ipex_llm/vllm/xpu/entrypoints/openai/api_server.py,sha256=GshTZFB8e4PWvqckfbmTOU6b0oLkNn7A-vzLuG9--j8,21544
 ipex_llm/vllm/xpu/entrypoints/openai/cli_args.py,sha256=2rENA2ucynMaIjiZBEh2ez1o5vR32GaP514t39CD7KM,8676
-ipex_llm-2.2.0b20250106.post1.data/scripts/ipex-llm-init.bat,sha256=…
-ipex_llm-2.2.0b20250106.post1.data/scripts/llm-chat.ps1,sha256=…
-ipex_llm-2.2.0b20250106.post1.data/scripts/llm-cli.ps1,sha256=…
-ipex_llm-2.2.0b20250106.post1.dist-info/METADATA,sha256=…
-ipex_llm-2.2.0b20250106.post1.dist-info/WHEEL,sha256=…
-ipex_llm-2.2.0b20250106.post1.dist-info/entry_points.txt,sha256=…
-ipex_llm-2.2.0b20250106.post1.dist-info/top_level.txt,sha256=…
-ipex_llm-2.2.0b20250106.post1.dist-info/RECORD,,
+ipex_llm-2.2.0b20250108.data/scripts/ipex-llm-init.bat,sha256=HPtCYuDYwEatq7dAwOvdfVcHYCpAVdbj75K1qh0vQek,2578
+ipex_llm-2.2.0b20250108.data/scripts/llm-chat.ps1,sha256=6qrs-hGVAV8IKh7Jx8nq_XrnZcjd7qGU5wndArM7Yag,2769
+ipex_llm-2.2.0b20250108.data/scripts/llm-cli.ps1,sha256=3qBtTLs_EjYDnM8YyCpJhzLnGCKTEGssu9UNqfkjVXs,3009
+ipex_llm-2.2.0b20250108.dist-info/METADATA,sha256=NJp_uuPOJe8b5UQ8ASJbfzen2BGoc2DEM1ZInzr0X9E,12705
+ipex_llm-2.2.0b20250108.dist-info/WHEEL,sha256=6iYPr8vTHsyDK75jr9X0V3I9wPSVmtwr_8fdATBciGk,98
+ipex_llm-2.2.0b20250108.dist-info/entry_points.txt,sha256=TiUyBB2MRmfF3ko-pyAEzqeBCRnyhu27bNOAsWPp3e8,61
+ipex_llm-2.2.0b20250108.dist-info/top_level.txt,sha256=CGCMHM-SyqUabU4h8RqJ2KTYckQUO3LvIWwmUQ6Qbzw,9
+ipex_llm-2.2.0b20250108.dist-info/RECORD,,
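Each RECORD row has the form path,sha256=<digest>,<size>, where the digest is the urlsafe base64 SHA-256 of the file with '=' padding stripped (PEP 376 / the wheel spec). That is why every rebuilt DLL above gets a new hash even when its size is unchanged. A sketch of computing one entry:

import base64
import hashlib

def record_entry(path: str) -> str:
    data = open(path, "rb").read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

# e.g. record_entry("ipex_llm/libs/bloom-api.dll")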
{ipex_llm-2.2.0b20250106.post1.data → ipex_llm-2.2.0b20250108.data}/scripts/ipex-llm-init.bat
RENAMED
File without changes
{ipex_llm-2.2.0b20250106.post1.data → ipex_llm-2.2.0b20250108.data}/scripts/llm-chat.ps1
RENAMED
File without changes
{ipex_llm-2.2.0b20250106.post1.data → ipex_llm-2.2.0b20250108.data}/scripts/llm-cli.ps1
RENAMED
File without changes
{ipex_llm-2.2.0b20250106.post1.dist-info → ipex_llm-2.2.0b20250108.dist-info}/WHEEL
RENAMED
File without changes
{ipex_llm-2.2.0b20250106.post1.dist-info → ipex_llm-2.2.0b20250108.dist-info}/entry_points.txt
RENAMED
File without changes
{ipex_llm-2.2.0b20250106.post1.dist-info → ipex_llm-2.2.0b20250108.dist-info}/top_level.txt
RENAMED
File without changes