ipex-llm 2.2.0b20250326__py3-none-win_amd64.whl → 2.2.0b20250328__py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. ipex_llm/libs/bloom-api.dll +0 -0
  2. ipex_llm/libs/bloom.dll +0 -0
  3. ipex_llm/libs/gptneox-api.dll +0 -0
  4. ipex_llm/libs/gptneox.dll +0 -0
  5. ipex_llm/libs/libbloom_avx.dll +0 -0
  6. ipex_llm/libs/libbloom_vnni.dll +0 -0
  7. ipex_llm/libs/libgptneox_avx.dll +0 -0
  8. ipex_llm/libs/libgptneox_vnni.dll +0 -0
  9. ipex_llm/libs/libllama_avx.dll +0 -0
  10. ipex_llm/libs/libllama_vnni.dll +0 -0
  11. ipex_llm/libs/libstarcoder_avx.dll +0 -0
  12. ipex_llm/libs/libstarcoder_vnni.dll +0 -0
  13. ipex_llm/libs/llama-api.dll +0 -0
  14. ipex_llm/libs/llama.dll +0 -0
  15. ipex_llm/libs/main-bloom.exe +0 -0
  16. ipex_llm/libs/main-gptneox.exe +0 -0
  17. ipex_llm/libs/main-llama.exe +0 -0
  18. ipex_llm/libs/main-starcoder.exe +0 -0
  19. ipex_llm/libs/pipeline.dll +0 -0
  20. ipex_llm/libs/quantize-bloom.exe +0 -0
  21. ipex_llm/libs/quantize-bloom_vnni.exe +0 -0
  22. ipex_llm/libs/quantize-gptneox.exe +0 -0
  23. ipex_llm/libs/quantize-gptneox_vnni.exe +0 -0
  24. ipex_llm/libs/quantize-llama.exe +0 -0
  25. ipex_llm/libs/quantize-llama_vnni.exe +0 -0
  26. ipex_llm/libs/quantize-starcoder.exe +0 -0
  27. ipex_llm/libs/quantize-starcoder_vnni.exe +0 -0
  28. ipex_llm/libs/starcoder-api.dll +0 -0
  29. ipex_llm/libs/starcoder.dll +0 -0
  30. ipex_llm/transformers/convert.py +34 -0
  31. ipex_llm/transformers/models/qwen2_5_omni.py +286 -0
  32. {ipex_llm-2.2.0b20250326.dist-info → ipex_llm-2.2.0b20250328.dist-info}/METADATA +20 -20
  33. {ipex_llm-2.2.0b20250326.dist-info → ipex_llm-2.2.0b20250328.dist-info}/RECORD +39 -38
  34. {ipex_llm-2.2.0b20250326.data → ipex_llm-2.2.0b20250328.data}/scripts/ipex-llm-init.bat +0 -0
  35. {ipex_llm-2.2.0b20250326.data → ipex_llm-2.2.0b20250328.data}/scripts/llm-chat.ps1 +0 -0
  36. {ipex_llm-2.2.0b20250326.data → ipex_llm-2.2.0b20250328.data}/scripts/llm-cli.ps1 +0 -0
  37. {ipex_llm-2.2.0b20250326.dist-info → ipex_llm-2.2.0b20250328.dist-info}/WHEEL +0 -0
  38. {ipex_llm-2.2.0b20250326.dist-info → ipex_llm-2.2.0b20250328.dist-info}/entry_points.txt +0 -0
  39. {ipex_llm-2.2.0b20250326.dist-info → ipex_llm-2.2.0b20250328.dist-info}/top_level.txt +0 -0
ipex_llm/libs/bloom-api.dll CHANGED
Binary file
ipex_llm/libs/bloom.dll CHANGED
Binary file
ipex_llm/libs/gptneox-api.dll CHANGED
Binary file
ipex_llm/libs/gptneox.dll CHANGED
Binary file
ipex_llm/libs/libbloom_avx.dll CHANGED
Binary file
ipex_llm/libs/libbloom_vnni.dll CHANGED
Binary file
ipex_llm/libs/libgptneox_avx.dll CHANGED
Binary file
ipex_llm/libs/libgptneox_vnni.dll CHANGED
Binary file
ipex_llm/libs/libllama_avx.dll CHANGED
Binary file
ipex_llm/libs/libllama_vnni.dll CHANGED
Binary file
ipex_llm/libs/libstarcoder_avx.dll CHANGED
Binary file
ipex_llm/libs/libstarcoder_vnni.dll CHANGED
Binary file
ipex_llm/libs/llama-api.dll CHANGED
Binary file
ipex_llm/libs/llama.dll CHANGED
Binary file
ipex_llm/libs/main-bloom.exe CHANGED
Binary file
ipex_llm/libs/main-gptneox.exe CHANGED
Binary file
ipex_llm/libs/main-llama.exe CHANGED
Binary file
ipex_llm/libs/main-starcoder.exe CHANGED
Binary file
ipex_llm/libs/pipeline.dll CHANGED
Binary file
ipex_llm/libs/quantize-bloom.exe CHANGED
Binary file
ipex_llm/libs/quantize-bloom_vnni.exe CHANGED
Binary file
ipex_llm/libs/quantize-gptneox.exe CHANGED
Binary file
ipex_llm/libs/quantize-gptneox_vnni.exe CHANGED
Binary file
ipex_llm/libs/quantize-llama.exe CHANGED
Binary file
ipex_llm/libs/quantize-llama_vnni.exe CHANGED
Binary file
ipex_llm/libs/quantize-starcoder.exe CHANGED
Binary file
ipex_llm/libs/quantize-starcoder_vnni.exe CHANGED
Binary file
ipex_llm/libs/starcoder-api.dll CHANGED
Binary file
ipex_llm/libs/starcoder.dll CHANGED
Binary file
ipex_llm/transformers/convert.py CHANGED
@@ -1074,6 +1074,9 @@ def _optimize_pre(model, qtype=None):
     elif model.config.model_type == "deepseek_v3" and model.config.hidden_size == 2048:
         from ipex_llm.transformers.models.deepseek import padding_mla_v_hd
         model.apply(padding_mla_v_hd)
+    elif model.config.model_type == "qwen2_5_omni":
+        from ipex_llm.transformers.models.qwen2_5_omni import merge_qkv
+        model.apply(merge_qkv)
     return model
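The merge_qkv hook registered above fuses each attention module's separate query/key/value projections into the single qkv_proj that the new attention forward (in qwen2_5_omni.py below) expects. A minimal sketch of the idea behind merge_qkv_base, assuming plain torch.nn.Linear projections named q_proj/k_proj/v_proj; the real ipex-llm helper is more general (it also handles its own low-bit linear types, dtype and device placement), so treat this as an illustration only:

import torch

def merge_qkv_sketch(attn: torch.nn.Module):
    # Concatenate the q/k/v projection weights into one fused linear layer,
    # so the three projection matmuls run as a single matmul per layer.
    q, k, v = attn.q_proj, attn.k_proj, attn.v_proj
    qkv = torch.nn.Linear(q.in_features,
                          q.out_features + k.out_features + v.out_features,
                          bias=q.bias is not None)
    with torch.no_grad():
        qkv.weight.copy_(torch.cat([q.weight, k.weight, v.weight], dim=0))
        if q.bias is not None:
            qkv.bias.copy_(torch.cat([q.bias, k.bias, v.bias], dim=0))
    attn.qkv_proj = qkv
    del attn.q_proj, attn.k_proj, attn.v_proj

The fused output is then sliced back apart by the qkv.split([...]) call in qwen2_5_omni_attention_forward.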
@@ -2043,7 +2046,38 @@ def _optimize_post(model):
         convert_forward(model, module.DeepseekV3Model, deepseek_model_forward)
         convert_forward(model, module.DeepseekV3Attention, deepseek_attention_forward)
         convert_forward(model, module.DeepseekV3MoE, deepseek_moe_forward)
+    elif model.config.model_type == "qwen2_5_omni":
+        modeling_module_name = model.__class__.__module__
+        module = importlib.import_module(modeling_module_name)

+        # llm opt
+        from ipex_llm.transformers.models.qwen2_5_omni import qwen2_5_omni_attention_forward
+        from ipex_llm.transformers.models.qwen2_5_omni import qwen2_5_omni_thinker_model_forward
+        from ipex_llm.transformers.models.qwen2 import qwen2_mlp_forward
+        from ipex_llm.transformers.models.common import rms_norm_forward
+        convert_forward(model.thinker.model, module.Qwen2_5OmniAttention,
+                        qwen2_5_omni_attention_forward)
+        convert_forward(model.thinker.model, module.Qwen2_5OmniSdpaAttention,
+                        qwen2_5_omni_attention_forward)
+        convert_forward(model.thinker.model, module.Qwen2_5OmniThinkerModel,
+                        qwen2_5_omni_thinker_model_forward)
+        convert_forward(model.thinker.model, module.Qwen2MLP, qwen2_mlp_forward)
+        convert_forward(model, module.Qwen2RMSNorm, rms_norm_forward)
+
+        # vision opt
+        from ipex_llm.transformers.models.qwen2_vl import qwen2_vision_get_dtype
+        from ipex_llm.transformers.models.qwen2_5_omni import qwen2_5_omni_vision_attention_forward
+        convert_forward(model.thinker.visual, module.Qwen2_5OmniVisionAttention,
+                        qwen2_5_omni_vision_attention_forward)
+        convert_forward(model.thinker.visual, module.Qwen2_5OmniVisionSdpaAttention,
+                        qwen2_5_omni_vision_attention_forward)
+
+        # tts opt
+        if hasattr(model, "talker"):
+            convert_forward(model.talker, module.Qwen2_5OmniAttention,
+                            qwen2_5_omni_attention_forward)
+            convert_forward(model.talker, module.Qwen2_5OmniThinkerModel,
+                            qwen2_5_omni_thinker_model_forward)
     return model
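These branches run when a Qwen2.5-Omni checkpoint passes through ipex-llm's optimization entry point. A hedged sketch of typical usage follows; optimize_model is ipex-llm's public one-line API, but the model class name depends on the installed transformers version, and the checkpoint id is only an example:

import torch
# Requires a transformers release that ships Qwen2.5-Omni support.
from transformers import Qwen2_5OmniForConditionalGeneration
from ipex_llm import optimize_model

model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-Omni-7B",  # example checkpoint id
    torch_dtype=torch.float16,
)
# optimize_model() drives _optimize_pre (merge_qkv) and _optimize_post
# (the convert_forward replacements above).
model = optimize_model(model)
model = model.to("xpu")  # Intel GPU; the XPU-only code paths then take effect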
ipex_llm/transformers/models/qwen2_5_omni.py ADDED
@@ -0,0 +1,286 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Some parts of this file is adapted from
+# https://github.com/huggingface/transformers/blob/3a1ead0aabed473eafe527915eea8c197d424356/src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py
+# which is licensed under Apache License 2.0
+
+import math
+import torch
+from typing import Optional, Tuple, List, Union
+from transformers.cache_utils import Cache
+from transformers.modeling_outputs import BaseModelOutputWithPast
+from transformers.models.qwen2_5_omni.modeling_qwen2_5_omni import Qwen2_5OmniAttention
+from transformers.models.qwen2_5_omni.modeling_qwen2_5_omni import apply_rotary_pos_emb_vision
+from transformers.models.qwen2_5_omni.modeling_qwen2_5_omni import apply_multimodal_rotary_pos_emb
+
+from ipex_llm.utils.common import invalidInputError
+from ipex_llm.transformers.kv import DynamicNormalCache
+from ipex_llm.transformers.models.common import merge_qkv_base
+from ipex_llm.transformers.models.common import attention_softmax
+from ipex_llm.transformers.models.common import scaled_dot_product_attention
+from ipex_llm.transformers.models.utils import use_sdp_non_causal
+
+
+def merge_qkv(module: torch.nn.Module):
+    merge_qkv_base(module, Qwen2_5OmniAttention)
+
+
+def qwen2_5_omni_attention_forward(
+    self,
+    hidden_states: torch.Tensor,
+    attention_mask: Optional[torch.Tensor] = None,
+    position_ids: Optional[torch.LongTensor] = None,
+    past_key_value: Optional[Cache] = None,
+    output_attentions: bool = False,
+    use_cache: bool = False,
+    cache_position: Optional[torch.LongTensor]=None,
+    position_embeddings: Tuple[torch.Tensor, torch.Tensor]=None,
+):
+    bsz, q_len, _ = hidden_states.size()
+
+    qkv = self.qkv_proj(hidden_states)
+    qkv = qkv.view(bsz, q_len, self.num_heads + 2 * self.num_key_value_heads, self.head_dim)
+    qkv = qkv.transpose(1, 2)
+    query_states, key_states, value_states = qkv.split([self.num_heads,
+                                                        self.num_key_value_heads,
+                                                        self.num_key_value_heads], dim=1)
+
+    cos, sin = position_embeddings
+    if query_states.device.type == "xpu":
+        import xe_addons
+        xe_addons.rotary_half_with_cache_inplaced(query_states, key_states, cos, sin)
+    else:
+        query_states, key_states = apply_multimodal_rotary_pos_emb(
+            query_states, key_states, cos, sin, self.rope_scaling["mrope_section"]
+        )
+
+    key_states, value_states = past_key_value.update(key_states, value_states,
+                                                     self.layer_idx, None)
+
+    attn_weights = None
+    attn_output = scaled_dot_product_attention(
+        query_states, key_states, value_states,
+        attention_mask, q_len == key_states.size(2)
+    )
+
+    attn_output = attn_output.transpose(1, 2).contiguous()
+    attn_output = attn_output.reshape(bsz, q_len, -1)
+
+    attn_output = self.o_proj(attn_output)
+
+    if not output_attentions:
+        attn_weights = None
+    return attn_output, attn_weights, past_key_value
+
+
+def qwen2_5_omni_thinker_model_forward(
+    self,
+    input_ids: torch.LongTensor = None,
+    attention_mask: Optional[torch.Tensor] = None,
+    position_ids: Optional[torch.LongTensor] = None,
+    past_key_values: Optional[List[torch.FloatTensor]] = None,
+    inputs_embeds: Optional[torch.FloatTensor] = None,
+    use_cache: Optional[bool] = None,
+    output_attentions: Optional[bool] = None,
+    output_hidden_states: Optional[bool] = None,
+    return_dict: Optional[bool] = None,
+    cache_position: Optional[torch.LongTensor] = None,
+) -> Union[Tuple, BaseModelOutputWithPast]:
+    output_attentions = (
+        output_attentions if output_attentions is not None
+        else self.config.output_attentions
+    )
+    output_hidden_states = (
+        output_hidden_states if output_hidden_states is not None
+        else self.config.output_hidden_states
+    )
+
+    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+    invalidInputError((input_ids is None) ^ (inputs_embeds is None),
+                      "You must specify exactly one of input_ids or inputs_embeds")
+
+    if inputs_embeds is None:
+        inputs_embeds = self.embed_tokens(input_ids)
+
+    # ipex-llm changes start: kv cache
+    use_cache = use_cache if use_cache is not None else self.config.use_cache
+    use_cache = True if inputs_embeds.device.type == "xpu" else use_cache
+    # torch.jit.trace() doesn't support cache objects in the output
+    if use_cache and not torch.jit.is_tracing():
+        if not isinstance(past_key_values, DynamicNormalCache):
+            past_key_values = DynamicNormalCache.from_legacy_cache(past_key_values)
+    # ipex-llm changes end
+
+    if cache_position is None:
+        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+        cache_position = torch.arange(
+            past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+        )
+
+    # the hard coded `3` is for temporal, height and width.
+    if position_ids is None:
+        position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
+    elif position_ids.dim() == 2:
+        position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
+
+    causal_mask = self._update_causal_mask(
+        attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+    )
+
+    hidden_states = inputs_embeds
+
+    # create position embeddings to be shared across the decoder layers
+    position_embeddings = self.rotary_emb(hidden_states, position_ids)
+    # ipex-llm changes start: rotary embedding
+    if inputs_embeds.device.type == "xpu":
+        cos, sin = position_embeddings
+        mrope_section = self.config.rope_scaling["mrope_section"] * 2
+        cos = torch.cat([m[i % 3] for i, m in
+                         enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze(1)
+        sin = torch.cat([m[i % 3] for i, m in
+                         enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze(1)
+        position_embeddings = cos.contiguous(), sin.contiguous()
+    # ipex-llm changes end
+
+    # decoder layers
+    all_hidden_states = () if output_hidden_states else None
+    all_self_attns = () if output_attentions else None
+    next_decoder_cache = None
+
+    for decoder_layer in self.layers:
+        if output_hidden_states:
+            all_hidden_states += (hidden_states,)
+
+        layer_outputs = decoder_layer(
+            hidden_states,
+            attention_mask=causal_mask,
+            position_ids=position_ids,
+            past_key_value=past_key_values,
+            output_attentions=output_attentions,
+            use_cache=use_cache,
+            cache_position=cache_position,
+            position_embeddings=position_embeddings,
+        )
+
+        hidden_states = layer_outputs[0]
+
+        if use_cache:
+            next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+        if output_attentions:
+            all_self_attns += (layer_outputs[1],)
+
+    hidden_states = self.norm(hidden_states)
+
+    # add hidden states from the last decoder layer
+    if output_hidden_states:
+        all_hidden_states += (hidden_states,)
+
+    next_cache = next_decoder_cache if use_cache else None
+
+    if not return_dict:
+        return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
+                     if v is not None)
+    return BaseModelOutputWithPast(
+        last_hidden_state=hidden_states,
+        past_key_values=next_cache,
+        hidden_states=all_hidden_states,
+        attentions=all_self_attns,
+    )
+
+
+def qwen2_5_omni_vision_attention_forward(
+    self,
+    hidden_states: torch.Tensor,
+    cu_seqlens: torch.Tensor,
+    rotary_pos_emb: torch.Tensor = None
+) -> torch.Tensor:
+    seq_length = hidden_states.shape[0]
+    q = self.q(hidden_states).reshape(seq_length, self.num_heads, -1)
+    k = self.k(hidden_states).reshape(seq_length, self.num_heads, -1)
+    v = self.v(hidden_states).reshape(seq_length, self.num_heads, -1)
+    q = apply_rotary_pos_emb_vision(q.unsqueeze(0), rotary_pos_emb).squeeze(0)
+    k = apply_rotary_pos_emb_vision(k.unsqueeze(0), rotary_pos_emb).squeeze(0)
+    # q, k, v: [seq_length, num_heads, head_dim]
+
+    seq_lens = cu_seqlens.tolist()
+    invalidInputError(seq_lens[0] == 0 and seq_lens[-1] == seq_length,
+                      "unexpected input")
+
+    head_dim = q.size(-1)
+    if use_sdp_non_causal(head_dim, q.device, q.dtype):
+        image_num = len(seq_lens) - 1
+        image_size = seq_lens[1] - seq_lens[0]
+        guessed_seq_lens = torch.arange(0, (image_num + 1) * image_size, image_size,
+                                        dtype=cu_seqlens.dtype, device=cu_seqlens.device)
+        if (guessed_seq_lens == cu_seqlens).all():
+            q = q.view(image_num, image_size, self.num_heads, head_dim).permute(0, 2, 1, 3)
+            k = k.view(image_num, image_size, self.num_heads, head_dim).permute(0, 2, 1, 3)
+            v = v.view(image_num, image_size, self.num_heads, head_dim).permute(0, 2, 1, 3)
+            # q, k, v: [image_num, num_heads, image_size, head_dim]
+
+            attn_output = scaled_dot_product_attention(
+                q, k.contiguous(), v.contiguous(),
+                None, False
+            )
+            attn_output = attn_output.permute(0, 2, 1, 3).contiguous()
+            attn_output = attn_output.view(seq_length, self.num_heads, head_dim)
+            # attn_output: [seq_length, num_heads, head_dim]
+        else:
+            q = q.transpose(0, 1).unsqueeze(0)
+            k = k.transpose(0, 1).unsqueeze(0).contiguous()
+            v = v.transpose(0, 1).unsqueeze(0).contiguous()
+            # q, k, v: [1, num_heads, seq_length, head_dim]
+
+            attn_outputs = []
+            for i in range(image_num):
+                start_idx = seq_lens[i]
+                end_idx = seq_lens[i + 1]
+                tmp_q = q[:, :, start_idx:end_idx, :]
+                tmp_k = k[:, :, start_idx:end_idx, :]
+                tmp_v = v[:, :, start_idx:end_idx, :]
+                attn_output = scaled_dot_product_attention(
+                    tmp_q, tmp_k, tmp_v,
+                    None, False
+                )
+                attn_output = attn_output.permute(0, 2, 1, 3)
+                # attn_output: [1, seq_length, num_heads, head_dim]
+                attn_outputs.append(attn_output)
+            attn_output = torch.cat(attn_outputs, dim=1).squeeze(0)
+            # attn_output: [seq_length, num_heads, head_dim]
+    else:
+        attention_mask = torch.full(
+            [1, seq_length, seq_length], torch.finfo(q.dtype).min, device=q.device, dtype=q.dtype
+        )
+        for i in range(1, len(seq_lens)):
+            attention_mask[..., seq_lens[i - 1]:seq_lens[i], seq_lens[i - 1]:seq_lens[i]] = 0
+
+        q = q.transpose(0, 1)
+        k = k.transpose(0, 1)
+        v = v.transpose(0, 1)
+        # q, k, v: [num_heads, seq_length, head_dim]
+
+        attn_weights = torch.matmul(q, k.transpose(1, 2)) / math.sqrt(head_dim)
+        attn_weights = attn_weights + attention_mask
+        attn_weights = attention_softmax(attn_weights)
+        attn_output = torch.matmul(attn_weights, v)
+        attn_output = attn_output.transpose(0, 1)
+        # attn_output: [seq_length, num_heads, head_dim]
+
+    attn_output = attn_output.reshape(seq_length, -1)
+    attn_output = self.proj(attn_output)
+    return attn_output
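One detail worth noting in qwen2_5_omni_vision_attention_forward above: when the fast SDPA path is unavailable, patches from different images are prevented from attending to each other by a block-diagonal additive mask built from cu_seqlens. A small self-contained illustration of that construction, using made-up sequence lengths:

import torch

# Cumulative sequence lengths for, say, two images of 4 and 6 patches.
cu_seqlens = torch.tensor([0, 4, 10])
seq_length = int(cu_seqlens[-1])

# Start fully masked, then zero out each image's own block on the diagonal.
mask = torch.full((seq_length, seq_length), float("-inf"))
for i in range(1, len(cu_seqlens)):
    s, e = int(cu_seqlens[i - 1]), int(cu_seqlens[i])
    mask[s:e, s:e] = 0.0  # patches may only attend within their own image

# Rows 0-3 attend to columns 0-3 only; rows 4-9 to columns 4-9 only.
print((mask == 0).sum().item())  # 4*4 + 6*6 = 52 unmasked entries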
{ipex_llm-2.2.0b20250326.dist-info → ipex_llm-2.2.0b20250328.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ipex-llm
-Version: 2.2.0b20250326
+Version: 2.2.0b20250328
 Summary: Large Language Model Develop Toolkit
 Home-page: https://github.com/intel-analytics/ipex-llm
 Author: BigDL Authors
@@ -27,7 +27,7 @@ Requires-Dist: intel-openmp ; (platform_machine == "x86_64" or platform_machine
 Requires-Dist: torch ==2.1.2+cpu ; (platform_system == "Linux") and extra == 'all'
 Requires-Dist: torch ==2.1.2 ; (platform_system == "Windows") and extra == 'all'
 Provides-Extra: cpp
-Requires-Dist: bigdl-core-cpp ==2.6.0b20250326 ; extra == 'cpp'
+Requires-Dist: bigdl-core-cpp ==2.6.0b20250328 ; extra == 'cpp'
 Requires-Dist: setuptools ; extra == 'cpp'
 Requires-Dist: onednn-devel ==2025.0.1 ; (platform_system == "Windows") and extra == 'cpp'
 Requires-Dist: onednn ==2025.0.1 ; (platform_system == "Windows") and extra == 'cpp'
@@ -60,7 +60,7 @@ Requires-Dist: transformers ==4.40.0 ; extra == 'npu'
 Requires-Dist: intel-openmp ; (platform_machine == "x86_64" or platform_machine == "AMD64") and extra == 'npu'
 Requires-Dist: torch ==2.1.2+cpu ; (platform_system == "Linux") and extra == 'npu'
 Requires-Dist: torch ==2.1.2 ; (platform_system == "Windows") and extra == 'npu'
-Requires-Dist: bigdl-core-npu ==2.6.0b20250326 ; (platform_system == "Windows") and extra == 'npu'
+Requires-Dist: bigdl-core-npu ==2.6.0b20250328 ; (platform_system == "Windows") and extra == 'npu'
 Provides-Extra: serving
 Requires-Dist: py-cpuinfo ; extra == 'serving'
 Requires-Dist: fschat[model_worker,webui] ==0.2.36 ; extra == 'serving'
@@ -80,9 +80,9 @@ Requires-Dist: setuptools <70.0.0 ; extra == 'xpu'
 Requires-Dist: torch ==2.1.0a0 ; extra == 'xpu'
 Requires-Dist: torchvision ==0.16.0a0 ; extra == 'xpu'
 Requires-Dist: intel-extension-for-pytorch ==2.1.10+xpu ; extra == 'xpu'
-Requires-Dist: bigdl-core-xe-21 ==2.6.0b20250326 ; extra == 'xpu'
-Requires-Dist: bigdl-core-xe-batch-21 ==2.6.0b20250326 ; extra == 'xpu'
-Requires-Dist: bigdl-core-xe-addons-21 ==2.6.0b20250326 ; extra == 'xpu'
+Requires-Dist: bigdl-core-xe-21 ==2.6.0b20250328 ; extra == 'xpu'
+Requires-Dist: bigdl-core-xe-batch-21 ==2.6.0b20250328 ; extra == 'xpu'
+Requires-Dist: bigdl-core-xe-addons-21 ==2.6.0b20250328 ; extra == 'xpu'
 Provides-Extra: xpu-2-1
 Requires-Dist: py-cpuinfo ; extra == 'xpu-2-1'
 Requires-Dist: protobuf ; extra == 'xpu-2-1'
@@ -97,9 +97,9 @@ Requires-Dist: setuptools <70.0.0 ; extra == 'xpu-2-1'
 Requires-Dist: torch ==2.1.0a0 ; extra == 'xpu-2-1'
 Requires-Dist: torchvision ==0.16.0a0 ; extra == 'xpu-2-1'
 Requires-Dist: intel-extension-for-pytorch ==2.1.10+xpu ; extra == 'xpu-2-1'
-Requires-Dist: bigdl-core-xe-21 ==2.6.0b20250326 ; extra == 'xpu-2-1'
-Requires-Dist: bigdl-core-xe-batch-21 ==2.6.0b20250326 ; extra == 'xpu-2-1'
-Requires-Dist: bigdl-core-xe-addons-21 ==2.6.0b20250326 ; extra == 'xpu-2-1'
+Requires-Dist: bigdl-core-xe-21 ==2.6.0b20250328 ; extra == 'xpu-2-1'
+Requires-Dist: bigdl-core-xe-batch-21 ==2.6.0b20250328 ; extra == 'xpu-2-1'
+Requires-Dist: bigdl-core-xe-addons-21 ==2.6.0b20250328 ; extra == 'xpu-2-1'
 Requires-Dist: intel-openmp ; (platform_machine == "x86_64" or platform_machine == "AMD64") and extra == 'xpu-2-1'
 Requires-Dist: dpcpp-cpp-rt ==2024.0.2 ; (platform_system == "Windows") and extra == 'xpu-2-1'
 Requires-Dist: mkl-dpcpp ==2024.0.0 ; (platform_system == "Windows") and extra == 'xpu-2-1'
@@ -117,7 +117,7 @@ Requires-Dist: setuptools ; extra == 'xpu-2-6'
 Requires-Dist: torch ==2.6.0+xpu ; extra == 'xpu-2-6'
 Requires-Dist: torchvision ==0.21.0+xpu ; extra == 'xpu-2-6'
 Requires-Dist: torchaudio ==2.6.0+xpu ; extra == 'xpu-2-6'
-Requires-Dist: bigdl-core-xe-all ==2.6.0b20250326 ; extra == 'xpu-2-6'
+Requires-Dist: bigdl-core-xe-all ==2.6.0b20250328 ; extra == 'xpu-2-6'
 Requires-Dist: onednn-devel ==2025.0.1 ; extra == 'xpu-2-6'
 Requires-Dist: onednn ==2025.0.1 ; extra == 'xpu-2-6'
 Requires-Dist: dpcpp-cpp-rt ==2025.0.2 ; extra == 'xpu-2-6'
@@ -132,7 +132,7 @@ Requires-Dist: tokenizers ==0.15.2 ; extra == 'xpu-2-6-arl'
 Requires-Dist: accelerate ==0.23.0 ; extra == 'xpu-2-6-arl'
 Requires-Dist: tabulate ; extra == 'xpu-2-6-arl'
 Requires-Dist: setuptools ; extra == 'xpu-2-6-arl'
-Requires-Dist: bigdl-core-xe-all ==2.6.0b20250326 ; extra == 'xpu-2-6-arl'
+Requires-Dist: bigdl-core-xe-all ==2.6.0b20250328 ; extra == 'xpu-2-6-arl'
 Requires-Dist: onednn-devel ==2025.0.1 ; extra == 'xpu-2-6-arl'
 Requires-Dist: onednn ==2025.0.1 ; extra == 'xpu-2-6-arl'
 Requires-Dist: dpcpp-cpp-rt ==2025.0.2 ; extra == 'xpu-2-6-arl'
@@ -155,9 +155,9 @@ Requires-Dist: tokenizers ==0.15.2 ; extra == 'xpu-arc'
 Requires-Dist: accelerate ==0.23.0 ; extra == 'xpu-arc'
 Requires-Dist: tabulate ; extra == 'xpu-arc'
 Requires-Dist: setuptools ; extra == 'xpu-arc'
-Requires-Dist: bigdl-core-xe-23 ==2.6.0b20250326 ; extra == 'xpu-arc'
-Requires-Dist: bigdl-core-xe-batch-23 ==2.6.0b20250326 ; extra == 'xpu-arc'
-Requires-Dist: bigdl-core-xe-addons-23 ==2.6.0b20250326 ; extra == 'xpu-arc'
+Requires-Dist: bigdl-core-xe-23 ==2.6.0b20250328 ; extra == 'xpu-arc'
+Requires-Dist: bigdl-core-xe-batch-23 ==2.6.0b20250328 ; extra == 'xpu-arc'
+Requires-Dist: bigdl-core-xe-addons-23 ==2.6.0b20250328 ; extra == 'xpu-arc'
 Requires-Dist: intel-openmp ; (platform_machine == "x86_64" or platform_machine == "AMD64") and extra == 'xpu-arc'
 Requires-Dist: torch ==2.3.1+cxx11.abi ; (platform_system == "Linux") and extra == 'xpu-arc'
 Requires-Dist: torchvision ==0.18.1+cxx11.abi ; (platform_system == "Linux") and extra == 'xpu-arc'
@@ -178,9 +178,9 @@ Requires-Dist: tokenizers ==0.15.2 ; extra == 'xpu-arl'
 Requires-Dist: accelerate ==0.23.0 ; extra == 'xpu-arl'
 Requires-Dist: tabulate ; extra == 'xpu-arl'
 Requires-Dist: setuptools ; extra == 'xpu-arl'
-Requires-Dist: bigdl-core-xe-23 ==2.6.0b20250326 ; extra == 'xpu-arl'
-Requires-Dist: bigdl-core-xe-batch-23 ==2.6.0b20250326 ; extra == 'xpu-arl'
-Requires-Dist: bigdl-core-xe-addons-23 ==2.6.0b20250326 ; extra == 'xpu-arl'
+Requires-Dist: bigdl-core-xe-23 ==2.6.0b20250328 ; extra == 'xpu-arl'
+Requires-Dist: bigdl-core-xe-batch-23 ==2.6.0b20250328 ; extra == 'xpu-arl'
+Requires-Dist: bigdl-core-xe-addons-23 ==2.6.0b20250328 ; extra == 'xpu-arl'
 Requires-Dist: intel-openmp ; (platform_machine == "x86_64" or platform_machine == "AMD64") and extra == 'xpu-arl'
 Requires-Dist: torch ==2.3.1+cxx11.abi ; (platform_system == "Linux") and extra == 'xpu-arl'
 Requires-Dist: torchvision ==0.18.1+cxx11.abi ; (platform_system == "Linux") and extra == 'xpu-arl'
@@ -201,9 +201,9 @@ Requires-Dist: tokenizers ==0.15.2 ; extra == 'xpu-lnl'
 Requires-Dist: accelerate ==0.23.0 ; extra == 'xpu-lnl'
 Requires-Dist: tabulate ; extra == 'xpu-lnl'
 Requires-Dist: setuptools ; extra == 'xpu-lnl'
-Requires-Dist: bigdl-core-xe-23 ==2.6.0b20250326 ; extra == 'xpu-lnl'
-Requires-Dist: bigdl-core-xe-batch-23 ==2.6.0b20250326 ; extra == 'xpu-lnl'
-Requires-Dist: bigdl-core-xe-addons-23 ==2.6.0b20250326 ; extra == 'xpu-lnl'
+Requires-Dist: bigdl-core-xe-23 ==2.6.0b20250328 ; extra == 'xpu-lnl'
+Requires-Dist: bigdl-core-xe-batch-23 ==2.6.0b20250328 ; extra == 'xpu-lnl'
+Requires-Dist: bigdl-core-xe-addons-23 ==2.6.0b20250328 ; extra == 'xpu-lnl'
 Requires-Dist: intel-openmp ; (platform_machine == "x86_64" or platform_machine == "AMD64") and extra == 'xpu-lnl'
 Requires-Dist: torch ==2.3.1+cxx11.abi ; (platform_system == "Linux") and extra == 'xpu-lnl'
 Requires-Dist: torchvision ==0.18.1+cxx11.abi ; (platform_system == "Linux") and extra == 'xpu-lnl'
{ipex_llm-2.2.0b20250326.dist-info → ipex_llm-2.2.0b20250328.dist-info}/RECORD CHANGED
@@ -41,35 +41,35 @@ ipex_llm/langchain/llms/transformerspipelinellm.py,sha256=vm522YPPwWxxAPVvQBtxRf
 ipex_llm/langchain/vllm/__init__.py,sha256=T-EbRT6GJ_8RCu-iLmSzcftOimXSPQf2d5X72AUAy2Y,874
 ipex_llm/langchain/vllm/vllm.py,sha256=6dxc-ZISZQrJilEa_HA827l75Dv9rcHpY_G6FdJ8BVs,7793
 ipex_llm/libs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ipex_llm/libs/bloom-api.dll,sha256=pqTVajDI-O28V6_tMMU5ylAZz6KnhkNU1hZF0qcQmXc,36352
-ipex_llm/libs/bloom.dll,sha256=wdaa-Xyja6Cxd-JvmkqWQCFHsIJ_J2d1RZG0kqQRSJI,507904
-ipex_llm/libs/gptneox-api.dll,sha256=84H-AtLwz-L1zKmGOkkUF91tg31RyB9ReRxOf9tjhd0,24576
-ipex_llm/libs/gptneox.dll,sha256=S-ekc_gepDq_w905_X0wVecKy1Su0bQ17Osb2wmyqWU,568320
-ipex_llm/libs/libbloom_avx.dll,sha256=aJMe0Vf9FkZE-oh5m0xuAPh6Td8OPUbWMbhmkPQ4G-k,536576
-ipex_llm/libs/libbloom_vnni.dll,sha256=kfZOWQHNDrpnBIyAf59vR1VCpMmHkoxR7Qqrmg7TeGk,508416
-ipex_llm/libs/libgptneox_avx.dll,sha256=TGGRYlBnWRh8B6UERRZaxsF3aKT4eG1bwZXstU_AukM,596992
-ipex_llm/libs/libgptneox_vnni.dll,sha256=xdYuHYCTFcvIzpbybOcoMl1R4C942aTIZLO08rP0uX8,568832
-ipex_llm/libs/libllama_avx.dll,sha256=idhrt9J7wVzgNe3VV6TzwecsQlPJiCcQoXOwg9lgFcQ,591360
-ipex_llm/libs/libllama_vnni.dll,sha256=1_zWBiKs3IqO4WXF7tFwi_x4roh-MhOntf0FAwI1rh0,563200
-ipex_llm/libs/libstarcoder_avx.dll,sha256=8aFkmm6I-ui50-2KxF-CRCpqvp9gPK05dNnVZ23Ka-A,627712
-ipex_llm/libs/libstarcoder_vnni.dll,sha256=_IcUGJmh3vAdlyybVZi3TGevTCjWKWo0xm3lh-a8kIs,599552
-ipex_llm/libs/llama-api.dll,sha256=DMn2bTbxybghbfbhi4A30l1SwnPm-1yUQhfHKKUA2zI,25600
-ipex_llm/libs/llama.dll,sha256=9KDpAN-a92Ei93xwyZ6q0vlQvgw0Vtx6Du6xKbIOU4k,562688
-ipex_llm/libs/main-bloom.exe,sha256=TE0o7kdnDHBQ-TdFtFOHNRuW6iZLOaQkkMgODnLmfdo,103424
-ipex_llm/libs/main-gptneox.exe,sha256=AIz6Pud8TGqjeCyqt7PpgIkSoJkfrHVFEk_MADVL-XQ,98816
-ipex_llm/libs/main-llama.exe,sha256=UXD50b7iFZBMFNNutn8he0b1WCrVZMLQyls-UZeWDvQ,99840
-ipex_llm/libs/main-starcoder.exe,sha256=vfLp8wknjBhAyk5dEnd_SxZyc35hCw-UTBIQTSgxRYg,157696
-ipex_llm/libs/pipeline.dll,sha256=KSAXxye5J9pn3DM_jegYio4cKpCu42cByqyJyD735zQ,73216
-ipex_llm/libs/quantize-bloom.exe,sha256=lv21nEsnw5JKCyYvS311zpBW4siXoi0d11tmJMDDO9I,126464
-ipex_llm/libs/quantize-bloom_vnni.exe,sha256=pW0feE2Egqo8hdnE8BgYYH6ulZS17oGEoRAaWdh94ro,128000
-ipex_llm/libs/quantize-gptneox.exe,sha256=Dux1lopaaO1L76CJaXKdAyl4pT3hv9M2gkOuysrRHiQ,104448
-ipex_llm/libs/quantize-gptneox_vnni.exe,sha256=fsBLc0pl7r7VbuYipbKNfZ73uTO3V_Re-_2SvNW1bn8,104960
-ipex_llm/libs/quantize-llama.exe,sha256=eoWV68mg5peDopzK5FwKH3E8hQwu13LgL76inE2A2rA,110080
-ipex_llm/libs/quantize-llama_vnni.exe,sha256=C_q5J5vMWKoqjF529FwL6kUbOBgjZzmPXk_QJcFnUZo,110592
-ipex_llm/libs/quantize-starcoder.exe,sha256=zXb514xNlefTjlyOGYLmFjGiAFEaHtYb4Vjvzv4vcyA,127488
-ipex_llm/libs/quantize-starcoder_vnni.exe,sha256=EgLdrKE7R-EH7NKL9iRP5-5BP6c6fNia8mqSYgCZynU,128512
-ipex_llm/libs/starcoder-api.dll,sha256=TozlAJewwHxElGpYiew2naU7HLHZPTFu0pgUpOwecik,21504
-ipex_llm/libs/starcoder.dll,sha256=H9fiXMXtmwIgUZjeN9g71q0Ab8rxQKU9_Yn3Tejh7g8,599040
+ipex_llm/libs/bloom-api.dll,sha256=Ts41ETMTULbFvlg9mMqw084zBha5hP5T1gHw9HZkf1E,36352
+ipex_llm/libs/bloom.dll,sha256=BMXbtcBpiyARbOh9fZb4pfPEZLqAvtqCzSPhbQsxCdA,507904
+ipex_llm/libs/gptneox-api.dll,sha256=r_qjHto49nA4Ik2LZUsF2zpcQduIbgociD6UaH1HtAU,24576
+ipex_llm/libs/gptneox.dll,sha256=q1_5H7bxVe0dC4NmnpLMHAtuNdQuQQIBUKGrG2sgx2k,568320
+ipex_llm/libs/libbloom_avx.dll,sha256=Hop7_52kTcnPquuH1MvUb-c1halOLK0F56NNdNjyMOo,536576
+ipex_llm/libs/libbloom_vnni.dll,sha256=L-5Gd0Derh-6jffrFd-qfIQkst9SVfLGYwXUAVyQlbg,508416
+ipex_llm/libs/libgptneox_avx.dll,sha256=BKByiQHeKNZuRhx62VVgjq-aracBadJOUTEMz13onNw,596992
+ipex_llm/libs/libgptneox_vnni.dll,sha256=sNpCWU6uQp8fUDt0fljX6KDwSDnuUTdvGHhkxbHZOO8,568832
+ipex_llm/libs/libllama_avx.dll,sha256=K2CC5yda4M1NgobPCXZcJCUt8pgoJZraAzqIFUugmJ8,591360
+ipex_llm/libs/libllama_vnni.dll,sha256=klgeb_4tsCBxmt0cE1Yae-drQkacd8VsT-HmuUdJ5hk,563200
+ipex_llm/libs/libstarcoder_avx.dll,sha256=L2xrqzfee1-HNEnmbSHOKkG6p34CFiyJ8k3r5b3mMCQ,627712
+ipex_llm/libs/libstarcoder_vnni.dll,sha256=Z0k9K3VqMFqnr3vLhACbyRnLcJBNt1pghE6ZsbIwB7o,599552
+ipex_llm/libs/llama-api.dll,sha256=ZOtLxLBbxY5LmtMOIZijetNIMpX_1a-RaYj0NobdtFw,25600
+ipex_llm/libs/llama.dll,sha256=DvzfMBusPfpfOMJdyDu6iDJeKT-dBfuN1566Q6BCqwI,562688
+ipex_llm/libs/main-bloom.exe,sha256=dTu_LbEK4w6bXeFF__Dwpj76QNyS-DZRN-khVoqnKHE,103424
+ipex_llm/libs/main-gptneox.exe,sha256=v-uJaAaaL3qMR-r1eddj9JisqZeJSGXxK61HP01JITs,98816
+ipex_llm/libs/main-llama.exe,sha256=pv0qyPACjPpsA_MQ17oGVQc4F3q8fGP9ip866XKdKag,99840
+ipex_llm/libs/main-starcoder.exe,sha256=jds-PpbYmDHT8FcYDtf7p-KoC3y9JLjxQAZvqjCQEYI,157696
+ipex_llm/libs/pipeline.dll,sha256=wkTpoUoilbta0wEhCuOdwuntJ4Qyp-L6Qvj43WhdXnY,73216
+ipex_llm/libs/quantize-bloom.exe,sha256=d7tG88PlzBNQw87qvzbSaxM30iRJQghbH7bqPd-RoYU,126464
+ipex_llm/libs/quantize-bloom_vnni.exe,sha256=8uH4onmTtaS37KFnT9uO-077T5-OjQj1bswR4QjJgOc,128000
+ipex_llm/libs/quantize-gptneox.exe,sha256=UwwX002f7N6ucLI3rTtJGPzPjkuVJSN_OQnPzMGvYk8,104448
+ipex_llm/libs/quantize-gptneox_vnni.exe,sha256=W826KPed7EFGamySWnFySY8Fn0yHvjx2ZJL5wXDbDGE,104960
+ipex_llm/libs/quantize-llama.exe,sha256=90S6ZcTtVANWWqxZq8ObYIaiYt8WdiOuGZZJbXK0tI0,110080
+ipex_llm/libs/quantize-llama_vnni.exe,sha256=4RfT-4FEytTcRPK7OAcKQmz24e-Ej3SWfb8SyXxVhxE,110592
+ipex_llm/libs/quantize-starcoder.exe,sha256=FyItO9Doa6qKNQPqM0AuTL8PhGyntdpcVddZU_b-q18,127488
+ipex_llm/libs/quantize-starcoder_vnni.exe,sha256=2Ft41uF6zfOOCnrytztIvb-pw_i_7etfGIQ9jVD-qwY,128512
+ipex_llm/libs/starcoder-api.dll,sha256=Z0OH30QyHbQbKlohSLBNDgEj0GJbieTACkzSnHVDfno,21504
+ipex_llm/libs/starcoder.dll,sha256=9bCpmwwgIa6A61GlkEsRDaMfq8j4KMUmCFhR_p15kaU,599040
 ipex_llm/llamaindex/__init__.py,sha256=T-EbRT6GJ_8RCu-iLmSzcftOimXSPQf2d5X72AUAy2Y,874
 ipex_llm/llamaindex/llms/__init__.py,sha256=KP1lEdGqDuxPoxL1ZSH25Pm2kKMPJBWUTLR0ckSLMIU,1139
 ipex_llm/llamaindex/llms/bigdlllm.py,sha256=FQBzq1KOjfc6uofTXAha3O7TqpJkNfOFepXQmOVlbnI,26314
@@ -87,7 +87,7 @@ ipex_llm/serving/fastchat/tgi_api_protocol.py,sha256=brT3k3-V0NJrU4fRqUwWjC0O3iO
 ipex_llm/serving/fastchat/tgi_api_server.py,sha256=agNTAEiZPSuj3dEdIdYKwkoY0cXOUDX06DiM9VP2knQ,24418
 ipex_llm/serving/fastchat/vllm_worker.py,sha256=ZLz2Q9GxJO6r_LOiP6epgCRjBGk-K4EB1SNEWSJp5DA,11091
 ipex_llm/transformers/__init__.py,sha256=BreA3EY6hkNq0rVixb_sUuTLzMrcWXTt3yRsshCPHHQ,1214
-ipex_llm/transformers/convert.py,sha256=CfdQz1wDgjmAT4uT_y9ZRbOrFjmsXxFUgPeWEExskk8,103770
+ipex_llm/transformers/convert.py,sha256=D3QSoD48NzOKz9rKIsgrI0sNzdmJvb0sP4k_0f8Lcmo,105824
 ipex_llm/transformers/convert_ipex.py,sha256=_nSnUTQy-yfkKaqGdqnBdWztZf3NGmnbZ0TKaDrF4X4,14617
 ipex_llm/transformers/embedding.py,sha256=bdgk59DvD4ZZyxRzewXOR7g56nThgO6uhIwk8QL7f-s,9299
 ipex_llm/transformers/kv.py,sha256=k4TU18LlA-Sbq9WNNQnfuzu3RSFBwFhmaV3BcGN5bAo,19191
@@ -171,6 +171,7 @@ ipex_llm/transformers/models/phi3.py,sha256=AaWB7TPQdrDYgpcVHglG0Q0480bxNOw1mFeP
 ipex_llm/transformers/models/phixtral.py,sha256=MDTMghcu7qAmZmRcUGqXXDXhSU3y_N59HRIXmlcjp5g,4890
 ipex_llm/transformers/models/qwen.py,sha256=A3WiVCzA7NLkcjp4zhFkZvKZzZWZlg0WFuVV_556TAI,19543
 ipex_llm/transformers/models/qwen2.py,sha256=zK-FpUaxEhjD4gZa1ZvArodAilz29T_cpeAqfCGosc0,14317
+ipex_llm/transformers/models/qwen2_5_omni.py,sha256=uAm_dZBAf53nYt3d1bFitFcIWJV80wqU4q4NJRbwxIE,12015
 ipex_llm/transformers/models/qwen2_moe.py,sha256=a0gYo-ngf8SxaEnBdZUJDnPS6Mkn_poDd8xqhx50icI,19516
 ipex_llm/transformers/models/qwen2_vl.py,sha256=G-9e2oN4f5p5IWQ-zsBZuONxTura3BjlgyT2meigbHQ,13579
 ipex_llm/transformers/models/qwen_vl.py,sha256=j7Nzzz2Qvynu9yrCXmoEfERjw43hXof5TbXIs7Ms-oY,17105
@@ -255,11 +256,11 @@ ipex_llm/vllm/xpu/engine/__init__.py,sha256=pY_CpyuZd72fr6s32ejeKHKFW0K4vUU2rzZj
 ipex_llm/vllm/xpu/engine/engine.py,sha256=NvCMbp0X8NVrOqbwm4FTvXOptTRLzu9jQsy37ZHnTk8,9493
 ipex_llm/vllm/xpu/entrypoints/openai/api_server.py,sha256=IjiSze9vzBCAkLu_VwIcJwuO1jyFna7DLrj6aSL7RaQ,35220
 ipex_llm/vllm/xpu/entrypoints/openai/cli_args.py,sha256=hB398yYtKauASRzevctScdbFIjiiSGMAe1bwEuIHrhY,10893
-ipex_llm-2.2.0b20250326.data/scripts/ipex-llm-init.bat,sha256=HPtCYuDYwEatq7dAwOvdfVcHYCpAVdbj75K1qh0vQek,2578
-ipex_llm-2.2.0b20250326.data/scripts/llm-chat.ps1,sha256=6qrs-hGVAV8IKh7Jx8nq_XrnZcjd7qGU5wndArM7Yag,2769
-ipex_llm-2.2.0b20250326.data/scripts/llm-cli.ps1,sha256=3qBtTLs_EjYDnM8YyCpJhzLnGCKTEGssu9UNqfkjVXs,3009
-ipex_llm-2.2.0b20250326.dist-info/METADATA,sha256=fHSU2c7G6TDPCkVikxp0pUvb-oq-QgVvbFVE1Ea17BM,13917
-ipex_llm-2.2.0b20250326.dist-info/WHEEL,sha256=6iYPr8vTHsyDK75jr9X0V3I9wPSVmtwr_8fdATBciGk,98
-ipex_llm-2.2.0b20250326.dist-info/entry_points.txt,sha256=TiUyBB2MRmfF3ko-pyAEzqeBCRnyhu27bNOAsWPp3e8,61
-ipex_llm-2.2.0b20250326.dist-info/top_level.txt,sha256=CGCMHM-SyqUabU4h8RqJ2KTYckQUO3LvIWwmUQ6Qbzw,9
-ipex_llm-2.2.0b20250326.dist-info/RECORD,,
+ipex_llm-2.2.0b20250328.data/scripts/ipex-llm-init.bat,sha256=HPtCYuDYwEatq7dAwOvdfVcHYCpAVdbj75K1qh0vQek,2578
+ipex_llm-2.2.0b20250328.data/scripts/llm-chat.ps1,sha256=6qrs-hGVAV8IKh7Jx8nq_XrnZcjd7qGU5wndArM7Yag,2769
+ipex_llm-2.2.0b20250328.data/scripts/llm-cli.ps1,sha256=3qBtTLs_EjYDnM8YyCpJhzLnGCKTEGssu9UNqfkjVXs,3009
+ipex_llm-2.2.0b20250328.dist-info/METADATA,sha256=94dtiTavz1mtEX8Bd4VuZtum6co-x8W9ZaITI8DqM7w,13917
+ipex_llm-2.2.0b20250328.dist-info/WHEEL,sha256=6iYPr8vTHsyDK75jr9X0V3I9wPSVmtwr_8fdATBciGk,98
+ipex_llm-2.2.0b20250328.dist-info/entry_points.txt,sha256=TiUyBB2MRmfF3ko-pyAEzqeBCRnyhu27bNOAsWPp3e8,61
+ipex_llm-2.2.0b20250328.dist-info/top_level.txt,sha256=CGCMHM-SyqUabU4h8RqJ2KTYckQUO3LvIWwmUQ6Qbzw,9
+ipex_llm-2.2.0b20250328.dist-info/RECORD,,