bigdl-core-cpp 2.7.0b20250629__py3-none-win_amd64.whl → 2.7.0b20250701__py3-none-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bigdl/cpp/convert_hf_to_gguf.py +1987 -558
- bigdl/cpp/convert_hf_to_gguf_update.py +131 -67
- bigdl/cpp/convert_lora_to_gguf.py +3 -3
- bigdl/cpp/gguf-py/gguf/constants.py +546 -16
- bigdl/cpp/gguf-py/gguf/gguf_reader.py +57 -6
- bigdl/cpp/gguf-py/gguf/gguf_writer.py +119 -7
- bigdl/cpp/gguf-py/gguf/lazy.py +10 -0
- bigdl/cpp/gguf-py/gguf/metadata.py +28 -8
- bigdl/cpp/gguf-py/gguf/tensor_mapping.py +461 -48
- bigdl/cpp/gguf-py/gguf/utility.py +195 -0
- bigdl/cpp/gguf-py/gguf/vocab.py +6 -1
- bigdl/cpp/libs/llama_cpp/ggml-base.dll +0 -0
- bigdl/cpp/libs/llama_cpp/ggml-cpu.dll +0 -0
- bigdl/cpp/libs/llama_cpp/ggml-sycl.dll +0 -0
- bigdl/cpp/libs/llama_cpp/ggml.dll +0 -0
- bigdl/cpp/libs/llama_cpp/llama-batched.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-bench.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-cli.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-embedding.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-gemma3-cli.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-gguf.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-llava-cli.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-lookup.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-ls-sycl-device.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-minicpmv-cli.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-perplexity.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-quantize.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-server.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-simple.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-speculative.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama-tokenize.exe +0 -0
- bigdl/cpp/libs/llama_cpp/llama.dll +0 -0
- bigdl/cpp/libs/ollama/ggml-base.dll +0 -0
- bigdl/cpp/libs/ollama/ggml-cpu.dll +0 -0
- bigdl/cpp/libs/ollama/ggml-sycl.dll +0 -0
- bigdl/cpp/libs/ollama/ggml.dll +0 -0
- bigdl/cpp/libs/ollama/llama.dll +0 -0
- bigdl/cpp/libs/ollama/llava_shared.dll +0 -0
- bigdl/cpp/libs/ollama/mtmd_shared.dll +0 -0
- bigdl/cpp/libs/ollama/ollama-lib.exe +0 -0
- bigdl/cpp/libs/ollama/ollama.exe +0 -0
- {bigdl_core_cpp-2.7.0b20250629.data → bigdl_core_cpp-2.7.0b20250701.data}/scripts/init-ollama.bat +1 -5
- {bigdl_core_cpp-2.7.0b20250629.dist-info → bigdl_core_cpp-2.7.0b20250701.dist-info}/METADATA +1 -1
- bigdl_core_cpp-2.7.0b20250701.dist-info/RECORD +56 -0
- bigdl/cpp/libs/llama_cpp/llava_shared.dll +0 -0
- bigdl_core_cpp-2.7.0b20250629.dist-info/RECORD +0 -56
- {bigdl_core_cpp-2.7.0b20250629.data → bigdl_core_cpp-2.7.0b20250701.data}/scripts/init-llama-cpp.bat +0 -0
- {bigdl_core_cpp-2.7.0b20250629.data → bigdl_core_cpp-2.7.0b20250701.data}/scripts/init-llama-cpp.ps1 +0 -0
- {bigdl_core_cpp-2.7.0b20250629.dist-info → bigdl_core_cpp-2.7.0b20250701.dist-info}/WHEEL +0 -0
- {bigdl_core_cpp-2.7.0b20250629.dist-info → bigdl_core_cpp-2.7.0b20250701.dist-info}/top_level.txt +0 -0
bigdl/cpp/gguf-py/gguf/utility.py
CHANGED
@@ -1,7 +1,11 @@
 from __future__ import annotations
 
+from dataclasses import dataclass
 from typing import Literal
 
+import os
+import json
+
 
 def fill_templated_filename(filename: str, output_type: str | None) -> str:
     # Given a file name fill in any type templates e.g. 'some-model-name.{ftype}.gguf'
@@ -67,3 +71,194 @@ def naming_convention(model_name: str | None, base_name: str | None, finetune_st
     kind = f"-{model_type.strip().replace(' ', '-')}" if model_type is not None else ""
 
     return f"{name}{parameters}{finetune}{version}{encoding}{kind}"
+
+
+@dataclass
+class RemoteTensor:
+    dtype: str
+    shape: tuple[int, ...]
+    offset_start: int
+    size: int
+    url: str
+
+    def data(self) -> bytearray:
+        # TODO: handle request errors (maybe with limited retries?)
+        # NOTE: using a bytearray, otherwise PyTorch complains the buffer is not writeable
+        data = bytearray(SafetensorRemote.get_data_by_range(url=self.url, start=self.offset_start, size=self.size))
+        return data
+
+
+class SafetensorRemote:
+    """
+    Uility class to handle remote safetensor files.
+    This class is designed to work with Hugging Face model repositories.
+
+    Example (one model has single safetensor file, the other has multiple):
+        for model_id in ["ngxson/TEST-Tiny-Llama4", "Qwen/Qwen2.5-7B-Instruct"]:
+            tensors = SafetensorRemote.get_list_tensors_hf_model(model_id)
+            print(tensors)
+
+    Example reading tensor data:
+        tensors = SafetensorRemote.get_list_tensors_hf_model(model_id)
+        for name, meta in tensors.items():
+            dtype, shape, offset_start, size, remote_safetensor_url = meta
+            # read the tensor data
+            data = SafetensorRemote.get_data_by_range(remote_safetensor_url, offset_start, size)
+            print(data)
+    """
+
+    BASE_DOMAIN = "https://huggingface.co"
+    ALIGNMENT = 8 # bytes
+
+    @classmethod
+    def get_list_tensors_hf_model(cls, model_id: str) -> dict[str, RemoteTensor]:
+        """
+        Get list of tensors from a Hugging Face model repository.
+
+        Returns a dictionary of tensor names and their metadata.
+        Each tensor is represented as a tuple of (dtype, shape, offset_start, size, remote_safetensor_url)
+        """
+        # case 1: model has only one single model.safetensor file
+        is_single_file = cls.check_file_exist(f"{cls.BASE_DOMAIN}/{model_id}/resolve/main/model.safetensors")
+        if is_single_file:
+            url = f"{cls.BASE_DOMAIN}/{model_id}/resolve/main/model.safetensors"
+            return cls.get_list_tensors(url)
+
+        # case 2: model has multiple files
+        index_url = f"{cls.BASE_DOMAIN}/{model_id}/resolve/main/model.safetensors.index.json"
+        is_multiple_files = cls.check_file_exist(index_url)
+        if is_multiple_files:
+            # read the index file
+            index_data = cls.get_data_by_range(index_url, 0)
+            index_str = index_data.decode('utf-8')
+            index_json = json.loads(index_str)
+            assert index_json.get("weight_map") is not None, "weight_map not found in index file"
+            weight_map = index_json["weight_map"]
+            # get the list of files
+            all_files = list(set(weight_map.values()))
+            all_files.sort() # make sure we load shard files in order
+            # get the list of tensors
+            tensors: dict[str, RemoteTensor] = {}
+            for file in all_files:
+                url = f"{cls.BASE_DOMAIN}/{model_id}/resolve/main/{file}"
+                for key, val in cls.get_list_tensors(url).items():
+                    tensors[key] = val
+            return tensors
+
+        raise ValueError(f"Model {model_id} does not have any safetensor files")
+
+    @classmethod
+    def get_list_tensors(cls, url: str) -> dict[str, RemoteTensor]:
+        """
+        Get list of tensors from a remote safetensor file.
+
+        Returns a dictionary of tensor names and their metadata.
+        Each tensor is represented as a tuple of (dtype, shape, offset_start, size)
+        """
+        metadata, data_start_offset = cls.get_metadata(url)
+        res: dict[str, RemoteTensor] = {}
+
+        for name, meta in metadata.items():
+            if name == "__metadata__":
+                continue
+            if not isinstance(meta, dict):
+                raise ValueError(f"Invalid metadata for tensor '{name}': {meta}")
+            try:
+                dtype = meta["dtype"]
+                shape = meta["shape"]
+                offset_start_relative, offset_end_relative = meta["data_offsets"]
+                size = offset_end_relative - offset_start_relative
+                offset_start = data_start_offset + offset_start_relative
+                res[name] = RemoteTensor(dtype=dtype, shape=tuple(shape), offset_start=offset_start, size=size, url=url)
+            except KeyError as e:
+                raise ValueError(f"Missing key in metadata for tensor '{name}': {e}, meta = {meta}")
+
+        return res
+
+    @classmethod
+    def get_metadata(cls, url: str) -> tuple[dict, int]:
+        """
+        Get JSON metadata from a remote safetensor file.
+
+        Returns tuple of (metadata, data_start_offset)
+        """
+        # Request first 5MB of the file (hopefully enough for metadata)
+        read_size = 5 * 1024 * 1024
+        raw_data = cls.get_data_by_range(url, 0, read_size)
+
+        # Parse header
+        # First 8 bytes contain the metadata length as u64 little-endian
+        if len(raw_data) < 8:
+            raise ValueError("Not enough data to read metadata size")
+        metadata_length = int.from_bytes(raw_data[:8], byteorder='little')
+
+        # Calculate the data start offset
+        data_start_offset = 8 + metadata_length
+        alignment = SafetensorRemote.ALIGNMENT
+        if data_start_offset % alignment != 0:
+            data_start_offset += alignment - (data_start_offset % alignment)
+
+        # Check if we have enough data to read the metadata
+        if len(raw_data) < 8 + metadata_length:
+            raise ValueError(f"Could not read complete metadata. Need {8 + metadata_length} bytes, got {len(raw_data)}")
+
+        # Extract metadata bytes and parse as JSON
+        metadata_bytes = raw_data[8:8 + metadata_length]
+        metadata_str = metadata_bytes.decode('utf-8')
+        try:
+            metadata = json.loads(metadata_str)
+            return metadata, data_start_offset
+        except json.JSONDecodeError as e:
+            raise ValueError(f"Failed to parse safetensor metadata as JSON: {e}")
+
+    @classmethod
+    def get_data_by_range(cls, url: str, start: int, size: int = -1) -> bytes:
+        """
+        Get raw byte data from a remote file by range.
+        If size is not specified, it will read the entire file.
+        """
+        import requests
+        from urllib.parse import urlparse
+
+        parsed_url = urlparse(url)
+        if not parsed_url.scheme or not parsed_url.netloc:
+            raise ValueError(f"Invalid URL: {url}")
+
+        headers = cls._get_request_headers()
+        if size > -1:
+            headers["Range"] = f"bytes={start}-{start + size}"
+        response = requests.get(url, allow_redirects=True, headers=headers)
+        response.raise_for_status()
+
+        # Get raw byte data
+        return response.content[slice(size if size > -1 else None)]
+
+    @classmethod
+    def check_file_exist(cls, url: str) -> bool:
+        """
+        Check if a file exists at the given URL.
+        Returns True if the file exists, False otherwise.
+        """
+        import requests
+        from urllib.parse import urlparse
+
+        parsed_url = urlparse(url)
+        if not parsed_url.scheme or not parsed_url.netloc:
+            raise ValueError(f"Invalid URL: {url}")
+
+        try:
+            headers = cls._get_request_headers()
+            headers["Range"] = "bytes=0-0"
+            response = requests.head(url, allow_redirects=True, headers=headers)
+            # Success (2xx) or redirect (3xx)
+            return 200 <= response.status_code < 400
+        except requests.RequestException:
+            return False
+
+    @classmethod
+    def _get_request_headers(cls) -> dict[str, str]:
+        """Prepare common headers for requests."""
+        headers = {"User-Agent": "convert_hf_to_gguf"}
+        if os.environ.get("HF_TOKEN"):
+            headers["Authorization"] = f"Bearer {os.environ['HF_TOKEN']}"
+        return headers
bigdl/cpp/gguf-py/gguf/vocab.py
CHANGED
@@ -154,7 +154,12 @@ class SpecialVocab:
             return True
         with open(tokenizer_config_file, encoding = 'utf-8') as f:
             tokenizer_config = json.load(f)
-        chat_template = tokenizer_config.get('chat_template')
+        chat_template_alt = None
+        chat_template_file = path / 'chat_template.json'
+        if chat_template_file.is_file():
+            with open(chat_template_file, encoding = 'utf-8') as f:
+                chat_template_alt = json.load(f).get('chat_template')
+        chat_template = tokenizer_config.get('chat_template', chat_template_alt)
         if chat_template is None or isinstance(chat_template, (str, list)):
             self.chat_template = chat_template
         else:
Binary artifacts: the DLL and EXE files under bigdl/cpp/libs/llama_cpp and bigdl/cpp/libs/ollama listed above (ggml-base.dll through ollama.exe) are binary; no textual diff is shown for them.
{bigdl_core_cpp-2.7.0b20250629.data → bigdl_core_cpp-2.7.0b20250701.data}/scripts/init-ollama.bat
RENAMED
@@ -6,11 +6,7 @@ set "cpp_dir=%cpp_dir:~0,-1%"
 set "lib_dir=%cpp_dir%\libs\ollama"
 
 :: Create symlinks for DLLs and EXE
-for %%f in (ollama.exe ollama-lib.exe llama.dll ggml.dll llava_shared.dll ggml-base.dll ggml-cpu.dll ggml-sycl.dll libc++.dll) do (
+for %%f in (ollama.exe ollama-lib.exe llama.dll ggml.dll llava_shared.dll ggml-base.dll ggml-cpu.dll ggml-sycl.dll mtmd_shared.dll libc++.dll) do (
     if exist "%cd%\%%f" del /f "%cd%\%%f"
     mklink "%cd%\%%f" "%lib_dir%\%%f"
 )
-
-:: Create symlink for dist directory
-if exist "%cd%\dist" rmdir /s /q "%cd%\dist"
-mklink /D "%cd%\dist" "%lib_dir%\dist"
bigdl_core_cpp-2.7.0b20250701.dist-info/RECORD
ADDED
@@ -0,0 +1,56 @@
+bigdl/cpp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+bigdl/cpp/convert_hf_to_gguf.py,sha256=6zyOyZqUQKE305aTdq1T8ACQvaNFs-gYpK0seKqm7JA,311817
+bigdl/cpp/convert_hf_to_gguf_update.py,sha256=AY-MFWH2W9kzW53B8Hy9V0ZOeUmlK9NIb7nO5ktqvUM,21608
+bigdl/cpp/convert_llama_ggml_to_gguf.py,sha256=0dKjRhmFzvWV4e-cuLmaeW14JrWUtZwerBmz8mYyMvI,19556
+bigdl/cpp/convert_lora_to_gguf.py,sha256=T54sMiIFmjde8oZyu2X4Xik2TfgI9g6lVtja-0AAl38,19085
+bigdl/cpp/gguf-py/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+bigdl/cpp/gguf-py/gguf/__init__.py,sha256=h5GWs6SMXYR8giWZ7MTZzAc3hYsIJF-HAkdxtgXLOPo,228
+bigdl/cpp/gguf-py/gguf/constants.py,sha256=wGAhVlG2jwJWpc2-52muxvMJwJrabSEI2Pnh7UESkPg,91511
+bigdl/cpp/gguf-py/gguf/gguf.py,sha256=QpLc-xU055W2d7CEFvJp2gLIfGO63bdM24ZndZCH6rw,493
+bigdl/cpp/gguf-py/gguf/gguf_reader.py,sha256=c9MluXgDwda_7hKk3atem3TeI4j6uAjrzM7Ba9RRkVQ,15195
+bigdl/cpp/gguf-py/gguf/gguf_writer.py,sha256=Uyuwsqi6J_ImV9UP2VzjA0ZuNIviqDulrlU5T6hNg5k,45011
+bigdl/cpp/gguf-py/gguf/lazy.py,sha256=DNfI7JuWgZGg93KBifQ_QDlXhtQZJhoH3ILdJUCX3AM,9437
+bigdl/cpp/gguf-py/gguf/metadata.py,sha256=yO_QN_-ofePJjWn_9AQgeQLHQ-iCvh9Hkn-7UuSphag,33943
+bigdl/cpp/gguf-py/gguf/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+bigdl/cpp/gguf-py/gguf/quants.py,sha256=2z6vcK-kBefqZbYNmSEVmdZF_tXHeVb5NC6jCbBdgKc,62040
+bigdl/cpp/gguf-py/gguf/tensor_mapping.py,sha256=plkSVMYW_ZCdeM4V28-AH_UcuWSDCNojAhqDTZkbTtc,56880
+bigdl/cpp/gguf-py/gguf/utility.py,sha256=xGTPr9Cbwrdz-E8Y3szcUts9y5e_HwjKTFLiuEqW5FM,11072
+bigdl/cpp/gguf-py/gguf/vocab.py,sha256=zwKJHLRfaMX3ue7k7N_9BFhctNbp43Wg9wlksLsmtIs,21108
+bigdl/cpp/libs/llama_cpp/ggml-base.dll,sha256=xpqSvmDT0HiNcn5PGwp6jzQIRVpJ592TemL5tBjGwzc,657408
+bigdl/cpp/libs/llama_cpp/ggml-cpu.dll,sha256=Ld6-BhZRFPx9D9m_0WlILzcz5grzbmXaMnLkzkv_DYI,1351680
+bigdl/cpp/libs/llama_cpp/ggml-sycl.dll,sha256=GPROGDyoXo7uGvlqo7_G3ZifW31ezlFGPvh2miV7TBg,7772672
+bigdl/cpp/libs/llama_cpp/ggml.dll,sha256=p0oUNv_FG6qwNT8sUqFum0sGy7pKiREOSqAWmXRwHGw,110592
+bigdl/cpp/libs/llama_cpp/llama-batched.exe,sha256=tWkDPR7zVdv1ELPnEFZce4wuRW1WEIGW2YII4mBnB3w,1802752
+bigdl/cpp/libs/llama_cpp/llama-bench.exe,sha256=6LfsnX-Vt-tuesi2ifNf1WdyqO6-0AbB03gEmVaZA1w,305664
+bigdl/cpp/libs/llama_cpp/llama-cli.exe,sha256=UsxXLJOl2hxLsYiJrQyGwoc6CQUfh5jjeNdkxBWFZAc,1880576
+bigdl/cpp/libs/llama_cpp/llama-embedding.exe,sha256=rPaTbLXWm-zs6R0RCWENpw9gOXin7yj_sXte67aIJgw,1829888
+bigdl/cpp/libs/llama_cpp/llama-gemma3-cli.exe,sha256=ertpLxP7gUOM50Ee7AluBpnm4OrYq7xZwhattXq6Bi8,46080
+bigdl/cpp/libs/llama_cpp/llama-gguf.exe,sha256=4XdgPkfln9uU40hb_HHain6gIL8jlK9ICgUU5mGSh5U,59392
+bigdl/cpp/libs/llama_cpp/llama-llava-cli.exe,sha256=UyCVe42zNsFTeYe0yDvX22QEGrqyRe4YdWFsivLPT98,46080
+bigdl/cpp/libs/llama_cpp/llama-lookup.exe,sha256=xuL_8L8eQmiMlg4Ul-P6ShOO38ayfaxRvcxEmTy-sQ4,1862656
+bigdl/cpp/libs/llama_cpp/llama-ls-sycl-device.exe,sha256=c3vu69RE7bvJZRM5iNHdlnY1yPxk1Uw-KBramkHga9c,10240
+bigdl/cpp/libs/llama_cpp/llama-minicpmv-cli.exe,sha256=ertpLxP7gUOM50Ee7AluBpnm4OrYq7xZwhattXq6Bi8,46080
+bigdl/cpp/libs/llama_cpp/llama-perplexity.exe,sha256=hcYuxT0whG33F4pqMd5jfbZdROHxkIrw5jA82zd13-g,1952768
+bigdl/cpp/libs/llama_cpp/llama-quantize.exe,sha256=M0YhtLoK4JLspUfTru3YG1lZPl07qQgZf_Qdcw-xTbI,126976
+bigdl/cpp/libs/llama_cpp/llama-server.exe,sha256=Qwj_objzcHiJxnmtghKOH2hnxBBK6Cq8hJr67TbkJG0,5030400
+bigdl/cpp/libs/llama_cpp/llama-simple.exe,sha256=mNV0L7mmOXmPo6cInP2mp0_1T_pDk9f9cLmPpJnaqEc,62464
+bigdl/cpp/libs/llama_cpp/llama-speculative.exe,sha256=tZP8K2Q_HkSC2MUHtqwbripHPqAwDqTvxIvrD9s6JXc,1870336
+bigdl/cpp/libs/llama_cpp/llama-tokenize.exe,sha256=xfaE4mbfi9wGbW3NxZGY5rSmjaZ0Exe1ndXHfGMbryM,89088
+bigdl/cpp/libs/llama_cpp/llama.dll,sha256=dpeRYX01oAEIG-bqmxkYTJj05MP7gEXrGge7OrCQDgM,1657344
+bigdl/cpp/libs/ollama/ggml-base.dll,sha256=n6gdpo6eC7pqLXAl0XnyvMUSYwJGgpdNSGOxTrbYfi8,654336
+bigdl/cpp/libs/ollama/ggml-cpu.dll,sha256=dBvCeK0_O7Rxexdjl4D7LVubSJ1liZMxLFV-CdPx7o8,1318400
+bigdl/cpp/libs/ollama/ggml-sycl.dll,sha256=rIHt4fe3-hAIuw0dB-fCwq-My5olA3OV1nfhnqkPQPo,7575040
+bigdl/cpp/libs/ollama/ggml.dll,sha256=ikPkCg1mjlt6to16juB3Kjb2HpysotVkqPoxZup1dlc,110592
+bigdl/cpp/libs/ollama/libc++.dll,sha256=U0TVK2WfFQIJPP6Bz9SeJmgskm2iqZWJorx_DGdfKIw,1561600
+bigdl/cpp/libs/ollama/llama.dll,sha256=gCa6QVA0xsYNbupSV1JZ6CdP4Uob61GmZv1EDZbstVE,1586176
+bigdl/cpp/libs/ollama/llava_shared.dll,sha256=pEek9O2r4wVSvYO3PoMHEX-MAaehGP0UwKYa8DCfecA,382976
+bigdl/cpp/libs/ollama/mtmd_shared.dll,sha256=riOn9ACyzDuHwp2SGClsAe9atEQEGNptjAR_knLm_QA,418816
+bigdl/cpp/libs/ollama/ollama-lib.exe,sha256=TSIp7YzgM0Skl5El36ne8FSS72vD-tPQj3Arbr7-V_Q,81615360
+bigdl/cpp/libs/ollama/ollama.exe,sha256=UEPW2_nQ8RCBHe0z1DoofqHSNedj-FNZaO7MQJ_mjC8,207872
+bigdl_core_cpp-2.7.0b20250701.data/scripts/init-llama-cpp.bat,sha256=u1MA0OIgqtL3KdXsWrY3Slirn6jmmLbX0mnuh6zXnrg,761
+bigdl_core_cpp-2.7.0b20250701.data/scripts/init-llama-cpp.ps1,sha256=SJdTXrObj0WofdpTA0Uj2gxw2E9pnuB9X_5tcws4gbc,511
+bigdl_core_cpp-2.7.0b20250701.data/scripts/init-ollama.bat,sha256=0QYgTisXqvdz_OfgvWxmRnw280WFFc-JI5fXOBEDNcI,512
+bigdl_core_cpp-2.7.0b20250701.dist-info/METADATA,sha256=bCDx5VkFZ0J3IwMYzXddCJwSwk8VBDebTDUfeOFhySA,750
+bigdl_core_cpp-2.7.0b20250701.dist-info/WHEEL,sha256=ZjXRCNaQ9YSypEK2TE0LRB0sy2OVXSszb4Sx1XjM99k,97
+bigdl_core_cpp-2.7.0b20250701.dist-info/top_level.txt,sha256=iGuLfZARD_qANcIMfy0tbbrC3EtCg6BSiH8icc3dLWs,6
+bigdl_core_cpp-2.7.0b20250701.dist-info/RECORD,,
bigdl_core_cpp-2.7.0b20250629.dist-info/RECORD
REMOVED
@@ -1,56 +0,0 @@
-bigdl/cpp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-bigdl/cpp/convert_hf_to_gguf.py,sha256=QhF3QN-FR-OS6IazG8oWWYKESaaBFuEe4aOTR_n8570,240765
-bigdl/cpp/convert_hf_to_gguf_update.py,sha256=1BFKEkj0BMDB90lUB5p_-iR9rSVcjgYPGWmEw28avB8,17721
-bigdl/cpp/convert_llama_ggml_to_gguf.py,sha256=0dKjRhmFzvWV4e-cuLmaeW14JrWUtZwerBmz8mYyMvI,19556
-bigdl/cpp/convert_lora_to_gguf.py,sha256=sHrcutdgzrDR5H7ZiLPOLoMnkJKg8uZ7OcFhAZhPrLo,19073
-bigdl/cpp/gguf-py/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-bigdl/cpp/gguf-py/gguf/__init__.py,sha256=h5GWs6SMXYR8giWZ7MTZzAc3hYsIJF-HAkdxtgXLOPo,228
-bigdl/cpp/gguf-py/gguf/constants.py,sha256=6fXPUmyGM71MXjOBX-WJdn9yzlOEMxP7iQIbN4NS8V0,70390
-bigdl/cpp/gguf-py/gguf/gguf.py,sha256=QpLc-xU055W2d7CEFvJp2gLIfGO63bdM24ZndZCH6rw,493
-bigdl/cpp/gguf-py/gguf/gguf_reader.py,sha256=PUrx08ZwaUOz1gLw5JQ459Hi7JIeCdlHgZX7wXcTqbI,12702
-bigdl/cpp/gguf-py/gguf/gguf_writer.py,sha256=pFgnwrsDupKxI3SHNQbfiuz7dUopCOqj3ERBPuZMkMo,39955
-bigdl/cpp/gguf-py/gguf/lazy.py,sha256=YIYxGBWD-oKXU4HOvpHs9eiEn81HUgeSmt1mmHJlbdM,8814
-bigdl/cpp/gguf-py/gguf/metadata.py,sha256=oBTb4DXi_h1L_gYm8x_JRVuEPR4GHlVHuM-iN0OxWoY,33244
-bigdl/cpp/gguf-py/gguf/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-bigdl/cpp/gguf-py/gguf/quants.py,sha256=2z6vcK-kBefqZbYNmSEVmdZF_tXHeVb5NC6jCbBdgKc,62040
-bigdl/cpp/gguf-py/gguf/tensor_mapping.py,sha256=w1JZkRIKHj2tKYADLDUvCOsQfVf8y7Y0ZwqmtmrtLBA,39549
-bigdl/cpp/gguf-py/gguf/utility.py,sha256=Mx4mqamXtatL15LCH04mG-7SNBwPzP2T75ts0uBnEuI,3002
-bigdl/cpp/gguf-py/gguf/vocab.py,sha256=QTdt4HZrn7onHqm2tpHTaEq2sL3yG07zbHqQR9iVfu8,20815
-bigdl/cpp/libs/llama_cpp/ggml-base.dll,sha256=pwI5JfPbOLczxN0Lv7uWBmP1_tsV4M5f0gaihcJThRY,649728
-bigdl/cpp/libs/llama_cpp/ggml-cpu.dll,sha256=jXzGKKswMWmgiCgyGALLlGYMgUoyy_G_5ZntH_fKwKQ,1114624
-bigdl/cpp/libs/llama_cpp/ggml-sycl.dll,sha256=Ov1U7rso_DRyv9KQ2gCBD-sK61T89DCj5REIhusaBKE,7268352
-bigdl/cpp/libs/llama_cpp/ggml.dll,sha256=rZ0iy-jBi-LB5X04KuThLvUFETt9_yeYS5lPqQrqf5g,118272
-bigdl/cpp/libs/llama_cpp/llama-batched.exe,sha256=L3pydeWAGYWIOwqS-VXRG8O4wfwYalSpfFq6-a4h5_k,1741824
-bigdl/cpp/libs/llama_cpp/llama-bench.exe,sha256=U78XevDQbdg47OnSto8KCYL2oZghmJUQzpBqsI3cjl4,279552
-bigdl/cpp/libs/llama_cpp/llama-cli.exe,sha256=q-1AMMeIeM_IIj23VSMKr-xlACMLRG2x7etsjQ4_tZo,1814528
-bigdl/cpp/libs/llama_cpp/llama-embedding.exe,sha256=KusPFb276EVcvpI4xCgX6MUfNqbZkGdmRGVebYQ0uRQ,1766912
-bigdl/cpp/libs/llama_cpp/llama-gemma3-cli.exe,sha256=yB3dVtctBs2ibPbB7SsVGcxo92sVjr-uhCEh999Z0Ak,2035712
-bigdl/cpp/libs/llama_cpp/llama-gguf.exe,sha256=wG7pEifavfhJtxEXf_4hjJ7oUIybGEqTEurC5N4J7cw,59392
-bigdl/cpp/libs/llama_cpp/llama-llava-cli.exe,sha256=hVn1XJCsyG262_x9Yr2TCUXmO_ic1Er_m0of5bcGcec,2019840
-bigdl/cpp/libs/llama_cpp/llama-lookup.exe,sha256=02xzV2hQ2VRBehrw3rhlbPtrV-fMi8v08g15_0-LEOc,1802752
-bigdl/cpp/libs/llama_cpp/llama-ls-sycl-device.exe,sha256=HS9TzpAXcrwPeYk6sfMtT_hhqwy6BNsc0nTZ49uhzHQ,10240
-bigdl/cpp/libs/llama_cpp/llama-minicpmv-cli.exe,sha256=0eL8zTNFQL8Krbf6RXiBI-9hwbNlcpiW5xrzHUR_4NA,2018304
-bigdl/cpp/libs/llama_cpp/llama-perplexity.exe,sha256=db2foeH-B6UIo0pJgYfxRRT2-rWL3xWWU5ddyxwQSsk,1888256
-bigdl/cpp/libs/llama_cpp/llama-quantize.exe,sha256=hH8eWCqNt9JiUPGENvfhSN8IJ-t0WUQlRK12CokA__E,122880
-bigdl/cpp/libs/llama_cpp/llama-server.exe,sha256=6JCYTpKzxXegv3QO8Idc5kC4V9WCC7AKAu8Gq6FRCMw,4132864
-bigdl/cpp/libs/llama_cpp/llama-simple.exe,sha256=ktwdmHz8YllxgdN3VZbyK0dLQoKlxnNeHXuT62mXbOo,62464
-bigdl/cpp/libs/llama_cpp/llama-speculative.exe,sha256=peQsBo8VtwBTr3AjZwhf460wGwkg0azzhoyKeZLmy8I,1805312
-bigdl/cpp/libs/llama_cpp/llama-tokenize.exe,sha256=-oqY9v8Oyi9OD4xCVrVnsh7TufPPi4oxIkQiWV2zKKw,89088
-bigdl/cpp/libs/llama_cpp/llama.dll,sha256=rMzuKMKHfya2QkW4sbXxtf20vA8gYAYRWp3P3i-1uS8,1481216
-bigdl/cpp/libs/llama_cpp/llava_shared.dll,sha256=NErBuOlRrKDOJeh7OuuG4TlH6REL1XD8QTOvMe-kRLI,380416
-bigdl/cpp/libs/ollama/ggml-base.dll,sha256=B1ICZHPkZ5zvaOYeFF-37OxNX_K2_UaNKJeM6JUM0KU,644608
-bigdl/cpp/libs/ollama/ggml-cpu.dll,sha256=Y0qCiJ9-vU1HYkeg8M0wwjr26RxudnXsm2qtbBr3xHE,1116672
-bigdl/cpp/libs/ollama/ggml-sycl.dll,sha256=r5IfXs7kkebd9HF3dfRyfsF_X6kvWUrzX9sz3TTm4as,7205376
-bigdl/cpp/libs/ollama/ggml.dll,sha256=a1LzAwxCuanOVhWfqxNjM-kVZELNch4Ow9MH3jZwMvs,120832
-bigdl/cpp/libs/ollama/libc++.dll,sha256=U0TVK2WfFQIJPP6Bz9SeJmgskm2iqZWJorx_DGdfKIw,1561600
-bigdl/cpp/libs/ollama/llama.dll,sha256=F-aRrmEXyfaohGa3cMoPum_ndB3RSK3VUxWBjL8nq0g,1522176
-bigdl/cpp/libs/ollama/llava_shared.dll,sha256=tJxru-ZqEEdXlnvc6hgQb4SShptzgMv9s5FxwkMsX-g,377856
-bigdl/cpp/libs/ollama/ollama-lib.exe,sha256=xdREn7Lby5WhbJwaxvlDtcZVutUyVaNhqhtPqmYA7J8,79792640
-bigdl/cpp/libs/ollama/ollama.exe,sha256=o0ZucHEC0FZvFkcZEdALm-iOJTOfLZTYi7uAuhWeiHE,207872
-bigdl_core_cpp-2.7.0b20250629.data/scripts/init-llama-cpp.bat,sha256=u1MA0OIgqtL3KdXsWrY3Slirn6jmmLbX0mnuh6zXnrg,761
-bigdl_core_cpp-2.7.0b20250629.data/scripts/init-llama-cpp.ps1,sha256=SJdTXrObj0WofdpTA0Uj2gxw2E9pnuB9X_5tcws4gbc,511
-bigdl_core_cpp-2.7.0b20250629.data/scripts/init-ollama.bat,sha256=9eqJaqG87mOHxT0a3d712bVVTQbC2-SRhrV8EZ4eKXQ,622
-bigdl_core_cpp-2.7.0b20250629.dist-info/METADATA,sha256=FDKotjv3DvpFPksOXX_B1fO9dqY4KY43uDdcNvDK94Q,750
-bigdl_core_cpp-2.7.0b20250629.dist-info/WHEEL,sha256=ZjXRCNaQ9YSypEK2TE0LRB0sy2OVXSszb4Sx1XjM99k,97
-bigdl_core_cpp-2.7.0b20250629.dist-info/top_level.txt,sha256=iGuLfZARD_qANcIMfy0tbbrC3EtCg6BSiH8icc3dLWs,6
-bigdl_core_cpp-2.7.0b20250629.dist-info/RECORD,,
{bigdl_core_cpp-2.7.0b20250629.data → bigdl_core_cpp-2.7.0b20250701.data}/scripts/init-llama-cpp.bat
RENAMED
File without changes

{bigdl_core_cpp-2.7.0b20250629.data → bigdl_core_cpp-2.7.0b20250701.data}/scripts/init-llama-cpp.ps1
RENAMED
File without changes

{bigdl_core_cpp-2.7.0b20250629.dist-info → bigdl_core_cpp-2.7.0b20250701.dist-info}/WHEEL
RENAMED
File without changes

{bigdl_core_cpp-2.7.0b20250629.dist-info → bigdl_core_cpp-2.7.0b20250701.dist-info}/top_level.txt
RENAMED
File without changes