bigdl-core-cpp 2.5.0rc1__py3-none-win_amd64.whl → 2.6.0b2__py3-none-win_amd64.whl
This diff represents the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- bigdl/cpp/{convert-hf-to-gguf.py → convert_hf_to_gguf.py} +413 -67
- bigdl/cpp/convert_hf_to_gguf_update.py +354 -0
- bigdl/cpp/convert_llama_ggml_to_gguf.py +454 -0
- bigdl/cpp/convert_lora_to_gguf.py +393 -0
- bigdl/cpp/gguf-py/gguf/__init__.py +1 -1
- bigdl/cpp/gguf-py/gguf/constants.py +71 -2
- bigdl/cpp/gguf-py/gguf/gguf_writer.py +16 -1
- bigdl/cpp/gguf-py/gguf/lazy.py +4 -1
- bigdl/cpp/gguf-py/gguf/metadata.py +70 -63
- bigdl/cpp/gguf-py/gguf/quants.py +1129 -64
- bigdl/cpp/gguf-py/gguf/tensor_mapping.py +23 -15
- bigdl/cpp/gguf-py/gguf/utility.py +1 -1
- bigdl/cpp/gguf-py/gguf/vocab.py +301 -1
- bigdl/cpp/libs/common.lib +0 -0
- bigdl/cpp/libs/dist/windows-amd64/lib/ollama/runners/cpu/ggml.dll +0 -0
- bigdl/cpp/libs/dist/windows-amd64/lib/ollama/runners/cpu/llama.dll +0 -0
- bigdl/cpp/libs/dist/windows-amd64/lib/ollama/runners/cpu/ollama_llama_server.exe +0 -0
- bigdl/cpp/libs/dist/windows-amd64/lib/ollama/runners/cpu_avx/ggml.dll +0 -0
- bigdl/cpp/libs/dist/windows-amd64/lib/ollama/runners/cpu_avx/llama.dll +0 -0
- bigdl/cpp/libs/dist/windows-amd64/lib/ollama/runners/cpu_avx/ollama_llama_server.exe +0 -0
- bigdl/cpp/libs/dist/windows-amd64/lib/ollama/runners/cpu_avx2/ggml.dll +0 -0
- bigdl/cpp/libs/dist/windows-amd64/lib/ollama/runners/cpu_avx2/llama.dll +0 -0
- bigdl/cpp/libs/dist/windows-amd64/lib/ollama/runners/cpu_avx2/ollama_llama_server.exe +0 -0
- bigdl/cpp/libs/ggml.dll +0 -0
- bigdl/cpp/libs/llama-batched.exe +0 -0
- bigdl/cpp/libs/llama-bench.exe +0 -0
- bigdl/cpp/libs/llama-cli.exe +0 -0
- bigdl/cpp/libs/llama-embedding.exe +0 -0
- bigdl/cpp/libs/llama-gguf.exe +0 -0
- bigdl/cpp/libs/llama-llava-cli.exe +0 -0
- bigdl/cpp/libs/llama-lookup.exe +0 -0
- bigdl/cpp/libs/{ls-sycl-device.exe → llama-ls-sycl-device.exe} +0 -0
- bigdl/cpp/libs/llama-minicpmv-cli.exe +0 -0
- bigdl/cpp/libs/llama-perplexity.exe +0 -0
- bigdl/cpp/libs/llama-quantize.exe +0 -0
- bigdl/cpp/libs/llama-server.exe +0 -0
- bigdl/cpp/libs/llama-simple.exe +0 -0
- bigdl/cpp/libs/llama-speculative.exe +0 -0
- bigdl/cpp/libs/llama-tokenize.exe +0 -0
- bigdl/cpp/libs/llama.dll +0 -0
- bigdl/cpp/libs/llava_shared.dll +0 -0
- bigdl/cpp/libs/ollama.exe +0 -0
- {bigdl_core_cpp-2.5.0rc1.data → bigdl_core_cpp-2.6.0b2.data}/scripts/init-llama-cpp.bat +7 -2
- {bigdl_core_cpp-2.5.0rc1.data → bigdl_core_cpp-2.6.0b2.data}/scripts/init-ollama.bat +6 -0
- {bigdl_core_cpp-2.5.0rc1.dist-info → bigdl_core_cpp-2.6.0b2.dist-info}/METADATA +3 -3
- bigdl_core_cpp-2.6.0b2.dist-info/RECORD +54 -0
- {bigdl_core_cpp-2.5.0rc1.dist-info → bigdl_core_cpp-2.6.0b2.dist-info}/WHEEL +1 -1
- bigdl/cpp/convert.py +0 -1714
- bigdl/cpp/libs/baby-llama.exe +0 -0
- bigdl/cpp/libs/batched-bench.exe +0 -0
- bigdl/cpp/libs/batched.exe +0 -0
- bigdl/cpp/libs/beam-search.exe +0 -0
- bigdl/cpp/libs/benchmark.exe +0 -0
- bigdl/cpp/libs/convert-llama2c-to-ggml.exe +0 -0
- bigdl/cpp/libs/dist/windows-amd64/ollama_runners/cpu/ollama_llama_server.exe +0 -0
- bigdl/cpp/libs/dist/windows-amd64/ollama_runners/cpu_avx/ollama_llama_server.exe +0 -0
- bigdl/cpp/libs/dist/windows-amd64/ollama_runners/cpu_avx2/ollama_llama_server.exe +0 -0
- bigdl/cpp/libs/embedding.exe +0 -0
- bigdl/cpp/libs/export-lora.exe +0 -0
- bigdl/cpp/libs/finetune.exe +0 -0
- bigdl/cpp/libs/ggml_shared.dll +0 -0
- bigdl/cpp/libs/gguf.exe +0 -0
- bigdl/cpp/libs/gritlm.exe +0 -0
- bigdl/cpp/libs/imatrix.exe +0 -0
- bigdl/cpp/libs/infill.exe +0 -0
- bigdl/cpp/libs/llava-cli.exe +0 -0
- bigdl/cpp/libs/lookahead.exe +0 -0
- bigdl/cpp/libs/lookup.exe +0 -0
- bigdl/cpp/libs/main.exe +0 -0
- bigdl/cpp/libs/parallel.exe +0 -0
- bigdl/cpp/libs/passkey.exe +0 -0
- bigdl/cpp/libs/perplexity.exe +0 -0
- bigdl/cpp/libs/q8dot.exe +0 -0
- bigdl/cpp/libs/quantize-stats.exe +0 -0
- bigdl/cpp/libs/quantize.exe +0 -0
- bigdl/cpp/libs/save-load-state.exe +0 -0
- bigdl/cpp/libs/server.exe +0 -0
- bigdl/cpp/libs/simple.exe +0 -0
- bigdl/cpp/libs/speculative.exe +0 -0
- bigdl/cpp/libs/tokenize.exe +0 -0
- bigdl/cpp/libs/train-text-from-scratch.exe +0 -0
- bigdl/cpp/libs/vdot.exe +0 -0
- bigdl_core_cpp-2.5.0rc1.dist-info/RECORD +0 -63
- {bigdl_core_cpp-2.5.0rc1.data → bigdl_core_cpp-2.6.0b2.data}/scripts/init-llama-cpp.ps1 +0 -0
- {bigdl_core_cpp-2.5.0rc1.dist-info → bigdl_core_cpp-2.6.0b2.dist-info}/top_level.txt +0 -0
bigdl/cpp/convert_hf_to_gguf_update.py
@@ -0,0 +1,354 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# This script downloads the tokenizer models of the specified models from Huggingface and
+# generates the get_vocab_base_pre() function for convert_hf_to_gguf.py
+#
+# This is necessary in order to analyze the type of pre-tokenizer used by the model and
+# provide the necessary information to llama.cpp via the GGUF header in order to implement
+# the same pre-tokenizer.
+#
+# ref: https://github.com/ggerganov/llama.cpp/pull/6920
+#
+# Instructions:
+#
+# - Add a new model to the "models" list
+# - Run the script with your huggingface token:
+#
+#       python3 convert_hf_to_gguf_update.py <huggingface_token>
+#
+# - Copy-paste the generated get_vocab_base_pre() function into convert_hf_to_gguf.py
+# - Update llama.cpp with the new pre-tokenizer if necessary
+#
+# TODO: generate tokenizer tests for llama.cpp
+#
+
+import logging
+import os
+import pathlib
+import re
+
+import requests
+import sys
+import json
+
+from hashlib import sha256
+from enum import IntEnum, auto
+from transformers import AutoTokenizer
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger("convert_hf_to_gguf_update")
+sess = requests.Session()
+
+
+class TOKENIZER_TYPE(IntEnum):
+    SPM = auto()
+    BPE = auto()
+    WPM = auto()
+    UGM = auto()
+
+
+# TODO: this string has to exercise as much pre-tokenizer functionality as possible
+#       will be updated with time - contributions welcome
+CHK_TXT = '\n \n\n \n\n\n \t \t\t \t\n  \n   \n    \n     \n🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'
+
+if len(sys.argv) == 2:
+    token = sys.argv[1]
+    if not token.startswith("hf_"):
+        logger.info("Huggingface token seems invalid")
+        logger.info("Usage: python convert_hf_to_gguf_update.py <huggingface_token>")
+        sys.exit(1)
+else:
+    logger.info("Usage: python convert_hf_to_gguf_update.py <huggingface_token>")
+    sys.exit(1)
+
+# TODO: add models here, base models preferred
+models = [
+    {"name": "llama-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-llama/Llama-2-7b-hf", },
+    {"name": "llama-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Meta-Llama-3-8B", },
+    {"name": "phi-3", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct", },
+    {"name": "deepseek-llm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-llm-7b-base", },
+    {"name": "deepseek-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base", },
+    {"name": "falcon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tiiuae/falcon-7b", },
+    {"name": "bert-bge", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/BAAI/bge-small-en-v1.5", },
+    {"name": "mpt", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mosaicml/mpt-7b", },
+    {"name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", },
+    {"name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", },
+    {"name": "stablelm2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b", },
+    {"name": "refact", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", },
+    {"name": "command-r", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/CohereForAI/c4ai-command-r-v01", },
+    {"name": "qwen2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Qwen/Qwen1.5-7B", },
+    {"name": "olmo", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/allenai/OLMo-1.7-7B-hf", },
+    {"name": "dbrx", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/databricks/dbrx-base", },
+    {"name": "jina-v2-en", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-en", },  # WPM!
+    {"name": "jina-v2-es", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", },
+    {"name": "jina-v2-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", },
+    {"name": "smaug-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct", },
+    {"name": "poro-chat", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Poro-34B-chat", },
+    {"name": "jina-v2-code", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-code", },
+    {"name": "viking", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Viking-7B", },  # Also used for Viking 13B and 33B
+    {"name": "gemma", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/google/gemma-2b", },
+    {"name": "gemma-2", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/google/gemma-2-9b", },
+    {"name": "jais", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/core42/jais-13b", },
+    {"name": "t5", "tokt": TOKENIZER_TYPE.UGM, "repo": "https://huggingface.co/google-t5/t5-small", },
+    {"name": "codeshell", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/WisdomShell/CodeShell-7B", },
+    {"name": "tekken", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistralai/Mistral-Nemo-Base-2407", },
+    {"name": "smollm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/HuggingFaceTB/SmolLM-135M", },
+    {"name": "bloom", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigscience/bloom", },
+    {"name": "gpt3-finnish", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/TurkuNLP/gpt3-finnish-small", },
+    {"name": "exaone", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct", },
+]
+
+
+def download_file_with_auth(url, token, save_path):
+    headers = {"Authorization": f"Bearer {token}"}
+    response = sess.get(url, headers=headers)
+    response.raise_for_status()
+    os.makedirs(os.path.dirname(save_path), exist_ok=True)
+    with open(save_path, 'wb') as downloaded_file:
+        downloaded_file.write(response.content)
+    logger.info(f"File {save_path} downloaded successfully")
+
+
+def download_model(model):
+    name = model["name"]
+    repo = model["repo"]
+    tokt = model["tokt"]
+
+    os.makedirs(f"models/tokenizers/{name}", exist_ok=True)
+
+    files = ["config.json", "tokenizer.json", "tokenizer_config.json"]
+
+    if tokt == TOKENIZER_TYPE.SPM:
+        files.append("tokenizer.model")
+
+    if tokt == TOKENIZER_TYPE.UGM:
+        files.append("spiece.model")
+
+    for file in files:
+        save_path = f"models/tokenizers/{name}/{file}"
+        if os.path.isfile(save_path):
+            logger.info(f"{name}: File {save_path} already exists - skipping")
+            continue
+        download_file_with_auth(f"{repo}/resolve/main/{file}", token, save_path)
+
+
+for model in models:
+    try:
+        download_model(model)
+    except Exception as e:
+        logger.error(f"Failed to download model {model['name']}. Error: {e}")
+
+
+# generate the source code for the convert_hf_to_gguf.py:get_vocab_base_pre() function:
+
+src_ifs = ""
+for model in models:
+    name = model["name"]
+    tokt = model["tokt"]
+
+    if tokt == TOKENIZER_TYPE.SPM or tokt == TOKENIZER_TYPE.UGM:
+        continue
+
+    # Skip if the tokenizer folder does not exist or there are other download issues previously
+    if not os.path.exists(f"models/tokenizers/{name}"):
+        logger.warning(f"Directory for tokenizer {name} not found. Skipping...")
+        continue
+
+    # create the tokenizer
+    try:
+        if name == "t5":
+            tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}", use_fast=False)
+        else:
+            tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
+    except OSError as e:
+        logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. Error: {e}")
+        continue  # Skip to the next model if the tokenizer can't be loaded
+
+    chktok = tokenizer.encode(CHK_TXT)
+    chkhsh = sha256(str(chktok).encode()).hexdigest()
+
+    logger.info(f"model: {name}")
+    logger.info(f"tokt: {tokt}")
+    logger.info(f"repo: {model['repo']}")
+    logger.info(f"chktok: {chktok}")
+    logger.info(f"chkhsh: {chkhsh}")
+
+    # print the "pre_tokenizer" content from the tokenizer.json
+    with open(f"models/tokenizers/{name}/tokenizer.json", "r", encoding="utf-8") as f:
+        cfg = json.load(f)
+        normalizer = cfg["normalizer"]
+        logger.info("normalizer: " + json.dumps(normalizer, indent=4))
+        pre_tokenizer = cfg["pre_tokenizer"]
+        logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4))
+        if "ignore_merges" in cfg["model"]:
+            logger.info("ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4))
+
+    logger.info("")
+
+    src_ifs += f"        if chkhsh == \"{chkhsh}\":\n"
+    src_ifs += f"            # ref: {model['repo']}\n"
+    src_ifs += f"            res = \"{name}\"\n"
+
+src_func = f"""
+    def get_vocab_base_pre(self, tokenizer) -> str:
+        # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that
+        # is specific for the BPE pre-tokenizer used by the model
+        # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can
+        # use in llama.cpp to implement the same pre-tokenizer
+
+        chktxt = {repr(CHK_TXT)}
+
+        chktok = tokenizer.encode(chktxt)
+        chkhsh = sha256(str(chktok).encode()).hexdigest()
+
+        logger.debug(f"chktok: {{chktok}}")
+        logger.debug(f"chkhsh: {{chkhsh}}")
+
+        res = None
+
+        # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script
+        #       or pull the latest version of the model from Huggingface
+        #       don't edit the hashes manually!
+{src_ifs}
+        if res is None:
+            logger.warning("\\n")
+            logger.warning("**************************************************************************************")
+            logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
+            logger.warning("**          There are 2 possible reasons for this:")
+            logger.warning("**          - the model has not been added to convert_hf_to_gguf_update.py yet")
+            logger.warning("**          - the pre-tokenization config has changed upstream")
+            logger.warning("**          Check your model files and convert_hf_to_gguf_update.py and update them accordingly.")
+            logger.warning("** ref:     https://github.com/ggerganov/llama.cpp/pull/6920")
+            logger.warning("**")
+            logger.warning(f"** chkhsh:  {{chkhsh}}")
+            logger.warning("**************************************************************************************")
+            logger.warning("\\n")
+            raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")
+
+        logger.debug(f"tokenizer.ggml.pre: {{repr(res)}}")
+        logger.debug(f"chkhsh: {{chkhsh}}")
+
+        return res
+"""
+
+convert_py_pth = pathlib.Path("convert_hf_to_gguf.py")
+convert_py = convert_py_pth.read_text(encoding="utf-8")
+convert_py = re.sub(
+    r"(# Marker: Start get_vocab_base_pre)(.+?)( +# Marker: End get_vocab_base_pre)",
+    lambda m: m.group(1) + src_func + m.group(3),
+    convert_py,
+    flags=re.DOTALL | re.MULTILINE,
+)
+
+convert_py_pth.write_text(convert_py, encoding="utf-8")
+
+logger.info("+++ convert_hf_to_gguf.py was updated")
+
+# generate tests for each tokenizer model
+
+tests = [
+    "ied 4 ½ months",
+    "Führer",
+    "",
+    " ",
+    "  ",
+    "   ",
+    "\t",
+    "\n",
+    "\n\n",
+    "\n\n\n",
+    "\t\n",
+    "Hello world",
+    " Hello world",
+    "Hello World",
+    " Hello World",
+    " Hello World!",
+    "Hello, world!",
+    " Hello, world!",
+    " this is 🦙.cpp",
+    "w048 7tuijk dsdfhu",
+    "нещо на Български",
+    "កាន់តែពិសេសអាចខលចេញ",
+    "🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)",
+    "Hello",
+    " Hello",
+    "  Hello",
+    "   Hello",
+    "    Hello",
+    "    Hello\n    Hello",
+    " (",
+    "\n =",
+    "' era",
+    "Hello, y'all! How are you 😁 ?我想在apple工作1314151天~",
+    "!!!!!!",
+    "3",
+    "33",
+    "333",
+    "3333",
+    "33333",
+    "333333",
+    "3333333",
+    "33333333",
+    "333333333",
+    "Cửa Việt",  # llama-bpe fails on this
+    " discards",
+    CHK_TXT,
+]
+
+# write the tests to ./models/ggml-vocab-{name}.gguf.inp
+# the format is:
+#
+#   test0
+#   __ggml_vocab_test__
+#   test1
+#   __ggml_vocab_test__
+#   ...
+#
+
+# with each model, encode all tests and write the results in ./models/ggml-vocab-{name}.gguf.out
+# for each test, write the resulting tokens on a separate line
+
+for model in models:
+    name = model["name"]
+    tokt = model["tokt"]
+
+    # Skip if the tokenizer folder does not exist or there are other download issues previously
+    if not os.path.exists(f"models/tokenizers/{name}"):
+        logger.warning(f"Directory for tokenizer {name} not found. Skipping...")
+        continue
+
+    # create the tokenizer
+    try:
+        if name == "t5":
+            tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}", use_fast=False)
+        else:
+            tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
+    except OSError as e:
+        logger.error(f"Failed to load tokenizer for model {name}. Error: {e}")
+        continue  # Skip this model and continue with the next one in the loop
+
+    with open(f"models/ggml-vocab-{name}.gguf.inp", "w", encoding="utf-8") as f:
+        for text in tests:
+            f.write(f"{text}")
+            f.write("\n__ggml_vocab_test__\n")
+
+    with open(f"models/ggml-vocab-{name}.gguf.out", "w") as f:
+        for text in tests:
+            res = tokenizer.encode(text, add_special_tokens=False)
+            for r in res:
+                f.write(f" {r}")
+            f.write("\n")
+
+    logger.info(f"Tests for {name} written in ./models/ggml-vocab-{name}.gguf.*")
+
+# generate commands for creating vocab files
+
+logger.info("\nRun the following commands to generate the vocab files for testing:\n")
+
+for model in models:
+    name = model["name"]
+
+    print(f"python3 convert_hf_to_gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only")  # noqa: NP100
+
+logger.info("\n")