bigdl-core-cpp 2.1.0b20230202__py3-none-win_amd64.whl → 2.1.0b20240820.post1__py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. bigdl/cpp/convert-hf-to-gguf.py +1169 -311
  2. bigdl/cpp/gguf-py/gguf/__init__.py +2 -0
  3. bigdl/cpp/gguf-py/gguf/constants.py +463 -167
  4. bigdl/cpp/gguf-py/gguf/gguf.py +1 -1
  5. bigdl/cpp/gguf-py/gguf/gguf_reader.py +29 -8
  6. bigdl/cpp/gguf-py/gguf/gguf_writer.py +475 -156
  7. bigdl/cpp/gguf-py/gguf/lazy.py +24 -49
  8. bigdl/cpp/gguf-py/gguf/metadata.py +503 -0
  9. bigdl/cpp/gguf-py/gguf/tensor_mapping.py +209 -23
  10. bigdl/cpp/gguf-py/gguf/utility.py +69 -0
  11. bigdl/cpp/libs/baby-llama.exe +0 -0
  12. bigdl/cpp/libs/batched-bench.exe +0 -0
  13. bigdl/cpp/libs/batched.exe +0 -0
  14. bigdl/cpp/libs/beam-search.exe +0 -0
  15. bigdl/cpp/libs/benchmark.exe +0 -0
  16. bigdl/cpp/libs/common.lib +0 -0
  17. bigdl/cpp/libs/convert-llama2c-to-ggml.exe +0 -0
  18. bigdl/cpp/libs/dist/windows-amd64/ollama_runners/cpu/ollama_llama_server.exe +0 -0
  19. bigdl/cpp/libs/dist/windows-amd64/ollama_runners/cpu_avx/ollama_llama_server.exe +0 -0
  20. bigdl/cpp/libs/dist/windows-amd64/ollama_runners/cpu_avx2/ollama_llama_server.exe +0 -0
  21. bigdl/cpp/libs/embedding.exe +0 -0
  22. bigdl/cpp/libs/export-lora.exe +0 -0
  23. bigdl/cpp/libs/finetune.exe +0 -0
  24. bigdl/cpp/libs/ggml_shared.dll +0 -0
  25. bigdl/cpp/libs/gguf.exe +0 -0
  26. bigdl/cpp/libs/gritlm.exe +0 -0
  27. bigdl/cpp/libs/imatrix.exe +0 -0
  28. bigdl/cpp/libs/infill.exe +0 -0
  29. bigdl/cpp/libs/llama-bench.exe +0 -0
  30. bigdl/cpp/libs/llama.dll +0 -0
  31. bigdl/cpp/libs/llava-cli.exe +0 -0
  32. bigdl/cpp/libs/llava_shared.dll +0 -0
  33. bigdl/cpp/libs/lookahead.exe +0 -0
  34. bigdl/cpp/libs/lookup.exe +0 -0
  35. bigdl/cpp/libs/ls-sycl-device.exe +0 -0
  36. bigdl/cpp/libs/main.exe +0 -0
  37. bigdl/cpp/libs/ollama.exe +0 -0
  38. bigdl/cpp/libs/parallel.exe +0 -0
  39. bigdl/cpp/libs/passkey.exe +0 -0
  40. bigdl/cpp/libs/perplexity.exe +0 -0
  41. bigdl/cpp/libs/q8dot.exe +0 -0
  42. bigdl/cpp/libs/quantize-stats.exe +0 -0
  43. bigdl/cpp/libs/quantize.exe +0 -0
  44. bigdl/cpp/libs/save-load-state.exe +0 -0
  45. bigdl/cpp/libs/server.exe +0 -0
  46. bigdl/cpp/libs/simple.exe +0 -0
  47. bigdl/cpp/libs/speculative.exe +0 -0
  48. bigdl/cpp/libs/tokenize.exe +0 -0
  49. bigdl/cpp/libs/train-text-from-scratch.exe +0 -0
  50. bigdl/cpp/libs/vdot.exe +0 -0
  51. {bigdl_core_cpp-2.1.0b20230202.dist-info → bigdl_core_cpp-2.1.0b20240820.post1.dist-info}/METADATA +8 -8
  52. bigdl_core_cpp-2.1.0b20240820.post1.dist-info/RECORD +63 -0
  53. {bigdl_core_cpp-2.1.0b20230202.dist-info → bigdl_core_cpp-2.1.0b20240820.post1.dist-info}/WHEEL +1 -1
  54. bigdl_core_cpp-2.1.0b20230202.dist-info/RECORD +0 -61
  55. {bigdl_core_cpp-2.1.0b20230202.data → bigdl_core_cpp-2.1.0b20240820.post1.data}/scripts/init-llama-cpp.bat +0 -0
  56. {bigdl_core_cpp-2.1.0b20230202.data → bigdl_core_cpp-2.1.0b20240820.post1.data}/scripts/init-llama-cpp.ps1 +0 -0
  57. {bigdl_core_cpp-2.1.0b20230202.data → bigdl_core_cpp-2.1.0b20240820.post1.data}/scripts/init-ollama.bat +0 -0
  58. {bigdl_core_cpp-2.1.0b20230202.dist-info → bigdl_core_cpp-2.1.0b20240820.post1.dist-info}/top_level.txt +0 -0
--- a/bigdl/cpp/gguf-py/gguf/lazy.py
+++ b/bigdl/cpp/gguf-py/gguf/lazy.py
@@ -3,10 +3,8 @@ from abc import ABC, ABCMeta, abstractmethod
 
 import logging
 from typing import Any, Callable
-from collections import deque
 
 import numpy as np
-from numpy._typing import _Shape
 from numpy.typing import DTypeLike
 
 
@@ -16,16 +14,16 @@ logger = logging.getLogger(__name__)
 class LazyMeta(ABCMeta):
 
     def __new__(cls, name: str, bases: tuple[type, ...], namespace: dict[str, Any], **kwargs):
-        def __getattr__(self, __name: str) -> Any:
-            meta_attr = getattr(self._meta, __name)
+        def __getattr__(self, name: str) -> Any:
+            meta_attr = getattr(self._meta, name)
             if callable(meta_attr):
                 return type(self)._wrap_fn(
-                    (lambda s, *args, **kwargs: getattr(s, __name)(*args, **kwargs)),
+                    (lambda s, *args, **kwargs: getattr(s, name)(*args, **kwargs)),
                     use_self=self,
                 )
             elif isinstance(meta_attr, self._tensor_type):
                 # e.g. self.T with torch.Tensor should still be wrapped
-                return type(self)._wrap_fn(lambda s: getattr(s, __name))(self)
+                return type(self)._wrap_fn(lambda s: getattr(s, name))(self)
             else:
                 # no need to wrap non-tensor properties,
                 # and they likely don't depend on the actual contents of the tensor
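The `__getattr__` rename above (typeshed-style `__name` to plain `name`) is cosmetic, but the hook itself is what makes the laziness transparent: every attribute looked up on a lazy tensor is resolved against its `_meta` stand-in, and callables and tensor-valued properties are re-wrapped so the result stays lazy. A minimal sketch of the effect, assuming the bundled gguf-py directory is on `sys.path` as `gguf`:

```python
import numpy as np
from gguf.lazy import LazyNumpyTensor  # import path assumed

lazy = LazyNumpyTensor.from_eager(np.arange(6, dtype=np.float32).reshape(2, 3))

# Attribute access routes through the generated __getattr__: the transpose is
# resolved against the zero-filled _meta array, so no real data is touched.
transposed = lazy.T
print(type(transposed).__name__)  # LazyNumpyTensor
print(transposed._meta.shape)     # (3, 2)
```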
@@ -75,20 +73,18 @@ class LazyBase(ABC, metaclass=LazyMeta):
     _tensor_type: type
     _meta: Any
     _data: Any | None
-    _lazy: deque[LazyBase]  # shared within a graph, to avoid deep recursion when making eager
     _args: tuple
-    _func: Callable[[tuple], Any] | None
+    _kwargs: dict[str, Any]
+    _func: Callable[[Any], Any] | None
 
-    def __init__(self, *, meta: Any, data: Any | None = None, lazy: deque[LazyBase] | None = None, args: tuple = (), func: Callable[[tuple], Any] | None = None):
+    def __init__(self, *, meta: Any, data: Any | None = None, args: tuple = (), kwargs: dict[str, Any] | None = None, func: Callable[[Any], Any] | None = None):
         super().__init__()
         self._meta = meta
         self._data = data
-        self._lazy = lazy if lazy is not None else deque()
         self._args = args
+        self._kwargs = kwargs if kwargs is not None else {}
         self._func = func
         assert self._func is not None or self._data is not None
-        if self._data is None:
-            self._lazy.append(self)
 
     def __init_subclass__(cls) -> None:
         if "_tensor_type" not in cls.__dict__:
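With the shared `_lazy` deque gone, a node is now fully described by its `_meta` stand-in, the producing `_func`, and the `_args`/`_kwargs` it will receive; the evaluation graph is implicit in whatever lazy tensors sit inside `_args`. A hand-built node illustrating the new invariant (`func` or `data` must be set), purely as a sketch since `_wrap_fn` normally constructs these:

```python
# Hypothetical hand-constructed node; _wrap_fn is the normal entry point.
node = LazyNumpyTensor(
    meta=LazyNumpyTensor.meta_with_dtype_and_shape(np.float16, (2, 3)),
    args=(np.ones((2, 3), dtype=np.float32),),
    kwargs={"dtype": np.float16},
    func=lambda a, **kw: a.astype(**kw),
)
print(LazyNumpyTensor.to_eager(node).dtype)  # float16: func only runs here
```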
@@ -118,6 +114,7 @@ class LazyBase(ABC, metaclass=LazyMeta):
             args = ((use_self,) if use_self is not None else ()) + args
 
             meta_args = LazyBase._recurse_apply(args, lambda t: t._meta)
+            # TODO: maybe handle tensors in kwargs too
 
             if isinstance(meta_noop, bool) and not meta_noop:
                 try:
@@ -141,21 +138,7 @@ class LazyBase(ABC, metaclass=LazyMeta):
                     res = cls.meta_with_dtype_and_shape(meta_noop, res.shape)
 
             if isinstance(res, cls._tensor_type):
-                def collect_replace(t: LazyBase):
-                    if collect_replace.shared_lazy is None:
-                        collect_replace.shared_lazy = t._lazy
-                    else:
-                        collect_replace.shared_lazy.extend(t._lazy)
-                        t._lazy = collect_replace.shared_lazy
-
-                # emulating a static variable
-                collect_replace.shared_lazy = None
-
-                LazyBase._recurse_apply(args, collect_replace)
-
-                shared_lazy = collect_replace.shared_lazy
-
-                return cls(meta=cls.eager_to_meta(res), lazy=shared_lazy, args=args, func=lambda a: fn(*a, **kwargs))
+                return cls(meta=cls.eager_to_meta(res), args=args, kwargs=kwargs, func=fn)
             else:
                 del res  # not needed
                 # non-tensor return likely relies on the contents of the args
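This simplification is the heart of the patch: instead of collecting every argument's deque and rebuilding `fn` into `lambda a: fn(*a, **kwargs)`, `_wrap_fn` now stores `fn`, `args`, and `kwargs` verbatim and lets `to_eager` splat them back. A sketch of wrapping a plain NumPy ufunc with the (internal) `_wrap_fn`:

```python
# _wrap_fn probes fn on the meta tensors to learn the result dtype/shape,
# then returns a callable that records fn + args + kwargs in a new node.
lazy_add = LazyNumpyTensor._wrap_fn(np.add)
a = LazyNumpyTensor.from_eager(np.ones(4, dtype=np.float32))
b = LazyNumpyTensor.from_eager(np.full(4, 2.0, dtype=np.float32))
c = lazy_add(a, b)                  # graph node; nothing computed yet
print(LazyNumpyTensor.to_eager(c))  # [3. 3. 3. 3.]
```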
@@ -167,25 +150,18 @@ class LazyBase(ABC, metaclass=LazyMeta):
     @classmethod
     def to_eager(cls, t: Any) -> Any:
         def simple_to_eager(_t: LazyBase) -> Any:
-            def already_eager_to_eager(_t: LazyBase) -> Any:
-                assert _t._data is not None
+            if _t._data is not None:
                 return _t._data
 
-            while _t._data is None:
-                lt = _t._lazy.popleft()
-                if lt._data is not None:
-                    # Lazy tensor did not belong in the lazy queue.
-                    # Weirdly only happens with Bloom models...
-                    # likely because tensors aren't unique in the queue.
-                    # The final output is still the same as in eager mode,
-                    # so it's safe to ignore this.
-                    continue
-                assert lt._func is not None
-                lt._args = cls._recurse_apply(lt._args, already_eager_to_eager)
-                lt._data = lt._func(lt._args)
-                # sanity check
-                assert lt._data.dtype == lt._meta.dtype
-                assert lt._data.shape == lt._meta.shape
+            # NOTE: there's a recursion limit in Python (usually 1000)
+
+            assert _t._func is not None
+            _t._args = cls._recurse_apply(_t._args, simple_to_eager)
+            _t._data = _t._func(*_t._args, **_t._kwargs)
+            # sanity check
+            assert _t._data is not None
+            assert _t._data.dtype == _t._meta.dtype
+            assert _t._data.shape == _t._meta.shape
 
             return _t._data
 
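Evaluation is now a direct recursion over `_args` instead of draining the old shared queue, which is what the new `NOTE` warns about: a long enough chain of pending operations could exceed `sys.getrecursionlimit()` (1000 by default), a case the queue-based version handled iteratively. Conversion graphs are shallow in practice; a quick sketch of a chain resolving fine:

```python
t = LazyNumpyTensor.from_eager(np.zeros(8, dtype=np.float32))
for _ in range(50):           # 50 pending nodes, well under the limit
    t = t.astype(np.float32)
print(LazyNumpyTensor.to_eager(t).shape)  # (8,) -- resolved recursively
```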
@@ -204,7 +180,7 @@ class LazyBase(ABC, metaclass=LazyMeta):
     @classmethod
     def from_eager(cls, t: Any) -> Any:
         if type(t) is cls:
-            # already eager
+            # already lazy
             return t
         elif isinstance(t, cls._tensor_type):
             return cls(meta=cls.eager_to_meta(t), data=t)
@@ -216,7 +192,7 @@ class LazyNumpyTensor(LazyBase):
     _tensor_type = np.ndarray
 
     @classmethod
-    def meta_with_dtype_and_shape(cls, dtype: DTypeLike, shape: _Shape) -> np.ndarray[Any, Any]:
+    def meta_with_dtype_and_shape(cls, dtype: DTypeLike, shape: tuple[int, ...]) -> np.ndarray[Any, Any]:
         # The initial idea was to use np.nan as the fill value,
         # but non-float types like np.int16 can't use that.
         # So zero it is.
@@ -226,11 +202,10 @@ class LazyNumpyTensor(LazyBase):
     def astype(self, dtype, *args, **kwargs):
         meta = type(self).meta_with_dtype_and_shape(dtype, self._meta.shape)
         full_args = (self, dtype,) + args
-        # very important to pass the shared _lazy deque, or else there's an infinite loop somewhere.
-        return type(self)(meta=meta, args=full_args, lazy=self._lazy, func=(lambda a: a[0].astype(*a[1:], **kwargs)))
+        return type(self)(meta=meta, args=full_args, kwargs=kwargs, func=(lambda a, *args, **kwargs: a.astype(*args, **kwargs)))
 
     def tofile(self, *args, **kwargs):
         eager = LazyNumpyTensor.to_eager(self)
         return eager.tofile(*args, **kwargs)
 
-    # TODO: __array_function__
+    # TODO: __array_function__
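`astype` shows the per-operation pattern after the rewrite: keyword arguments travel through the new `kwargs` slot and the stored lambda splats them back, while `tofile` remains one of the few eager sinks that forces evaluation. A small end-to-end sketch (the temp path is illustrative):

```python
import os
import tempfile

t = LazyNumpyTensor.from_eager(np.arange(4, dtype=np.float32))
half = t.astype(np.float16)                 # recorded, not executed

path = os.path.join(tempfile.gettempdir(), "lazy_demo.bin")  # hypothetical
half.tofile(path)                           # to_eager() happens here
print(np.fromfile(path, dtype=np.float16))  # [0. 1. 2. 3.]
os.unlink(path)
```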
--- /dev/null
+++ b/bigdl/cpp/gguf-py/gguf/metadata.py
@@ -0,0 +1,503 @@
+from __future__ import annotations
+
+import re
+import json
+import yaml
+import logging
+from pathlib import Path
+from typing import Any, Literal, Optional
+from dataclasses import dataclass
+
+from .constants import Keys
+
+import gguf
+
+logger = logging.getLogger("metadata")
+
+
+@dataclass
+class Metadata:
+    # Authorship Metadata to be written to GGUF KV Store
+    name: Optional[str] = None
+    author: Optional[str] = None
+    version: Optional[str] = None
+    organization: Optional[str] = None
+    finetune: Optional[str] = None
+    basename: Optional[str] = None
+    description: Optional[str] = None
+    quantized_by: Optional[str] = None
+    size_label: Optional[str] = None
+    url: Optional[str] = None
+    doi: Optional[str] = None
+    uuid: Optional[str] = None
+    repo_url: Optional[str] = None
+    source_url: Optional[str] = None
+    source_doi: Optional[str] = None
+    source_uuid: Optional[str] = None
+    source_repo_url: Optional[str] = None
+    license: Optional[str] = None
+    license_name: Optional[str] = None
+    license_link: Optional[str] = None
+    base_models: Optional[list[dict]] = None
+    tags: Optional[list[str]] = None
+    languages: Optional[list[str]] = None
+    datasets: Optional[list[str]] = None
+
+    @staticmethod
+    def load(metadata_override_path: Optional[Path] = None, model_path: Optional[Path] = None, model_name: Optional[str] = None, total_params: int = 0) -> Metadata:
+        # This grabs as many contextual authorship metadata as possible from the model repository
+        # making any conversion as required to match the gguf kv store metadata format
+        # as well as giving users the ability to override any authorship metadata that may be incorrect
+
+        # Create a new Metadata instance
+        metadata = Metadata()
+
+        model_card = Metadata.load_model_card(model_path)
+        hf_params = Metadata.load_hf_parameters(model_path)
+        # TODO: load adapter_config.json when possible, it usually contains the base model of the LoRA adapter
+
+        # heuristics
+        metadata = Metadata.apply_metadata_heuristic(metadata, model_card, hf_params, model_path, total_params)
+
+        # Metadata Override File Provided
+        # This is based on LLM_KV_NAMES mapping in llama.cpp
+        metadata_override = Metadata.load_metadata_override(metadata_override_path)
+
+        metadata.name = metadata_override.get(Keys.General.NAME, metadata.name)
+        metadata.author = metadata_override.get(Keys.General.AUTHOR, metadata.author)
+        metadata.version = metadata_override.get(Keys.General.VERSION, metadata.version)
+        metadata.organization = metadata_override.get(Keys.General.ORGANIZATION, metadata.organization)
+
+        metadata.finetune = metadata_override.get(Keys.General.FINETUNE, metadata.finetune)
+        metadata.basename = metadata_override.get(Keys.General.BASENAME, metadata.basename)
+
+        metadata.description = metadata_override.get(Keys.General.DESCRIPTION, metadata.description)
+        metadata.quantized_by = metadata_override.get(Keys.General.QUANTIZED_BY, metadata.quantized_by)
+
+        metadata.size_label = metadata_override.get(Keys.General.SIZE_LABEL, metadata.size_label)
+        metadata.license_name = metadata_override.get(Keys.General.LICENSE_NAME, metadata.license_name)
+        metadata.license_link = metadata_override.get(Keys.General.LICENSE_LINK, metadata.license_link)
+
+        metadata.url = metadata_override.get(Keys.General.URL, metadata.url)
+        metadata.doi = metadata_override.get(Keys.General.DOI, metadata.doi)
+        metadata.uuid = metadata_override.get(Keys.General.UUID, metadata.uuid)
+        metadata.repo_url = metadata_override.get(Keys.General.REPO_URL, metadata.repo_url)
+
+        metadata.source_url = metadata_override.get(Keys.General.SOURCE_URL, metadata.source_url)
+        metadata.source_doi = metadata_override.get(Keys.General.SOURCE_DOI, metadata.source_doi)
+        metadata.source_uuid = metadata_override.get(Keys.General.SOURCE_UUID, metadata.source_uuid)
+        metadata.source_repo_url = metadata_override.get(Keys.General.SOURCE_REPO_URL, metadata.source_repo_url)
+
+        # Base Models is received here as an array of models
+        metadata.base_models = metadata_override.get("general.base_models", metadata.base_models)
+
+        metadata.tags = metadata_override.get(Keys.General.TAGS, metadata.tags)
+        metadata.languages = metadata_override.get(Keys.General.LANGUAGES, metadata.languages)
+        metadata.datasets = metadata_override.get(Keys.General.DATASETS, metadata.datasets)
+
+        # Direct Metadata Override (via direct cli argument)
+        if model_name is not None:
+            metadata.name = model_name
+
+        return metadata
+
+    @staticmethod
+    def load_metadata_override(metadata_override_path: Optional[Path] = None) -> dict[str, Any]:
+        if metadata_override_path is None or not metadata_override_path.is_file():
+            return {}
+
+        with open(metadata_override_path, "r", encoding="utf-8") as f:
+            return json.load(f)
+
+    @staticmethod
+    def load_model_card(model_path: Optional[Path] = None) -> dict[str, Any]:
+        if model_path is None or not model_path.is_dir():
+            return {}
+
+        model_card_path = model_path / "README.md"
+
+        if not model_card_path.is_file():
+            return {}
+
+        # The model card metadata is assumed to always be in YAML
+        # ref: https://github.com/huggingface/transformers/blob/a5c642fe7a1f25d3bdcd76991443ba6ff7ee34b2/src/transformers/modelcard.py#L468-L473
+        with open(model_card_path, "r", encoding="utf-8") as f:
+            if f.readline() == "---\n":
+                raw = f.read().partition("---\n")[0]
+                data = yaml.safe_load(raw)
+                if isinstance(data, dict):
+                    return data
+                else:
+                    logger.error(f"while reading YAML model card frontmatter, data is {type(data)} instead of dict")
+                    return {}
+            else:
+                return {}
+
+    @staticmethod
+    def load_hf_parameters(model_path: Optional[Path] = None) -> dict[str, Any]:
+        if model_path is None or not model_path.is_dir():
+            return {}
+
+        config_path = model_path / "config.json"
+
+        if not config_path.is_file():
+            return {}
+
+        with open(config_path, "r", encoding="utf-8") as f:
+            return json.load(f)
+
+    @staticmethod
+    def id_to_title(string):
+        # Convert capitalization into title form unless acronym or version number
+        return ' '.join([w.title() if w.islower() and not re.match(r'^(v\d+(?:\.\d+)*|\d.*)$', w) else w for w in string.strip().replace('-', ' ').split()])
+
+    @staticmethod
+    def get_model_id_components(model_id: Optional[str] = None, total_params: int = 0) -> tuple[str | None, str | None, str | None, str | None, str | None, str | None]:
+        # Huggingface often store model id as '<org>/<model name>'
+        # so let's parse it and apply some heuristics if possible for model name components
+
+        if model_id is None:
+            # model ID missing
+            return None, None, None, None, None, None
+
+        if ' ' in model_id:
+            # model ID is actually a normal human sentence
+            # which means its most likely a normal model name only
+            # not part of the hugging face naming standard, but whatever
+            return model_id, None, None, None, None, None
+
+        if '/' in model_id:
+            # model ID (huggingface style)
+            org_component, model_full_name_component = model_id.split('/', 1)
+        else:
+            # model ID but missing org components
+            org_component, model_full_name_component = None, model_id
+
+        # Check if we erroneously matched against './' or '../' etc...
+        if org_component is not None and org_component[0] == '.':
+            org_component = None
+
+        name_parts: list[str] = model_full_name_component.split('-')
+
+        # Remove empty parts
+        for i in reversed(range(len(name_parts))):
+            if len(name_parts[i]) == 0:
+                del name_parts[i]
+
+        name_types: list[
+            set[Literal["basename", "size_label", "finetune", "version", "type"]]
+        ] = [set() for _ in name_parts]
+
+        # Annotate the name
+        for i, part in enumerate(name_parts):
+            # Version
+            if re.fullmatch(r'(v|iter)?\d+([.]\d+)*', part, re.IGNORECASE):
+                name_types[i].add("version")
+            # Quant type (should not be there for base models, but still annotated)
+            elif re.fullmatch(r'i?q\d(_\w)*|b?fp?(16|32)', part, re.IGNORECASE):
+                name_types[i].add("type")
+                name_parts[i] = part.upper()
+            # Model size
+            elif i > 0 and re.fullmatch(r'(([A]|\d+[x])?\d+([._]\d+)?[KMBT][\d]?|small|mini|medium|large|x?xl)', part, re.IGNORECASE):
+                part = part.replace("_", ".")
+                # Handle weird bloom-7b1 notation
+                if part[-1].isdecimal():
+                    part = part[:-2] + "." + part[-1] + part[-2]
+                # Normalize the size suffixes
+                if len(part) > 1 and part[-2].isdecimal():
+                    if part[-1] in "kmbt":
+                        part = part[:-1] + part[-1].upper()
+                if total_params != 0:
+                    try:
+                        label_params = float(part[:-1]) * pow(1000, " KMBT".find(part[-1]))
+                        # Only use it as a size label if it's close or bigger than the model size
+                        # Note that LoRA adapters don't necessarily include all layers,
+                        # so this is why bigger label sizes are accepted.
+                        # Do not use the size label when it's smaller than 1/8 of the model size
+                        if (total_params < 0 and label_params < abs(total_params) // 8) or (
+                            # Check both directions when the current model isn't a LoRA adapter
+                            total_params > 0 and abs(label_params - total_params) > 7 * total_params // 8
+                        ):
+                            # Likely a context length
+                            name_types[i].add("finetune")
+                            # Lowercase the size when it's a context length
+                            part = part[:-1] + part[-1].lower()
+                    except ValueError:
+                        # Failed to convert the size label to float, use it anyway
+                        pass
+                if len(name_types[i]) == 0:
+                    name_types[i].add("size_label")
+                name_parts[i] = part
+            # Some easy to recognize finetune names
+            elif i > 0 and re.fullmatch(r'chat|instruct|vision|lora', part, re.IGNORECASE):
+                if total_params < 0 and part.lower() == "lora":
+                    # ignore redundant "lora" in the finetune part when the output is a lora adapter
+                    name_types[i].add("type")
+                else:
+                    name_types[i].add("finetune")
+
+        # Ignore word-based size labels when there is at least a number-based one present
+        # TODO: should word-based size labels always be removed instead?
+        if any(c.isdecimal() for n, t in zip(name_parts, name_types) if "size_label" in t for c in n):
+            for n, t in zip(name_parts, name_types):
+                if "size_label" in t:
+                    if all(c.isalpha() for c in n):
+                        t.remove("size_label")
+
+        at_start = True
+        # Find the basename through the annotated name
+        for part, t in zip(name_parts, name_types):
+            if at_start and ((len(t) == 0 and part[0].isalpha()) or "version" in t):
+                t.add("basename")
+            else:
+                if at_start:
+                    at_start = False
+                if len(t) == 0:
+                    t.add("finetune")
+
+        # Remove the basename annotation from trailing version
+        for part, t in zip(reversed(name_parts), reversed(name_types)):
+            if "basename" in t and len(t) > 1:
+                t.remove("basename")
+            else:
+                break
+
+        basename = "-".join(n for n, t in zip(name_parts, name_types) if "basename" in t) or None
+        # Deduplicate size labels using order-preserving 'dict' ('set' seems to sort the keys)
+        size_label = "-".join(dict.fromkeys(s for s, t in zip(name_parts, name_types) if "size_label" in t).keys()) or None
+        finetune = "-".join(f for f, t in zip(name_parts, name_types) if "finetune" in t) or None
+        # TODO: should the basename version always be excluded?
+        # NOTE: multiple finetune versions are joined together
+        version = "-".join(v for v, t, in zip(name_parts, name_types) if "version" in t and "basename" not in t) or None
+
+        if size_label is None and finetune is None and version is None:
+            # Too ambiguous, output nothing
+            basename = None
+
+        return model_full_name_component, org_component, basename, finetune, version, size_label
+
+    @staticmethod
+    def apply_metadata_heuristic(metadata: Metadata, model_card: Optional[dict] = None, hf_params: Optional[dict] = None, model_path: Optional[Path] = None, total_params: int = 0) -> Metadata:
+        # Reference Model Card Metadata: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1
+
+        # Model Card Heuristics
+        ########################
+        if model_card is not None:
+
+            if "model_name" in model_card and metadata.name is None:
+                # Not part of huggingface model card standard but notice some model creator using it
+                # such as TheBloke in 'TheBloke/Mistral-7B-Instruct-v0.2-GGUF'
+                metadata.name = model_card.get("model_name")
+
+            if "model_creator" in model_card and metadata.author is None:
+                # Not part of huggingface model card standard but notice some model creator using it
+                # such as TheBloke in 'TheBloke/Mistral-7B-Instruct-v0.2-GGUF'
+                metadata.author = model_card.get("model_creator")
+
+            if "model_type" in model_card and metadata.basename is None:
+                # Not part of huggingface model card standard but notice some model creator using it
+                # such as TheBloke in 'TheBloke/Mistral-7B-Instruct-v0.2-GGUF'
+                metadata.basename = model_card.get("model_type")
+
+            if "base_model" in model_card:
+                # This represents the parent models that this is based on
+                # Example: stabilityai/stable-diffusion-xl-base-1.0. Can also be a list (for merges)
+                # Example of merges: https://huggingface.co/EmbeddedLLM/Mistral-7B-Merge-14-v0.1/blob/main/README.md
+                metadata_base_models = []
+                base_model_value = model_card.get("base_model", None)
+
+                if base_model_value is not None:
+                    if isinstance(base_model_value, str):
+                        metadata_base_models.append(base_model_value)
+                    elif isinstance(base_model_value, list):
+                        metadata_base_models.extend(base_model_value)
+
+                if metadata.base_models is None:
+                    metadata.base_models = []
+
+                for model_id in metadata_base_models:
+                    # NOTE: model size of base model is assumed to be similar to the size of the current model
+                    model_full_name_component, org_component, basename, finetune, version, size_label = Metadata.get_model_id_components(model_id, total_params)
+                    base_model = {}
+                    if model_full_name_component is not None:
+                        base_model["name"] = Metadata.id_to_title(model_full_name_component)
+                    if org_component is not None:
+                        base_model["organization"] = Metadata.id_to_title(org_component)
+                    if version is not None:
+                        base_model["version"] = version
+                    if org_component is not None and model_full_name_component is not None:
+                        base_model["repo_url"] = f"https://huggingface.co/{org_component}/{model_full_name_component}"
+                    metadata.base_models.append(base_model)
+
+            if "license" in model_card and metadata.license is None:
+                metadata.license = model_card.get("license")
+
+            if "license_name" in model_card and metadata.license_name is None:
+                metadata.license_name = model_card.get("license_name")
+
+            if "license_link" in model_card and metadata.license_link is None:
+                metadata.license_link = model_card.get("license_link")
+
+            tags_value = model_card.get("tags", None)
+            if tags_value is not None:
+
+                if metadata.tags is None:
+                    metadata.tags = []
+
+                if isinstance(tags_value, str):
+                    metadata.tags.append(tags_value)
+                elif isinstance(tags_value, list):
+                    metadata.tags.extend(tags_value)
+
+            pipeline_tags_value = model_card.get("pipeline_tag", None)
+            if pipeline_tags_value is not None:
+
+                if metadata.tags is None:
+                    metadata.tags = []
+
+                if isinstance(pipeline_tags_value, str):
+                    metadata.tags.append(pipeline_tags_value)
+                elif isinstance(pipeline_tags_value, list):
+                    metadata.tags.extend(pipeline_tags_value)
+
+            language_value = model_card.get("languages", model_card.get("language", None))
+            if language_value is not None:
+
+                if metadata.languages is None:
+                    metadata.languages = []
+
+                if isinstance(language_value, str):
+                    metadata.languages.append(language_value)
+                elif isinstance(language_value, list):
+                    metadata.languages.extend(language_value)
+
+            dataset_value = model_card.get("datasets", model_card.get("dataset", None))
+            if dataset_value is not None:
+
+                if metadata.datasets is None:
+                    metadata.datasets = []
+
+                if isinstance(dataset_value, str):
+                    metadata.datasets.append(dataset_value)
+                elif isinstance(dataset_value, list):
+                    metadata.datasets.extend(dataset_value)
+
+        # Hugging Face Parameter Heuristics
+        ####################################
+
+        if hf_params is not None:
+
+            hf_name_or_path = hf_params.get("_name_or_path")
+            if hf_name_or_path is not None and hf_name_or_path.count('/') <= 1:
+                # Use _name_or_path only if its actually a model name and not some computer path
+                # e.g. 'meta-llama/Llama-2-7b-hf'
+                model_id = hf_name_or_path
+                model_full_name_component, org_component, basename, finetune, version, size_label = Metadata.get_model_id_components(model_id, total_params)
+                if metadata.name is None and model_full_name_component is not None:
+                    metadata.name = Metadata.id_to_title(model_full_name_component)
+                if metadata.organization is None and org_component is not None:
+                    metadata.organization = Metadata.id_to_title(org_component)
+                if metadata.basename is None and basename is not None:
+                    metadata.basename = basename
+                if metadata.finetune is None and finetune is not None:
+                    metadata.finetune = finetune
+                if metadata.version is None and version is not None:
+                    metadata.version = version
+                if metadata.size_label is None and size_label is not None:
+                    metadata.size_label = size_label
+
+        # Directory Folder Name Fallback Heuristics
+        ############################################
+        if model_path is not None:
+            model_id = model_path.name
+            model_full_name_component, org_component, basename, finetune, version, size_label = Metadata.get_model_id_components(model_id, total_params)
+            if metadata.name is None and model_full_name_component is not None:
+                metadata.name = Metadata.id_to_title(model_full_name_component)
+            if metadata.organization is None and org_component is not None:
+                metadata.organization = Metadata.id_to_title(org_component)
+            if metadata.basename is None and basename is not None:
+                metadata.basename = basename
+            if metadata.finetune is None and finetune is not None:
+                metadata.finetune = finetune
+            if metadata.version is None and version is not None:
+                metadata.version = version
+            if metadata.size_label is None and size_label is not None:
+                metadata.size_label = size_label
+
+        return metadata
+
+    def set_gguf_meta_model(self, gguf_writer: gguf.GGUFWriter):
+        assert self.name is not None
+        gguf_writer.add_name(self.name)
+
+        if self.author is not None:
+            gguf_writer.add_author(self.author)
+        if self.version is not None:
+            gguf_writer.add_version(self.version)
+        if self.organization is not None:
+            gguf_writer.add_organization(self.organization)
+
+        if self.finetune is not None:
+            gguf_writer.add_finetune(self.finetune)
+        if self.basename is not None:
+            gguf_writer.add_basename(self.basename)
+
+        if self.description is not None:
+            gguf_writer.add_description(self.description)
+        if self.quantized_by is not None:
+            gguf_writer.add_quantized_by(self.quantized_by)
+
+        if self.size_label is not None:
+            gguf_writer.add_size_label(self.size_label)
+
+        if self.license is not None:
+            gguf_writer.add_license(self.license)
+        if self.license_name is not None:
+            gguf_writer.add_license_name(self.license_name)
+        if self.license_link is not None:
+            gguf_writer.add_license_link(self.license_link)
+
+        if self.url is not None:
+            gguf_writer.add_url(self.url)
+        if self.doi is not None:
+            gguf_writer.add_doi(self.doi)
+        if self.uuid is not None:
+            gguf_writer.add_uuid(self.uuid)
+        if self.repo_url is not None:
+            gguf_writer.add_repo_url(self.repo_url)
+
+        if self.source_url is not None:
+            gguf_writer.add_source_url(self.source_url)
+        if self.source_doi is not None:
+            gguf_writer.add_source_doi(self.source_doi)
+        if self.source_uuid is not None:
+            gguf_writer.add_source_uuid(self.source_uuid)
+        if self.source_repo_url is not None:
+            gguf_writer.add_source_repo_url(self.source_repo_url)
+
+        if self.base_models is not None:
+            gguf_writer.add_base_model_count(len(self.base_models))
+            for key, base_model_entry in enumerate(self.base_models):
+                if "name" in base_model_entry:
+                    gguf_writer.add_base_model_name(key, base_model_entry["name"])
+                if "author" in base_model_entry:
+                    gguf_writer.add_base_model_author(key, base_model_entry["author"])
+                if "version" in base_model_entry:
+                    gguf_writer.add_base_model_version(key, base_model_entry["version"])
+                if "organization" in base_model_entry:
+                    gguf_writer.add_base_model_organization(key, base_model_entry["organization"])
+                if "url" in base_model_entry:
+                    gguf_writer.add_base_model_url(key, base_model_entry["url"])
+                if "doi" in base_model_entry:
+                    gguf_writer.add_base_model_doi(key, base_model_entry["doi"])
+                if "uuid" in base_model_entry:
+                    gguf_writer.add_base_model_uuid(key, base_model_entry["uuid"])
+                if "repo_url" in base_model_entry:
+                    gguf_writer.add_base_model_repo_url(key, base_model_entry["repo_url"])
+
+        if self.tags is not None:
+            gguf_writer.add_tags(self.tags)
+        if self.languages is not None:
+            gguf_writer.add_languages(self.languages)
+        if self.datasets is not None:
+            gguf_writer.add_datasets(self.datasets)
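Taken together, the new `metadata.py` gives the converter a three-layer precedence: filesystem and model-card heuristics first, then a JSON override file keyed by llama.cpp's `general.*` names, then a directly supplied model name. A usage sketch with hypothetical local paths, again assuming the bundled gguf-py is importable:

```python
from pathlib import Path
from gguf.metadata import Metadata  # import path assumed

# Pure name parsing: HF-style id -> (name, org, basename, finetune, version, size)
parts = Metadata.get_model_id_components("mistralai/Mistral-7B-Instruct-v0.2")
print(parts)  # ('Mistral-7B-Instruct-v0.2', 'mistralai', 'Mistral', 'Instruct', 'v0.2', '7B')

# Full pipeline: heuristics from the model directory, then the override wins.
meta = Metadata.load(
    metadata_override_path=Path("override.json"),  # e.g. {"general.author": "Someone"}
    model_path=Path("Mistral-7B-Instruct-v0.2"),   # hypothetical local checkout
    total_params=7_240_000_000,
)
print(meta.name, meta.author, meta.size_label)
```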