spark-nlp 5.5.1__py2.py3-none-any.whl → 5.5.2__py2.py3-none-any.whl

This diff compares the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.

Note: this version of spark-nlp has been flagged as potentially problematic.

@@ -0,0 +1,538 @@
+ # Copyright 2017-2023 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains classes for the AutoGGUFEmbeddings."""
+ from typing import List
+
+ from sparknlp.common import *
+
+
+ class AutoGGUFEmbeddings(AnnotatorModel, HasBatchedAnnotate):
+ """
22
+ Annotator that uses the llama.cpp library to generate text embeddings with large language
23
+ models
24
+
+ The type of embedding pooling can be set with the `setPoolingType` method. The default is
+ `"MEAN"`. The available options are `"NONE"`, `"MEAN"`, `"CLS"`, and `"LAST"`.
+
+ Pretrained models can be loaded with :meth:`.pretrained` of the companion
+ object:
+
+ >>> auto_gguf_model = AutoGGUFEmbeddings.pretrained() \\
+ ... .setInputCols(["document"]) \\
+ ... .setOutputCol("embeddings")
+
+ The default model is ``"nomic-embed-text-v1.5.Q8_0.gguf"``, if no name is provided.
+
+ For extended examples of usage, see the
+ `AutoGGUFEmbeddingsTest <https://github.com/JohnSnowLabs/spark-nlp/tree/master/src/test/scala/com/johnsnowlabs/nlp/embeddings/AutoGGUFEmbeddingsTest.scala>`__
+ and the
+ `example notebook <https://github.com/JohnSnowLabs/spark-nlp/tree/master/examples/python/llama.cpp/llama.cpp_in_Spark_NLP_AutoGGUFEmbeddings.ipynb>`__.
+
+ For available pretrained models please see the `Models Hub <https://sparknlp.org/models>`__.
+
+ ====================== ======================
+ Input Annotation types Output Annotation type
+ ====================== ======================
+ ``DOCUMENT`` ``SENTENCE_EMBEDDINGS``
+ ====================== ======================
+
+ Parameters
+ ----------
+ nThreads
+ Set the number of threads to use during generation
+ nThreadsBatch
+ Set the number of threads to use during batch and prompt processing
+ nCtx
+ Set the size of the prompt context
+ nBatch
+ Set the logical batch size for prompt processing (must be >=32 to use BLAS)
+ nUbatch
+ Set the physical batch size for prompt processing (must be >=32 to use BLAS)
+ nChunks
+ Set the maximal number of chunks to process
+ nSequences
+ Set the number of sequences to decode
+ nGpuLayers
+ Set the number of layers to store in VRAM (-1 - use default)
+ gpuSplitMode
+ Set how to split the model across GPUs
+ mainGpu
+ Set the main GPU that is used for scratch and small tensors.
+ tensorSplit
+ Set how split tensors should be distributed across GPUs
+ grpAttnN
+ Set the group-attention factor
+ grpAttnW
+ Set the group-attention width
+ ropeFreqBase
+ Set the RoPE base frequency, used by NTK-aware scaling
+ ropeFreqScale
+ Set the RoPE frequency scaling factor, expands context by a factor of 1/N
+ yarnExtFactor
+ Set the YaRN extrapolation mix factor
+ yarnAttnFactor
+ Set the YaRN scale sqrt(t) or attention magnitude
+ yarnBetaFast
+ Set the YaRN low correction dim or beta
+ yarnBetaSlow
+ Set the YaRN high correction dim or alpha
+ yarnOrigCtx
+ Set the YaRN original context size of model
+ defragmentationThreshold
+ Set the KV cache defragmentation threshold
+ numaStrategy
+ Set optimization strategies that help on some NUMA systems (if available)
+ ropeScalingType
+ Set the RoPE frequency scaling method, defaults to linear unless specified by the model
+ poolingType
+ Set the pooling type for embeddings, use model default if unspecified
+ flashAttention
+ Whether to enable Flash Attention
+ useMmap
+ Whether to use memory-map model (faster load but may increase pageouts if not using mlock)
+ useMlock
+ Whether to force the system to keep model in RAM rather than swapping or compressing
+ noKvOffload
+ Whether to disable KV offload
+
+ Notes
+ -----
+ To use GPU inference with this annotator, make sure to use the Spark NLP GPU package and set
+ the number of GPU layers with the `setNGpuLayers` method.
+
+ When using larger models, we recommend adjusting GPU usage with `setNCtx` and `setNGpuLayers`
+ according to your hardware to avoid out-of-memory errors.
+
+ Examples
+ --------
+ >>> import sparknlp
+ >>> from sparknlp.base import *
+ >>> from sparknlp.annotator import *
+ >>> from pyspark.ml import Pipeline
+ >>> document = DocumentAssembler() \\
+ ... .setInputCol("text") \\
+ ... .setOutputCol("document")
+ >>> autoGGUFEmbeddings = AutoGGUFEmbeddings.pretrained() \\
+ ... .setInputCols(["document"]) \\
+ ... .setOutputCol("embeddings") \\
+ ... .setBatchSize(4) \\
+ ... .setNGpuLayers(99) \\
+ ... .setPoolingType("MEAN")
+ >>> pipeline = Pipeline().setStages([document, autoGGUFEmbeddings])
+ >>> data = spark.createDataFrame([["The moons of Jupiter are 77 in total, with 79 confirmed natural satellites and 2 man-made ones."]]).toDF("text")
+ >>> result = pipeline.fit(data).transform(data)
+ >>> result.select("embeddings.embeddings").show(truncate = False)
+ +--------------------------------------------------------------------------------+
+ | embeddings|
+ +--------------------------------------------------------------------------------+
+ |[[-0.034486726, 0.07770534, -0.15982522, -0.017873349, 0.013914132, 0.0365736...|
+ +--------------------------------------------------------------------------------+
+ """
+
+ name = "AutoGGUFEmbeddings"
+ inputAnnotatorTypes = [AnnotatorType.DOCUMENT]
+ outputAnnotatorType = AnnotatorType.SENTENCE_EMBEDDINGS
+
+ # -------- MODEL PARAMETERS --------
+ nThreads = Param(
+ Params._dummy(),
+ "nThreads",
+ "Set the number of threads to use during generation",
+ typeConverter=TypeConverters.toInt,
+ )
+ nThreadsBatch = Param(
+ Params._dummy(),
+ "nThreadsBatch",
+ "Set the number of threads to use during batch and prompt processing",
+ typeConverter=TypeConverters.toInt,
+ )
+ nCtx = Param(
+ Params._dummy(),
+ "nCtx",
+ "Set the size of the prompt context",
+ typeConverter=TypeConverters.toInt,
+ )
+ nBatch = Param(
+ Params._dummy(),
+ "nBatch",
+ "Set the logical batch size for prompt processing (must be >=32 to use BLAS)",
+ typeConverter=TypeConverters.toInt,
+ )
+ nUbatch = Param(
+ Params._dummy(),
+ "nUbatch",
+ "Set the physical batch size for prompt processing (must be >=32 to use BLAS)",
+ typeConverter=TypeConverters.toInt,
+ )
+ nChunks = Param(
+ Params._dummy(),
+ "nChunks",
+ "Set the maximal number of chunks to process",
+ typeConverter=TypeConverters.toInt,
+ )
+ nSequences = Param(
+ Params._dummy(),
+ "nSequences",
+ "Set the number of sequences to decode",
+ typeConverter=TypeConverters.toInt,
+ )
+ nGpuLayers = Param(
+ Params._dummy(),
+ "nGpuLayers",
+ "Set the number of layers to store in VRAM (-1 - use default)",
+ typeConverter=TypeConverters.toInt,
+ )
+ # Set how to split the model across GPUs
+ #
+ # - NONE: No GPU split
+ # - LAYER: Split the model across GPUs by layer
+ # - ROW: Split the model across GPUs by rows
+ gpuSplitMode = Param(
+ Params._dummy(),
+ "gpuSplitMode",
+ "Set how to split the model across GPUs",
+ typeConverter=TypeConverters.toString,
+ )
+ mainGpu = Param(
+ Params._dummy(),
+ "mainGpu",
+ "Set the main GPU that is used for scratch and small tensors.",
+ typeConverter=TypeConverters.toInt,
+ )
+ tensorSplit = Param(
+ Params._dummy(),
+ "tensorSplit",
+ "Set how split tensors should be distributed across GPUs",
+ typeConverter=TypeConverters.toListFloat,
+ )
+ grpAttnN = Param(
+ Params._dummy(),
+ "grpAttnN",
+ "Set the group-attention factor",
+ typeConverter=TypeConverters.toInt,
+ )
+ grpAttnW = Param(
+ Params._dummy(),
+ "grpAttnW",
+ "Set the group-attention width",
+ typeConverter=TypeConverters.toInt,
+ )
+ ropeFreqBase = Param(
+ Params._dummy(),
+ "ropeFreqBase",
+ "Set the RoPE base frequency, used by NTK-aware scaling",
+ typeConverter=TypeConverters.toFloat,
+ )
+ ropeFreqScale = Param(
+ Params._dummy(),
+ "ropeFreqScale",
+ "Set the RoPE frequency scaling factor, expands context by a factor of 1/N",
+ typeConverter=TypeConverters.toFloat,
+ )
+ yarnExtFactor = Param(
+ Params._dummy(),
+ "yarnExtFactor",
+ "Set the YaRN extrapolation mix factor",
+ typeConverter=TypeConverters.toFloat,
+ )
+ yarnAttnFactor = Param(
+ Params._dummy(),
+ "yarnAttnFactor",
+ "Set the YaRN scale sqrt(t) or attention magnitude",
+ typeConverter=TypeConverters.toFloat,
+ )
+ yarnBetaFast = Param(
+ Params._dummy(),
+ "yarnBetaFast",
+ "Set the YaRN low correction dim or beta",
+ typeConverter=TypeConverters.toFloat,
+ )
+ yarnBetaSlow = Param(
+ Params._dummy(),
+ "yarnBetaSlow",
+ "Set the YaRN high correction dim or alpha",
+ typeConverter=TypeConverters.toFloat,
+ )
+ yarnOrigCtx = Param(
+ Params._dummy(),
+ "yarnOrigCtx",
+ "Set the YaRN original context size of model",
+ typeConverter=TypeConverters.toInt,
+ )
+ defragmentationThreshold = Param(
+ Params._dummy(),
+ "defragmentationThreshold",
+ "Set the KV cache defragmentation threshold",
+ typeConverter=TypeConverters.toFloat,
+ )
+ # Set optimization strategies that help on some NUMA systems (if available)
+ #
+ # Available Strategies:
+ #
+ # - DISABLED: No NUMA optimizations
+ # - DISTRIBUTE: Spread execution evenly over all
+ # - ISOLATE: Only spawn threads on CPUs on the node that execution started on
+ # - NUMA_CTL: Use the CPU map provided by numactl
+ # - MIRROR: Mirrors the model across NUMA nodes
+ numaStrategy = Param(
+ Params._dummy(),
+ "numaStrategy",
+ "Set optimization strategies that help on some NUMA systems (if available)",
+ typeConverter=TypeConverters.toString,
+ )
+ # Set the RoPE frequency scaling method, defaults to linear unless specified by the model.
+ #
+ # - UNSPECIFIED: Don't use any scaling
+ # - LINEAR: Linear scaling
+ # - YARN: YaRN RoPE scaling
+ ropeScalingType = Param(
+ Params._dummy(),
+ "ropeScalingType",
+ "Set the RoPE frequency scaling method, defaults to linear unless specified by the model",
+ typeConverter=TypeConverters.toString,
+ )
+ # Set the pooling type for embeddings, use model default if unspecified
+ #
+ # - 0 UNSPECIFIED: Don't use any pooling
+ # - 1 MEAN: Mean Pooling
+ # - 2 CLS: CLS Pooling
+ poolingType = Param(
+ Params._dummy(),
+ "poolingType",
+ "Set the pooling type for embeddings, use model default if unspecified",
+ typeConverter=TypeConverters.toString,
+ )
+ embedding = Param(
+ Params._dummy(),
+ "embedding",
+ "Whether to load model with embedding support",
+ typeConverter=TypeConverters.toBoolean,
+ )
+ flashAttention = Param(
+ Params._dummy(),
+ "flashAttention",
+ "Whether to enable Flash Attention",
+ typeConverter=TypeConverters.toBoolean,
+ )
+ useMmap = Param(
+ Params._dummy(),
+ "useMmap",
+ "Whether to use memory-map model (faster load but may increase pageouts if not using mlock)",
+ typeConverter=TypeConverters.toBoolean,
+ )
+ useMlock = Param(
+ Params._dummy(),
+ "useMlock",
+ "Whether to force the system to keep model in RAM rather than swapping or compressing",
+ typeConverter=TypeConverters.toBoolean,
+ )
+ noKvOffload = Param(
+ Params._dummy(),
+ "noKvOffload",
+ "Whether to disable KV offload",
+ typeConverter=TypeConverters.toBoolean,
+ )
+
+ # -------- MODEL SETTERS --------
+ def setNThreads(self, nThreads: int):
+ """Set the number of threads to use during generation"""
+ return self._set(nThreads=nThreads)
+
+ def setNThreadsBatch(self, nThreadsBatch: int):
+ """Set the number of threads to use during batch and prompt processing"""
+ return self._set(nThreadsBatch=nThreadsBatch)
+
+ def setNCtx(self, nCtx: int):
+ """Set the size of the prompt context"""
+ return self._set(nCtx=nCtx)
+
+ def setNBatch(self, nBatch: int):
+ """Set the logical batch size for prompt processing (must be >=32 to use BLAS)"""
+ return self._set(nBatch=nBatch)
+
+ def setNUbatch(self, nUbatch: int):
+ """Set the physical batch size for prompt processing (must be >=32 to use BLAS)"""
+ return self._set(nUbatch=nUbatch)
+
+ def setNChunks(self, nChunks: int):
+ """Set the maximal number of chunks to process"""
+ return self._set(nChunks=nChunks)
+
+ def setNSequences(self, nSequences: int):
+ """Set the number of sequences to decode"""
+ return self._set(nSequences=nSequences)
+
+ def setNGpuLayers(self, nGpuLayers: int):
+ """Set the number of layers to store in VRAM (-1 - use default)"""
+ return self._set(nGpuLayers=nGpuLayers)
+
+ def setGpuSplitMode(self, gpuSplitMode: str):
+ """Set how to split the model across GPUs"""
+ return self._set(gpuSplitMode=gpuSplitMode)
+
+ def setMainGpu(self, mainGpu: int):
+ """Set the main GPU that is used for scratch and small tensors."""
+ return self._set(mainGpu=mainGpu)
+
+ def setTensorSplit(self, tensorSplit: List[float]):
+ """Set how split tensors should be distributed across GPUs"""
+ return self._set(tensorSplit=tensorSplit)
+
+ def setGrpAttnN(self, grpAttnN: int):
+ """Set the group-attention factor"""
+ return self._set(grpAttnN=grpAttnN)
+
+ def setGrpAttnW(self, grpAttnW: int):
+ """Set the group-attention width"""
+ return self._set(grpAttnW=grpAttnW)
+
+ def setRopeFreqBase(self, ropeFreqBase: float):
+ """Set the RoPE base frequency, used by NTK-aware scaling"""
+ return self._set(ropeFreqBase=ropeFreqBase)
+
+ def setRopeFreqScale(self, ropeFreqScale: float):
+ """Set the RoPE frequency scaling factor, expands context by a factor of 1/N"""
+ return self._set(ropeFreqScale=ropeFreqScale)
+
+ def setYarnExtFactor(self, yarnExtFactor: float):
+ """Set the YaRN extrapolation mix factor"""
+ return self._set(yarnExtFactor=yarnExtFactor)
+
+ def setYarnAttnFactor(self, yarnAttnFactor: float):
+ """Set the YaRN scale sqrt(t) or attention magnitude"""
+ return self._set(yarnAttnFactor=yarnAttnFactor)
+
+ def setYarnBetaFast(self, yarnBetaFast: float):
+ """Set the YaRN low correction dim or beta"""
+ return self._set(yarnBetaFast=yarnBetaFast)
+
+ def setYarnBetaSlow(self, yarnBetaSlow: float):
+ """Set the YaRN high correction dim or alpha"""
+ return self._set(yarnBetaSlow=yarnBetaSlow)
+
+ def setYarnOrigCtx(self, yarnOrigCtx: int):
+ """Set the YaRN original context size of model"""
+ return self._set(yarnOrigCtx=yarnOrigCtx)
+
+ def setDefragmentationThreshold(self, defragmentationThreshold: float):
+ """Set the KV cache defragmentation threshold"""
+ return self._set(defragmentationThreshold=defragmentationThreshold)
+
+ def setNumaStrategy(self, numaStrategy: str):
+ """Set optimization strategies that help on some NUMA systems (if available)"""
+ numaUpper = numaStrategy.upper()
+ numaStrategies = ["DISABLED", "DISTRIBUTE", "ISOLATE", "NUMA_CTL", "MIRROR"]
+ if numaUpper not in numaStrategies:
+ raise ValueError(
+ f"Invalid NUMA strategy: {numaUpper}. "
+ + f"Valid values are: {numaStrategies}"
+ )
+ return self._set(numaStrategy=numaStrategy)
+
+ def setRopeScalingType(self, ropeScalingType: str):
+ """Set the RoPE frequency scaling method, defaults to linear unless specified by the model"""
+ return self._set(ropeScalingType=ropeScalingType)
+
+ def setPoolingType(self, poolingType: str):
+ """Set the pooling type for embeddings, use model default if unspecified"""
+ poolingTypeUpper = poolingType.upper()
+ poolingTypes = ["NONE", "MEAN", "CLS", "LAST"]
+ if poolingTypeUpper not in poolingTypes:
+ raise ValueError(
+ f"Invalid pooling type: {poolingType}. "
+ + f"Valid values are: {poolingTypes}"
+ )
+ return self._set(poolingType=poolingType)
+
+ def setFlashAttention(self, flashAttention: bool):
+ """Whether to enable Flash Attention"""
+ return self._set(flashAttention=flashAttention)
+
+ def setUseMmap(self, useMmap: bool):
+ """Whether to use memory-map model (faster load but may increase pageouts if not using mlock)"""
+ return self._set(useMmap=useMmap)
+
+ def setUseMlock(self, useMlock: bool):
+ """Whether to force the system to keep model in RAM rather than swapping or compressing"""
+ return self._set(useMlock=useMlock)
+
+ def setNoKvOffload(self, noKvOffload: bool):
+ """Whether to disable KV offload"""
+ return self._set(noKvOffload=noKvOffload)
+
+ def getMetadata(self):
+ """Gets the metadata of the model"""
+ return self._call_java("getMetadata")
+
+ @keyword_only
+ def __init__(
+ self,
+ classname="com.johnsnowlabs.nlp.embeddings.AutoGGUFEmbeddings",
+ java_model=None,
+ ):
+ super(AutoGGUFEmbeddings, self).__init__(
+ classname=classname, java_model=java_model
+ )
+ self._setDefault(
+ embedding=True,
+ nCtx=4096,
+ nBatch=512,
+ poolingType="MEAN",
+ )
+
+ @staticmethod
+ def loadSavedModel(folder, spark_session):
+ """Loads a locally saved model.
+
+ Parameters
+ ----------
+ folder : str
+ Folder of the saved model
+ spark_session : pyspark.sql.SparkSession
+ The current SparkSession
+
+ Returns
+ -------
+ AutoGGUFEmbeddings
+ The restored model
+ """
+ from sparknlp.internal import _AutoGGUFEmbeddingsLoader
+
+ jModel = _AutoGGUFEmbeddingsLoader(folder, spark_session._jsparkSession)._java_obj
+ return AutoGGUFEmbeddings(java_model=jModel)
+
+ @staticmethod
+ def pretrained(name="nomic-embed-text-v1.5.Q8_0.gguf", lang="en", remote_loc=None):
+ """Downloads and loads a pretrained model.
+
+ Parameters
+ ----------
+ name : str, optional
+ Name of the pretrained model, by default "nomic-embed-text-v1.5.Q8_0.gguf"
+ lang : str, optional
+ Language of the pretrained model, by default "en"
+ remote_loc : str, optional
+ Optional remote address of the resource, by default None. Will use
+ Spark NLP's repositories otherwise.
+
+ Returns
+ -------
+ AutoGGUFEmbeddings
+ The restored model
+ """
+ from sparknlp.pretrained import ResourceDownloader
+
+ return ResourceDownloader.downloadModel(
+ AutoGGUFEmbeddings, name, lang, remote_loc
+ )
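The hunk above adds the new AutoGGUFEmbeddings annotator in its entirety. A minimal usage sketch (not part of the diff) of how it can be wired into a pipeline, assuming a hypothetical local GGUF path; any llama.cpp-compatible embedding model should work:

```python
import sparknlp
from sparknlp.base import DocumentAssembler
from sparknlp.annotator import AutoGGUFEmbeddings
from pyspark.ml import Pipeline

spark = sparknlp.start()

document = DocumentAssembler() \
    .setInputCol("text") \
    .setOutputCol("document")

# loadSavedModel points at a local GGUF model instead of downloading the
# default "nomic-embed-text-v1.5.Q8_0.gguf"; the path here is hypothetical.
embeddings = AutoGGUFEmbeddings.loadSavedModel("/tmp/nomic-embed-text-v1.5.Q8_0.gguf", spark) \
    .setInputCols(["document"]) \
    .setOutputCol("embeddings") \
    .setPoolingType("MEAN") \
    .setNCtx(4096) \
    .setNGpuLayers(99)  # effective only with the Spark NLP GPU package

pipeline = Pipeline().setStages([document, embeddings])
data = spark.createDataFrame([["Spark NLP ships llama.cpp-backed embeddings."]]).toDF("text")
pipeline.fit(data).transform(data).select("embeddings.embeddings").show(truncate=False)
```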
@@ -31,7 +31,7 @@ class NomicEmbeddings(AnnotatorModel, HasEmbeddingsProperties, HasCaseSensitiveP
  ... .setOutputCol("nomic_embeddings")


- The default model is ``"nomic_small"``, if no name is provided.
+ The default model is ``"nomic_embed_v1"``, if no name is provided.

  For available pretrained models please see the
  `Models Hub <https://sparknlp.org/models?q=Nomic>`__.
@@ -159,13 +159,13 @@ class NomicEmbeddings(AnnotatorModel, HasEmbeddingsProperties, HasCaseSensitiveP
  return NomicEmbeddings(java_model=jModel)

  @staticmethod
- def pretrained(name="nomic_small", lang="en", remote_loc=None):
+ def pretrained(name="nomic_embed_v1", lang="en", remote_loc=None):
  """Downloads and loads a pretrained model.

  Parameters
  ----------
  name : str, optional
- Name of the pretrained model, by default "nomic_small"
+ Name of the pretrained model, by default "nomic_embed_v1"
  lang : str, optional
  Language of the pretrained model, by default "en"
  remote_loc : str, optional
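These two hunks only swap the NomicEmbeddings default from the placeholder "nomic_small" to the published "nomic_embed_v1". A short sketch (not part of the diff) of what resolves after the change:

```python
from sparknlp.annotator import NomicEmbeddings

# As of 5.5.2 the no-argument call downloads "nomic_embed_v1" (en);
# the two calls below are therefore equivalent.
nomic = NomicEmbeddings.pretrained() \
    .setInputCols(["document"]) \
    .setOutputCol("nomic_embeddings")
nomic_explicit = NomicEmbeddings.pretrained("nomic_embed_v1", "en")
```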
@@ -199,7 +199,6 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate):
  useChatTemplate
  Set whether or not generate should apply a chat template

-
  Notes
  -----
  To use GPU inference with this annotator, make sure to use the Spark NLP GPU package and set
@@ -208,29 +207,6 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate):
  When using larger models, we recommend adjusting GPU usage with `setNCtx` and `setNGpuLayers`
  according to your hardware to avoid out-of-memory errors.

- References
- ----------
- - `Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension
- <https://arxiv.org/abs/1910.13461>`__
- - https://github.com/pytorch/fairseq
-
- **Paper Abstract:**
- *We present BART, a denoising autoencoder for pretraining sequence-to-sequence models.
- BART is trained by (1) corrupting text with an arbitrary noising function, and (2)
- learning a model to reconstruct the original text. It uses a standard Tranformer-based
- neural machine translation architecture which, despite its simplicity, can be seen as
- generalizing BERT (due to the bidirectional encoder), GPT (with the left-to-right decoder),
- and many other more recent pretraining schemes. We evaluate a number of noising approaches,
- finding the best performance by both randomly shuffling the order of the original sentences
- and using a novel in-filling scheme, where spans of text are replaced with a single mask token.
- BART is particularly effective when fine tuned for text generation but also works well for
- comprehension tasks. It matches the performance of RoBERTa with comparable training resources
- on GLUE and SQuAD, achieves new state-of-the-art results on a range of abstractive dialogue,
- question answering, and summarization tasks, with gains of up to 6 ROUGE. BART also provides
- a 1.1 BLEU increase over a back-translation system for machine translation, with only target
- language pretraining. We also report ablation experiments that replicate other pretraining
- schemes within the BART framework, to better measure which factors most influence end-task performance.*
-
  Examples
  --------
  >>> import sparknlp
@@ -553,6 +529,13 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate):

  def setNumaStrategy(self, numaStrategy: str):
  """Set optimization strategies that help on some NUMA systems (if available)"""
+ numaUpper = numaStrategy.upper()
+ numaStrategies = ["DISABLED", "DISTRIBUTE", "ISOLATE", "NUMA_CTL", "MIRROR"]
+ if numaUpper not in numaStrategies:
+ raise ValueError(
+ f"Invalid NUMA strategy: {numaUpper}. "
+ + f"Valid values are: {numaStrategies}"
+ )
  return self._set(numaStrategy=numaStrategy)

  def setRopeScalingType(self, ropeScalingType: str):
@@ -561,6 +544,13 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate):

  def setPoolingType(self, poolingType: str):
  """Set the pooling type for embeddings, use model default if unspecified"""
+ poolingTypeUpper = poolingType.upper()
+ poolingTypes = ["NONE", "MEAN", "CLS", "LAST"]
+ if poolingTypeUpper not in poolingTypes:
+ raise ValueError(
+ f"Invalid pooling type: {poolingType}. "
+ + f"Valid values are: {poolingTypes}"
+ )
  return self._set(poolingType=poolingType)

  def setModelDraft(self, modelDraft: str):
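Both hunks give AutoGGUFModel the same eager validation that the new AutoGGUFEmbeddings annotator ships with: invalid values now fail fast in `setNumaStrategy` and `setPoolingType` with a ValueError instead of surfacing later inside llama.cpp. A brief, illustrative sketch of the new behavior (the no-argument `pretrained()` call is only a stand-in for however you load the model):

```python
from sparknlp.annotator import AutoGGUFModel

model = AutoGGUFModel.pretrained()
model.setNumaStrategy("DISTRIBUTE")  # accepted
model.setPoolingType("mean")         # accepted; the check is case-insensitive

try:
    model.setPoolingType("AVERAGE")  # not in ["NONE", "MEAN", "CLS", "LAST"]
except ValueError as err:
    print(err)  # Invalid pooling type: AVERAGE. Valid values are: [...]
```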
@@ -44,7 +44,7 @@ class CPMTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
  ... .setOutputCol("generation")


- The default model is ``"llam2-7b"``, if no name is provided. For available
+ The default model is ``"mini_cpm_2b_8bit"``, if no name is provided. For available
  pretrained models please see the `Models Hub
  <https://sparknlp.org/models?q=cpm>`__.

@@ -104,7 +104,7 @@ class CPMTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
  >>> documentAssembler = DocumentAssembler() \\
  ... .setInputCol("text") \\
  ... .setOutputCol("documents")
- >>> cpm = CPMTransformer.pretrained("llama_2_7b_chat_hf_int4") \\
+ >>> cpm = CPMTransformer.pretrained("mini_cpm_2b_8bit","xx") \\
  ... .setInputCols(["documents"]) \\
  ... .setMaxOutputLength(50) \\
  ... .setOutputCol("generation")
@@ -299,15 +299,15 @@ class CPMTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
  return CPMTransformer(java_model=jModel)

  @staticmethod
- def pretrained(name="llama_2_7b_chat_hf_int4", lang="en", remote_loc=None):
+ def pretrained(name="mini_cpm_2b_8bit", lang="xx", remote_loc=None):
  """Downloads and loads a pretrained model.

  Parameters
  ----------
  name : str, optional
- Name of the pretrained model, by default "llama_2_7b_chat_hf_int4"
+ Name of the pretrained model, by default "mini_cpm_2b_8bit"
  lang : str, optional
- Language of the pretrained model, by default "en"
+ Language of the pretrained model, by default "xx"
  remote_loc : str, optional
  Optional remote address of the resource, by default None. Will use
  Spark NLP's repositories otherwise.
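As with NomicEmbeddings, the CPMTransformer hunks fix a default that pointed at a Llama 2 model name, replacing it with the published MiniCPM export under the multilingual "xx" tag. A usage sketch (not part of the diff) with the corrected default:

```python
from sparknlp.annotator import CPMTransformer

# Both calls resolve to the same model in 5.5.2.
cpm = CPMTransformer.pretrained() \
    .setInputCols(["documents"]) \
    .setMaxOutputLength(50) \
    .setOutputCol("generation")
cpm_explicit = CPMTransformer.pretrained("mini_cpm_2b_8bit", "xx")
```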