spark-nlp 4.2.6__py2.py3-none-any.whl → 6.2.1__py2.py3-none-any.whl
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- com/johnsnowlabs/ml/__init__.py +0 -0
- com/johnsnowlabs/ml/ai/__init__.py +10 -0
- spark_nlp-6.2.1.dist-info/METADATA +362 -0
- spark_nlp-6.2.1.dist-info/RECORD +292 -0
- {spark_nlp-4.2.6.dist-info → spark_nlp-6.2.1.dist-info}/WHEEL +1 -1
- sparknlp/__init__.py +81 -28
- sparknlp/annotation.py +3 -2
- sparknlp/annotator/__init__.py +6 -0
- sparknlp/annotator/audio/__init__.py +2 -0
- sparknlp/annotator/audio/hubert_for_ctc.py +188 -0
- sparknlp/annotator/audio/wav2vec2_for_ctc.py +14 -14
- sparknlp/annotator/audio/whisper_for_ctc.py +251 -0
- sparknlp/{base → annotator}/chunk2_doc.py +4 -7
- sparknlp/annotator/chunker.py +1 -2
- sparknlp/annotator/classifier_dl/__init__.py +17 -0
- sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +161 -0
- sparknlp/annotator/classifier_dl/albert_for_question_answering.py +3 -15
- sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py +4 -18
- sparknlp/annotator/classifier_dl/albert_for_token_classification.py +3 -17
- sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +211 -0
- sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py +225 -0
- sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +161 -0
- sparknlp/annotator/classifier_dl/bert_for_question_answering.py +6 -20
- sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py +3 -17
- sparknlp/annotator/classifier_dl/bert_for_token_classification.py +3 -17
- sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py +212 -0
- sparknlp/annotator/classifier_dl/camembert_for_question_answering.py +168 -0
- sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py +5 -19
- sparknlp/annotator/classifier_dl/camembert_for_token_classification.py +5 -19
- sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +202 -0
- sparknlp/annotator/classifier_dl/classifier_dl.py +4 -4
- sparknlp/annotator/classifier_dl/deberta_for_question_answering.py +3 -17
- sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py +4 -19
- sparknlp/annotator/classifier_dl/deberta_for_token_classification.py +5 -21
- sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +193 -0
- sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py +3 -17
- sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py +4 -18
- sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py +3 -17
- sparknlp/annotator/classifier_dl/distil_bert_for_zero_shot_classification.py +211 -0
- sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +161 -0
- sparknlp/annotator/classifier_dl/longformer_for_question_answering.py +3 -17
- sparknlp/annotator/classifier_dl/longformer_for_sequence_classification.py +4 -18
- sparknlp/annotator/classifier_dl/longformer_for_token_classification.py +3 -17
- sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py +148 -0
- sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py +188 -0
- sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py +173 -0
- sparknlp/annotator/classifier_dl/multi_classifier_dl.py +3 -3
- sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +161 -0
- sparknlp/annotator/classifier_dl/roberta_for_question_answering.py +3 -17
- sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py +4 -18
- sparknlp/annotator/classifier_dl/roberta_for_token_classification.py +1 -1
- sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py +225 -0
- sparknlp/annotator/classifier_dl/sentiment_dl.py +4 -4
- sparknlp/annotator/classifier_dl/tapas_for_question_answering.py +2 -2
- sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +149 -0
- sparknlp/annotator/classifier_dl/xlm_roberta_for_question_answering.py +3 -17
- sparknlp/annotator/classifier_dl/xlm_roberta_for_sequence_classification.py +4 -18
- sparknlp/annotator/classifier_dl/xlm_roberta_for_token_classification.py +6 -20
- sparknlp/annotator/classifier_dl/xlm_roberta_for_zero_shot_classification.py +225 -0
- sparknlp/annotator/classifier_dl/xlnet_for_sequence_classification.py +4 -18
- sparknlp/annotator/classifier_dl/xlnet_for_token_classification.py +3 -17
- sparknlp/annotator/cleaners/__init__.py +15 -0
- sparknlp/annotator/cleaners/cleaner.py +202 -0
- sparknlp/annotator/cleaners/extractor.py +191 -0
- sparknlp/annotator/coref/spanbert_coref.py +4 -18
- sparknlp/annotator/cv/__init__.py +15 -0
- sparknlp/annotator/cv/blip_for_question_answering.py +172 -0
- sparknlp/annotator/cv/clip_for_zero_shot_classification.py +193 -0
- sparknlp/annotator/cv/convnext_for_image_classification.py +269 -0
- sparknlp/annotator/cv/florence2_transformer.py +180 -0
- sparknlp/annotator/cv/gemma3_for_multimodal.py +346 -0
- sparknlp/annotator/cv/internvl_for_multimodal.py +280 -0
- sparknlp/annotator/cv/janus_for_multimodal.py +351 -0
- sparknlp/annotator/cv/llava_for_multimodal.py +328 -0
- sparknlp/annotator/cv/mllama_for_multimodal.py +340 -0
- sparknlp/annotator/cv/paligemma_for_multimodal.py +308 -0
- sparknlp/annotator/cv/phi3_vision_for_multimodal.py +328 -0
- sparknlp/annotator/cv/qwen2vl_transformer.py +332 -0
- sparknlp/annotator/cv/smolvlm_transformer.py +426 -0
- sparknlp/annotator/cv/swin_for_image_classification.py +242 -0
- sparknlp/annotator/cv/vision_encoder_decoder_for_image_captioning.py +240 -0
- sparknlp/annotator/cv/vit_for_image_classification.py +36 -4
- sparknlp/annotator/dataframe_optimizer.py +216 -0
- sparknlp/annotator/date2_chunk.py +88 -0
- sparknlp/annotator/dependency/dependency_parser.py +2 -3
- sparknlp/annotator/dependency/typed_dependency_parser.py +3 -4
- sparknlp/annotator/document_character_text_splitter.py +228 -0
- sparknlp/annotator/document_normalizer.py +37 -1
- sparknlp/annotator/document_token_splitter.py +175 -0
- sparknlp/annotator/document_token_splitter_test.py +85 -0
- sparknlp/annotator/embeddings/__init__.py +11 -0
- sparknlp/annotator/embeddings/albert_embeddings.py +4 -18
- sparknlp/annotator/embeddings/auto_gguf_embeddings.py +539 -0
- sparknlp/annotator/embeddings/bert_embeddings.py +9 -22
- sparknlp/annotator/embeddings/bert_sentence_embeddings.py +12 -24
- sparknlp/annotator/embeddings/bge_embeddings.py +199 -0
- sparknlp/annotator/embeddings/camembert_embeddings.py +4 -20
- sparknlp/annotator/embeddings/chunk_embeddings.py +1 -2
- sparknlp/annotator/embeddings/deberta_embeddings.py +2 -16
- sparknlp/annotator/embeddings/distil_bert_embeddings.py +5 -19
- sparknlp/annotator/embeddings/doc2vec.py +7 -1
- sparknlp/annotator/embeddings/e5_embeddings.py +195 -0
- sparknlp/annotator/embeddings/e5v_embeddings.py +138 -0
- sparknlp/annotator/embeddings/elmo_embeddings.py +2 -2
- sparknlp/annotator/embeddings/instructor_embeddings.py +204 -0
- sparknlp/annotator/embeddings/longformer_embeddings.py +3 -17
- sparknlp/annotator/embeddings/minilm_embeddings.py +189 -0
- sparknlp/annotator/embeddings/mpnet_embeddings.py +192 -0
- sparknlp/annotator/embeddings/mxbai_embeddings.py +184 -0
- sparknlp/annotator/embeddings/nomic_embeddings.py +181 -0
- sparknlp/annotator/embeddings/roberta_embeddings.py +9 -21
- sparknlp/annotator/embeddings/roberta_sentence_embeddings.py +7 -21
- sparknlp/annotator/embeddings/sentence_embeddings.py +2 -3
- sparknlp/annotator/embeddings/snowflake_embeddings.py +202 -0
- sparknlp/annotator/embeddings/uae_embeddings.py +211 -0
- sparknlp/annotator/embeddings/universal_sentence_encoder.py +3 -3
- sparknlp/annotator/embeddings/word2vec.py +7 -1
- sparknlp/annotator/embeddings/word_embeddings.py +4 -5
- sparknlp/annotator/embeddings/xlm_roberta_embeddings.py +9 -21
- sparknlp/annotator/embeddings/xlm_roberta_sentence_embeddings.py +7 -21
- sparknlp/annotator/embeddings/xlnet_embeddings.py +4 -18
- sparknlp/annotator/er/entity_ruler.py +37 -23
- sparknlp/annotator/keyword_extraction/yake_keyword_extraction.py +2 -3
- sparknlp/annotator/ld_dl/language_detector_dl.py +2 -2
- sparknlp/annotator/lemmatizer.py +3 -4
- sparknlp/annotator/matcher/date_matcher.py +35 -3
- sparknlp/annotator/matcher/multi_date_matcher.py +1 -2
- sparknlp/annotator/matcher/regex_matcher.py +3 -3
- sparknlp/annotator/matcher/text_matcher.py +2 -3
- sparknlp/annotator/n_gram_generator.py +1 -2
- sparknlp/annotator/ner/__init__.py +3 -1
- sparknlp/annotator/ner/ner_converter.py +18 -0
- sparknlp/annotator/ner/ner_crf.py +4 -5
- sparknlp/annotator/ner/ner_dl.py +10 -5
- sparknlp/annotator/ner/ner_dl_graph_checker.py +293 -0
- sparknlp/annotator/ner/ner_overwriter.py +2 -2
- sparknlp/annotator/ner/zero_shot_ner_model.py +173 -0
- sparknlp/annotator/normalizer.py +2 -2
- sparknlp/annotator/openai/__init__.py +16 -0
- sparknlp/annotator/openai/openai_completion.py +349 -0
- sparknlp/annotator/openai/openai_embeddings.py +106 -0
- sparknlp/annotator/pos/perceptron.py +6 -7
- sparknlp/annotator/sentence/sentence_detector.py +2 -2
- sparknlp/annotator/sentence/sentence_detector_dl.py +3 -3
- sparknlp/annotator/sentiment/sentiment_detector.py +4 -5
- sparknlp/annotator/sentiment/vivekn_sentiment.py +4 -5
- sparknlp/annotator/seq2seq/__init__.py +17 -0
- sparknlp/annotator/seq2seq/auto_gguf_model.py +304 -0
- sparknlp/annotator/seq2seq/auto_gguf_reranker.py +334 -0
- sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +336 -0
- sparknlp/annotator/seq2seq/bart_transformer.py +420 -0
- sparknlp/annotator/seq2seq/cohere_transformer.py +357 -0
- sparknlp/annotator/seq2seq/cpm_transformer.py +321 -0
- sparknlp/annotator/seq2seq/gpt2_transformer.py +1 -1
- sparknlp/annotator/seq2seq/llama2_transformer.py +343 -0
- sparknlp/annotator/seq2seq/llama3_transformer.py +381 -0
- sparknlp/annotator/seq2seq/m2m100_transformer.py +392 -0
- sparknlp/annotator/seq2seq/marian_transformer.py +124 -3
- sparknlp/annotator/seq2seq/mistral_transformer.py +348 -0
- sparknlp/annotator/seq2seq/nllb_transformer.py +420 -0
- sparknlp/annotator/seq2seq/olmo_transformer.py +326 -0
- sparknlp/annotator/seq2seq/phi2_transformer.py +326 -0
- sparknlp/annotator/seq2seq/phi3_transformer.py +330 -0
- sparknlp/annotator/seq2seq/phi4_transformer.py +387 -0
- sparknlp/annotator/seq2seq/qwen_transformer.py +340 -0
- sparknlp/annotator/seq2seq/starcoder_transformer.py +335 -0
- sparknlp/annotator/seq2seq/t5_transformer.py +54 -4
- sparknlp/annotator/similarity/__init__.py +0 -0
- sparknlp/annotator/similarity/document_similarity_ranker.py +379 -0
- sparknlp/annotator/spell_check/context_spell_checker.py +116 -17
- sparknlp/annotator/spell_check/norvig_sweeting.py +3 -6
- sparknlp/annotator/spell_check/symmetric_delete.py +1 -1
- sparknlp/annotator/stemmer.py +2 -3
- sparknlp/annotator/stop_words_cleaner.py +3 -4
- sparknlp/annotator/tf_ner_dl_graph_builder.py +1 -1
- sparknlp/annotator/token/__init__.py +0 -1
- sparknlp/annotator/token/recursive_tokenizer.py +2 -3
- sparknlp/annotator/token/tokenizer.py +2 -3
- sparknlp/annotator/ws/word_segmenter.py +35 -10
- sparknlp/base/__init__.py +2 -3
- sparknlp/base/doc2_chunk.py +0 -3
- sparknlp/base/document_assembler.py +5 -5
- sparknlp/base/embeddings_finisher.py +14 -2
- sparknlp/base/finisher.py +15 -4
- sparknlp/base/gguf_ranking_finisher.py +234 -0
- sparknlp/base/image_assembler.py +69 -0
- sparknlp/base/light_pipeline.py +53 -21
- sparknlp/base/multi_document_assembler.py +9 -13
- sparknlp/base/prompt_assembler.py +207 -0
- sparknlp/base/token_assembler.py +1 -2
- sparknlp/common/__init__.py +2 -0
- sparknlp/common/annotator_type.py +1 -0
- sparknlp/common/completion_post_processing.py +37 -0
- sparknlp/common/match_strategy.py +33 -0
- sparknlp/common/properties.py +914 -9
- sparknlp/internal/__init__.py +841 -116
- sparknlp/internal/annotator_java_ml.py +1 -1
- sparknlp/internal/annotator_transformer.py +3 -0
- sparknlp/logging/comet.py +2 -2
- sparknlp/partition/__init__.py +16 -0
- sparknlp/partition/partition.py +244 -0
- sparknlp/partition/partition_properties.py +902 -0
- sparknlp/partition/partition_transformer.py +200 -0
- sparknlp/pretrained/pretrained_pipeline.py +1 -1
- sparknlp/pretrained/resource_downloader.py +126 -2
- sparknlp/reader/__init__.py +15 -0
- sparknlp/reader/enums.py +19 -0
- sparknlp/reader/pdf_to_text.py +190 -0
- sparknlp/reader/reader2doc.py +124 -0
- sparknlp/reader/reader2image.py +136 -0
- sparknlp/reader/reader2table.py +44 -0
- sparknlp/reader/reader_assembler.py +159 -0
- sparknlp/reader/sparknlp_reader.py +461 -0
- sparknlp/training/__init__.py +1 -0
- sparknlp/training/conll.py +8 -2
- sparknlp/training/spacy_to_annotation.py +57 -0
- sparknlp/util.py +26 -0
- spark_nlp-4.2.6.dist-info/METADATA +0 -1256
- spark_nlp-4.2.6.dist-info/RECORD +0 -196
- {spark_nlp-4.2.6.dist-info → spark_nlp-6.2.1.dist-info}/top_level.txt +0 -0
- /sparknlp/annotator/{token/token2_chunk.py → token2_chunk.py} +0 -0
sparknlp/annotator/seq2seq/auto_gguf_model.py
@@ -0,0 +1,304 @@
+# Copyright 2017-2023 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains classes for the AutoGGUFModel."""
+from sparknlp.common import *
+
+
+class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties, CompletionPostProcessing):
+    """
+    Annotator that uses the llama.cpp library to generate text completions with large language
+    models.
+
+    For settable parameters, and their explanations, see the parameters of this class and refer to
+    the llama.cpp documentation of
+    `server.cpp <https://github.com/ggerganov/llama.cpp/tree/7d5e8777ae1d21af99d4f95be10db4870720da91/examples/server>`__
+    for more information.
+
+    If the parameters are not set, the annotator will default to use the parameters provided by
+    the model.
+
+    Pretrained models can be loaded with :meth:`.pretrained` of the companion
+    object:
+
+    >>> auto_gguf_model = AutoGGUFModel.pretrained() \\
+    ...     .setInputCols(["document"]) \\
+    ...     .setOutputCol("completions")
+
+    The default model is ``"Phi_4_mini_instruct_Q4_K_M_gguf"``, if no name is provided.
+
+    AutoGGUFModel is also able to load pretrained models from AutoGGUFVisionModel. Just
+    specify the same name for the pretrained method, and it will load the text-part of the
+    multimodal model automatically.
+
+    For extended examples of usage, see the
+    `AutoGGUFModelTest <https://github.com/JohnSnowLabs/spark-nlp/tree/master/src/test/scala/com/johnsnowlabs/nlp/annotators/seq2seq/AutoGGUFModelTest.scala>`__
+    and the
+    `example notebook <https://github.com/JohnSnowLabs/spark-nlp/tree/master/examples/python/llama.cpp/llama.cpp_in_Spark_NLP_AutoGGUFModel.ipynb>`__.
+
+    For available pretrained models please see the `Models Hub <https://sparknlp.org/models>`__.
+
+    ====================== ======================
+    Input Annotation types Output Annotation type
+    ====================== ======================
+    ``DOCUMENT``           ``DOCUMENT``
+    ====================== ======================
+
+    Parameters
+    ----------
+    nThreads
+        Set the number of threads to use during generation
+    nThreadsDraft
+        Set the number of threads to use during draft generation
+    nThreadsBatch
+        Set the number of threads to use during batch and prompt processing
+    nThreadsBatchDraft
+        Set the number of threads to use during batch and prompt processing
+    nCtx
+        Set the size of the prompt context
+    nBatch
+        Set the logical batch size for prompt processing (must be >=32 to use BLAS)
+    nUbatch
+        Set the physical batch size for prompt processing (must be >=32 to use BLAS)
+    nDraft
+        Set the number of tokens to draft for speculative decoding
+    nChunks
+        Set the maximal number of chunks to process
+    nSequences
+        Set the number of sequences to decode
+    pSplit
+        Set the speculative decoding split probability
+    nGpuLayers
+        Set the number of layers to store in VRAM (-1 - use default)
+    nGpuLayersDraft
+        Set the number of layers to store in VRAM for the draft model (-1 - use default)
+    gpuSplitMode
+        Set how to split the model across GPUs
+    mainGpu
+        Set the main GPU that is used for scratch and small tensors.
+    tensorSplit
+        Set how split tensors should be distributed across GPUs
+    grpAttnN
+        Set the group-attention factor
+    grpAttnW
+        Set the group-attention width
+    ropeFreqBase
+        Set the RoPE base frequency, used by NTK-aware scaling
+    ropeFreqScale
+        Set the RoPE frequency scaling factor, expands context by a factor of 1/N
+    yarnExtFactor
+        Set the YaRN extrapolation mix factor
+    yarnAttnFactor
+        Set the YaRN scale sqrt(t) or attention magnitude
+    yarnBetaFast
+        Set the YaRN low correction dim or beta
+    yarnBetaSlow
+        Set the YaRN high correction dim or alpha
+    yarnOrigCtx
+        Set the YaRN original context size of model
+    defragmentationThreshold
+        Set the KV cache defragmentation threshold
+    numaStrategy
+        Set optimization strategies that help on some NUMA systems (if available)
+    ropeScalingType
+        Set the RoPE frequency scaling method, defaults to linear unless specified by the model
+    poolingType
+        Set the pooling type for embeddings, use model default if unspecified
+    modelDraft
+        Set the draft model for speculative decoding
+    modelAlias
+        Set a model alias
+    lookupCacheStaticFilePath
+        Set path to static lookup cache to use for lookup decoding (not updated by generation)
+    lookupCacheDynamicFilePath
+        Set path to dynamic lookup cache to use for lookup decoding (updated by generation)
+    flashAttention
+        Whether to enable Flash Attention
+    inputPrefixBos
+        Whether to add prefix BOS to user inputs, preceding the `--in-prefix` string
+    useMmap
+        Whether to use memory-map model (faster load but may increase pageouts if not using mlock)
+    useMlock
+        Whether to force the system to keep model in RAM rather than swapping or compressing
+    noKvOffload
+        Whether to disable KV offload
+    systemPrompt
+        Set a system prompt to use
+    chatTemplate
+        The chat template to use
+    inputPrefix
+        Set the prompt to start generation with
+    inputSuffix
+        Set a suffix for infilling
+    cachePrompt
+        Whether to remember the prompt to avoid reprocessing it
+    nPredict
+        Set the number of tokens to predict
+    topK
+        Set top-k sampling
+    topP
+        Set top-p sampling
+    minP
+        Set min-p sampling
+    tfsZ
+        Set tail free sampling, parameter z
+    typicalP
+        Set locally typical sampling, parameter p
+    temperature
+        Set the temperature
+    dynatempRange
+        Set the dynamic temperature range
+    dynatempExponent
+        Set the dynamic temperature exponent
+    repeatLastN
+        Set the last n tokens to consider for penalties
+    repeatPenalty
+        Set the penalty of repeated sequences of tokens
+    frequencyPenalty
+        Set the repetition alpha frequency penalty
+    presencePenalty
+        Set the repetition alpha presence penalty
+    miroStat
+        Set MiroStat sampling strategies.
+    mirostatTau
+        Set the MiroStat target entropy, parameter tau
+    mirostatEta
+        Set the MiroStat learning rate, parameter eta
+    penalizeNl
+        Whether to penalize newline tokens
+    nKeep
+        Set the number of tokens to keep from the initial prompt
+    seed
+        Set the RNG seed
+    nProbs
+        Set the amount top tokens probabilities to output if greater than 0.
+    minKeep
+        Set the amount of tokens the samplers should return at least (0 = disabled)
+    grammar
+        Set BNF-like grammar to constrain generations
+    penaltyPrompt
+        Override which part of the prompt is penalized for repetition.
+    ignoreEos
+        Set whether to ignore end of stream token and continue generating (implies --logit-bias 2-inf)
+    disableTokenIds
+        Set the token ids to disable in the completion
+    stopStrings
+        Set strings upon seeing which token generation is stopped
+    samplers
+        Set which samplers to use for token generation in the given order
+    useChatTemplate
+        Set whether or not generate should apply a chat template
+
+    Notes
+    -----
+    To use GPU inference with this annotator, make sure to use the Spark NLP GPU package and set
+    the number of GPU layers with the `setNGpuLayers` method.
+
+    When using larger models, we recommend adjusting GPU usage with `setNCtx` and `setNGpuLayers`
+    according to your hardware to avoid out-of-memory errors.
+
+    Examples
+    --------
+    >>> import sparknlp
+    >>> from sparknlp.base import *
+    >>> from sparknlp.annotator import *
+    >>> from pyspark.ml import Pipeline
+    >>> document = DocumentAssembler() \\
+    ...     .setInputCol("text") \\
+    ...     .setOutputCol("document")
+    >>> autoGGUFModel = AutoGGUFModel.pretrained() \\
+    ...     .setInputCols(["document"]) \\
+    ...     .setOutputCol("completions") \\
+    ...     .setBatchSize(4) \\
+    ...     .setNPredict(20) \\
+    ...     .setNGpuLayers(99) \\
+    ...     .setTemperature(0.4) \\
+    ...     .setTopK(40) \\
+    ...     .setTopP(0.9) \\
+    ...     .setPenalizeNl(True)
+    >>> pipeline = Pipeline().setStages([document, autoGGUFModel])
+    >>> data = spark.createDataFrame([["Hello, I am a"]]).toDF("text")
+    >>> result = pipeline.fit(data).transform(data)
+    >>> result.select("completions").show(truncate = False)
+    +-----------------------------------------------------------------------------------------------------------------------------------+
+    |completions                                                                                                                        |
+    +-----------------------------------------------------------------------------------------------------------------------------------+
+    |[{document, 0, 78, new user. I am currently working on a project and I need to create a list of , {prompt -> Hello, I am a}, []}]|
+    +-----------------------------------------------------------------------------------------------------------------------------------+
+    """
+
+    name = "AutoGGUFModel"
+    inputAnnotatorTypes = [AnnotatorType.DOCUMENT]
+    outputAnnotatorType = AnnotatorType.DOCUMENT
+
+    @keyword_only
+    def __init__(self, classname="com.johnsnowlabs.nlp.annotators.seq2seq.AutoGGUFModel", java_model=None):
+        super(AutoGGUFModel, self).__init__(
+            classname=classname,
+            java_model=java_model
+        )
+        self._setDefault(
+            useChatTemplate=True,
+            nCtx=4096,
+            nBatch=512,
+            nPredict=100,
+            nGpuLayers=99,
+            systemPrompt="You are a helpful assistant."
+        )
+
+    @staticmethod
+    def loadSavedModel(path, spark_session):
+        """Loads a locally saved model.
+
+        Parameters
+        ----------
+        path : str
+            Path to the gguf model
+        spark_session : pyspark.sql.SparkSession
+            The current SparkSession
+
+        Returns
+        -------
+        AutoGGUFModel
+            The restored model
+        """
+        from sparknlp.internal import _AutoGGUFLoader
+        jModel = _AutoGGUFLoader(path, spark_session._jsparkSession)._java_obj
+        return AutoGGUFModel(java_model=jModel)
+
+    @staticmethod
+    def pretrained(name="Phi_4_mini_instruct_Q4_K_M_gguf", lang="en", remote_loc=None):
+        """Downloads and loads a pretrained model.
+
+        Parameters
+        ----------
+        name : str, optional
+            Name of the pretrained model, by default "Phi_4_mini_instruct_Q4_K_M_gguf"
+        lang : str, optional
+            Language of the pretrained model, by default "en"
+        remote_loc : str, optional
+            Optional remote address of the resource, by default None. Will use
+            Spark NLPs repositories otherwise.
+
+        Returns
+        -------
+        AutoGGUFModel
+            The restored model
+        """
+        from sparknlp.pretrained import ResourceDownloader
+        return ResourceDownloader.downloadModel(AutoGGUFModel, name, lang, remote_loc)
+
+    def close(self):
+        """Closes the llama.cpp model backend freeing resources. The model is reloaded when used again.
+        """
+        self._java_obj.close()
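The `loadSavedModel` entry point above accepts a plain GGUF file path, so a locally converted model can run without a Models Hub download. A minimal sketch under that reading of the docstring; the model path is a placeholder, not a file shipped with the package:

```python
# Minimal sketch: running AutoGGUFModel from a local GGUF file instead of a
# pretrained download. The path below is a placeholder; any llama.cpp-compatible
# GGUF model should work, per the loadSavedModel docstring above.
import sparknlp
from sparknlp.base import DocumentAssembler
from sparknlp.annotator import AutoGGUFModel
from pyspark.ml import Pipeline

spark = sparknlp.start()

document = DocumentAssembler() \
    .setInputCol("text") \
    .setOutputCol("document")

# loadSavedModel(path, spark_session) as documented in the diff above
llm = AutoGGUFModel.loadSavedModel("/models/phi-4-mini-q4_k_m.gguf", spark) \
    .setInputCols(["document"]) \
    .setOutputCol("completions") \
    .setNPredict(50)

data = spark.createDataFrame([["Hello, I am a"]]).toDF("text")
result = Pipeline(stages=[document, llm]).fit(data).transform(data)

# The generated text lives in the annotation's `result` field
result.selectExpr("explode(completions.result) AS completion").show(truncate=False)

llm.close()  # frees the llama.cpp backend; per the docstring, it reloads on next use
```

Calling `close()` releases the native backend early when a job is done with the model; the docstring notes the model is reloaded transparently the next time it is used.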
sparknlp/annotator/seq2seq/auto_gguf_reranker.py
@@ -0,0 +1,334 @@
+# Copyright 2017-2023 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains classes for the AutoGGUFReranker."""
+from typing import List, Dict
+
+from sparknlp.common import *
+
+
+class AutoGGUFReranker(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties):
+    """
+    Annotator that uses the llama.cpp library to rerank text documents based on their relevance
+    to a given query using GGUF-format reranking models.
+
+    This annotator is specifically designed for text reranking tasks, where multiple documents
+    or text passages are ranked according to their relevance to a query. It uses specialized
+    reranking models in GGUF format that output relevance scores for each input document.
+
+    The reranker takes a query (set via :meth:`.setQuery`) and a list of documents, then returns the
+    same documents with added metadata containing relevance scores. The documents are processed
+    in batches and each receives a ``relevance_score`` in its metadata indicating how relevant
+    it is to the provided query.
+
+    For settable parameters, and their explanations, see the parameters of this class and refer to
+    the llama.cpp documentation of
+    `server.cpp <https://github.com/ggerganov/llama.cpp/tree/7d5e8777ae1d21af99d4f95be10db4870720da91/examples/server>`__
+    for more information.
+
+    If the parameters are not set, the annotator will default to use the parameters provided by
+    the model.
+
+    Pretrained models can be loaded with :meth:`.pretrained` of the companion
+    object:
+
+    >>> reranker = AutoGGUFReranker.pretrained() \\
+    ...     .setInputCols(["document"]) \\
+    ...     .setOutputCol("reranked_documents") \\
+    ...     .setQuery("A man is eating pasta.")
+
+    The default model is ``"bge_reranker_v2_m3_Q4_K_M"``, if no name is provided.
+
+    For extended examples of usage, see the
+    `AutoGGUFRerankerTest <https://github.com/JohnSnowLabs/spark-nlp/tree/master/src/test/scala/com/johnsnowlabs/nlp/annotators/seq2seq/AutoGGUFRerankerTest.scala>`__
+    and the
+    `example notebook <https://github.com/JohnSnowLabs/spark-nlp/tree/master/examples/python/llama.cpp/llama.cpp_in_Spark_NLP_AutoGGUFReranker.ipynb>`__.
+
+    For available pretrained models please see the `Models Hub <https://sparknlp.org/models>`__.
+
+    ====================== ======================
+    Input Annotation types Output Annotation type
+    ====================== ======================
+    ``DOCUMENT``           ``DOCUMENT``
+    ====================== ======================
+
+    Parameters
+    ----------
+    query
+        The query to be used for reranking. If not set, the input text will be used as the query.
+    nThreads
+        Set the number of threads to use during generation
+    nThreadsDraft
+        Set the number of threads to use during draft generation
+    nThreadsBatch
+        Set the number of threads to use during batch and prompt processing
+    nThreadsBatchDraft
+        Set the number of threads to use during batch and prompt processing
+    nCtx
+        Set the size of the prompt context
+    nBatch
+        Set the logical batch size for prompt processing (must be >=32 to use BLAS)
+    nUbatch
+        Set the physical batch size for prompt processing (must be >=32 to use BLAS)
+    nGpuLayers
+        Set the number of layers to store in VRAM (-1 - use default)
+    nGpuLayersDraft
+        Set the number of layers to store in VRAM for the draft model (-1 - use default)
+    gpuSplitMode
+        Set how to split the model across GPUs
+    mainGpu
+        Set the main GPU that is used for scratch and small tensors.
+    tensorSplit
+        Set how split tensors should be distributed across GPUs
+    grpAttnN
+        Set the group-attention factor
+    grpAttnW
+        Set the group-attention width
+    ropeFreqBase
+        Set the RoPE base frequency, used by NTK-aware scaling
+    ropeFreqScale
+        Set the RoPE frequency scaling factor, expands context by a factor of 1/N
+    yarnExtFactor
+        Set the YaRN extrapolation mix factor
+    yarnAttnFactor
+        Set the YaRN scale sqrt(t) or attention magnitude
+    yarnBetaFast
+        Set the YaRN low correction dim or beta
+    yarnBetaSlow
+        Set the YaRN high correction dim or alpha
+    yarnOrigCtx
+        Set the YaRN original context size of model
+    defragmentationThreshold
+        Set the KV cache defragmentation threshold
+    numaStrategy
+        Set optimization strategies that help on some NUMA systems (if available)
+    ropeScalingType
+        Set the RoPE frequency scaling method, defaults to linear unless specified by the model
+    poolingType
+        Set the pooling type for embeddings, use model default if unspecified
+    modelDraft
+        Set the draft model for speculative decoding
+    modelAlias
+        Set a model alias
+    lookupCacheStaticFilePath
+        Set path to static lookup cache to use for lookup decoding (not updated by generation)
+    lookupCacheDynamicFilePath
+        Set path to dynamic lookup cache to use for lookup decoding (updated by generation)
+    flashAttention
+        Whether to enable Flash Attention
+    inputPrefixBos
+        Whether to add prefix BOS to user inputs, preceding the `--in-prefix` string
+    useMmap
+        Whether to use memory-map model (faster load but may increase pageouts if not using mlock)
+    useMlock
+        Whether to force the system to keep model in RAM rather than swapping or compressing
+    noKvOffload
+        Whether to disable KV offload
+    systemPrompt
+        Set a system prompt to use
+    chatTemplate
+        The chat template to use
+    inputPrefix
+        Set the prompt to start generation with
+    inputSuffix
+        Set a suffix for infilling
+    cachePrompt
+        Whether to remember the prompt to avoid reprocessing it
+    nPredict
+        Set the number of tokens to predict
+    topK
+        Set top-k sampling
+    topP
+        Set top-p sampling
+    minP
+        Set min-p sampling
+    tfsZ
+        Set tail free sampling, parameter z
+    typicalP
+        Set locally typical sampling, parameter p
+    temperature
+        Set the temperature
+    dynatempRange
+        Set the dynamic temperature range
+    dynatempExponent
+        Set the dynamic temperature exponent
+    repeatLastN
+        Set the last n tokens to consider for penalties
+    repeatPenalty
+        Set the penalty of repeated sequences of tokens
+    frequencyPenalty
+        Set the repetition alpha frequency penalty
+    presencePenalty
+        Set the repetition alpha presence penalty
+    miroStat
+        Set MiroStat sampling strategies.
+    mirostatTau
+        Set the MiroStat target entropy, parameter tau
+    mirostatEta
+        Set the MiroStat learning rate, parameter eta
+    penalizeNl
+        Whether to penalize newline tokens
+    nKeep
+        Set the number of tokens to keep from the initial prompt
+    seed
+        Set the RNG seed
+    nProbs
+        Set the amount top tokens probabilities to output if greater than 0.
+    minKeep
+        Set the amount of tokens the samplers should return at least (0 = disabled)
+    grammar
+        Set BNF-like grammar to constrain generations
+    penaltyPrompt
+        Override which part of the prompt is penalized for repetition.
+    ignoreEos
+        Set whether to ignore end of stream token and continue generating (implies --logit-bias 2-inf)
+    disableTokenIds
+        Set the token ids to disable in the completion
+    stopStrings
+        Set strings upon seeing which token generation is stopped
+    samplers
+        Set which samplers to use for token generation in the given order
+    useChatTemplate
+        Set whether or not generate should apply a chat template
+
+    Notes
+    -----
+    This annotator is designed for reranking tasks and requires setting a query using ``setQuery``.
+    The query represents the search intent against which documents will be ranked. Each input
+    document receives a relevance score in the output metadata.
+
+    To use GPU inference with this annotator, make sure to use the Spark NLP GPU package and set
+    the number of GPU layers with the `setNGpuLayers` method.
+
+    When using larger models, we recommend adjusting GPU usage with `setNCtx` and `setNGpuLayers`
+    according to your hardware to avoid out-of-memory errors.
+
+    Examples
+    --------
+    >>> import sparknlp
+    >>> from sparknlp.base import *
+    >>> from sparknlp.annotator import *
+    >>> from pyspark.ml import Pipeline
+    >>> document = DocumentAssembler() \\
+    ...     .setInputCol("text") \\
+    ...     .setOutputCol("document")
+    >>> reranker = AutoGGUFReranker.pretrained() \\
+    ...     .setInputCols(["document"]) \\
+    ...     .setOutputCol("reranked_documents") \\
+    ...     .setBatchSize(4) \\
+    ...     .setQuery("A man is eating pasta.")
+    >>> pipeline = Pipeline().setStages([document, reranker])
+    >>> data = spark.createDataFrame([
+    ...     ["A man is eating food."],
+    ...     ["A man is eating a piece of bread."],
+    ...     ["The girl is carrying a baby."],
+    ...     ["A man is riding a horse."]
+    ... ]).toDF("text")
+    >>> result = pipeline.fit(data).transform(data)
+    >>> result.select("reranked_documents").show(truncate = False)
+    # Each document will have a relevance_score in metadata showing how relevant it is to the query
+    """
+
+    name = "AutoGGUFReranker"
+    inputAnnotatorTypes = [AnnotatorType.DOCUMENT]
+    outputAnnotatorType = AnnotatorType.DOCUMENT
+
+    query = Param(Params._dummy(), "query",
+                  "The query to be used for reranking. If not set, the input text will be used as the query.",
+                  typeConverter=TypeConverters.toString)
+    @keyword_only
+    def __init__(self, classname="com.johnsnowlabs.nlp.annotators.seq2seq.AutoGGUFReranker", java_model=None):
+        super(AutoGGUFReranker, self).__init__(
+            classname=classname,
+            java_model=java_model
+        )
+        self._setDefault(
+            useChatTemplate=True,
+            nCtx=4096,
+            nBatch=512,
+            nGpuLayers=99,
+            systemPrompt="You are a helpful assistant.",
+            query=""
+        )
+
+    def setQuery(self, value: str):
+        """Set the query to be used for reranking.
+
+        Parameters
+        ----------
+        value : str
+            The query text that documents will be ranked against.
+
+        Returns
+        -------
+        AutoGGUFReranker
+            This instance for method chaining.
+        """
+        return self._set(query=value)
+
+    def getQuery(self):
+        """Get the current query used for reranking.
+
+        Returns
+        -------
+        str
+            The current query string.
+        """
+        return self._call_java("getQuery")
+
+    @staticmethod
+    def loadSavedModel(folder, spark_session):
+        """Loads a locally saved model.
+
+        Parameters
+        ----------
+        folder : str
+            Folder of the saved model
+        spark_session : pyspark.sql.SparkSession
+            The current SparkSession
+
+        Returns
+        -------
+        AutoGGUFReranker
+            The restored model
+        """
+        from sparknlp.internal import _AutoGGUFRerankerLoader
+        jModel = _AutoGGUFRerankerLoader(folder, spark_session._jsparkSession)._java_obj
+        return AutoGGUFReranker(java_model=jModel)
+
+    @staticmethod
+    def pretrained(name="bge_reranker_v2_m3_Q4_K_M", lang="en", remote_loc=None):
+        """Downloads and loads a pretrained model.
+
+        Parameters
+        ----------
+        name : str, optional
+            Name of the pretrained model, by default "bge_reranker_v2_m3_Q4_K_M"
+        lang : str, optional
+            Language of the pretrained model, by default "en"
+        remote_loc : str, optional
+            Optional remote address of the resource, by default None. Will use
+            Spark NLPs repositories otherwise.
+
+        Returns
+        -------
+        AutoGGUFReranker
+            The restored model
+        """
+        from sparknlp.pretrained import ResourceDownloader
+        return ResourceDownloader.downloadModel(AutoGGUFReranker, name, lang, remote_loc)
+
+    def close(self):
+        """Closes the llama.cpp model backend freeing resources. The model is reloaded when used again.
+        """
+        self._java_obj.close()
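The reranker writes ``relevance_score`` into each annotation's metadata, which Spark stores as string values, so ordering by relevance needs an explicit cast downstream. A minimal sketch continuing the `result` DataFrame from the docstring example above; the explode-and-sort step is illustrative, not part of the annotator:

```python
# Minimal sketch: pull each reranked document and its relevance_score out of
# the annotation metadata, then order by score. Continues the `result`
# DataFrame from the AutoGGUFReranker docstring example; the cast and sort
# are illustrative, not part of the package.
from pyspark.sql import functions as F

scored = result.select(F.explode("reranked_documents").alias("doc")) \
    .select(
        F.col("doc.result").alias("text"),
        F.col("doc.metadata").getItem("relevance_score").cast("float").alias("score"),
    ) \
    .orderBy(F.col("score").desc())

scored.show(truncate=False)  # documents most relevant to the query first
```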