spark-nlp 6.1.2__py2.py3-none-any.whl → 6.1.3__py2.py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.


{spark_nlp-6.1.2.dist-info → spark_nlp-6.1.3.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: spark-nlp
- Version: 6.1.2
+ Version: 6.1.3
  Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
  Home-page: https://github.com/JohnSnowLabs/spark-nlp
  Author: John Snow Labs
@@ -102,7 +102,7 @@ $ java -version
  $ conda create -n sparknlp python=3.7 -y
  $ conda activate sparknlp
  # spark-nlp by default is based on pyspark 3.x
- $ pip install spark-nlp==6.1.2 pyspark==3.3.1
+ $ pip install spark-nlp==6.1.3 pyspark==3.3.1
  ```

  In Python console or Jupyter `Python3` kernel:
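The README line above leads into the session bootstrap. A minimal sketch of verifying the upgrade from that console, assuming a default local setup (`sparknlp.start()` and `sparknlp.version()` are the package's documented entry points):

```python
import sparknlp

# Starts a Spark session preconfigured for Spark NLP; resolves the
# Maven artifact matching the installed Python package version.
spark = sparknlp.start()

# Should report 6.1.3 after the upgrade.
print(sparknlp.version())
```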
@@ -168,7 +168,7 @@ For a quick example of using pipelines and models take a look at our official [d

  ### Apache Spark Support

- Spark NLP *6.1.2* has been built on top of Apache Spark 3.4 and fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+ Spark NLP *6.1.3* has been built on top of Apache Spark 3.4 and fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x

  | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
  |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
@@ -198,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http

  ### Databricks Support

- Spark NLP 6.1.2 has been tested and is compatible with the following runtimes:
+ Spark NLP 6.1.3 has been tested and is compatible with the following runtimes:

  | **CPU** | **GPU** |
  |--------------------|--------------------|
@@ -216,7 +216,7 @@ We are compatible with older runtimes. For a full list check databricks support
 
  ### EMR Support

- Spark NLP 6.1.2 has been tested and is compatible with the following EMR releases:
+ Spark NLP 6.1.3 has been tested and is compatible with the following EMR releases:

  | **EMR Release** |
  |--------------------|
{spark_nlp-6.1.2.dist-info → spark_nlp-6.1.3.dist-info}/RECORD RENAMED
@@ -3,7 +3,7 @@ com/johnsnowlabs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
  com/johnsnowlabs/ml/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  com/johnsnowlabs/ml/ai/__init__.py,sha256=YQiK2M7U4d8y5irPy_HB8ae0mSpqS9583MH44pnKJXc,295
  com/johnsnowlabs/nlp/__init__.py,sha256=DPIVXtONO5xXyOk-HB0-sNiHAcco17NN13zPS_6Uw8c,294
- sparknlp/__init__.py,sha256=beylcD_JfS6wohhs_UyT6WE2IBwRT4_C75_xPyG1_BE,13814
+ sparknlp/__init__.py,sha256=UR0dRykX67j-Ksuzk5Xe-Mod5qCK24iBjHHa0omOp2w,13814
  sparknlp/annotation.py,sha256=I5zOxG5vV2RfPZfqN9enT1i4mo6oBcn3Lrzs37QiOiA,5635
  sparknlp/annotation_audio.py,sha256=iRV_InSVhgvAwSRe9NTbUH9v6OGvTM-FPCpSAKVu0mE,1917
  sparknlp/annotation_image.py,sha256=xhCe8Ko-77XqWVuuYHFrjKqF6zPd8Z-RY_rmZXNwCXU,2547
@@ -146,11 +146,12 @@ sparknlp/annotator/matcher/date_matcher.py,sha256=FrjTVoNBq1Z7E4qSJKvfV5rC7Mlm9R
  sparknlp/annotator/matcher/multi_date_matcher.py,sha256=-zCp4HugIpSN6U4-c1uN_dGn7x69xYy6SUoQz6dY34s,4475
  sparknlp/annotator/matcher/regex_matcher.py,sha256=GYhJNjBnCvfeEMv7d2_kDnqjfHtC7Hts--Ttlkjdozs,8380
  sparknlp/annotator/matcher/text_matcher.py,sha256=kpP1-5BEFEW9kantoHvjhcCbX1qsGm4gEDtFHATWOwA,10636
- sparknlp/annotator/ner/__init__.py,sha256=4ni5IHCbGlEmhCgGDw7A61x633qUTvWko2pIixiKO2Q,948
+ sparknlp/annotator/ner/__init__.py,sha256=cd13PX6O92BIasE8NWd3JUR6up2fRbe7chRx4lQZRcY,1006
  sparknlp/annotator/ner/ner_approach.py,sha256=4Y4gcusVtDaQpXfBbBe8XMAZ5hw1mvdh5A7g1j_T1KY,2793
  sparknlp/annotator/ner/ner_converter.py,sha256=ANPp_Xe0DaK4z4n-0KujBj3Xp5jIbsFXBXvmp-aCKlM,5924
  sparknlp/annotator/ner/ner_crf.py,sha256=eFylEz3-CENW0dyc6K4jodz9Kig3tnCyfZ3s-KZMvH4,14283
  sparknlp/annotator/ner/ner_dl.py,sha256=ght1W6-ArjLRiNHCv_bKpozkyNd8HVIb8SDGhcbp8Fg,22123
+ sparknlp/annotator/ner/ner_dl_graph_checker.py,sha256=KB_BuNj2EPiVaHJEU9N1d40j-qGCn9e2tsTYctsioyc,8227
  sparknlp/annotator/ner/ner_overwriter.py,sha256=en5OxXIP46yTXokIE96YDP9kcHA9oxiRPgwXMo0otew,6798
  sparknlp/annotator/ner/zero_shot_ner_model.py,sha256=DohhnkGSG-JxjW72t8AOx3GY7R_qT-LA3I0KF9TBz-Y,7501
  sparknlp/annotator/openai/__init__.py,sha256=u6SpV_xS8UpBE95WnTl0IefOI5TrTRl7ZHuYoeTetiA,759
@@ -169,7 +170,7 @@ sparknlp/annotator/sentiment/sentiment_detector.py,sha256=m545NGU0Xzg_PO6_qIfpli
  sparknlp/annotator/sentiment/vivekn_sentiment.py,sha256=4rpXWDgzU6ddnbrSCp9VdLb2epCc9oZ3c6XcqxEw8nk,9655
  sparknlp/annotator/seq2seq/__init__.py,sha256=aDiph00Hyq7L8uDY0frtyuHtqFodBqTMbixx_nq4z1I,1841
  sparknlp/annotator/seq2seq/auto_gguf_model.py,sha256=yhZQHMHfp88rQvLHTWyS-8imZrwqp-8RQQwnw6PmHfc,11749
- sparknlp/annotator/seq2seq/auto_gguf_reranker.py,sha256=QpGpyO1_epWzMospTFrfVVLj2KZ_n3gbHN269vo9fbU,12667
+ sparknlp/annotator/seq2seq/auto_gguf_reranker.py,sha256=MS4wCm2A2YiQfkB4HVVZKuN-3A1yGzqSCF69nu7J2rQ,12640
  sparknlp/annotator/seq2seq/auto_gguf_vision_model.py,sha256=swBek2026dW6BOX5O9P8Uq41X2GC71VGW0ADFeUIvs0,15299
  sparknlp/annotator/seq2seq/bart_transformer.py,sha256=I1flM4yeCzEAKOdQllBC30XuedxVJ7ferkFhZ6gwEbE,18481
  sparknlp/annotator/seq2seq/cohere_transformer.py,sha256=43LZBVazZMgJRCsN7HaYjVYfJ5hRMV95QZyxMtXq-m4,13496
@@ -201,12 +202,13 @@ sparknlp/annotator/token/regex_tokenizer.py,sha256=FG2HvFwMb1G_4grfyIQaeBpaAgKv_
  sparknlp/annotator/token/tokenizer.py,sha256=Me3P3wogUKUJ7O7_2wLdPzF00vKpp_sHuiztpGWRVpU,19939
  sparknlp/annotator/ws/__init__.py,sha256=-l8bnl8Z6lGXWOBdRIBZ6958fzTHt4o87QhhLHIFF8A,693
  sparknlp/annotator/ws/word_segmenter.py,sha256=rrbshwn5wzXIHpCCDji6ZcsmiARpuA82_p_6TgNHfRc,16365
- sparknlp/base/__init__.py,sha256=fCL-kReIavZceUa1OC99pSRH7MsXzqGB8BXgzVS_f7s,1311
+ sparknlp/base/__init__.py,sha256=ug0e79fv03D67mUqmb-i-a89vOa6EcRx49Z0fS6rT3g,1361
  sparknlp/base/audio_assembler.py,sha256=HKa9mXvmuMUrjTihUZkppGj-WJjcUrm2BGapNuPifyI,3320
  sparknlp/base/doc2_chunk.py,sha256=TyvbdJNkVo9favHlOEoH5JwKbjpk5ZVJ75p8Cilp9jM,6551
  sparknlp/base/document_assembler.py,sha256=zl-SXWMTR3B0EZ8z6SWYchCwEo-61FhU6u7dHUKDIOg,6697
  sparknlp/base/embeddings_finisher.py,sha256=5QU1Okgl2ULrPVf4ze1H0SsRCMYXWGARtUsT7dagBYA,7659
  sparknlp/base/finisher.py,sha256=V4wkMm9Ug09q4zTQc9T9Wr-awmu2Hu-eNaJ039YgZXM,8583
+ sparknlp/base/gguf_ranking_finisher.py,sha256=tzoisuD70myfHo3t4WFtBs8i1jwdjkwXCbWknDEXOHk,8315
  sparknlp/base/graph_finisher.py,sha256=a8fxk3ei2YQw6s0Y9Yy8oMOF1i1XUrgqaiwVE0VPt4w,4834
  sparknlp/base/has_recursive_fit.py,sha256=P55rSHLIXhihXWS2bOC_DskcQTc3njieVD1JkjS2bcA,849
  sparknlp/base/has_recursive_transform.py,sha256=UkGNgo4LMsjQC-Coeefg4bJcg7FoPcPiG382zEa6Ywk,841
@@ -248,7 +250,7 @@ sparknlp/pretrained/utils.py,sha256=T1MrvW_DaWk_jcOjVLOea0NMFE9w8fe0ZT_5urZ_nEY,
  sparknlp/reader/__init__.py,sha256=-Toj3AIBki-zXPpV8ezFTI2LX1yP_rK2bhpoa8nBkTw,685
  sparknlp/reader/enums.py,sha256=MNGug9oJ1BBLM1Pbske13kAabalDzHa2kucF5xzFpHs,770
  sparknlp/reader/pdf_to_text.py,sha256=eWw-cwjosmcSZ9eHso0F5QQoeGBBnwsOhzhCXXvMjZA,7169
- sparknlp/reader/reader2doc.py,sha256=LRqfaL9nidhlPkJIwTJo7SnGYmNNfOqwEdrsWYGEdnI,7146
+ sparknlp/reader/reader2doc.py,sha256=8x1tvx7Hj2J4xpyRiCUvrG-kmOPBvIE8K1tJZY-e0Xw,8200
  sparknlp/reader/reader2table.py,sha256=GC6Yz0gQ83S6XKOi329TUNQuAvLrBxysqDkDRZPvcYA,4759
  sparknlp/reader/sparknlp_reader.py,sha256=MJs8v_ECYaV1SOabI1L_2MkVYEDVImtwgbYypO7DJSY,20623
  sparknlp/training/__init__.py,sha256=qREi9u-5Vc2VjpL6-XZsyvu5jSEIdIhowW7_kKaqMqo,852
@@ -281,7 +283,7 @@ sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py,sha256=R4yHFN3
  sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py,sha256=EoCSdcIjqQ3wv13MAuuWrKV8wyVBP0SbOEW41omHlR0,23189
  sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py,sha256=k5CQ7gKV6HZbZMB8cKLUJuZxoZWlP_DFWdZ--aIDwsc,2356
  sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py,sha256=pAxjWhjazSX8Vg0MFqJiuRVw1IbnQNSs-8Xp26L4nko,870
- spark_nlp-6.1.2.dist-info/METADATA,sha256=l6za09CF7uliVRGYEFRi02vualYyzRt_kPuuVa8MnWg,19774
- spark_nlp-6.1.2.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
- spark_nlp-6.1.2.dist-info/top_level.txt,sha256=uuytur4pyMRw2H_txNY2ZkaucZHUs22QF8-R03ch_-E,13
- spark_nlp-6.1.2.dist-info/RECORD,,
+ spark_nlp-6.1.3.dist-info/METADATA,sha256=U4Fb5wRd8Ql6BULfRwQSE6Pa77wsLwOGwTk-s038YuI,19774
+ spark_nlp-6.1.3.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
+ spark_nlp-6.1.3.dist-info/top_level.txt,sha256=uuytur4pyMRw2H_txNY2ZkaucZHUs22QF8-R03ch_-E,13
+ spark_nlp-6.1.3.dist-info/RECORD,,
sparknlp/__init__.py CHANGED
@@ -66,7 +66,7 @@ sys.modules['com.johnsnowlabs.ml.ai'] = annotator
  annotators = annotator
  embeddings = annotator

- __version__ = "6.1.2"
+ __version__ = "6.1.3"


  def start(gpu=False,
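The bumped `__version__` is what `start()` (whose signature begins above) uses to pin the matching Spark NLP Maven artifact. A sketch of the GPU variant, with all other `start()` parameters assumed to keep their defaults:

```python
import sparknlp

# gpu=True selects the GPU build of the Spark NLP jar
# (com.johnsnowlabs.nlp:spark-nlp-gpu); other options stay at defaults.
spark = sparknlp.start(gpu=True)

print(spark.version)       # Apache Spark version
print(sparknlp.version())  # "6.1.3"
```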
sparknlp/annotator/ner/__init__.py CHANGED
@@ -16,5 +16,6 @@ from sparknlp.annotator.ner.ner_approach import *
  from sparknlp.annotator.ner.ner_converter import *
  from sparknlp.annotator.ner.ner_crf import *
  from sparknlp.annotator.ner.ner_dl import *
+ from sparknlp.annotator.ner.ner_dl_graph_checker import *
  from sparknlp.annotator.ner.ner_overwriter import *
  from sparknlp.annotator.ner.zero_shot_ner_model import *
sparknlp/annotator/ner/ner_dl_graph_checker.py ADDED
@@ -0,0 +1,237 @@
+ # Copyright 2017-2025 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains classes for the NerDLGraphChecker."""
+
+ from sparknlp.common import *
+ import sparknlp.internal as _internal
+ from pyspark.ml.util import JavaMLWritable
+ from pyspark.ml.wrapper import JavaEstimator, JavaModel
+
+
+ class NerDLGraphChecker(
+     JavaEstimator,
+     JavaMLWritable,
+     _internal.ParamsGettersSetters,
+ ):
+     """Checks whether a suitable NerDL graph is available for the given training dataset,
+     before any computation/training is done. This annotator is useful for custom training
+     cases where specialized graphs are needed.
+
+     Important: This annotator should be placed before any embeddings or NerDLApproach
+     annotators in the pipeline. It will process the whole dataset to extract the required
+     graph parameters.
+
+     This annotator requires a dataset with at least two columns: one with tokens and one
+     with the labels. In addition, it requires the embeddings annotator used in the pipeline,
+     to extract the suitable embeddings dimension.
+
+     For extended examples of usage, see the `Examples
+     <https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/training/english/dl-ner/ner_dl_graph_checker.ipynb>`__
+     and the `NerDLGraphCheckerTestSpec
+     <https://github.com/JohnSnowLabs/spark-nlp/blob/master/src/test/scala/com/johnsnowlabs/nlp/annotators/ner/dl/NerDLGraphCheckerTestSpec.scala>`__.
+
+     ==================================== ======================
+     Input Annotation types               Output Annotation type
+     ==================================== ======================
+     ``DOCUMENT, TOKEN``                  ``NONE``
+     ==================================== ======================
+
+     Parameters
+     ----------
+     inputCols
+         Column names of input annotations
+     labelColumn
+         Column name for data labels
+     embeddingsDim
+         Dimensionality of embeddings
+
+     Examples
+     --------
+     >>> import sparknlp
+     >>> from sparknlp.base import *
+     >>> from sparknlp.annotator import *
+     >>> from sparknlp.training import CoNLL
+     >>> from pyspark.ml import Pipeline
+
+     This CoNLL dataset already includes a sentence, token and label
+     column with their respective annotator types. If a custom dataset is used,
+     these need to be defined, for example, with:
+
+     >>> conll = CoNLL()
+     >>> trainingData = conll.readDataset(spark, "src/test/resources/conll2003/eng.train")
+     >>> embeddings = BertEmbeddings \\
+     ...     .pretrained() \\
+     ...     .setInputCols(["sentence", "token"]) \\
+     ...     .setOutputCol("embeddings")
+
+     This annotator requires the same data as NerDLApproach graphs: text, tokens, labels,
+     and the embeddings model.
+
+     >>> nerDLGraphChecker = NerDLGraphChecker() \\
+     ...     .setInputCols(["sentence", "token"]) \\
+     ...     .setLabelColumn("label") \\
+     ...     .setEmbeddingsModel(embeddings)
+     >>> nerTagger = NerDLApproach() \\
+     ...     .setInputCols(["sentence", "token", "embeddings"]) \\
+     ...     .setLabelColumn("label") \\
+     ...     .setOutputCol("ner") \\
+     ...     .setMaxEpochs(1) \\
+     ...     .setRandomSeed(0) \\
+     ...     .setVerbose(0)
+     >>> pipeline = Pipeline().setStages([nerDLGraphChecker, embeddings, nerTagger])
+
+     If we now fit the pipeline with a matching graph missing, an exception is raised.
+
+     >>> pipelineModel = pipeline.fit(trainingData)
+     """
+
+     inputCols = Param(
+         Params._dummy(),
+         "inputCols",
+         "Input columns",
+         typeConverter=TypeConverters.toListString,
+     )
+
+     def setInputCols(self, *value):
+         """Sets column names of input annotations.
+
+         Parameters
+         ----------
+         *value : List[str]
+             Input columns for the annotator
+         """
+         if type(value[0]) == str or type(value[0]) == list:
+             if len(value) == 1 and type(value[0]) == list:
+                 return self._set(inputCols=value[0])
+             else:
+                 return self._set(inputCols=list(value))
+         else:
+             raise TypeError(
+                 "InputCols datatype not supported. It must be either str or list"
+             )
+
+     labelColumn = Param(
+         Params._dummy(),
+         "labelColumn",
+         "Column with label per each token",
+         typeConverter=TypeConverters.toString,
+     )
+
+     def setLabelColumn(self, value):
+         """Sets name of column for data labels.
+
+         Parameters
+         ----------
+         value : str
+             Column for data labels
+         """
+         return self._set(labelColumn=value)
+
+     embeddingsDim = Param(
+         Params._dummy(),
+         "embeddingsDim",
+         "Dimensionality of embeddings",
+         typeConverter=TypeConverters.toInt,
+     )
+
+     def setEmbeddingsDim(self, value: int):
+         """Sets dimensionality of embeddings.
+
+         Parameters
+         ----------
+         value : int
+             Dimensionality of embeddings
+         """
+         return self._set(embeddingsDim=value)
+
+     def setEmbeddingsModel(self, model: HasEmbeddingsProperties):
+         """Sets embeddingsDim from a given embeddings model, if possible.
+
+         Falls back to setEmbeddingsDim if the dimension cannot be obtained automatically.
+         """
+         # Try the Python API first
+         if hasattr(model, "getDimension"):
+             dim = model.getDimension()
+             return self.setEmbeddingsDim(int(dim))
+         # Try the JVM side if available
+         if hasattr(model, "_java_obj") and hasattr(model._java_obj, "getDimension"):
+             dim = int(model._java_obj.getDimension())
+             return self.setEmbeddingsDim(dim)
+         raise ValueError(
+             "Could not infer embeddings dimension from provided model. "
+             "Use setEmbeddingsDim(dim) explicitly."
+         )
+
+     inputAnnotatorTypes = [
+         AnnotatorType.DOCUMENT,
+         AnnotatorType.TOKEN,
+     ]
+
+     graphFolder = Param(
+         Params._dummy(),
+         "graphFolder",
+         "Folder path that contains external graph files",
+         TypeConverters.toString,
+     )
+
+     def setGraphFolder(self, p):
+         """Sets folder path that contains external graph files.
+
+         Parameters
+         ----------
+         p : str
+             Folder path that contains external graph files
+         """
+         return self._set(graphFolder=p)
+
+     @keyword_only
+     def __init__(self):
+         _internal.ParamsGettersSetters.__init__(self)
+         classname = "com.johnsnowlabs.nlp.annotators.ner.dl.NerDLGraphChecker"
+         self.__class__._java_class_name = classname
+         self._java_obj = self._new_java_obj(classname, self.uid)
+
+     def _create_model(self, java_model):
+         return NerDLGraphCheckerModel()
+
+
+ class NerDLGraphCheckerModel(
+     JavaModel,
+     JavaMLWritable,
+     _internal.ParamsGettersSetters,
+ ):
+     """Resulting model from NerDLGraphChecker. It does not perform any transformations,
+     as the checks are done during the ``fit`` phase; it acts as the identity.
+
+     This annotator should never be used directly.
+     """
+
+     inputAnnotatorTypes = [
+         AnnotatorType.DOCUMENT,
+         AnnotatorType.TOKEN,
+     ]
+
+     @keyword_only
+     def __init__(
+         self,
+         classname="com.johnsnowlabs.nlp.annotators.ner.dl.NerDLGraphCheckerModel",
+         java_model=None,
+     ):
+         super(NerDLGraphCheckerModel, self).__init__(java_model=java_model)
+         if classname and not java_model:
+             self.__class__._java_class_name = classname
+             self._java_obj = self._new_java_obj(classname, self.uid)
+         if java_model is not None:
+             self._transfer_params_from_java()
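Since NerDLGraphChecker validates graph availability at ``fit`` time, the failure path can be handled before any training cost is incurred. A sketch using the names from the docstring example above; the concrete exception type is not shown in this diff, so a broad catch is used:

```python
from pyspark.ml import Pipeline

# The checker stage runs first, scanning the dataset for the graph
# parameters (number of tags, embeddings dimension, etc.) before the
# embeddings and NerDLApproach stages do any heavy work.
pipeline = Pipeline().setStages([nerDLGraphChecker, embeddings, nerTagger])

try:
    pipeline_model = pipeline.fit(trainingData)
except Exception as err:
    # Raised during fit if no suitable NerDL graph is found; the message
    # should indicate the required graph shape. setGraphFolder() can point
    # the checker at custom graphs.
    print(f"No suitable NerDL graph available: {err}")
```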
sparknlp/annotator/seq2seq/auto_gguf_reranker.py CHANGED
@@ -47,7 +47,7 @@ class AutoGGUFReranker(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties
  ...     .setOutputCol("reranked_documents") \\
  ...     .setQuery("A man is eating pasta.")

- The default model is ``"bge-reranker-v2-m3-Q4_K_M"``, if no name is provided.
+ The default model is ``"bge_reranker_v2_m3_Q4_K_M"``, if no name is provided.

  For extended examples of usage, see the
  `AutoGGUFRerankerTest <https://github.com/JohnSnowLabs/spark-nlp/tree/master/src/test/scala/com/johnsnowlabs/nlp/annotators/seq2seq/AutoGGUFRerankerTest.scala>`__
@@ -222,7 +222,7 @@ class AutoGGUFReranker(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties
  >>> document = DocumentAssembler() \\
  ...     .setInputCol("text") \\
  ...     .setOutputCol("document")
- >>> reranker = AutoGGUFReranker.pretrained("bge-reranker-v2-m3-Q4_K_M") \\
+ >>> reranker = AutoGGUFReranker.pretrained() \\
  ...     .setInputCols(["document"]) \\
  ...     .setOutputCol("reranked_documents") \\
  ...     .setBatchSize(4) \\
@@ -307,13 +307,13 @@ class AutoGGUFReranker(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties
          return AutoGGUFReranker(java_model=jModel)

      @staticmethod
-     def pretrained(name="bge-reranker-v2-m3-Q4_K_M", lang="en", remote_loc=None):
+     def pretrained(name="bge_reranker_v2_m3_Q4_K_M", lang="en", remote_loc=None):
          """Downloads and loads a pretrained model.

          Parameters
          ----------
          name : str, optional
-             Name of the pretrained model, by default "bge-reranker-v2-m3-Q4_K_M"
+             Name of the pretrained model, by default "bge_reranker_v2_m3_Q4_K_M"
          lang : str, optional
              Language of the pretrained model, by default "en"
          remote_loc : str, optional
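The only behavioral change here is the default ``name`` identifier, which switches from hyphens to underscores. A sketch, assuming the old hyphenated name no longer resolves on the models hub:

```python
from sparknlp.annotator import AutoGGUFReranker

# 6.1.3 default identifier (underscores, not hyphens):
reranker = AutoGGUFReranker.pretrained("bge_reranker_v2_m3_Q4_K_M") \
    .setInputCols(["document"]) \
    .setOutputCol("reranked_documents") \
    .setQuery("A man is eating pasta.")

# Equivalent, since the name above is now the default:
reranker = AutoGGUFReranker.pretrained() \
    .setInputCols(["document"]) \
    .setOutputCol("reranked_documents") \
    .setQuery("A man is eating pasta.")
```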
sparknlp/base/__init__.py CHANGED
@@ -17,6 +17,7 @@ from sparknlp.base.document_assembler import *
  from sparknlp.base.multi_document_assembler import *
  from sparknlp.base.embeddings_finisher import *
  from sparknlp.base.finisher import *
+ from sparknlp.base.gguf_ranking_finisher import *
  from sparknlp.base.graph_finisher import *
  from sparknlp.base.has_recursive_fit import *
  from sparknlp.base.has_recursive_transform import *
sparknlp/base/gguf_ranking_finisher.py ADDED
@@ -0,0 +1,234 @@
+ # Copyright 2017-2024 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains classes for the GGUFRankingFinisher."""
+
+ from pyspark import keyword_only
+ from pyspark.ml.param import TypeConverters, Params, Param
+ from sparknlp.internal import AnnotatorTransformer
+
+
+ class GGUFRankingFinisher(AnnotatorTransformer):
+     """Finisher for AutoGGUFReranker outputs that provides ranking capabilities,
+     including top-k selection, sorting by relevance score, and score normalization.
+
+     This finisher processes the output of AutoGGUFReranker, which contains documents with
+     relevance scores in their metadata. It provides several options for post-processing:
+
+     - Top-k selection: Select only the top k documents by relevance score
+     - Score thresholding: Filter documents by minimum relevance score
+     - Min-max scaling: Normalize relevance scores to the 0-1 range
+     - Sorting: Sort documents by relevance score in descending order
+     - Ranking: Add rank information to document metadata
+
+     The finisher preserves the document annotation structure while adding ranking information
+     to the metadata and optionally filtering/sorting the documents.
+
+     For extended examples of usage, see the `Examples
+     <https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/finisher/gguf_ranking_finisher_example.py>`__.
+
+     ====================== ======================
+     Input Annotation types Output Annotation type
+     ====================== ======================
+     ``DOCUMENT``           ``DOCUMENT``
+     ====================== ======================
+
+     Parameters
+     ----------
+     inputCols
+         Name of input annotation columns containing reranked documents
+     outputCol
+         Name of output annotation column containing ranked documents, by default "ranked_documents"
+     topK
+         Maximum number of top documents to return based on relevance score (-1 for no limit), by default -1
+     minRelevanceScore
+         Minimum relevance score threshold for filtering documents, by default unbounded (``float('-inf')``)
+     minMaxScaling
+         Whether to apply min-max scaling to normalize relevance scores to the 0-1 range, by default False
+
+     Examples
+     --------
+     >>> import sparknlp
+     >>> from sparknlp.base import *
+     >>> from sparknlp.annotator import *
+     >>> from pyspark.ml import Pipeline
+     >>> documentAssembler = DocumentAssembler() \\
+     ...     .setInputCol("text") \\
+     ...     .setOutputCol("document")
+     >>> reranker = AutoGGUFReranker.pretrained() \\
+     ...     .setInputCols("document") \\
+     ...     .setOutputCol("reranked_documents") \\
+     ...     .setQuery("A man is eating pasta.")
+     >>> finisher = GGUFRankingFinisher() \\
+     ...     .setInputCols("reranked_documents") \\
+     ...     .setOutputCol("ranked_documents") \\
+     ...     .setTopK(3) \\
+     ...     .setMinMaxScaling(True)
+     >>> pipeline = Pipeline().setStages([documentAssembler, reranker, finisher])
+     >>> data = spark.createDataFrame([
+     ...     ("A man is eating food.",),
+     ...     ("A man is eating a piece of bread.",),
+     ...     ("The girl is carrying a baby.",),
+     ...     ("A man is riding a horse.",)
+     ... ], ["text"])
+     >>> result = pipeline.fit(data).transform(data)
+     >>> result.select("ranked_documents").show(truncate=False)
+
+     Documents will be sorted by relevance, with rank information in the metadata.
+     """
+
+     name = "GGUFRankingFinisher"
+
+     inputCols = Param(Params._dummy(),
+                       "inputCols",
+                       "Name of input annotation columns containing reranked documents",
+                       typeConverter=TypeConverters.toListString)
+
+     outputCol = Param(Params._dummy(),
+                       "outputCol",
+                       "Name of output annotation column containing ranked documents",
+                       typeConverter=TypeConverters.toListString)
+
+     topK = Param(Params._dummy(),
+                  "topK",
+                  "Maximum number of top documents to return based on relevance score (-1 for no limit)",
+                  typeConverter=TypeConverters.toInt)
+
+     minRelevanceScore = Param(Params._dummy(),
+                               "minRelevanceScore",
+                               "Minimum relevance score threshold for filtering documents",
+                               typeConverter=TypeConverters.toFloat)
+
+     minMaxScaling = Param(Params._dummy(),
+                           "minMaxScaling",
+                           "Whether to apply min-max scaling to normalize relevance scores to 0-1 range",
+                           typeConverter=TypeConverters.toBoolean)
+
+     @keyword_only
+     def __init__(self):
+         super(GGUFRankingFinisher, self).__init__(
+             classname="com.johnsnowlabs.nlp.finisher.GGUFRankingFinisher")
+         self._setDefault(
+             topK=-1,
+             minRelevanceScore=float('-inf'),  # equivalent to Scala's Double.MinValue
+             minMaxScaling=False,
+             outputCol=["ranked_documents"]
+         )
+
+     @keyword_only
+     def setParams(self):
+         kwargs = self._input_kwargs
+         return self._set(**kwargs)
+
+     def setInputCols(self, *value):
+         """Sets input annotation column names.
+
+         Parameters
+         ----------
+         value : List[str]
+             Input annotation column names containing reranked documents
+         """
+         if len(value) == 1 and isinstance(value[0], list):
+             return self._set(inputCols=value[0])
+         else:
+             return self._set(inputCols=list(value))
+
+     def getInputCols(self):
+         """Gets input annotation column names.
+
+         Returns
+         -------
+         List[str]
+             Input annotation column names
+         """
+         return self.getOrDefault(self.inputCols)
+
+     def setOutputCol(self, value):
+         """Sets output annotation column name.
+
+         Parameters
+         ----------
+         value : str
+             Output annotation column name
+         """
+         return self._set(outputCol=[value])
+
+     def getOutputCol(self):
+         """Gets output annotation column name.
+
+         Returns
+         -------
+         str
+             Output annotation column name
+         """
+         output_cols = self.getOrDefault(self.outputCol)
+         return output_cols[0] if output_cols else "ranked_documents"
+
+     def setTopK(self, value):
+         """Sets maximum number of top documents to return.
+
+         Parameters
+         ----------
+         value : int
+             Maximum number of top documents to return (-1 for no limit)
+         """
+         return self._set(topK=value)
+
+     def getTopK(self):
+         """Gets maximum number of top documents to return.
+
+         Returns
+         -------
+         int
+             Maximum number of top documents to return
+         """
+         return self.getOrDefault(self.topK)
+
+     def setMinRelevanceScore(self, value):
+         """Sets minimum relevance score threshold.
+
+         Parameters
+         ----------
+         value : float
+             Minimum relevance score threshold
+         """
+         return self._set(minRelevanceScore=value)
+
+     def getMinRelevanceScore(self):
+         """Gets minimum relevance score threshold.
+
+         Returns
+         -------
+         float
+             Minimum relevance score threshold
+         """
+         return self.getOrDefault(self.minRelevanceScore)
+
+     def setMinMaxScaling(self, value):
+         """Sets whether to apply min-max scaling.
+
+         Parameters
+         ----------
+         value : bool
+             Whether to apply min-max scaling to normalize scores
+         """
+         return self._set(minMaxScaling=value)
+
+     def getMinMaxScaling(self):
+         """Gets whether to apply min-max scaling.
+
+         Returns
+         -------
+         bool
+             Whether min-max scaling is enabled
+         """
+         return self.getOrDefault(self.minMaxScaling)
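A sketch of reading the finisher's output back out of the annotation structure. The metadata key names (``relevance_score``, ``rank``) are assumptions inferred from the parameter descriptions, not field names confirmed by this diff:

```python
from pyspark.sql import functions as F

# `result` is the DataFrame from the docstring example above. Each entry in
# ranked_documents is an annotation struct with `result` (text) and
# `metadata` (map) fields; the key names below are assumed.
ranked = (
    result.select(F.explode("ranked_documents").alias("doc"))
    .select(
        F.col("doc.result").alias("text"),
        F.col("doc.metadata").getItem("relevance_score").alias("score"),
        F.col("doc.metadata").getItem("rank").alias("rank"),
    )
)
ranked.show(truncate=False)
```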
sparknlp/reader/reader2doc.py CHANGED
@@ -122,6 +122,20 @@ class Reader2Doc(
          typeConverter=TypeConverters.toString
      )

+     outputAsDocument = Param(
+         Params._dummy(),
+         "outputAsDocument",
+         "Whether to return all sentences joined into a single document",
+         typeConverter=TypeConverters.toBoolean
+     )
+
+     excludeNonText = Param(
+         Params._dummy(),
+         "excludeNonText",
+         "Whether to exclude non-text content from the output. Default is False.",
+         typeConverter=TypeConverters.toBoolean
+     )
+
      @keyword_only
      def __init__(self):
          super(Reader2Doc, self).__init__(classname="com.johnsnowlabs.reader.Reader2Doc")
@@ -182,7 +196,7 @@ class Reader2Doc(
      def setFlattenOutput(self, value):
          """Sets whether to flatten the output to plain text with minimal metadata.

-         Parameters
+         ParametersF
          ----------
          value : bool
              If true, output is flattened to plain text with minimal metadata
@@ -208,3 +222,23 @@ class Reader2Doc(
              Output format for the table content. Options are 'plain-text' or 'html-table'. Default is 'json-table'.
          """
          return self._set(outputFormat=value)
+
+     def setOutputAsDocument(self, value):
+         """Sets whether to return all sentences joined into a single document.
+
+         Parameters
+         ----------
+         value : bool
+             Whether to return all sentences joined into a single document
+         """
+         return self._set(outputAsDocument=value)
+
+     def setExcludeNonText(self, value):
+         """Sets whether to exclude non-text content from the output.
+
+         Parameters
+         ----------
+         value : bool
+             Whether to exclude non-text content from the output. Default is False.
+         """
+         return self._set(excludeNonText=value)
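A sketch of the two new flags used together. The ``setContentType``/``setContentPath`` calls are assumptions based on the existing Reader2Doc API rather than on this diff; with ``outputAsDocument`` enabled, each input file should come back as a single joined ``DOCUMENT`` annotation, with non-text elements dropped by ``excludeNonText``:

```python
from pyspark.ml import Pipeline
from sparknlp.reader.reader2doc import Reader2Doc

reader2doc = Reader2Doc() \
    .setContentType("text/html") \
    .setContentPath("/path/to/html/files") \
    .setOutputCol("document") \
    .setOutputAsDocument(True) \
    .setExcludeNonText(True)

# Reader2Doc pulls its input from contentPath, so an empty one-column
# frame is enough to drive the pipeline.
empty_df = spark.createDataFrame([], "string").toDF("text")
result = Pipeline(stages=[reader2doc]).fit(empty_df).transform(empty_df)
result.select("document").show(truncate=False)
```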