spark-nlp 6.1.3rc1__py2.py3-none-any.whl → 6.1.4__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of spark-nlp might be problematic. Click here for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: spark-nlp
3
- Version: 6.1.3rc1
3
+ Version: 6.1.4
4
4
  Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
5
5
  Home-page: https://github.com/JohnSnowLabs/spark-nlp
6
6
  Author: John Snow Labs
@@ -102,7 +102,7 @@ $ java -version
102
102
  $ conda create -n sparknlp python=3.7 -y
103
103
  $ conda activate sparknlp
104
104
  # spark-nlp by default is based on pyspark 3.x
105
- $ pip install spark-nlp==6.1.3 pyspark==3.3.1
105
+ $ pip install spark-nlp==6.1.4 pyspark==3.3.1
106
106
  ```
107
107
 
108
108
  In Python console or Jupyter `Python3` kernel:
@@ -168,7 +168,7 @@ For a quick example of using pipelines and models take a look at our official [d
168
168
 
169
169
  ### Apache Spark Support
170
170
 
171
- Spark NLP *6.1.3* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
171
+ Spark NLP *6.1.4* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
172
172
 
173
173
  | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
174
174
  |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
@@ -198,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http
198
198
 
199
199
  ### Databricks Support
200
200
 
201
- Spark NLP 6.1.3 has been tested and is compatible with the following runtimes:
201
+ Spark NLP 6.1.4 has been tested and is compatible with the following runtimes:
202
202
 
203
203
  | **CPU** | **GPU** |
204
204
  |--------------------|--------------------|
@@ -216,7 +216,7 @@ We are compatible with older runtimes. For a full list check databricks support
216
216
 
217
217
  ### EMR Support
218
218
 
219
- Spark NLP 6.1.3 has been tested and is compatible with the following EMR releases:
219
+ Spark NLP 6.1.4 has been tested and is compatible with the following EMR releases:
220
220
 
221
221
  | **EMR Release** |
222
222
  |--------------------|
@@ -3,7 +3,7 @@ com/johnsnowlabs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
3
3
  com/johnsnowlabs/ml/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  com/johnsnowlabs/ml/ai/__init__.py,sha256=YQiK2M7U4d8y5irPy_HB8ae0mSpqS9583MH44pnKJXc,295
5
5
  com/johnsnowlabs/nlp/__init__.py,sha256=DPIVXtONO5xXyOk-HB0-sNiHAcco17NN13zPS_6Uw8c,294
6
- sparknlp/__init__.py,sha256=UR0dRykX67j-Ksuzk5Xe-Mod5qCK24iBjHHa0omOp2w,13814
6
+ sparknlp/__init__.py,sha256=LcfC7bWeae5XgjWbNbWH94LlJkBon5dA8fYnb_2NyGc,13814
7
7
  sparknlp/annotation.py,sha256=I5zOxG5vV2RfPZfqN9enT1i4mo6oBcn3Lrzs37QiOiA,5635
8
8
  sparknlp/annotation_audio.py,sha256=iRV_InSVhgvAwSRe9NTbUH9v6OGvTM-FPCpSAKVu0mE,1917
9
9
  sparknlp/annotation_image.py,sha256=xhCe8Ko-77XqWVuuYHFrjKqF6zPd8Z-RY_rmZXNwCXU,2547
@@ -19,13 +19,10 @@ sparknlp/annotator/document_character_text_splitter.py,sha256=oNrOKJAKO2h1wr0bEu
19
19
  sparknlp/annotator/document_normalizer.py,sha256=hU2fG6vaPfdngQapoeSu-_zS_LiBZNp2tcVBGl6eTpk,10973
20
20
  sparknlp/annotator/document_token_splitter.py,sha256=-9xbQ9pVAjcKHQQrSk6Cb7f8W1cblCLwWXTNR8kFptA,7499
21
21
  sparknlp/annotator/document_token_splitter_test.py,sha256=NWO9mwhAIUJFuxPofB3c39iUm_6vKp4pteDsBOTH8ng,2684
22
- sparknlp/annotator/extractor.py,sha256=GObGwyl602jrt6EbnhZO2-KptrzVRop_5uA8HewpWcc,5303
23
22
  sparknlp/annotator/graph_extraction.py,sha256=b4SB3B_hFgCJT4e5Jcscyxdzfbvw3ujKTa6UNgX5Lhc,14471
24
23
  sparknlp/annotator/lemmatizer.py,sha256=w1nNMln2HgM4O1DOaISo5AGyzzFHuaIDQqWFcr4JmcA,8970
25
24
  sparknlp/annotator/n_gram_generator.py,sha256=KRX5xfxmorOfYQkQHZWkkXjwjC13gDTAXnukwgz6hKU,5274
26
25
  sparknlp/annotator/normalizer.py,sha256=7AkAOB-e8b2uyUBwYoq9HvMPijOwV3wEoxcB3BVsr4w,8780
27
- sparknlp/annotator/openai_completion.py,sha256=kigEY5G2lZl2THezx-WDDByJKY1Wx99CPoZdkpik9x4,16899
28
- sparknlp/annotator/openai_embeddings.py,sha256=eVnLK9t1jDOS58K2YHOyHakILG_4rGtzTIWelqieY9g,109860
29
26
  sparknlp/annotator/stemmer.py,sha256=Tl48voyG9wqbT5MAA1hDKW90NorU8rIDhttJxOo1s3Q,2948
30
27
  sparknlp/annotator/stop_words_cleaner.py,sha256=Z9yI9AWDIAXbPM2X6n84voiW31Z20XofCL-tTQNo5ro,7015
31
28
  sparknlp/annotator/tf_ner_dl_graph_builder.py,sha256=ovsRBUfw9lJkuetmrcYRmW1Ll-33sdDPi4xJ0M_Fs7k,6379
@@ -67,7 +64,6 @@ sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py,sha256=w9hHLrQb
67
64
  sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py,sha256=M__giFElL6Q3I88QD6OoXDzdQDk_Zp5sS__Kh_XpLdo,7308
68
65
  sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py,sha256=SgFAJcv7ZE3BmJOehK_CjAaueqaaK6PR33zA5aE9-Ww,6754
69
66
  sparknlp/annotator/classifier_dl/multi_classifier_dl.py,sha256=ylKQzS7ROyeKeiOF4BZiIkQV1sfrnfUUQ9LXFSFK_Vo,16045
70
- sparknlp/annotator/classifier_dl/roberta_bert_for_zero_shot_classification.py,sha256=nP0D_jg8xPFUMP9uGNvmACIqfwAneDddVXbioHLHFJ0,8818
71
67
  sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py,sha256=SlzkA_fKurWOQDhvWlEBiMUfLgIoaRRglIdENMv7u38,6008
72
68
  sparknlp/annotator/classifier_dl/roberta_for_question_answering.py,sha256=WRxu1uhXnY9C4UHdtJ8qiVGhPSX7sCdSaML0AWHOdJw,6471
73
69
  sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py,sha256=z97uH5WkG8kPX1Y9qtpLwD7egl0kzbVmxtq4xzZgNNI,7857
@@ -150,11 +146,12 @@ sparknlp/annotator/matcher/date_matcher.py,sha256=FrjTVoNBq1Z7E4qSJKvfV5rC7Mlm9R
150
146
  sparknlp/annotator/matcher/multi_date_matcher.py,sha256=-zCp4HugIpSN6U4-c1uN_dGn7x69xYy6SUoQz6dY34s,4475
151
147
  sparknlp/annotator/matcher/regex_matcher.py,sha256=GYhJNjBnCvfeEMv7d2_kDnqjfHtC7Hts--Ttlkjdozs,8380
152
148
  sparknlp/annotator/matcher/text_matcher.py,sha256=kpP1-5BEFEW9kantoHvjhcCbX1qsGm4gEDtFHATWOwA,10636
153
- sparknlp/annotator/ner/__init__.py,sha256=4ni5IHCbGlEmhCgGDw7A61x633qUTvWko2pIixiKO2Q,948
149
+ sparknlp/annotator/ner/__init__.py,sha256=cd13PX6O92BIasE8NWd3JUR6up2fRbe7chRx4lQZRcY,1006
154
150
  sparknlp/annotator/ner/ner_approach.py,sha256=4Y4gcusVtDaQpXfBbBe8XMAZ5hw1mvdh5A7g1j_T1KY,2793
155
151
  sparknlp/annotator/ner/ner_converter.py,sha256=ANPp_Xe0DaK4z4n-0KujBj3Xp5jIbsFXBXvmp-aCKlM,5924
156
152
  sparknlp/annotator/ner/ner_crf.py,sha256=eFylEz3-CENW0dyc6K4jodz9Kig3tnCyfZ3s-KZMvH4,14283
157
153
  sparknlp/annotator/ner/ner_dl.py,sha256=ght1W6-ArjLRiNHCv_bKpozkyNd8HVIb8SDGhcbp8Fg,22123
154
+ sparknlp/annotator/ner/ner_dl_graph_checker.py,sha256=KB_BuNj2EPiVaHJEU9N1d40j-qGCn9e2tsTYctsioyc,8227
158
155
  sparknlp/annotator/ner/ner_overwriter.py,sha256=en5OxXIP46yTXokIE96YDP9kcHA9oxiRPgwXMo0otew,6798
159
156
  sparknlp/annotator/ner/zero_shot_ner_model.py,sha256=DohhnkGSG-JxjW72t8AOx3GY7R_qT-LA3I0KF9TBz-Y,7501
160
157
  sparknlp/annotator/openai/__init__.py,sha256=u6SpV_xS8UpBE95WnTl0IefOI5TrTRl7ZHuYoeTetiA,759
@@ -173,7 +170,7 @@ sparknlp/annotator/sentiment/sentiment_detector.py,sha256=m545NGU0Xzg_PO6_qIfpli
173
170
  sparknlp/annotator/sentiment/vivekn_sentiment.py,sha256=4rpXWDgzU6ddnbrSCp9VdLb2epCc9oZ3c6XcqxEw8nk,9655
174
171
  sparknlp/annotator/seq2seq/__init__.py,sha256=aDiph00Hyq7L8uDY0frtyuHtqFodBqTMbixx_nq4z1I,1841
175
172
  sparknlp/annotator/seq2seq/auto_gguf_model.py,sha256=yhZQHMHfp88rQvLHTWyS-8imZrwqp-8RQQwnw6PmHfc,11749
176
- sparknlp/annotator/seq2seq/auto_gguf_reranker.py,sha256=QpGpyO1_epWzMospTFrfVVLj2KZ_n3gbHN269vo9fbU,12667
173
+ sparknlp/annotator/seq2seq/auto_gguf_reranker.py,sha256=MS4wCm2A2YiQfkB4HVVZKuN-3A1yGzqSCF69nu7J2rQ,12640
177
174
  sparknlp/annotator/seq2seq/auto_gguf_vision_model.py,sha256=swBek2026dW6BOX5O9P8Uq41X2GC71VGW0ADFeUIvs0,15299
178
175
  sparknlp/annotator/seq2seq/bart_transformer.py,sha256=I1flM4yeCzEAKOdQllBC30XuedxVJ7ferkFhZ6gwEbE,18481
179
176
  sparknlp/annotator/seq2seq/cohere_transformer.py,sha256=43LZBVazZMgJRCsN7HaYjVYfJ5hRMV95QZyxMtXq-m4,13496
@@ -205,12 +202,13 @@ sparknlp/annotator/token/regex_tokenizer.py,sha256=FG2HvFwMb1G_4grfyIQaeBpaAgKv_
205
202
  sparknlp/annotator/token/tokenizer.py,sha256=Me3P3wogUKUJ7O7_2wLdPzF00vKpp_sHuiztpGWRVpU,19939
206
203
  sparknlp/annotator/ws/__init__.py,sha256=-l8bnl8Z6lGXWOBdRIBZ6958fzTHt4o87QhhLHIFF8A,693
207
204
  sparknlp/annotator/ws/word_segmenter.py,sha256=rrbshwn5wzXIHpCCDji6ZcsmiARpuA82_p_6TgNHfRc,16365
208
- sparknlp/base/__init__.py,sha256=fCL-kReIavZceUa1OC99pSRH7MsXzqGB8BXgzVS_f7s,1311
205
+ sparknlp/base/__init__.py,sha256=ug0e79fv03D67mUqmb-i-a89vOa6EcRx49Z0fS6rT3g,1361
209
206
  sparknlp/base/audio_assembler.py,sha256=HKa9mXvmuMUrjTihUZkppGj-WJjcUrm2BGapNuPifyI,3320
210
207
  sparknlp/base/doc2_chunk.py,sha256=TyvbdJNkVo9favHlOEoH5JwKbjpk5ZVJ75p8Cilp9jM,6551
211
208
  sparknlp/base/document_assembler.py,sha256=zl-SXWMTR3B0EZ8z6SWYchCwEo-61FhU6u7dHUKDIOg,6697
212
209
  sparknlp/base/embeddings_finisher.py,sha256=5QU1Okgl2ULrPVf4ze1H0SsRCMYXWGARtUsT7dagBYA,7659
213
210
  sparknlp/base/finisher.py,sha256=V4wkMm9Ug09q4zTQc9T9Wr-awmu2Hu-eNaJ039YgZXM,8583
211
+ sparknlp/base/gguf_ranking_finisher.py,sha256=tzoisuD70myfHo3t4WFtBs8i1jwdjkwXCbWknDEXOHk,8315
214
212
  sparknlp/base/graph_finisher.py,sha256=a8fxk3ei2YQw6s0Y9Yy8oMOF1i1XUrgqaiwVE0VPt4w,4834
215
213
  sparknlp/base/has_recursive_fit.py,sha256=P55rSHLIXhihXWS2bOC_DskcQTc3njieVD1JkjS2bcA,849
216
214
  sparknlp/base/has_recursive_transform.py,sha256=UkGNgo4LMsjQC-Coeefg4bJcg7FoPcPiG382zEa6Ywk,841
@@ -220,7 +218,6 @@ sparknlp/base/multi_document_assembler.py,sha256=4htET1fRAeOB6zhsNXsBq5rKZvn-LGD
220
218
  sparknlp/base/prompt_assembler.py,sha256=_C_9MdHqsxUjSOa3TqCV-6sSfSiRyhfHBQG5m7RlqxY,11578
221
219
  sparknlp/base/recursive_pipeline.py,sha256=V9rTnu8KMwgjoceykN9pF1mKGtOkkuiC_n9v8dE3LDk,4279
222
220
  sparknlp/base/table_assembler.py,sha256=Kxu3R2fY6JgCxEc07ibsMsjip6dgcPDHLiWAZ8gC_d8,5102
223
- sparknlp/base/token2_chunk.py,sha256=FtS2Doav9xL1IrC9ZUU4iXqyipp-iT3g68kZt-7YCcQ,2674
224
221
  sparknlp/base/token_assembler.py,sha256=qiHry07L7mVCqeHSH6hHxLygv1AsfZIE4jy1L75L3Do,5075
225
222
  sparknlp/common/__init__.py,sha256=MJuE__T1YS8f3As7X5sgzHibGjDeiFkQ5vc2bEEf0Ww,1148
226
223
  sparknlp/common/annotator_approach.py,sha256=CbkyaWl6rRX_VaXz2xJCjofijRGJGeJCsqQTDQgNTAw,1765
@@ -244,7 +241,7 @@ sparknlp/logging/__init__.py,sha256=DoROFF5KLZe4t4Q-OHxqk1nhqbw9NQ-wb64y8icNwgw,
244
241
  sparknlp/logging/comet.py,sha256=_ZBi9-hlilCAnd4lvdYMWiq4Vqsppv8kow3k0cf-NG4,15958
245
242
  sparknlp/partition/__init__.py,sha256=L0w-yv_HnnvoKlSX5MzI2GKHW3RLLfGyq8bgWYVeKjU,749
246
243
  sparknlp/partition/partition.py,sha256=GXEAUvOea04Vc_JK0z112cAKFrJ4AEpjLJ8xlzZt6Kw,8551
247
- sparknlp/partition/partition_properties.py,sha256=xhAMhlsTBg-WS6KWDyVbRPwO7IzpowVVhJNR-ZGhvdo,9520
244
+ sparknlp/partition/partition_properties.py,sha256=2tGdIv1NaJNaux_TTskKQHnARAwBkFctaqCcNw21Wr8,19920
248
245
  sparknlp/partition/partition_transformer.py,sha256=lRR1h-IMlHR8M0VeB50SbU39GHHF5PgMaJ42qOriS6A,6855
249
246
  sparknlp/pretrained/__init__.py,sha256=GV-x9UBK8F2_IR6zYatrzFcVJtkSUIMbxqWsxRUePmQ,793
250
247
  sparknlp/pretrained/pretrained_pipeline.py,sha256=lquxiaABuA68Rmu7csamJPqBoRJqMUO0oNHsmEZDAIs,5740
@@ -253,9 +250,9 @@ sparknlp/pretrained/utils.py,sha256=T1MrvW_DaWk_jcOjVLOea0NMFE9w8fe0ZT_5urZ_nEY,
253
250
  sparknlp/reader/__init__.py,sha256=-Toj3AIBki-zXPpV8ezFTI2LX1yP_rK2bhpoa8nBkTw,685
254
251
  sparknlp/reader/enums.py,sha256=MNGug9oJ1BBLM1Pbske13kAabalDzHa2kucF5xzFpHs,770
255
252
  sparknlp/reader/pdf_to_text.py,sha256=eWw-cwjosmcSZ9eHso0F5QQoeGBBnwsOhzhCXXvMjZA,7169
256
- sparknlp/reader/reader2doc.py,sha256=LRqfaL9nidhlPkJIwTJo7SnGYmNNfOqwEdrsWYGEdnI,7146
257
- sparknlp/reader/reader2image.py,sha256=JIwgVtznBpSExLl3JX7QY3BT4CUmZf1JWbsSwKLV1aY,3099
258
- sparknlp/reader/reader2table.py,sha256=GC6Yz0gQ83S6XKOi329TUNQuAvLrBxysqDkDRZPvcYA,4759
253
+ sparknlp/reader/reader2doc.py,sha256=87aMk8-_1NHd3bB1rxw56BQMJc6mGgtnYGXwKw2uCmU,5916
254
+ sparknlp/reader/reader2image.py,sha256=k3gb4LEiqDV-pnD-HEaA1KHoAxXmoYys2Y817i1yvP0,4557
255
+ sparknlp/reader/reader2table.py,sha256=pIR9r6NapUV4xdsFecadWlKTSJmRMAm36eqM9aXf13k,2416
259
256
  sparknlp/reader/sparknlp_reader.py,sha256=MJs8v_ECYaV1SOabI1L_2MkVYEDVImtwgbYypO7DJSY,20623
260
257
  sparknlp/training/__init__.py,sha256=qREi9u-5Vc2VjpL6-XZsyvu5jSEIdIhowW7_kKaqMqo,852
261
258
  sparknlp/training/conll.py,sha256=wKBiSTrjc6mjsl7Nyt6B8f4yXsDJkZb-sn8iOjix9cE,6961
@@ -287,7 +284,7 @@ sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py,sha256=R4yHFN3
287
284
  sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py,sha256=EoCSdcIjqQ3wv13MAuuWrKV8wyVBP0SbOEW41omHlR0,23189
288
285
  sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py,sha256=k5CQ7gKV6HZbZMB8cKLUJuZxoZWlP_DFWdZ--aIDwsc,2356
289
286
  sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py,sha256=pAxjWhjazSX8Vg0MFqJiuRVw1IbnQNSs-8Xp26L4nko,870
290
- spark_nlp-6.1.3rc1.dist-info/METADATA,sha256=q-hbjHon_67zz6Zp0s7KhdoSSIQw0z9l-tzYPDs6pfU,19777
291
- spark_nlp-6.1.3rc1.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
292
- spark_nlp-6.1.3rc1.dist-info/top_level.txt,sha256=uuytur4pyMRw2H_txNY2ZkaucZHUs22QF8-R03ch_-E,13
293
- spark_nlp-6.1.3rc1.dist-info/RECORD,,
287
+ spark_nlp-6.1.4.dist-info/METADATA,sha256=CqRyNEZCA_8F_J5vHG4GUZXRiavXyfb3tPMTStidr4c,19774
288
+ spark_nlp-6.1.4.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
289
+ spark_nlp-6.1.4.dist-info/top_level.txt,sha256=uuytur4pyMRw2H_txNY2ZkaucZHUs22QF8-R03ch_-E,13
290
+ spark_nlp-6.1.4.dist-info/RECORD,,
sparknlp/__init__.py CHANGED
@@ -66,7 +66,7 @@ sys.modules['com.johnsnowlabs.ml.ai'] = annotator
66
66
  annotators = annotator
67
67
  embeddings = annotator
68
68
 
69
- __version__ = "6.1.3"
69
+ __version__ = "6.1.4"
70
70
 
71
71
 
72
72
  def start(gpu=False,
@@ -16,5 +16,6 @@ from sparknlp.annotator.ner.ner_approach import *
16
16
  from sparknlp.annotator.ner.ner_converter import *
17
17
  from sparknlp.annotator.ner.ner_crf import *
18
18
  from sparknlp.annotator.ner.ner_dl import *
19
+ from sparknlp.annotator.ner.ner_dl_graph_checker import *
19
20
  from sparknlp.annotator.ner.ner_overwriter import *
20
21
  from sparknlp.annotator.ner.zero_shot_ner_model import *
@@ -0,0 +1,237 @@
1
+ # Copyright 2017-2025 John Snow Labs
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Contains classes for NerDL."""
15
+
16
+ from sparknlp.common import *
17
+ import sparknlp.internal as _internal
18
+ from pyspark.ml.util import JavaMLWritable
19
+ from pyspark.ml.wrapper import JavaEstimator
20
+
21
+
22
+ class NerDLGraphChecker(
23
+ JavaEstimator,
24
+ JavaMLWritable,
25
+ _internal.ParamsGettersSetters,
26
+ ):
27
+ """Checks whether a suitable NerDL graph is available for the given training dataset, before any
28
+ computations/training is done. This annotator is useful for custom training cases, where
29
+ specialized graphs are needed.
30
+
31
+ Important: This annotator should be used or positioned before any embedding or NerDLApproach
32
+ annotators in the pipeline and will process the whole dataset to extract the required graph parameters.
33
+
34
+ This annotator requires a dataset with at least two columns: one with tokens and one with the
35
+ labels. In addition, it requires the used embedding annotator in the pipeline to extract the
36
+ suitable embedding dimension.
37
+
38
+ For extended examples of usage, see the `Examples
39
+ <https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/training/english/dl-ner/ner_dl_graph_checker.ipynb>`__
40
+ and the `NerDLGraphCheckerTestSpec
41
+ <https://github.com/JohnSnowLabs/spark-nlp/blob/master/src/test/scala/com/johnsnowlabs/nlp/annotators/ner/dl/NerDLGraphCheckerTestSpec.scala>`__.
42
+
43
+ ==================================== ======================
44
+ Input Annotation types Output Annotation type
45
+ ==================================== ======================
46
+ ``DOCUMENT, TOKEN`` `NONE`
47
+ ==================================== ======================
48
+
49
+ Parameters
50
+ ----------
51
+ inputCols
52
+ Column names of input annotations
53
+ labelColumn
54
+ Column name for data labels
55
+ embeddingsDim
56
+ Dimensionality of embeddings
57
+
58
+ Examples
59
+ --------
60
+ >>> import sparknlp
61
+ >>> from sparknlp.base import *
62
+ >>> from sparknlp.annotator import *
63
+ >>> from pyspark.ml import Pipeline
64
+
65
+ This CoNLL dataset already includes a sentence, token and label
66
+ column with their respective annotator types. If a custom dataset is used,
67
+ these need to be defined with for example:
68
+
69
+ >>> conll = CoNLL()
70
+ >>> trainingData = conll.readDataset(spark, "src/test/resources/conll2003/eng.train")
71
+ >>> embeddings = BertEmbeddings \\
72
+ ... .pretrained() \\
73
+ ... .setInputCols(["sentence", "token"]) \\
74
+ ... .setOutputCol("embeddings")
75
+
76
+ This annotator requires the data for NerDLApproach graphs: text, tokens, labels and the embedding model
77
+
78
+ >>> nerDLGraphChecker = NerDLGraphChecker() \\
79
+ ... .setInputCols(["sentence", "token"]) \\
80
+ ... .setLabelColumn("label") \\
81
+ ... .setEmbeddingsModel(embeddings)
82
+ >>> nerTagger = NerDLApproach() \\
83
+ ... .setInputCols(["sentence", "token", "embeddings"]) \\
84
+ ... .setLabelColumn("label") \\
85
+ ... .setOutputCol("ner") \\
86
+ ... .setMaxEpochs(1) \\
87
+ ... .setRandomSeed(0) \\
88
+ ... .setVerbose(0)
89
+ >>> pipeline = Pipeline().setStages([nerDLGraphChecker, embeddings, nerTagger])
90
+
91
+ If we now fit the model with a graph missing, then an exception is raised.
92
+
93
+ >>> pipelineModel = pipeline.fit(trainingData)
94
+ """
95
+
96
+ inputCols = Param(
97
+ Params._dummy(),
98
+ "inputCols",
99
+ "Input columns",
100
+ typeConverter=TypeConverters.toListString,
101
+ )
102
+
103
+ def setInputCols(self, *value):
104
+ """Sets column names of input annotations.
105
+
106
+ Parameters
107
+ ----------
108
+ *value : List[str]
109
+ Input columns for the annotator
110
+ """
111
+ if type(value[0]) == str or type(value[0]) == list:
112
+ # self.inputColsValidation(value)
113
+ if len(value) == 1 and type(value[0]) == list:
114
+ return self._set(inputCols=value[0])
115
+ else:
116
+ return self._set(inputCols=list(value))
117
+ else:
118
+ raise TypeError(
119
+ "InputCols datatype not supported. It must be either str or list"
120
+ )
121
+
122
+ labelColumn = Param(
123
+ Params._dummy(),
124
+ "labelColumn",
125
+ "Column with label per each token",
126
+ typeConverter=TypeConverters.toString,
127
+ )
128
+
129
+ def setLabelColumn(self, value):
130
+ """Sets name of column for data labels.
131
+
132
+ Parameters
133
+ ----------
134
+ value : str
135
+ Column for data labels
136
+ """
137
+ return self._set(labelColumn=value)
138
+
139
+ embeddingsDim = Param(
140
+ Params._dummy(),
141
+ "embeddingsDim",
142
+ "Dimensionality of embeddings",
143
+ typeConverter=TypeConverters.toInt,
144
+ )
145
+
146
+ def setEmbeddingsDim(self, value: int):
147
+ """Sets Dimensionality of embeddings
148
+
149
+ Parameters
150
+ ----------
151
+ value : int
152
+ Dimensionality of embeddings
153
+ """
154
+ return self._set(embeddingsDim=value)
155
+
156
+ def setEmbeddingsModel(self, model: HasEmbeddingsProperties):
157
+ """
158
+ Get embeddingsDim from a given embeddings model, if possible.
159
+ Falls back to setEmbeddingsDim if dimension cannot be obtained automatically.
160
+ """
161
+ # Try Python API first
162
+ if hasattr(model, "getDimension"):
163
+ dim = model.getDimension()
164
+ return self.setEmbeddingsDim(int(dim))
165
+ # Try JVM side if available
166
+ if hasattr(model, "_java_obj") and hasattr(model._java_obj, "getDimension"):
167
+ dim = int(model._java_obj.getDimension())
168
+ return self.setEmbeddingsDim(dim)
169
+ raise ValueError(
170
+ "Could not infer embeddings dimension from provided model. "
171
+ "Use setEmbeddingsDim(dim) explicitly."
172
+ )
173
+
174
+ inputAnnotatorTypes = [
175
+ AnnotatorType.DOCUMENT,
176
+ AnnotatorType.TOKEN,
177
+ ]
178
+
179
+ graphFolder = Param(
180
+ Params._dummy(),
181
+ "graphFolder",
182
+ "Folder path that contain external graph files",
183
+ TypeConverters.toString,
184
+ )
185
+
186
+ def setGraphFolder(self, p):
187
+ """Sets folder path that contain external graph files.
188
+
189
+ Parameters
190
+ ----------
191
+ p : str
192
+ Folder path that contain external graph files
193
+ """
194
+ return self._set(graphFolder=p)
195
+
196
+ @keyword_only
197
+ def __init__(self):
198
+ _internal.ParamsGettersSetters.__init__(self)
199
+ classname = "com.johnsnowlabs.nlp.annotators.ner.dl.NerDLGraphChecker"
200
+ self.__class__._java_class_name = classname
201
+ self._java_obj = self._new_java_obj(classname, self.uid)
202
+ # self._setDefault()
203
+
204
+ def _create_model(self, java_model):
205
+ return NerDLGraphCheckerModel()
206
+
207
+
208
+ class NerDLGraphCheckerModel(
209
+ JavaModel,
210
+ JavaMLWritable,
211
+ _internal.ParamsGettersSetters,
212
+ ):
213
+ """
214
+ Resulting model from NerDLGraphChecker, that does not perform any transformations, as the
215
+ checks are done during the ``fit`` phase. It acts as the identity.
216
+
217
+ This annotator should never be used directly.
218
+ """
219
+
220
+ inputAnnotatorTypes = [
221
+ AnnotatorType.DOCUMENT,
222
+ AnnotatorType.TOKEN,
223
+ ]
224
+
225
+ @keyword_only
226
+ def __init__(
227
+ self,
228
+ classname="com.johnsnowlabs.nlp.annotators.ner.dl.NerDLGraphCheckerModel",
229
+ java_model=None,
230
+ ):
231
+ super(NerDLGraphCheckerModel, self).__init__(java_model=java_model)
232
+ if classname and not java_model:
233
+ self.__class__._java_class_name = classname
234
+ self._java_obj = self._new_java_obj(classname, self.uid)
235
+ if java_model is not None:
236
+ self._transfer_params_from_java()
237
+ # self._setDefault(lazyAnnotator=False)
@@ -47,7 +47,7 @@ class AutoGGUFReranker(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties
47
47
  ... .setOutputCol("reranked_documents") \\
48
48
  ... .setQuery("A man is eating pasta.")
49
49
 
50
- The default model is ``"bge-reranker-v2-m3-Q4_K_M"``, if no name is provided.
50
+ The default model is ``"bge_reranker_v2_m3_Q4_K_M"``, if no name is provided.
51
51
 
52
52
  For extended examples of usage, see the
53
53
  `AutoGGUFRerankerTest <https://github.com/JohnSnowLabs/spark-nlp/tree/master/src/test/scala/com/johnsnowlabs/nlp/annotators/seq2seq/AutoGGUFRerankerTest.scala>`__
@@ -222,7 +222,7 @@ class AutoGGUFReranker(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties
222
222
  >>> document = DocumentAssembler() \\
223
223
  ... .setInputCol("text") \\
224
224
  ... .setOutputCol("document")
225
- >>> reranker = AutoGGUFReranker.pretrained("bge-reranker-v2-m3-Q4_K_M") \\
225
+ >>> reranker = AutoGGUFReranker.pretrained() \\
226
226
  ... .setInputCols(["document"]) \\
227
227
  ... .setOutputCol("reranked_documents") \\
228
228
  ... .setBatchSize(4) \\
@@ -307,13 +307,13 @@ class AutoGGUFReranker(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties
307
307
  return AutoGGUFReranker(java_model=jModel)
308
308
 
309
309
  @staticmethod
310
- def pretrained(name="bge-reranker-v2-m3-Q4_K_M", lang="en", remote_loc=None):
310
+ def pretrained(name="bge_reranker_v2_m3_Q4_K_M", lang="en", remote_loc=None):
311
311
  """Downloads and loads a pretrained model.
312
312
 
313
313
  Parameters
314
314
  ----------
315
315
  name : str, optional
316
- Name of the pretrained model, by default "bge-reranker-v2-m3-Q4_K_M"
316
+ Name of the pretrained model, by default "bge_reranker_v2_m3_Q4_K_M"
317
317
  lang : str, optional
318
318
  Language of the pretrained model, by default "en"
319
319
  remote_loc : str, optional
sparknlp/base/__init__.py CHANGED
@@ -17,6 +17,7 @@ from sparknlp.base.document_assembler import *
17
17
  from sparknlp.base.multi_document_assembler import *
18
18
  from sparknlp.base.embeddings_finisher import *
19
19
  from sparknlp.base.finisher import *
20
+ from sparknlp.base.gguf_ranking_finisher import *
20
21
  from sparknlp.base.graph_finisher import *
21
22
  from sparknlp.base.has_recursive_fit import *
22
23
  from sparknlp.base.has_recursive_transform import *