spark-nlp 6.1.2.tar.gz → 6.1.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of spark-nlp might be problematic.

Files changed (293)
  1. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/PKG-INFO +5 -5
  2. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/README.md +4 -4
  3. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/setup.py +1 -1
  4. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/spark_nlp.egg-info/PKG-INFO +5 -5
  5. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/spark_nlp.egg-info/SOURCES.txt +2 -0
  6. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/__init__.py +1 -1
  7. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/ner/__init__.py +1 -0
  8. spark_nlp-6.1.3/sparknlp/annotator/ner/ner_dl_graph_checker.py +237 -0
  9. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/auto_gguf_reranker.py +4 -4
  10. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/__init__.py +1 -0
  11. spark_nlp-6.1.3/sparknlp/base/gguf_ranking_finisher.py +234 -0
  12. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/reader/reader2doc.py +35 -1
  13. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/com/__init__.py +0 -0
  14. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/com/johnsnowlabs/__init__.py +0 -0
  15. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/com/johnsnowlabs/ml/__init__.py +0 -0
  16. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/com/johnsnowlabs/ml/ai/__init__.py +0 -0
  17. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/com/johnsnowlabs/nlp/__init__.py +0 -0
  18. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/setup.cfg +0 -0
  19. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/spark_nlp.egg-info/dependency_links.txt +0 -0
  20. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/spark_nlp.egg-info/top_level.txt +0 -0
  21. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotation.py +0 -0
  22. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotation_audio.py +0 -0
  23. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotation_image.py +0 -0
  24. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/__init__.py +0 -0
  25. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/audio/__init__.py +0 -0
  26. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/audio/hubert_for_ctc.py +0 -0
  27. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/audio/wav2vec2_for_ctc.py +0 -0
  28. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/audio/whisper_for_ctc.py +0 -0
  29. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/chunk2_doc.py +0 -0
  30. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/chunker.py +0 -0
  31. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/__init__.py +0 -0
  32. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +0 -0
  33. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/albert_for_question_answering.py +0 -0
  34. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py +0 -0
  35. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/albert_for_token_classification.py +0 -0
  36. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +0 -0
  37. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py +0 -0
  38. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +0 -0
  39. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/bert_for_question_answering.py +0 -0
  40. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py +0 -0
  41. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/bert_for_token_classification.py +0 -0
  42. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py +0 -0
  43. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/camembert_for_question_answering.py +0 -0
  44. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py +0 -0
  45. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/camembert_for_token_classification.py +0 -0
  46. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +0 -0
  47. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/classifier_dl.py +0 -0
  48. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/deberta_for_question_answering.py +0 -0
  49. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py +0 -0
  50. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/deberta_for_token_classification.py +0 -0
  51. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +0 -0
  52. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py +0 -0
  53. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py +0 -0
  54. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py +0 -0
  55. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/distil_bert_for_zero_shot_classification.py +0 -0
  56. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +0 -0
  57. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/longformer_for_question_answering.py +0 -0
  58. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/longformer_for_sequence_classification.py +0 -0
  59. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/longformer_for_token_classification.py +0 -0
  60. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py +0 -0
  61. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py +0 -0
  62. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py +0 -0
  63. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/multi_classifier_dl.py +0 -0
  64. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +0 -0
  65. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/roberta_for_question_answering.py +0 -0
  66. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py +0 -0
  67. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/roberta_for_token_classification.py +0 -0
  68. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py +0 -0
  69. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/sentiment_dl.py +0 -0
  70. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/tapas_for_question_answering.py +0 -0
  71. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +0 -0
  72. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/xlm_roberta_for_question_answering.py +0 -0
  73. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/xlm_roberta_for_sequence_classification.py +0 -0
  74. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/xlm_roberta_for_token_classification.py +0 -0
  75. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/xlm_roberta_for_zero_shot_classification.py +0 -0
  76. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/xlnet_for_sequence_classification.py +0 -0
  77. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/classifier_dl/xlnet_for_token_classification.py +0 -0
  78. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cleaners/__init__.py +0 -0
  79. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cleaners/cleaner.py +0 -0
  80. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cleaners/extractor.py +0 -0
  81. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/coref/__init__.py +0 -0
  82. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/coref/spanbert_coref.py +0 -0
  83. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/__init__.py +0 -0
  84. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/blip_for_question_answering.py +0 -0
  85. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/clip_for_zero_shot_classification.py +0 -0
  86. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/convnext_for_image_classification.py +0 -0
  87. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/florence2_transformer.py +0 -0
  88. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/gemma3_for_multimodal.py +0 -0
  89. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/internvl_for_multimodal.py +0 -0
  90. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/janus_for_multimodal.py +0 -0
  91. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/llava_for_multimodal.py +0 -0
  92. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/mllama_for_multimodal.py +0 -0
  93. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/paligemma_for_multimodal.py +0 -0
  94. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/phi3_vision_for_multimodal.py +0 -0
  95. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/qwen2vl_transformer.py +0 -0
  96. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/smolvlm_transformer.py +0 -0
  97. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/swin_for_image_classification.py +0 -0
  98. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/vision_encoder_decoder_for_image_captioning.py +0 -0
  99. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/cv/vit_for_image_classification.py +0 -0
  100. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/dataframe_optimizer.py +0 -0
  101. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/date2_chunk.py +0 -0
  102. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/dependency/__init__.py +0 -0
  103. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/dependency/dependency_parser.py +0 -0
  104. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/dependency/typed_dependency_parser.py +0 -0
  105. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/document_character_text_splitter.py +0 -0
  106. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/document_normalizer.py +0 -0
  107. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/document_token_splitter.py +0 -0
  108. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/document_token_splitter_test.py +0 -0
  109. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/__init__.py +0 -0
  110. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/albert_embeddings.py +0 -0
  111. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/auto_gguf_embeddings.py +0 -0
  112. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/bert_embeddings.py +0 -0
  113. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/bert_sentence_embeddings.py +0 -0
  114. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/bge_embeddings.py +0 -0
  115. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/camembert_embeddings.py +0 -0
  116. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/chunk_embeddings.py +0 -0
  117. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/deberta_embeddings.py +0 -0
  118. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/distil_bert_embeddings.py +0 -0
  119. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/doc2vec.py +0 -0
  120. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/e5_embeddings.py +0 -0
  121. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/e5v_embeddings.py +0 -0
  122. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/elmo_embeddings.py +0 -0
  123. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/instructor_embeddings.py +0 -0
  124. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/longformer_embeddings.py +0 -0
  125. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/minilm_embeddings.py +0 -0
  126. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/mpnet_embeddings.py +0 -0
  127. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/mxbai_embeddings.py +0 -0
  128. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/nomic_embeddings.py +0 -0
  129. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/roberta_embeddings.py +0 -0
  130. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/roberta_sentence_embeddings.py +0 -0
  131. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/sentence_embeddings.py +0 -0
  132. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/snowflake_embeddings.py +0 -0
  133. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/uae_embeddings.py +0 -0
  134. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/universal_sentence_encoder.py +0 -0
  135. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/word2vec.py +0 -0
  136. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/word_embeddings.py +0 -0
  137. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/xlm_roberta_embeddings.py +0 -0
  138. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/xlm_roberta_sentence_embeddings.py +0 -0
  139. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/embeddings/xlnet_embeddings.py +0 -0
  140. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/er/__init__.py +0 -0
  141. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/er/entity_ruler.py +0 -0
  142. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/graph_extraction.py +0 -0
  143. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/keyword_extraction/__init__.py +0 -0
  144. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/keyword_extraction/yake_keyword_extraction.py +0 -0
  145. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/ld_dl/__init__.py +0 -0
  146. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/ld_dl/language_detector_dl.py +0 -0
  147. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/lemmatizer.py +0 -0
  148. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/matcher/__init__.py +0 -0
  149. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/matcher/big_text_matcher.py +0 -0
  150. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/matcher/date_matcher.py +0 -0
  151. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/matcher/multi_date_matcher.py +0 -0
  152. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/matcher/regex_matcher.py +0 -0
  153. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/matcher/text_matcher.py +0 -0
  154. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/n_gram_generator.py +0 -0
  155. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/ner/ner_approach.py +0 -0
  156. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/ner/ner_converter.py +0 -0
  157. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/ner/ner_crf.py +0 -0
  158. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/ner/ner_dl.py +0 -0
  159. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/ner/ner_overwriter.py +0 -0
  160. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/ner/zero_shot_ner_model.py +0 -0
  161. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/normalizer.py +0 -0
  162. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/openai/__init__.py +0 -0
  163. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/openai/openai_completion.py +0 -0
  164. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/openai/openai_embeddings.py +0 -0
  165. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/param/__init__.py +0 -0
  166. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/param/classifier_encoder.py +0 -0
  167. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/param/evaluation_dl_params.py +0 -0
  168. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/pos/__init__.py +0 -0
  169. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/pos/perceptron.py +0 -0
  170. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/sentence/__init__.py +0 -0
  171. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/sentence/sentence_detector.py +0 -0
  172. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/sentence/sentence_detector_dl.py +0 -0
  173. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/sentiment/__init__.py +0 -0
  174. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/sentiment/sentiment_detector.py +0 -0
  175. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/sentiment/vivekn_sentiment.py +0 -0
  176. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/__init__.py +0 -0
  177. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/auto_gguf_model.py +0 -0
  178. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +0 -0
  179. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/bart_transformer.py +0 -0
  180. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/cohere_transformer.py +0 -0
  181. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/cpm_transformer.py +0 -0
  182. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/gpt2_transformer.py +0 -0
  183. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/llama2_transformer.py +0 -0
  184. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/llama3_transformer.py +0 -0
  185. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/m2m100_transformer.py +0 -0
  186. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/marian_transformer.py +0 -0
  187. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/mistral_transformer.py +0 -0
  188. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/nllb_transformer.py +0 -0
  189. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/olmo_transformer.py +0 -0
  190. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/phi2_transformer.py +0 -0
  191. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/phi3_transformer.py +0 -0
  192. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/phi4_transformer.py +0 -0
  193. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/qwen_transformer.py +0 -0
  194. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/starcoder_transformer.py +0 -0
  195. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/seq2seq/t5_transformer.py +0 -0
  196. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/similarity/__init__.py +0 -0
  197. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/similarity/document_similarity_ranker.py +0 -0
  198. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/spell_check/__init__.py +0 -0
  199. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/spell_check/context_spell_checker.py +0 -0
  200. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/spell_check/norvig_sweeting.py +0 -0
  201. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/spell_check/symmetric_delete.py +0 -0
  202. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/stemmer.py +0 -0
  203. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/stop_words_cleaner.py +0 -0
  204. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/tf_ner_dl_graph_builder.py +0 -0
  205. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/token/__init__.py +0 -0
  206. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/token/chunk_tokenizer.py +0 -0
  207. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/token/recursive_tokenizer.py +0 -0
  208. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/token/regex_tokenizer.py +0 -0
  209. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/token/tokenizer.py +0 -0
  210. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/token2_chunk.py +0 -0
  211. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/ws/__init__.py +0 -0
  212. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/annotator/ws/word_segmenter.py +0 -0
  213. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/audio_assembler.py +0 -0
  214. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/doc2_chunk.py +0 -0
  215. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/document_assembler.py +0 -0
  216. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/embeddings_finisher.py +0 -0
  217. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/finisher.py +0 -0
  218. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/graph_finisher.py +0 -0
  219. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/has_recursive_fit.py +0 -0
  220. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/has_recursive_transform.py +0 -0
  221. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/image_assembler.py +0 -0
  222. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/light_pipeline.py +0 -0
  223. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/multi_document_assembler.py +0 -0
  224. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/prompt_assembler.py +0 -0
  225. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/recursive_pipeline.py +0 -0
  226. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/table_assembler.py +0 -0
  227. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/base/token_assembler.py +0 -0
  228. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/common/__init__.py +0 -0
  229. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/common/annotator_approach.py +0 -0
  230. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/common/annotator_model.py +0 -0
  231. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/common/annotator_properties.py +0 -0
  232. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/common/annotator_type.py +0 -0
  233. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/common/coverage_result.py +0 -0
  234. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/common/match_strategy.py +0 -0
  235. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/common/properties.py +0 -0
  236. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/common/read_as.py +0 -0
  237. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/common/recursive_annotator_approach.py +0 -0
  238. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/common/storage.py +0 -0
  239. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/common/utils.py +0 -0
  240. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/functions.py +0 -0
  241. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/internal/__init__.py +0 -0
  242. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/internal/annotator_java_ml.py +0 -0
  243. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/internal/annotator_transformer.py +0 -0
  244. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/internal/extended_java_wrapper.py +0 -0
  245. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/internal/params_getters_setters.py +0 -0
  246. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/internal/recursive.py +0 -0
  247. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/logging/__init__.py +0 -0
  248. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/logging/comet.py +0 -0
  249. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/partition/__init__.py +0 -0
  250. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/partition/partition.py +0 -0
  251. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/partition/partition_properties.py +0 -0
  252. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/partition/partition_transformer.py +0 -0
  253. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/pretrained/__init__.py +0 -0
  254. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/pretrained/pretrained_pipeline.py +0 -0
  255. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/pretrained/resource_downloader.py +0 -0
  256. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/pretrained/utils.py +0 -0
  257. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/reader/__init__.py +0 -0
  258. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/reader/enums.py +0 -0
  259. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/reader/pdf_to_text.py +0 -0
  260. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/reader/reader2table.py +0 -0
  261. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/reader/sparknlp_reader.py +0 -0
  262. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/__init__.py +0 -0
  263. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/__init__.py +0 -0
  264. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/graph_builders.py +0 -0
  265. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/ner_dl/__init__.py +0 -0
  266. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/ner_dl/create_graph.py +0 -0
  267. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/ner_dl/dataset_encoder.py +0 -0
  268. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/ner_dl/ner_model.py +0 -0
  269. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/ner_dl/ner_model_saver.py +0 -0
  270. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/ner_dl/sentence_grouper.py +0 -0
  271. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/tf2contrib/__init__.py +0 -0
  272. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/tf2contrib/core_rnn_cell.py +0 -0
  273. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/tf2contrib/fused_rnn_cell.py +0 -0
  274. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/tf2contrib/gru_ops.py +0 -0
  275. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/tf2contrib/lstm_ops.py +0 -0
  276. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/tf2contrib/rnn.py +0 -0
  277. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders/tf2contrib/rnn_cell.py +0 -0
  278. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders_1x/__init__.py +0 -0
  279. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders_1x/graph_builders.py +0 -0
  280. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders_1x/ner_dl/__init__.py +0 -0
  281. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders_1x/ner_dl/create_graph.py +0 -0
  282. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py +0 -0
  283. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py +0 -0
  284. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py +0 -0
  285. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py +0 -0
  286. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/conll.py +0 -0
  287. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/conllu.py +0 -0
  288. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/pos.py +0 -0
  289. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/pub_tator.py +0 -0
  290. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/spacy_to_annotation.py +0 -0
  291. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/training/tfgraphs.py +0 -0
  292. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/upload_to_hub.py +0 -0
  293. {spark_nlp-6.1.2 → spark_nlp-6.1.3}/sparknlp/util.py +0 -0
--- spark_nlp-6.1.2/PKG-INFO
+++ spark_nlp-6.1.3/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: spark-nlp
-Version: 6.1.2
+Version: 6.1.3
 Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
 Home-page: https://github.com/JohnSnowLabs/spark-nlp
 Author: John Snow Labs
@@ -102,7 +102,7 @@ $ java -version
 $ conda create -n sparknlp python=3.7 -y
 $ conda activate sparknlp
 # spark-nlp by default is based on pyspark 3.x
-$ pip install spark-nlp==6.1.2 pyspark==3.3.1
+$ pip install spark-nlp==6.1.3 pyspark==3.3.1
 ```
 
 In Python console or Jupyter `Python3` kernel:
@@ -168,7 +168,7 @@ For a quick example of using pipelines and models take a look at our official [d
 
 ### Apache Spark Support
 
-Spark NLP *6.1.2* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+Spark NLP *6.1.3* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
 
 | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
 |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
@@ -198,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http
 
 ### Databricks Support
 
-Spark NLP 6.1.2 has been tested and is compatible with the following runtimes:
+Spark NLP 6.1.3 has been tested and is compatible with the following runtimes:
 
 | **CPU** | **GPU** |
 |--------------------|--------------------|
@@ -216,7 +216,7 @@ We are compatible with older runtimes. For a full list check databricks support
 
 ### EMR Support
 
-Spark NLP 6.1.2 has been tested and is compatible with the following EMR releases:
+Spark NLP 6.1.3 has been tested and is compatible with the following EMR releases:
 
 | **EMR Release** |
 |--------------------|
--- spark_nlp-6.1.2/README.md
+++ spark_nlp-6.1.3/README.md
@@ -63,7 +63,7 @@ $ java -version
 $ conda create -n sparknlp python=3.7 -y
 $ conda activate sparknlp
 # spark-nlp by default is based on pyspark 3.x
-$ pip install spark-nlp==6.1.2 pyspark==3.3.1
+$ pip install spark-nlp==6.1.3 pyspark==3.3.1
 ```
 
 In Python console or Jupyter `Python3` kernel:
@@ -129,7 +129,7 @@ For a quick example of using pipelines and models take a look at our official [d
 
 ### Apache Spark Support
 
-Spark NLP *6.1.2* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+Spark NLP *6.1.3* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
 
 | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
 |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
@@ -159,7 +159,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http
 
 ### Databricks Support
 
-Spark NLP 6.1.2 has been tested and is compatible with the following runtimes:
+Spark NLP 6.1.3 has been tested and is compatible with the following runtimes:
 
 | **CPU** | **GPU** |
 |--------------------|--------------------|
@@ -177,7 +177,7 @@ We are compatible with older runtimes. For a full list check databricks support
 
 ### EMR Support
 
-Spark NLP 6.1.2 has been tested and is compatible with the following EMR releases:
+Spark NLP 6.1.3 has been tested and is compatible with the following EMR releases:
 
 | **EMR Release** |
 |--------------------|
--- spark_nlp-6.1.2/setup.py
+++ spark_nlp-6.1.3/setup.py
@@ -41,7 +41,7 @@ setup(
     # project code, see
     # https://packaging.python.org/en/latest/single_source_version.html
 
-    version='6.1.2',  # Required
+    version='6.1.3',  # Required
 
     # This is a one-line description or tagline of what your project does. This
     # corresponds to the 'Summary' metadata field:
--- spark_nlp-6.1.2/spark_nlp.egg-info/PKG-INFO
+++ spark_nlp-6.1.3/spark_nlp.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: spark-nlp
-Version: 6.1.2
+Version: 6.1.3
 Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
 Home-page: https://github.com/JohnSnowLabs/spark-nlp
 Author: John Snow Labs
@@ -102,7 +102,7 @@ $ java -version
 $ conda create -n sparknlp python=3.7 -y
 $ conda activate sparknlp
 # spark-nlp by default is based on pyspark 3.x
-$ pip install spark-nlp==6.1.2 pyspark==3.3.1
+$ pip install spark-nlp==6.1.3 pyspark==3.3.1
 ```
 
 In Python console or Jupyter `Python3` kernel:
@@ -168,7 +168,7 @@ For a quick example of using pipelines and models take a look at our official [d
 
 ### Apache Spark Support
 
-Spark NLP *6.1.2* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+Spark NLP *6.1.3* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
 
 | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
 |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
@@ -198,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http
 
 ### Databricks Support
 
-Spark NLP 6.1.2 has been tested and is compatible with the following runtimes:
+Spark NLP 6.1.3 has been tested and is compatible with the following runtimes:
 
 | **CPU** | **GPU** |
 |--------------------|--------------------|
@@ -216,7 +216,7 @@ We are compatible with older runtimes. For a full list check databricks support
 
 ### EMR Support
 
-Spark NLP 6.1.2 has been tested and is compatible with the following EMR releases:
+Spark NLP 6.1.3 has been tested and is compatible with the following EMR releases:
 
 | **EMR Release** |
 |--------------------|
--- spark_nlp-6.1.2/spark_nlp.egg-info/SOURCES.txt
+++ spark_nlp-6.1.3/spark_nlp.egg-info/SOURCES.txt
@@ -158,6 +158,7 @@ sparknlp/annotator/ner/ner_approach.py
 sparknlp/annotator/ner/ner_converter.py
 sparknlp/annotator/ner/ner_crf.py
 sparknlp/annotator/ner/ner_dl.py
+sparknlp/annotator/ner/ner_dl_graph_checker.py
 sparknlp/annotator/ner/ner_overwriter.py
 sparknlp/annotator/ner/zero_shot_ner_model.py
 sparknlp/annotator/openai/__init__.py
@@ -214,6 +215,7 @@ sparknlp/base/doc2_chunk.py
 sparknlp/base/document_assembler.py
 sparknlp/base/embeddings_finisher.py
 sparknlp/base/finisher.py
+sparknlp/base/gguf_ranking_finisher.py
 sparknlp/base/graph_finisher.py
 sparknlp/base/has_recursive_fit.py
 sparknlp/base/has_recursive_transform.py
--- spark_nlp-6.1.2/sparknlp/__init__.py
+++ spark_nlp-6.1.3/sparknlp/__init__.py
@@ -66,7 +66,7 @@ sys.modules['com.johnsnowlabs.ml.ai'] = annotator
 annotators = annotator
 embeddings = annotator
 
-__version__ = "6.1.2"
+__version__ = "6.1.3"
 
 
 def start(gpu=False,
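
The `__version__` bump above is what `sparknlp.version()` reports at runtime. A minimal post-upgrade sanity check (a sketch, assuming a working local PySpark environment) could be:

    import sparknlp

    # Start a Spark session with the matching Spark NLP jar attached
    spark = sparknlp.start()

    # Should print 6.1.3 once the upgraded package is installed
    print(sparknlp.version())
    # The underlying Apache Spark version, for comparison with the support table
    print(spark.version)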
--- spark_nlp-6.1.2/sparknlp/annotator/ner/__init__.py
+++ spark_nlp-6.1.3/sparknlp/annotator/ner/__init__.py
@@ -16,5 +16,6 @@ from sparknlp.annotator.ner.ner_approach import *
 from sparknlp.annotator.ner.ner_converter import *
 from sparknlp.annotator.ner.ner_crf import *
 from sparknlp.annotator.ner.ner_dl import *
+from sparknlp.annotator.ner.ner_dl_graph_checker import *
 from sparknlp.annotator.ner.ner_overwriter import *
 from sparknlp.annotator.ner.zero_shot_ner_model import *
--- /dev/null
+++ spark_nlp-6.1.3/sparknlp/annotator/ner/ner_dl_graph_checker.py
@@ -0,0 +1,237 @@
+# Copyright 2017-2025 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains classes for NerDL."""
+
+from sparknlp.common import *
+import sparknlp.internal as _internal
+from pyspark.ml.util import JavaMLWritable
+from pyspark.ml.wrapper import JavaEstimator
+
+
+class NerDLGraphChecker(
+    JavaEstimator,
+    JavaMLWritable,
+    _internal.ParamsGettersSetters,
+):
+    """Checks whether a suitable NerDL graph is available for the given training dataset, before
+    any computation/training is done. This annotator is useful for custom training cases, where
+    specialized graphs are needed.
+
+    Important: This annotator should be positioned before any embedding or NerDLApproach
+    annotators in the pipeline and will process the whole dataset to extract the required graph parameters.
+
+    This annotator requires a dataset with at least two columns: one with tokens and one with the
+    labels. In addition, it requires the embedding annotator used in the pipeline to extract the
+    suitable embedding dimension.
+
+    For extended examples of usage, see the `Examples
+    <https://github.com/JohnSnowLabs/spark-nlp/blob/master//home/ducha/Workspace/scala/spark-nlp-feature/examples/python/training/english/dl-ner/ner_dl_graph_checker.ipynb>`__
+    and the `NerDLGraphCheckerTestSpec
+    <https://github.com/JohnSnowLabs/spark-nlp/blob/master/src/test/scala/com/johnsnowlabs/nlp/annotators/ner/dl/NerDLGraphCheckerTestSpec.scala>`__.
+
+    ==================================== ======================
+    Input Annotation types               Output Annotation type
+    ==================================== ======================
+    ``DOCUMENT, TOKEN``                  ``NONE``
+    ==================================== ======================
+
+    Parameters
+    ----------
+    inputCols
+        Column names of input annotations
+    labelColumn
+        Column name for data labels
+    embeddingsDim
+        Dimensionality of embeddings
+
+    Examples
+    --------
+    >>> import sparknlp
+    >>> from sparknlp.base import *
+    >>> from sparknlp.annotator import *
+    >>> from pyspark.ml import Pipeline
+
+    This CoNLL dataset already includes a sentence, token and label
+    column with their respective annotator types. If a custom dataset is used,
+    these need to be defined manually, for example:
+
+    >>> conll = CoNLL()
+    >>> trainingData = conll.readDataset(spark, "src/test/resources/conll2003/eng.train")
+    >>> embeddings = BertEmbeddings \\
+    ...     .pretrained() \\
+    ...     .setInputCols(["sentence", "token"]) \\
+    ...     .setOutputCol("embeddings")
+
+    This annotator requires the data for NerDLApproach graphs: text, tokens, labels and the embedding model
+
+    >>> nerDLGraphChecker = NerDLGraphChecker() \\
+    ...     .setInputCols(["sentence", "token"]) \\
+    ...     .setLabelColumn("label") \\
+    ...     .setEmbeddingsModel(embeddings)
+    >>> nerTagger = NerDLApproach() \\
+    ...     .setInputCols(["sentence", "token", "embeddings"]) \\
+    ...     .setLabelColumn("label") \\
+    ...     .setOutputCol("ner") \\
+    ...     .setMaxEpochs(1) \\
+    ...     .setRandomSeed(0) \\
+    ...     .setVerbose(0)
+    >>> pipeline = Pipeline().setStages([nerDLGraphChecker, embeddings, nerTagger])
+
+    If we now fit the pipeline while a suitable graph is missing, an exception is raised.
+
+    >>> pipelineModel = pipeline.fit(trainingData)
+    """
+
+    inputCols = Param(
+        Params._dummy(),
+        "inputCols",
+        "Input columns",
+        typeConverter=TypeConverters.toListString,
+    )
+
+    def setInputCols(self, *value):
+        """Sets column names of input annotations.
+
+        Parameters
+        ----------
+        *value : List[str]
+            Input columns for the annotator
+        """
+        if type(value[0]) == str or type(value[0]) == list:
+            # self.inputColsValidation(value)
+            if len(value) == 1 and type(value[0]) == list:
+                return self._set(inputCols=value[0])
+            else:
+                return self._set(inputCols=list(value))
+        else:
+            raise TypeError(
+                "InputCols datatype not supported. It must be either str or list"
+            )
+
+    labelColumn = Param(
+        Params._dummy(),
+        "labelColumn",
+        "Column with label per each token",
+        typeConverter=TypeConverters.toString,
+    )
+
+    def setLabelColumn(self, value):
+        """Sets name of column for data labels.
+
+        Parameters
+        ----------
+        value : str
+            Column for data labels
+        """
+        return self._set(labelColumn=value)
+
+    embeddingsDim = Param(
+        Params._dummy(),
+        "embeddingsDim",
+        "Dimensionality of embeddings",
+        typeConverter=TypeConverters.toInt,
+    )
+
+    def setEmbeddingsDim(self, value: int):
+        """Sets dimensionality of embeddings.
+
+        Parameters
+        ----------
+        value : int
+            Dimensionality of embeddings
+        """
+        return self._set(embeddingsDim=value)
+
+    def setEmbeddingsModel(self, model: HasEmbeddingsProperties):
+        """
+        Gets embeddingsDim from a given embeddings model, if possible.
+        Falls back to setEmbeddingsDim if the dimension cannot be obtained automatically.
+        """
+        # Try Python API first
+        if hasattr(model, "getDimension"):
+            dim = model.getDimension()
+            return self.setEmbeddingsDim(int(dim))
+        # Try JVM side if available
+        if hasattr(model, "_java_obj") and hasattr(model._java_obj, "getDimension"):
+            dim = int(model._java_obj.getDimension())
+            return self.setEmbeddingsDim(dim)
+        raise ValueError(
+            "Could not infer embeddings dimension from provided model. "
+            "Use setEmbeddingsDim(dim) explicitly."
+        )
+
+    inputAnnotatorTypes = [
+        AnnotatorType.DOCUMENT,
+        AnnotatorType.TOKEN,
+    ]
+
+    graphFolder = Param(
+        Params._dummy(),
+        "graphFolder",
+        "Folder path that contain external graph files",
+        TypeConverters.toString,
+    )
+
+    def setGraphFolder(self, p):
+        """Sets folder path that contains external graph files.
+
+        Parameters
+        ----------
+        p : str
+            Folder path that contains external graph files
+        """
+        return self._set(graphFolder=p)
+
+    @keyword_only
+    def __init__(self):
+        _internal.ParamsGettersSetters.__init__(self)
+        classname = "com.johnsnowlabs.nlp.annotators.ner.dl.NerDLGraphChecker"
+        self.__class__._java_class_name = classname
+        self._java_obj = self._new_java_obj(classname, self.uid)
+        # self._setDefault()
+
+    def _create_model(self, java_model):
+        return NerDLGraphCheckerModel()
+
+
+class NerDLGraphCheckerModel(
+    JavaModel,
+    JavaMLWritable,
+    _internal.ParamsGettersSetters,
+):
+    """
+    Resulting model from NerDLGraphChecker, which does not perform any transformations, as the
+    checks are done during the ``fit`` phase. It acts as the identity.
+
+    This annotator should never be used directly.
+    """
+
+    inputAnnotatorTypes = [
+        AnnotatorType.DOCUMENT,
+        AnnotatorType.TOKEN,
+    ]
+
+    @keyword_only
+    def __init__(
+        self,
+        classname="com.johnsnowlabs.nlp.annotators.ner.dl.NerDLGraphCheckerModel",
+        java_model=None,
+    ):
+        super(NerDLGraphCheckerModel, self).__init__(java_model=java_model)
+        if classname and not java_model:
+            self.__class__._java_class_name = classname
+            self._java_obj = self._new_java_obj(classname, self.uid)
+        if java_model is not None:
+            self._transfer_params_from_java()
+        # self._setDefault(lazyAnnotator=False)
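
Beyond the docstring example above, the new annotator exposes two escape hatches worth noting: `setEmbeddingsDim` for when the dimension cannot be read off the embeddings stage, and `setGraphFolder` for custom graph locations. A brief sketch (the dimension and folder path here are illustrative placeholders, not defaults):

    from sparknlp.annotator import NerDLGraphChecker

    # Declare a 768-dim embedding space explicitly and point the checker at a
    # custom graph folder; "file:///tmp/ner_graphs" is a placeholder path
    checker = (
        NerDLGraphChecker()
        .setInputCols(["sentence", "token"])
        .setLabelColumn("label")
        .setEmbeddingsDim(768)
        .setGraphFolder("file:///tmp/ner_graphs")
    )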
--- spark_nlp-6.1.2/sparknlp/annotator/seq2seq/auto_gguf_reranker.py
+++ spark_nlp-6.1.3/sparknlp/annotator/seq2seq/auto_gguf_reranker.py
@@ -47,7 +47,7 @@ class AutoGGUFReranker(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties
     ...     .setOutputCol("reranked_documents") \\
     ...     .setQuery("A man is eating pasta.")
 
-    The default model is ``"bge-reranker-v2-m3-Q4_K_M"``, if no name is provided.
+    The default model is ``"bge_reranker_v2_m3_Q4_K_M"``, if no name is provided.
 
     For extended examples of usage, see the
     `AutoGGUFRerankerTest <https://github.com/JohnSnowLabs/spark-nlp/tree/master/src/test/scala/com/johnsnowlabs/nlp/annotators/seq2seq/AutoGGUFRerankerTest.scala>`__
@@ -222,7 +222,7 @@ class AutoGGUFReranker(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties
     >>> document = DocumentAssembler() \\
     ...     .setInputCol("text") \\
     ...     .setOutputCol("document")
->>> reranker = AutoGGUFReranker.pretrained("bge-reranker-v2-m3-Q4_K_M") \\
+>>> reranker = AutoGGUFReranker.pretrained() \\
     ...     .setInputCols(["document"]) \\
     ...     .setOutputCol("reranked_documents") \\
     ...     .setBatchSize(4) \\
@@ -307,13 +307,13 @@ class AutoGGUFReranker(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties
         return AutoGGUFReranker(java_model=jModel)
 
     @staticmethod
-    def pretrained(name="bge-reranker-v2-m3-Q4_K_M", lang="en", remote_loc=None):
+    def pretrained(name="bge_reranker_v2_m3_Q4_K_M", lang="en", remote_loc=None):
        """Downloads and loads a pretrained model.
 
         Parameters
         ----------
         name : str, optional
-            Name of the pretrained model, by default "bge-reranker-v2-m3-Q4_K_M"
+            Name of the pretrained model, by default "bge_reranker_v2_m3_Q4_K_M"
         lang : str, optional
             Language of the pretrained model, by default "en"
         remote_loc : str, optional
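
The substantive change in this file is the default pretrained model name switching from hyphens to underscores. Assuming the renamed model is published under the new identifier, both of these calls resolve to the same artifact:

    from sparknlp.annotator import AutoGGUFReranker

    # The no-argument call now picks up the new default name
    reranker = AutoGGUFReranker.pretrained()

    # Explicit names must use the underscore form as of 6.1.3
    reranker = AutoGGUFReranker.pretrained("bge_reranker_v2_m3_Q4_K_M", lang="en")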
--- spark_nlp-6.1.2/sparknlp/base/__init__.py
+++ spark_nlp-6.1.3/sparknlp/base/__init__.py
@@ -17,6 +17,7 @@ from sparknlp.base.document_assembler import *
 from sparknlp.base.multi_document_assembler import *
 from sparknlp.base.embeddings_finisher import *
 from sparknlp.base.finisher import *
+from sparknlp.base.gguf_ranking_finisher import *
 from sparknlp.base.graph_finisher import *
 from sparknlp.base.has_recursive_fit import *
 from sparknlp.base.has_recursive_transform import *
--- /dev/null
+++ spark_nlp-6.1.3/sparknlp/base/gguf_ranking_finisher.py
@@ -0,0 +1,234 @@
+# Copyright 2017-2024 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains classes for the GGUFRankingFinisher."""
+
+from pyspark import keyword_only
+from pyspark.ml.param import TypeConverters, Params, Param
+from sparknlp.internal import AnnotatorTransformer
+
+
+class GGUFRankingFinisher(AnnotatorTransformer):
+    """Finisher for AutoGGUFReranker outputs that provides ranking capabilities
+    including top-k selection, sorting by relevance score, and score normalization.
+
+    This finisher processes the output of AutoGGUFReranker, which contains documents with
+    relevance scores in their metadata. It provides several options for post-processing:
+
+    - Top-k selection: Select only the top k documents by relevance score
+    - Score thresholding: Filter documents by minimum relevance score
+    - Min-max scaling: Normalize relevance scores to 0-1 range
+    - Sorting: Sort documents by relevance score in descending order
+    - Ranking: Add rank information to document metadata
+
+    The finisher preserves the document annotation structure while adding ranking information
+    to the metadata and optionally filtering/sorting the documents.
+
+    For extended examples of usage, see the `Examples
+    <https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/finisher/gguf_ranking_finisher_example.py>`__.
+
+    ====================== ======================
+    Input Annotation types Output Annotation type
+    ====================== ======================
+    ``DOCUMENT``           ``DOCUMENT``
+    ====================== ======================
+
+    Parameters
+    ----------
+    inputCols
+        Name of input annotation columns containing reranked documents
+    outputCol
+        Name of output annotation column containing ranked documents, by default "ranked_documents"
+    topK
+        Maximum number of top documents to return based on relevance score (-1 for no limit), by default -1
+    minRelevanceScore
+        Minimum relevance score threshold for filtering documents, by default Double.MinValue
+    minMaxScaling
+        Whether to apply min-max scaling to normalize relevance scores to 0-1 range, by default False
+
+    Examples
+    --------
+    >>> import sparknlp
+    >>> from sparknlp.base import *
+    >>> from sparknlp.annotator import *
+    >>> from pyspark.ml import Pipeline
+    >>> documentAssembler = DocumentAssembler() \\
+    ...     .setInputCol("text") \\
+    ...     .setOutputCol("document")
+    >>> reranker = AutoGGUFReranker.pretrained() \\
+    ...     .setInputCols("document") \\
+    ...     .setOutputCol("reranked_documents") \\
+    ...     .setQuery("A man is eating pasta.")
+    >>> finisher = GGUFRankingFinisher() \\
+    ...     .setInputCols("reranked_documents") \\
+    ...     .setOutputCol("ranked_documents") \\
+    ...     .setTopK(3) \\
+    ...     .setMinMaxScaling(True)
+    >>> pipeline = Pipeline().setStages([documentAssembler, reranker, finisher])
+    >>> data = spark.createDataFrame([
+    ...     ("A man is eating food.",),
+    ...     ("A man is eating a piece of bread.",),
+    ...     ("The girl is carrying a baby.",),
+    ...     ("A man is riding a horse.",)
+    ... ], ["text"])
+    >>> result = pipeline.fit(data).transform(data)
+    >>> result.select("ranked_documents").show(truncate=False)
+    # Documents will be sorted by relevance with rank information in metadata
+    """
+
+    name = "GGUFRankingFinisher"
+
+    inputCols = Param(Params._dummy(),
+                      "inputCols",
+                      "Name of input annotation columns containing reranked documents",
+                      typeConverter=TypeConverters.toListString)
+
+    outputCol = Param(Params._dummy(),
+                      "outputCol",
+                      "Name of output annotation column containing ranked documents",
+                      typeConverter=TypeConverters.toListString)
+
+    topK = Param(Params._dummy(),
+                 "topK",
+                 "Maximum number of top documents to return based on relevance score (-1 for no limit)",
+                 typeConverter=TypeConverters.toInt)
+
+    minRelevanceScore = Param(Params._dummy(),
+                              "minRelevanceScore",
+                              "Minimum relevance score threshold for filtering documents",
+                              typeConverter=TypeConverters.toFloat)
+
+    minMaxScaling = Param(Params._dummy(),
+                          "minMaxScaling",
+                          "Whether to apply min-max scaling to normalize relevance scores to 0-1 range",
+                          typeConverter=TypeConverters.toBoolean)
+
+    @keyword_only
+    def __init__(self):
+        super(GGUFRankingFinisher, self).__init__(
+            classname="com.johnsnowlabs.nlp.finisher.GGUFRankingFinisher")
+        self._setDefault(
+            topK=-1,
+            minRelevanceScore=float('-inf'),  # Equivalent to Double.MinValue
+            minMaxScaling=False,
+            outputCol=["ranked_documents"]
+        )
+
+    @keyword_only
+    def setParams(self):
+        kwargs = self._input_kwargs
+        return self._set(**kwargs)
+
+    def setInputCols(self, *value):
+        """Sets input annotation column names.
+
+        Parameters
+        ----------
+        value : List[str]
+            Input annotation column names containing reranked documents
+        """
+        if len(value) == 1 and isinstance(value[0], list):
+            return self._set(inputCols=value[0])
+        else:
+            return self._set(inputCols=list(value))
+
+    def getInputCols(self):
+        """Gets input annotation column names.
+
+        Returns
+        -------
+        List[str]
+            Input annotation column names
+        """
+        return self.getOrDefault(self.inputCols)
+
+    def setOutputCol(self, value):
+        """Sets output annotation column name.
+
+        Parameters
+        ----------
+        value : str
+            Output annotation column name
+        """
+        return self._set(outputCol=[value])
+
+    def getOutputCol(self):
+        """Gets output annotation column name.
+
+        Returns
+        -------
+        str
+            Output annotation column name
+        """
+        output_cols = self.getOrDefault(self.outputCol)
+        return output_cols[0] if output_cols else "ranked_documents"
+
+    def setTopK(self, value):
+        """Sets maximum number of top documents to return.
+
+        Parameters
+        ----------
+        value : int
+            Maximum number of top documents to return (-1 for no limit)
+        """
+        return self._set(topK=value)
+
+    def getTopK(self):
+        """Gets maximum number of top documents to return.
+
+        Returns
+        -------
+        int
+            Maximum number of top documents to return
+        """
+        return self.getOrDefault(self.topK)
+
+    def setMinRelevanceScore(self, value):
+        """Sets minimum relevance score threshold.
+
+        Parameters
+        ----------
+        value : float
+            Minimum relevance score threshold
+        """
+        return self._set(minRelevanceScore=value)
+
+    def getMinRelevanceScore(self):
+        """Gets minimum relevance score threshold.
+
+        Returns
+        -------
+        float
+            Minimum relevance score threshold
+        """
+        return self.getOrDefault(self.minRelevanceScore)
+
+    def setMinMaxScaling(self, value):
+        """Sets whether to apply min-max scaling.
+
+        Parameters
+        ----------
+        value : bool
+            Whether to apply min-max scaling to normalize scores
+        """
+        return self._set(minMaxScaling=value)
+
+    def getMinMaxScaling(self):
+        """Gets whether to apply min-max scaling.
+
+        Returns
+        -------
+        bool
+            Whether min-max scaling is enabled
+        """
+        return self.getOrDefault(self.minMaxScaling)
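
To make the finisher's post-processing concrete, here is a plain-Python sketch of the documented semantics: threshold filter, descending sort, top-k cut, then optional min-max scaling. It only illustrates the described behavior; the exact ordering of steps inside the actual implementation is an assumption here:

    def rank_scores(scores, top_k=-1, min_score=float("-inf"), min_max=False):
        """Mirrors the documented GGUFRankingFinisher semantics over bare scores."""
        # Drop scores below the threshold, then sort best-first
        kept = sorted((s for s in scores if s >= min_score), reverse=True)
        # Apply the top-k cut if one was requested
        if top_k >= 0:
            kept = kept[:top_k]
        # Optionally rescale the surviving scores into the 0-1 range
        if min_max and kept:
            lo, hi = min(kept), max(kept)
            span = (hi - lo) or 1.0  # guard against identical scores
            kept = [(s - lo) / span for s in kept]
        # Pair each score with its 1-based rank, as the finisher records in metadata
        return list(enumerate(kept, start=1))

    # Top 3 of four scores, normalized: [(1, 1.0), (2, 0.71...), (3, 0.0)]
    print(rank_scores([0.91, 0.42, 0.77, 0.13], top_k=3, min_max=True))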