spark-nlp 6.1.2rc1.tar.gz → 6.1.3rc1.tar.gz

This diff shows the content of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.

Potentially problematic release: this version of spark-nlp has been flagged as possibly problematic.

Files changed (291)
  1. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/PKG-INFO +5 -5
  2. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/README.md +4 -4
  3. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/setup.py +1 -1
  4. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/spark_nlp.egg-info/PKG-INFO +5 -5
  5. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/spark_nlp.egg-info/SOURCES.txt +1 -0
  6. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/__init__.py +1 -1
  7. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/__init__.py +1 -0
  8. spark_nlp-6.1.3rc1/sparknlp/annotator/seq2seq/auto_gguf_reranker.py +329 -0
  9. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/internal/__init__.py +6 -1
  10. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/com/__init__.py +0 -0
  11. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/com/johnsnowlabs/__init__.py +0 -0
  12. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/com/johnsnowlabs/ml/__init__.py +0 -0
  13. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/com/johnsnowlabs/ml/ai/__init__.py +0 -0
  14. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/com/johnsnowlabs/nlp/__init__.py +0 -0
  15. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/setup.cfg +0 -0
  16. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/spark_nlp.egg-info/dependency_links.txt +0 -0
  17. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/spark_nlp.egg-info/top_level.txt +0 -0
  18. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotation.py +0 -0
  19. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotation_audio.py +0 -0
  20. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotation_image.py +0 -0
  21. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/__init__.py +0 -0
  22. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/audio/__init__.py +0 -0
  23. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/audio/hubert_for_ctc.py +0 -0
  24. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/audio/wav2vec2_for_ctc.py +0 -0
  25. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/audio/whisper_for_ctc.py +0 -0
  26. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/chunk2_doc.py +0 -0
  27. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/chunker.py +0 -0
  28. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/__init__.py +0 -0
  29. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +0 -0
  30. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/albert_for_question_answering.py +0 -0
  31. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py +0 -0
  32. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/albert_for_token_classification.py +0 -0
  33. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +0 -0
  34. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py +0 -0
  35. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +0 -0
  36. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/bert_for_question_answering.py +0 -0
  37. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py +0 -0
  38. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/bert_for_token_classification.py +0 -0
  39. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py +0 -0
  40. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/camembert_for_question_answering.py +0 -0
  41. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py +0 -0
  42. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/camembert_for_token_classification.py +0 -0
  43. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +0 -0
  44. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/classifier_dl.py +0 -0
  45. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/deberta_for_question_answering.py +0 -0
  46. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py +0 -0
  47. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/deberta_for_token_classification.py +0 -0
  48. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +0 -0
  49. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py +0 -0
  50. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py +0 -0
  51. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py +0 -0
  52. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/distil_bert_for_zero_shot_classification.py +0 -0
  53. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +0 -0
  54. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/longformer_for_question_answering.py +0 -0
  55. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/longformer_for_sequence_classification.py +0 -0
  56. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/longformer_for_token_classification.py +0 -0
  57. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py +0 -0
  58. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py +0 -0
  59. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py +0 -0
  60. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/multi_classifier_dl.py +0 -0
  61. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +0 -0
  62. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/roberta_for_question_answering.py +0 -0
  63. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py +0 -0
  64. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/roberta_for_token_classification.py +0 -0
  65. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py +0 -0
  66. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/sentiment_dl.py +0 -0
  67. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/tapas_for_question_answering.py +0 -0
  68. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +0 -0
  69. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_question_answering.py +0 -0
  70. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_sequence_classification.py +0 -0
  71. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_token_classification.py +0 -0
  72. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_zero_shot_classification.py +0 -0
  73. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/xlnet_for_sequence_classification.py +0 -0
  74. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/classifier_dl/xlnet_for_token_classification.py +0 -0
  75. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cleaners/__init__.py +0 -0
  76. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cleaners/cleaner.py +0 -0
  77. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cleaners/extractor.py +0 -0
  78. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/coref/__init__.py +0 -0
  79. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/coref/spanbert_coref.py +0 -0
  80. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/__init__.py +0 -0
  81. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/blip_for_question_answering.py +0 -0
  82. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/clip_for_zero_shot_classification.py +0 -0
  83. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/convnext_for_image_classification.py +0 -0
  84. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/florence2_transformer.py +0 -0
  85. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/gemma3_for_multimodal.py +0 -0
  86. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/internvl_for_multimodal.py +0 -0
  87. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/janus_for_multimodal.py +0 -0
  88. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/llava_for_multimodal.py +0 -0
  89. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/mllama_for_multimodal.py +0 -0
  90. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/paligemma_for_multimodal.py +0 -0
  91. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/phi3_vision_for_multimodal.py +0 -0
  92. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/qwen2vl_transformer.py +0 -0
  93. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/smolvlm_transformer.py +0 -0
  94. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/swin_for_image_classification.py +0 -0
  95. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/vision_encoder_decoder_for_image_captioning.py +0 -0
  96. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/cv/vit_for_image_classification.py +0 -0
  97. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/dataframe_optimizer.py +0 -0
  98. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/date2_chunk.py +0 -0
  99. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/dependency/__init__.py +0 -0
  100. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/dependency/dependency_parser.py +0 -0
  101. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/dependency/typed_dependency_parser.py +0 -0
  102. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/document_character_text_splitter.py +0 -0
  103. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/document_normalizer.py +0 -0
  104. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/document_token_splitter.py +0 -0
  105. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/document_token_splitter_test.py +0 -0
  106. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/__init__.py +0 -0
  107. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/albert_embeddings.py +0 -0
  108. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/auto_gguf_embeddings.py +0 -0
  109. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/bert_embeddings.py +0 -0
  110. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/bert_sentence_embeddings.py +0 -0
  111. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/bge_embeddings.py +0 -0
  112. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/camembert_embeddings.py +0 -0
  113. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/chunk_embeddings.py +0 -0
  114. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/deberta_embeddings.py +0 -0
  115. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/distil_bert_embeddings.py +0 -0
  116. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/doc2vec.py +0 -0
  117. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/e5_embeddings.py +0 -0
  118. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/e5v_embeddings.py +0 -0
  119. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/elmo_embeddings.py +0 -0
  120. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/instructor_embeddings.py +0 -0
  121. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/longformer_embeddings.py +0 -0
  122. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/minilm_embeddings.py +0 -0
  123. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/mpnet_embeddings.py +0 -0
  124. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/mxbai_embeddings.py +0 -0
  125. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/nomic_embeddings.py +0 -0
  126. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/roberta_embeddings.py +0 -0
  127. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/roberta_sentence_embeddings.py +0 -0
  128. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/sentence_embeddings.py +0 -0
  129. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/snowflake_embeddings.py +0 -0
  130. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/uae_embeddings.py +0 -0
  131. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/universal_sentence_encoder.py +0 -0
  132. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/word2vec.py +0 -0
  133. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/word_embeddings.py +0 -0
  134. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/xlm_roberta_embeddings.py +0 -0
  135. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/xlm_roberta_sentence_embeddings.py +0 -0
  136. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/embeddings/xlnet_embeddings.py +0 -0
  137. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/er/__init__.py +0 -0
  138. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/er/entity_ruler.py +0 -0
  139. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/graph_extraction.py +0 -0
  140. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/keyword_extraction/__init__.py +0 -0
  141. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/keyword_extraction/yake_keyword_extraction.py +0 -0
  142. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/ld_dl/__init__.py +0 -0
  143. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/ld_dl/language_detector_dl.py +0 -0
  144. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/lemmatizer.py +0 -0
  145. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/matcher/__init__.py +0 -0
  146. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/matcher/big_text_matcher.py +0 -0
  147. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/matcher/date_matcher.py +0 -0
  148. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/matcher/multi_date_matcher.py +0 -0
  149. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/matcher/regex_matcher.py +0 -0
  150. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/matcher/text_matcher.py +0 -0
  151. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/n_gram_generator.py +0 -0
  152. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/ner/__init__.py +0 -0
  153. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/ner/ner_approach.py +0 -0
  154. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/ner/ner_converter.py +0 -0
  155. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/ner/ner_crf.py +0 -0
  156. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/ner/ner_dl.py +0 -0
  157. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/ner/ner_overwriter.py +0 -0
  158. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/ner/zero_shot_ner_model.py +0 -0
  159. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/normalizer.py +0 -0
  160. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/openai/__init__.py +0 -0
  161. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/openai/openai_completion.py +0 -0
  162. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/openai/openai_embeddings.py +0 -0
  163. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/param/__init__.py +0 -0
  164. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/param/classifier_encoder.py +0 -0
  165. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/param/evaluation_dl_params.py +0 -0
  166. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/pos/__init__.py +0 -0
  167. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/pos/perceptron.py +0 -0
  168. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/sentence/__init__.py +0 -0
  169. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/sentence/sentence_detector.py +0 -0
  170. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/sentence/sentence_detector_dl.py +0 -0
  171. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/sentiment/__init__.py +0 -0
  172. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/sentiment/sentiment_detector.py +0 -0
  173. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/sentiment/vivekn_sentiment.py +0 -0
  174. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/auto_gguf_model.py +0 -0
  175. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +0 -0
  176. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/bart_transformer.py +0 -0
  177. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/cohere_transformer.py +0 -0
  178. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/cpm_transformer.py +0 -0
  179. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/gpt2_transformer.py +0 -0
  180. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/llama2_transformer.py +0 -0
  181. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/llama3_transformer.py +0 -0
  182. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/m2m100_transformer.py +0 -0
  183. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/marian_transformer.py +0 -0
  184. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/mistral_transformer.py +0 -0
  185. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/nllb_transformer.py +0 -0
  186. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/olmo_transformer.py +0 -0
  187. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/phi2_transformer.py +0 -0
  188. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/phi3_transformer.py +0 -0
  189. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/phi4_transformer.py +0 -0
  190. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/qwen_transformer.py +0 -0
  191. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/starcoder_transformer.py +0 -0
  192. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/t5_transformer.py +0 -0
  193. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/similarity/__init__.py +0 -0
  194. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/similarity/document_similarity_ranker.py +0 -0
  195. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/spell_check/__init__.py +0 -0
  196. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/spell_check/context_spell_checker.py +0 -0
  197. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/spell_check/norvig_sweeting.py +0 -0
  198. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/spell_check/symmetric_delete.py +0 -0
  199. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/stemmer.py +0 -0
  200. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/stop_words_cleaner.py +0 -0
  201. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/tf_ner_dl_graph_builder.py +0 -0
  202. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/token/__init__.py +0 -0
  203. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/token/chunk_tokenizer.py +0 -0
  204. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/token/recursive_tokenizer.py +0 -0
  205. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/token/regex_tokenizer.py +0 -0
  206. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/token/tokenizer.py +0 -0
  207. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/token2_chunk.py +0 -0
  208. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/ws/__init__.py +0 -0
  209. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/ws/word_segmenter.py +0 -0
  210. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/__init__.py +0 -0
  211. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/audio_assembler.py +0 -0
  212. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/doc2_chunk.py +0 -0
  213. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/document_assembler.py +0 -0
  214. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/embeddings_finisher.py +0 -0
  215. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/finisher.py +0 -0
  216. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/graph_finisher.py +0 -0
  217. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/has_recursive_fit.py +0 -0
  218. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/has_recursive_transform.py +0 -0
  219. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/image_assembler.py +0 -0
  220. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/light_pipeline.py +0 -0
  221. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/multi_document_assembler.py +0 -0
  222. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/prompt_assembler.py +0 -0
  223. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/recursive_pipeline.py +0 -0
  224. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/table_assembler.py +0 -0
  225. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/base/token_assembler.py +0 -0
  226. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/common/__init__.py +0 -0
  227. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/common/annotator_approach.py +0 -0
  228. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/common/annotator_model.py +0 -0
  229. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/common/annotator_properties.py +0 -0
  230. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/common/annotator_type.py +0 -0
  231. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/common/coverage_result.py +0 -0
  232. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/common/match_strategy.py +0 -0
  233. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/common/properties.py +0 -0
  234. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/common/read_as.py +0 -0
  235. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/common/recursive_annotator_approach.py +0 -0
  236. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/common/storage.py +0 -0
  237. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/common/utils.py +0 -0
  238. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/functions.py +0 -0
  239. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/internal/annotator_java_ml.py +0 -0
  240. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/internal/annotator_transformer.py +0 -0
  241. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/internal/extended_java_wrapper.py +0 -0
  242. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/internal/params_getters_setters.py +0 -0
  243. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/internal/recursive.py +0 -0
  244. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/logging/__init__.py +0 -0
  245. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/logging/comet.py +0 -0
  246. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/partition/__init__.py +0 -0
  247. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/partition/partition.py +0 -0
  248. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/partition/partition_properties.py +0 -0
  249. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/partition/partition_transformer.py +0 -0
  250. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/pretrained/__init__.py +0 -0
  251. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/pretrained/pretrained_pipeline.py +0 -0
  252. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/pretrained/resource_downloader.py +0 -0
  253. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/pretrained/utils.py +0 -0
  254. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/reader/__init__.py +0 -0
  255. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/reader/enums.py +0 -0
  256. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/reader/pdf_to_text.py +0 -0
  257. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/reader/reader2doc.py +0 -0
  258. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/reader/reader2table.py +0 -0
  259. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/reader/sparknlp_reader.py +0 -0
  260. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/__init__.py +0 -0
  261. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/__init__.py +0 -0
  262. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/graph_builders.py +0 -0
  263. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/ner_dl/__init__.py +0 -0
  264. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/ner_dl/create_graph.py +0 -0
  265. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/ner_dl/dataset_encoder.py +0 -0
  266. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/ner_dl/ner_model.py +0 -0
  267. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/ner_dl/ner_model_saver.py +0 -0
  268. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/ner_dl/sentence_grouper.py +0 -0
  269. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/tf2contrib/__init__.py +0 -0
  270. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/tf2contrib/core_rnn_cell.py +0 -0
  271. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/tf2contrib/fused_rnn_cell.py +0 -0
  272. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/tf2contrib/gru_ops.py +0 -0
  273. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/tf2contrib/lstm_ops.py +0 -0
  274. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/tf2contrib/rnn.py +0 -0
  275. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders/tf2contrib/rnn_cell.py +0 -0
  276. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders_1x/__init__.py +0 -0
  277. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders_1x/graph_builders.py +0 -0
  278. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/__init__.py +0 -0
  279. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/create_graph.py +0 -0
  280. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py +0 -0
  281. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py +0 -0
  282. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py +0 -0
  283. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py +0 -0
  284. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/conll.py +0 -0
  285. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/conllu.py +0 -0
  286. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/pos.py +0 -0
  287. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/pub_tator.py +0 -0
  288. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/spacy_to_annotation.py +0 -0
  289. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/training/tfgraphs.py +0 -0
  290. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/upload_to_hub.py +0 -0
  291. {spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/util.py +0 -0
{spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: spark-nlp
-Version: 6.1.2rc1
+Version: 6.1.3rc1
 Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
 Home-page: https://github.com/JohnSnowLabs/spark-nlp
 Author: John Snow Labs
@@ -102,7 +102,7 @@ $ java -version
 $ conda create -n sparknlp python=3.7 -y
 $ conda activate sparknlp
 # spark-nlp by default is based on pyspark 3.x
-$ pip install spark-nlp==6.1.1 pyspark==3.3.1
+$ pip install spark-nlp==6.1.3 pyspark==3.3.1
 ```
 
 In Python console or Jupyter `Python3` kernel:
@@ -168,7 +168,7 @@ For a quick example of using pipelines and models take a look at our official [d
 
 ### Apache Spark Support
 
-Spark NLP *6.1.1* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+Spark NLP *6.1.3* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
 
 | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
 |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
@@ -198,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http
 
 ### Databricks Support
 
-Spark NLP 6.1.1 has been tested and is compatible with the following runtimes:
+Spark NLP 6.1.3 has been tested and is compatible with the following runtimes:
 
 | **CPU** | **GPU** |
 |--------------------|--------------------|
@@ -216,7 +216,7 @@ We are compatible with older runtimes. For a full list check databricks support
 
 ### EMR Support
 
-Spark NLP 6.1.1 has been tested and is compatible with the following EMR releases:
+Spark NLP 6.1.3 has been tested and is compatible with the following EMR releases:
 
 | **EMR Release** |
 |--------------------|
{spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/README.md

@@ -63,7 +63,7 @@ $ java -version
 $ conda create -n sparknlp python=3.7 -y
 $ conda activate sparknlp
 # spark-nlp by default is based on pyspark 3.x
-$ pip install spark-nlp==6.1.1 pyspark==3.3.1
+$ pip install spark-nlp==6.1.3 pyspark==3.3.1
 ```
 
 In Python console or Jupyter `Python3` kernel:
@@ -129,7 +129,7 @@ For a quick example of using pipelines and models take a look at our official [d
 
 ### Apache Spark Support
 
-Spark NLP *6.1.1* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+Spark NLP *6.1.3* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
 
 | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
 |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
@@ -159,7 +159,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http
 
 ### Databricks Support
 
-Spark NLP 6.1.1 has been tested and is compatible with the following runtimes:
+Spark NLP 6.1.3 has been tested and is compatible with the following runtimes:
 
 | **CPU** | **GPU** |
 |--------------------|--------------------|
@@ -177,7 +177,7 @@ We are compatible with older runtimes. For a full list check databricks support
 
 ### EMR Support
 
-Spark NLP 6.1.1 has been tested and is compatible with the following EMR releases:
+Spark NLP 6.1.3 has been tested and is compatible with the following EMR releases:
 
 | **EMR Release** |
 |--------------------|
{spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/setup.py

@@ -41,7 +41,7 @@ setup(
     # project code, see
     # https://packaging.python.org/en/latest/single_source_version.html
 
-    version='6.1.2-rc1',  # Required
+    version='6.1.3rc1',  # Required
 
     # This is a one-line description or tagline of what your project does. This
     # corresponds to the 'Summary' metadata field:
{spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/spark_nlp.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: spark-nlp
-Version: 6.1.2rc1
+Version: 6.1.3rc1
 Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
 Home-page: https://github.com/JohnSnowLabs/spark-nlp
 Author: John Snow Labs
@@ -102,7 +102,7 @@ $ java -version
 $ conda create -n sparknlp python=3.7 -y
 $ conda activate sparknlp
 # spark-nlp by default is based on pyspark 3.x
-$ pip install spark-nlp==6.1.1 pyspark==3.3.1
+$ pip install spark-nlp==6.1.3 pyspark==3.3.1
 ```
 
 In Python console or Jupyter `Python3` kernel:
@@ -168,7 +168,7 @@ For a quick example of using pipelines and models take a look at our official [d
 
 ### Apache Spark Support
 
-Spark NLP *6.1.1* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+Spark NLP *6.1.3* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
 
 | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
 |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
@@ -198,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http
 
 ### Databricks Support
 
-Spark NLP 6.1.1 has been tested and is compatible with the following runtimes:
+Spark NLP 6.1.3 has been tested and is compatible with the following runtimes:
 
 | **CPU** | **GPU** |
 |--------------------|--------------------|
@@ -216,7 +216,7 @@ We are compatible with older runtimes. For a full list check databricks support
 
 ### EMR Support
 
-Spark NLP 6.1.1 has been tested and is compatible with the following EMR releases:
+Spark NLP 6.1.3 has been tested and is compatible with the following EMR releases:
 
 | **EMR Release** |
 |--------------------|
{spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/spark_nlp.egg-info/SOURCES.txt

@@ -176,6 +176,7 @@ sparknlp/annotator/sentiment/sentiment_detector.py
 sparknlp/annotator/sentiment/vivekn_sentiment.py
 sparknlp/annotator/seq2seq/__init__.py
 sparknlp/annotator/seq2seq/auto_gguf_model.py
+sparknlp/annotator/seq2seq/auto_gguf_reranker.py
 sparknlp/annotator/seq2seq/auto_gguf_vision_model.py
 sparknlp/annotator/seq2seq/bart_transformer.py
 sparknlp/annotator/seq2seq/cohere_transformer.py
{spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/__init__.py

@@ -66,7 +66,7 @@ sys.modules['com.johnsnowlabs.ml.ai'] = annotator
 annotators = annotator
 embeddings = annotator
 
-__version__ = "6.1.1"
+__version__ = "6.1.3"
 
 
 def start(gpu=False,
{spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/annotator/seq2seq/__init__.py

@@ -32,3 +32,4 @@ from sparknlp.annotator.seq2seq.llama3_transformer import *
 from sparknlp.annotator.seq2seq.cohere_transformer import *
 from sparknlp.annotator.seq2seq.olmo_transformer import *
 from sparknlp.annotator.seq2seq.phi4_transformer import *
+from sparknlp.annotator.seq2seq.auto_gguf_reranker import *
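With the wildcard import above in place, the new class is re-exported through the usual top-level annotator namespace. A minimal sketch of what this enables after upgrading (not part of the diff; assumes this release is installed):

    # Hypothetical session: the reranker is now importable like any other annotator
    from sparknlp.annotator import AutoGGUFReranker

    print(AutoGGUFReranker.name)  # prints "AutoGGUFReranker"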
spark_nlp-6.1.3rc1/sparknlp/annotator/seq2seq/auto_gguf_reranker.py (new file)

@@ -0,0 +1,329 @@
+# Copyright 2017-2023 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains classes for the AutoGGUFReranker."""
+from typing import List, Dict
+
+from sparknlp.common import *
+
+
+class AutoGGUFReranker(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties):
+    """
+    Annotator that uses the llama.cpp library to rerank text documents based on their relevance
+    to a given query using GGUF-format reranking models.
+
+    This annotator is specifically designed for text reranking tasks, where multiple documents
+    or text passages are ranked according to their relevance to a query. It uses specialized
+    reranking models in GGUF format that output relevance scores for each input document.
+
+    The reranker takes a query (set via :meth:`.setQuery`) and a list of documents, then returns the
+    same documents with added metadata containing relevance scores. The documents are processed
+    in batches and each receives a ``relevance_score`` in its metadata indicating how relevant
+    it is to the provided query.
+
+    For settable parameters, and their explanations, see the parameters of this class and refer to
+    the llama.cpp documentation of
+    `server.cpp <https://github.com/ggerganov/llama.cpp/tree/7d5e8777ae1d21af99d4f95be10db4870720da91/examples/server>`__
+    for more information.
+
+    If the parameters are not set, the annotator will default to use the parameters provided by
+    the model.
+
+    Pretrained models can be loaded with :meth:`.pretrained` of the companion
+    object:
+
+    >>> reranker = AutoGGUFReranker.pretrained() \\
+    ...     .setInputCols(["document"]) \\
+    ...     .setOutputCol("reranked_documents") \\
+    ...     .setQuery("A man is eating pasta.")
+
+    The default model is ``"bge-reranker-v2-m3-Q4_K_M"``, if no name is provided.
+
+    For extended examples of usage, see the
+    `AutoGGUFRerankerTest <https://github.com/JohnSnowLabs/spark-nlp/tree/master/src/test/scala/com/johnsnowlabs/nlp/annotators/seq2seq/AutoGGUFRerankerTest.scala>`__
+    and the
+    `example notebook <https://github.com/JohnSnowLabs/spark-nlp/tree/master/examples/python/llama.cpp/llama.cpp_in_Spark_NLP_AutoGGUFReranker.ipynb>`__.
+
+    For available pretrained models please see the `Models Hub <https://sparknlp.org/models>`__.
+
+    ====================== ======================
+    Input Annotation types Output Annotation type
+    ====================== ======================
+    ``DOCUMENT``           ``DOCUMENT``
+    ====================== ======================
+
+    Parameters
+    ----------
+    query
+        The query to be used for reranking. If not set, the input text will be used as the query.
+    nThreads
+        Set the number of threads to use during generation
+    nThreadsDraft
+        Set the number of threads to use during draft generation
+    nThreadsBatch
+        Set the number of threads to use during batch and prompt processing
+    nThreadsBatchDraft
+        Set the number of threads to use during batch and prompt processing
+    nCtx
+        Set the size of the prompt context
+    nBatch
+        Set the logical batch size for prompt processing (must be >=32 to use BLAS)
+    nUbatch
+        Set the physical batch size for prompt processing (must be >=32 to use BLAS)
+    nGpuLayers
+        Set the number of layers to store in VRAM (-1 - use default)
+    nGpuLayersDraft
+        Set the number of layers to store in VRAM for the draft model (-1 - use default)
+    gpuSplitMode
+        Set how to split the model across GPUs
+    mainGpu
+        Set the main GPU that is used for scratch and small tensors.
+    tensorSplit
+        Set how split tensors should be distributed across GPUs
+    grpAttnN
+        Set the group-attention factor
+    grpAttnW
+        Set the group-attention width
+    ropeFreqBase
+        Set the RoPE base frequency, used by NTK-aware scaling
+    ropeFreqScale
+        Set the RoPE frequency scaling factor, expands context by a factor of 1/N
+    yarnExtFactor
+        Set the YaRN extrapolation mix factor
+    yarnAttnFactor
+        Set the YaRN scale sqrt(t) or attention magnitude
+    yarnBetaFast
+        Set the YaRN low correction dim or beta
+    yarnBetaSlow
+        Set the YaRN high correction dim or alpha
+    yarnOrigCtx
+        Set the YaRN original context size of model
+    defragmentationThreshold
+        Set the KV cache defragmentation threshold
+    numaStrategy
+        Set optimization strategies that help on some NUMA systems (if available)
+    ropeScalingType
+        Set the RoPE frequency scaling method, defaults to linear unless specified by the model
+    poolingType
+        Set the pooling type for embeddings, use model default if unspecified
+    modelDraft
+        Set the draft model for speculative decoding
+    modelAlias
+        Set a model alias
+    lookupCacheStaticFilePath
+        Set path to static lookup cache to use for lookup decoding (not updated by generation)
+    lookupCacheDynamicFilePath
+        Set path to dynamic lookup cache to use for lookup decoding (updated by generation)
+    flashAttention
+        Whether to enable Flash Attention
+    inputPrefixBos
+        Whether to add prefix BOS to user inputs, preceding the `--in-prefix` string
+    useMmap
+        Whether to use memory-map model (faster load but may increase pageouts if not using mlock)
+    useMlock
+        Whether to force the system to keep model in RAM rather than swapping or compressing
+    noKvOffload
+        Whether to disable KV offload
+    systemPrompt
+        Set a system prompt to use
+    chatTemplate
+        The chat template to use
+    inputPrefix
+        Set the prompt to start generation with
+    inputSuffix
+        Set a suffix for infilling
+    cachePrompt
+        Whether to remember the prompt to avoid reprocessing it
+    nPredict
+        Set the number of tokens to predict
+    topK
+        Set top-k sampling
+    topP
+        Set top-p sampling
+    minP
+        Set min-p sampling
+    tfsZ
+        Set tail free sampling, parameter z
+    typicalP
+        Set locally typical sampling, parameter p
+    temperature
+        Set the temperature
+    dynatempRange
+        Set the dynamic temperature range
+    dynatempExponent
+        Set the dynamic temperature exponent
+    repeatLastN
+        Set the last n tokens to consider for penalties
+    repeatPenalty
+        Set the penalty of repeated sequences of tokens
+    frequencyPenalty
+        Set the repetition alpha frequency penalty
+    presencePenalty
+        Set the repetition alpha presence penalty
+    miroStat
+        Set MiroStat sampling strategies.
+    mirostatTau
+        Set the MiroStat target entropy, parameter tau
+    mirostatEta
+        Set the MiroStat learning rate, parameter eta
+    penalizeNl
+        Whether to penalize newline tokens
+    nKeep
+        Set the number of tokens to keep from the initial prompt
+    seed
+        Set the RNG seed
+    nProbs
+        Set the amount top tokens probabilities to output if greater than 0.
+    minKeep
+        Set the amount of tokens the samplers should return at least (0 = disabled)
+    grammar
+        Set BNF-like grammar to constrain generations
+    penaltyPrompt
+        Override which part of the prompt is penalized for repetition.
+    ignoreEos
+        Set whether to ignore end of stream token and continue generating (implies --logit-bias 2-inf)
+    disableTokenIds
+        Set the token ids to disable in the completion
+    stopStrings
+        Set strings upon seeing which token generation is stopped
+    samplers
+        Set which samplers to use for token generation in the given order
+    useChatTemplate
+        Set whether or not generate should apply a chat template
+
+    Notes
+    -----
+    This annotator is designed for reranking tasks and requires setting a query using ``setQuery``.
+    The query represents the search intent against which documents will be ranked. Each input
+    document receives a relevance score in the output metadata.
+
+    To use GPU inference with this annotator, make sure to use the Spark NLP GPU package and set
+    the number of GPU layers with the `setNGpuLayers` method.
+
+    When using larger models, we recommend adjusting GPU usage with `setNCtx` and `setNGpuLayers`
+    according to your hardware to avoid out-of-memory errors.
+
+    Examples
+    --------
+    >>> import sparknlp
+    >>> from sparknlp.base import *
+    >>> from sparknlp.annotator import *
+    >>> from pyspark.ml import Pipeline
+    >>> document = DocumentAssembler() \\
+    ...     .setInputCol("text") \\
+    ...     .setOutputCol("document")
+    >>> reranker = AutoGGUFReranker.pretrained("bge-reranker-v2-m3-Q4_K_M") \\
+    ...     .setInputCols(["document"]) \\
+    ...     .setOutputCol("reranked_documents") \\
+    ...     .setBatchSize(4) \\
+    ...     .setQuery("A man is eating pasta.")
+    >>> pipeline = Pipeline().setStages([document, reranker])
+    >>> data = spark.createDataFrame([
+    ...     ["A man is eating food."],
+    ...     ["A man is eating a piece of bread."],
+    ...     ["The girl is carrying a baby."],
+    ...     ["A man is riding a horse."]
+    ... ]).toDF("text")
+    >>> result = pipeline.fit(data).transform(data)
+    >>> result.select("reranked_documents").show(truncate = False)
+    # Each document will have a relevance_score in metadata showing how relevant it is to the query
+    """
+
+    name = "AutoGGUFReranker"
+    inputAnnotatorTypes = [AnnotatorType.DOCUMENT]
+    outputAnnotatorType = AnnotatorType.DOCUMENT
+
+    query = Param(Params._dummy(), "query",
+                  "The query to be used for reranking. If not set, the input text will be used as the query.",
+                  typeConverter=TypeConverters.toString)
+    @keyword_only
+    def __init__(self, classname="com.johnsnowlabs.nlp.annotators.seq2seq.AutoGGUFReranker", java_model=None):
+        super(AutoGGUFReranker, self).__init__(
+            classname=classname,
+            java_model=java_model
+        )
+        self._setDefault(
+            useChatTemplate=True,
+            nCtx=4096,
+            nBatch=512,
+            nGpuLayers=99,
+            systemPrompt="You are a helpful assistant.",
+            query=""
+        )
+
+    def setQuery(self, value: str):
+        """Set the query to be used for reranking.
+
+        Parameters
+        ----------
+        value : str
+            The query text that documents will be ranked against.
+
+        Returns
+        -------
+        AutoGGUFReranker
+            This instance for method chaining.
+        """
+        return self._set(query=value)
+
+    def getQuery(self):
+        """Get the current query used for reranking.
+
+        Returns
+        -------
+        str
+            The current query string.
+        """
+        return self._call_java("getQuery")
+
+    @staticmethod
+    def loadSavedModel(folder, spark_session):
+        """Loads a locally saved model.
+
+        Parameters
+        ----------
+        folder : str
+            Folder of the saved model
+        spark_session : pyspark.sql.SparkSession
+            The current SparkSession
+
+        Returns
+        -------
+        AutoGGUFReranker
+            The restored model
+        """
+        from sparknlp.internal import _AutoGGUFRerankerLoader
+        jModel = _AutoGGUFRerankerLoader(folder, spark_session._jsparkSession)._java_obj
+        return AutoGGUFReranker(java_model=jModel)
+
+    @staticmethod
+    def pretrained(name="bge-reranker-v2-m3-Q4_K_M", lang="en", remote_loc=None):
+        """Downloads and loads a pretrained model.
+
+        Parameters
+        ----------
+        name : str, optional
+            Name of the pretrained model, by default "bge-reranker-v2-m3-Q4_K_M"
+        lang : str, optional
+            Language of the pretrained model, by default "en"
+        remote_loc : str, optional
+            Optional remote address of the resource, by default None. Will use
+            Spark NLPs repositories otherwise.
+
+        Returns
+        -------
+        AutoGGUFReranker
+            The restored model
+        """
+        from sparknlp.pretrained import ResourceDownloader
+        return ResourceDownloader.downloadModel(AutoGGUFReranker, name, lang, remote_loc)
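The docstring's example stops at showing the raw annotation column. As a follow-on sketch (not part of the packaged file; column and metadata key names taken from the docstring above, with `result` being the DataFrame it produces), the relevance scores can be pulled out of the annotation metadata and used to sort the candidates:

    # Flatten the reranked annotations and sort by the relevance_score entry
    # that AutoGGUFReranker attaches to each document's metadata map. The
    # score arrives as a string in the map, hence the cast to float.
    from pyspark.sql.functions import col, explode

    scores = (
        result
        .select(explode(col("reranked_documents")).alias("doc"))
        .select(
            col("doc.result").alias("text"),
            col("doc.metadata").getItem("relevance_score").cast("float").alias("relevance_score"),
        )
        .orderBy(col("relevance_score").desc())
    )
    scores.show(truncate=False)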
{spark_nlp-6.1.2rc1 → spark_nlp-6.1.3rc1}/sparknlp/internal/__init__.py

@@ -1191,4 +1191,9 @@ class _Phi4Loader(ExtendedJavaWrapper):
             path,
             jspark,
             use_openvino,
-        )
+        )
+
+class _AutoGGUFRerankerLoader(ExtendedJavaWrapper):
+    def __init__(self, path, jspark):
+        super(_AutoGGUFRerankerLoader, self).__init__(
+            "com.johnsnowlabs.nlp.annotators.seq2seq.AutoGGUFReranker.loadSavedModel", path, jspark)