spark-nlp 6.1.0__tar.gz → 6.1.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of spark-nlp might be problematic. Click here for more details.

Files changed (291)
  1. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/PKG-INFO +12 -11
  2. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/README.md +11 -10
  3. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/setup.py +1 -1
  4. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/spark_nlp.egg-info/PKG-INFO +12 -11
  5. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/spark_nlp.egg-info/SOURCES.txt +2 -0
  6. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/__init__.py +1 -1
  7. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/auto_gguf_embeddings.py +4 -12
  8. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/__init__.py +1 -0
  9. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/auto_gguf_model.py +11 -10
  10. spark_nlp-6.1.2/sparknlp/annotator/seq2seq/auto_gguf_reranker.py +329 -0
  11. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +7 -9
  12. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/common/properties.py +25 -30
  13. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/internal/__init__.py +6 -1
  14. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/reader/reader2doc.py +25 -9
  15. spark_nlp-6.1.2/sparknlp/reader/reader2table.py +163 -0
  16. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/com/__init__.py +0 -0
  17. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/com/johnsnowlabs/__init__.py +0 -0
  18. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/com/johnsnowlabs/ml/__init__.py +0 -0
  19. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/com/johnsnowlabs/ml/ai/__init__.py +0 -0
  20. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/com/johnsnowlabs/nlp/__init__.py +0 -0
  21. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/setup.cfg +0 -0
  22. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/spark_nlp.egg-info/dependency_links.txt +0 -0
  23. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/spark_nlp.egg-info/top_level.txt +0 -0
  24. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotation.py +0 -0
  25. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotation_audio.py +0 -0
  26. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotation_image.py +0 -0
  27. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/__init__.py +0 -0
  28. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/audio/__init__.py +0 -0
  29. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/audio/hubert_for_ctc.py +0 -0
  30. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/audio/wav2vec2_for_ctc.py +0 -0
  31. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/audio/whisper_for_ctc.py +0 -0
  32. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/chunk2_doc.py +0 -0
  33. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/chunker.py +0 -0
  34. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/__init__.py +0 -0
  35. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +0 -0
  36. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/albert_for_question_answering.py +0 -0
  37. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py +0 -0
  38. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/albert_for_token_classification.py +0 -0
  39. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +0 -0
  40. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py +0 -0
  41. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +0 -0
  42. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/bert_for_question_answering.py +0 -0
  43. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py +0 -0
  44. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/bert_for_token_classification.py +0 -0
  45. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py +0 -0
  46. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/camembert_for_question_answering.py +0 -0
  47. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py +0 -0
  48. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/camembert_for_token_classification.py +0 -0
  49. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +0 -0
  50. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/classifier_dl.py +0 -0
  51. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/deberta_for_question_answering.py +0 -0
  52. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py +0 -0
  53. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/deberta_for_token_classification.py +0 -0
  54. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +0 -0
  55. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py +0 -0
  56. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py +0 -0
  57. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py +0 -0
  58. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/distil_bert_for_zero_shot_classification.py +0 -0
  59. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +0 -0
  60. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/longformer_for_question_answering.py +0 -0
  61. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/longformer_for_sequence_classification.py +0 -0
  62. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/longformer_for_token_classification.py +0 -0
  63. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py +0 -0
  64. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py +0 -0
  65. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py +0 -0
  66. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/multi_classifier_dl.py +0 -0
  67. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +0 -0
  68. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/roberta_for_question_answering.py +0 -0
  69. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py +0 -0
  70. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/roberta_for_token_classification.py +0 -0
  71. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py +0 -0
  72. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/sentiment_dl.py +0 -0
  73. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/tapas_for_question_answering.py +0 -0
  74. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +0 -0
  75. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/xlm_roberta_for_question_answering.py +0 -0
  76. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/xlm_roberta_for_sequence_classification.py +0 -0
  77. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/xlm_roberta_for_token_classification.py +0 -0
  78. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/xlm_roberta_for_zero_shot_classification.py +0 -0
  79. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/xlnet_for_sequence_classification.py +0 -0
  80. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/classifier_dl/xlnet_for_token_classification.py +0 -0
  81. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cleaners/__init__.py +0 -0
  82. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cleaners/cleaner.py +0 -0
  83. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cleaners/extractor.py +0 -0
  84. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/coref/__init__.py +0 -0
  85. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/coref/spanbert_coref.py +0 -0
  86. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/__init__.py +0 -0
  87. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/blip_for_question_answering.py +0 -0
  88. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/clip_for_zero_shot_classification.py +0 -0
  89. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/convnext_for_image_classification.py +0 -0
  90. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/florence2_transformer.py +0 -0
  91. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/gemma3_for_multimodal.py +0 -0
  92. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/internvl_for_multimodal.py +0 -0
  93. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/janus_for_multimodal.py +0 -0
  94. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/llava_for_multimodal.py +0 -0
  95. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/mllama_for_multimodal.py +0 -0
  96. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/paligemma_for_multimodal.py +0 -0
  97. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/phi3_vision_for_multimodal.py +0 -0
  98. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/qwen2vl_transformer.py +0 -0
  99. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/smolvlm_transformer.py +0 -0
  100. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/swin_for_image_classification.py +0 -0
  101. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/vision_encoder_decoder_for_image_captioning.py +0 -0
  102. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/cv/vit_for_image_classification.py +0 -0
  103. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/dataframe_optimizer.py +0 -0
  104. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/date2_chunk.py +0 -0
  105. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/dependency/__init__.py +0 -0
  106. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/dependency/dependency_parser.py +0 -0
  107. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/dependency/typed_dependency_parser.py +0 -0
  108. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/document_character_text_splitter.py +0 -0
  109. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/document_normalizer.py +0 -0
  110. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/document_token_splitter.py +0 -0
  111. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/document_token_splitter_test.py +0 -0
  112. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/__init__.py +0 -0
  113. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/albert_embeddings.py +0 -0
  114. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/bert_embeddings.py +0 -0
  115. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/bert_sentence_embeddings.py +0 -0
  116. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/bge_embeddings.py +0 -0
  117. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/camembert_embeddings.py +0 -0
  118. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/chunk_embeddings.py +0 -0
  119. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/deberta_embeddings.py +0 -0
  120. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/distil_bert_embeddings.py +0 -0
  121. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/doc2vec.py +0 -0
  122. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/e5_embeddings.py +0 -0
  123. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/e5v_embeddings.py +0 -0
  124. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/elmo_embeddings.py +0 -0
  125. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/instructor_embeddings.py +0 -0
  126. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/longformer_embeddings.py +0 -0
  127. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/minilm_embeddings.py +0 -0
  128. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/mpnet_embeddings.py +0 -0
  129. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/mxbai_embeddings.py +0 -0
  130. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/nomic_embeddings.py +0 -0
  131. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/roberta_embeddings.py +0 -0
  132. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/roberta_sentence_embeddings.py +0 -0
  133. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/sentence_embeddings.py +0 -0
  134. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/snowflake_embeddings.py +0 -0
  135. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/uae_embeddings.py +0 -0
  136. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/universal_sentence_encoder.py +0 -0
  137. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/word2vec.py +0 -0
  138. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/word_embeddings.py +0 -0
  139. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/xlm_roberta_embeddings.py +0 -0
  140. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/xlm_roberta_sentence_embeddings.py +0 -0
  141. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/embeddings/xlnet_embeddings.py +0 -0
  142. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/er/__init__.py +0 -0
  143. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/er/entity_ruler.py +0 -0
  144. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/graph_extraction.py +0 -0
  145. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/keyword_extraction/__init__.py +0 -0
  146. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/keyword_extraction/yake_keyword_extraction.py +0 -0
  147. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/ld_dl/__init__.py +0 -0
  148. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/ld_dl/language_detector_dl.py +0 -0
  149. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/lemmatizer.py +0 -0
  150. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/matcher/__init__.py +0 -0
  151. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/matcher/big_text_matcher.py +0 -0
  152. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/matcher/date_matcher.py +0 -0
  153. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/matcher/multi_date_matcher.py +0 -0
  154. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/matcher/regex_matcher.py +0 -0
  155. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/matcher/text_matcher.py +0 -0
  156. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/n_gram_generator.py +0 -0
  157. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/ner/__init__.py +0 -0
  158. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/ner/ner_approach.py +0 -0
  159. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/ner/ner_converter.py +0 -0
  160. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/ner/ner_crf.py +0 -0
  161. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/ner/ner_dl.py +0 -0
  162. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/ner/ner_overwriter.py +0 -0
  163. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/ner/zero_shot_ner_model.py +0 -0
  164. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/normalizer.py +0 -0
  165. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/openai/__init__.py +0 -0
  166. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/openai/openai_completion.py +0 -0
  167. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/openai/openai_embeddings.py +0 -0
  168. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/param/__init__.py +0 -0
  169. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/param/classifier_encoder.py +0 -0
  170. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/param/evaluation_dl_params.py +0 -0
  171. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/pos/__init__.py +0 -0
  172. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/pos/perceptron.py +0 -0
  173. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/sentence/__init__.py +0 -0
  174. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/sentence/sentence_detector.py +0 -0
  175. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/sentence/sentence_detector_dl.py +0 -0
  176. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/sentiment/__init__.py +0 -0
  177. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/sentiment/sentiment_detector.py +0 -0
  178. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/sentiment/vivekn_sentiment.py +0 -0
  179. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/bart_transformer.py +0 -0
  180. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/cohere_transformer.py +0 -0
  181. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/cpm_transformer.py +0 -0
  182. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/gpt2_transformer.py +0 -0
  183. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/llama2_transformer.py +0 -0
  184. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/llama3_transformer.py +0 -0
  185. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/m2m100_transformer.py +0 -0
  186. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/marian_transformer.py +0 -0
  187. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/mistral_transformer.py +0 -0
  188. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/nllb_transformer.py +0 -0
  189. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/olmo_transformer.py +0 -0
  190. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/phi2_transformer.py +0 -0
  191. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/phi3_transformer.py +0 -0
  192. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/phi4_transformer.py +0 -0
  193. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/qwen_transformer.py +0 -0
  194. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/starcoder_transformer.py +0 -0
  195. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/seq2seq/t5_transformer.py +0 -0
  196. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/similarity/__init__.py +0 -0
  197. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/similarity/document_similarity_ranker.py +0 -0
  198. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/spell_check/__init__.py +0 -0
  199. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/spell_check/context_spell_checker.py +0 -0
  200. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/spell_check/norvig_sweeting.py +0 -0
  201. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/spell_check/symmetric_delete.py +0 -0
  202. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/stemmer.py +0 -0
  203. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/stop_words_cleaner.py +0 -0
  204. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/tf_ner_dl_graph_builder.py +0 -0
  205. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/token/__init__.py +0 -0
  206. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/token/chunk_tokenizer.py +0 -0
  207. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/token/recursive_tokenizer.py +0 -0
  208. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/token/regex_tokenizer.py +0 -0
  209. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/token/tokenizer.py +0 -0
  210. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/token2_chunk.py +0 -0
  211. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/ws/__init__.py +0 -0
  212. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/annotator/ws/word_segmenter.py +0 -0
  213. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/__init__.py +0 -0
  214. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/audio_assembler.py +0 -0
  215. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/doc2_chunk.py +0 -0
  216. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/document_assembler.py +0 -0
  217. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/embeddings_finisher.py +0 -0
  218. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/finisher.py +0 -0
  219. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/graph_finisher.py +0 -0
  220. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/has_recursive_fit.py +0 -0
  221. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/has_recursive_transform.py +0 -0
  222. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/image_assembler.py +0 -0
  223. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/light_pipeline.py +0 -0
  224. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/multi_document_assembler.py +0 -0
  225. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/prompt_assembler.py +0 -0
  226. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/recursive_pipeline.py +0 -0
  227. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/table_assembler.py +0 -0
  228. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/base/token_assembler.py +0 -0
  229. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/common/__init__.py +0 -0
  230. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/common/annotator_approach.py +0 -0
  231. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/common/annotator_model.py +0 -0
  232. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/common/annotator_properties.py +0 -0
  233. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/common/annotator_type.py +0 -0
  234. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/common/coverage_result.py +0 -0
  235. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/common/match_strategy.py +0 -0
  236. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/common/read_as.py +0 -0
  237. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/common/recursive_annotator_approach.py +0 -0
  238. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/common/storage.py +0 -0
  239. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/common/utils.py +0 -0
  240. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/functions.py +0 -0
  241. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/internal/annotator_java_ml.py +0 -0
  242. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/internal/annotator_transformer.py +0 -0
  243. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/internal/extended_java_wrapper.py +0 -0
  244. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/internal/params_getters_setters.py +0 -0
  245. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/internal/recursive.py +0 -0
  246. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/logging/__init__.py +0 -0
  247. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/logging/comet.py +0 -0
  248. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/partition/__init__.py +0 -0
  249. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/partition/partition.py +0 -0
  250. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/partition/partition_properties.py +0 -0
  251. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/partition/partition_transformer.py +0 -0
  252. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/pretrained/__init__.py +0 -0
  253. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/pretrained/pretrained_pipeline.py +0 -0
  254. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/pretrained/resource_downloader.py +0 -0
  255. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/pretrained/utils.py +0 -0
  256. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/reader/__init__.py +0 -0
  257. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/reader/enums.py +0 -0
  258. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/reader/pdf_to_text.py +0 -0
  259. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/reader/sparknlp_reader.py +0 -0
  260. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/__init__.py +0 -0
  261. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/__init__.py +0 -0
  262. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/graph_builders.py +0 -0
  263. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/ner_dl/__init__.py +0 -0
  264. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/ner_dl/create_graph.py +0 -0
  265. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/ner_dl/dataset_encoder.py +0 -0
  266. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/ner_dl/ner_model.py +0 -0
  267. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/ner_dl/ner_model_saver.py +0 -0
  268. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/ner_dl/sentence_grouper.py +0 -0
  269. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/tf2contrib/__init__.py +0 -0
  270. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/tf2contrib/core_rnn_cell.py +0 -0
  271. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/tf2contrib/fused_rnn_cell.py +0 -0
  272. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/tf2contrib/gru_ops.py +0 -0
  273. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/tf2contrib/lstm_ops.py +0 -0
  274. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/tf2contrib/rnn.py +0 -0
  275. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders/tf2contrib/rnn_cell.py +0 -0
  276. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders_1x/__init__.py +0 -0
  277. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders_1x/graph_builders.py +0 -0
  278. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders_1x/ner_dl/__init__.py +0 -0
  279. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders_1x/ner_dl/create_graph.py +0 -0
  280. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py +0 -0
  281. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py +0 -0
  282. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py +0 -0
  283. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py +0 -0
  284. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/conll.py +0 -0
  285. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/conllu.py +0 -0
  286. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/pos.py +0 -0
  287. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/pub_tator.py +0 -0
  288. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/spacy_to_annotation.py +0 -0
  289. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/training/tfgraphs.py +0 -0
  290. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/upload_to_hub.py +0 -0
  291. {spark_nlp-6.1.0 → spark_nlp-6.1.2}/sparknlp/util.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: spark-nlp
3
- Version: 6.1.0
3
+ Version: 6.1.2
4
4
  Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
5
5
  Home-page: https://github.com/JohnSnowLabs/spark-nlp
6
6
  Author: John Snow Labs
@@ -58,7 +58,7 @@ Dynamic: summary
58
58
 
59
59
  Spark NLP is a state-of-the-art Natural Language Processing library built on top of Apache Spark. It provides **simple**, **performant** & **accurate** NLP annotations for machine learning pipelines that **scale** easily in a distributed environment.
60
60
 
61
- Spark NLP comes with **83000+** pretrained **pipelines** and **models** in more than **200+** languages.
61
+ Spark NLP comes with **100000+** pretrained **pipelines** and **models** in more than **200+** languages.
62
62
  It also offers tasks such as **Tokenization**, **Word Segmentation**, **Part-of-Speech Tagging**, Word and Sentence **Embeddings**, **Named Entity Recognition**, **Dependency Parsing**, **Spell Checking**, **Text Classification**, **Sentiment Analysis**, **Token Classification**, **Machine Translation** (+180 languages), **Summarization**, **Question Answering**, **Table Question Answering**, **Text Generation**, **Image Classification**, **Image to Text (captioning)**, **Automatic Speech Recognition**, **Zero-Shot Learning**, and many more [NLP tasks](#features).
63
63
 
64
64
  **Spark NLP** is the only open-source NLP library in **production** that offers state-of-the-art transformers such as **BERT**, **CamemBERT**, **ALBERT**, **ELECTRA**, **XLNet**, **DistilBERT**, **RoBERTa**, **DeBERTa**, **XLM-RoBERTa**, **Longformer**, **ELMO**, **Universal Sentence Encoder**, **Llama-2**, **M2M100**, **BART**, **Instructor**, **E5**, **Google T5**, **MarianMT**, **OpenAI GPT2**, **Vision Transformers (ViT)**, **OpenAI Whisper**, **Llama**, **Mistral**, **Phi**, **Qwen2**, and many more not only to **Python** and **R**, but also to **JVM** ecosystem (**Java**, **Scala**, and **Kotlin**) at **scale** by extending **Apache Spark** natively.
@@ -102,7 +102,7 @@ $ java -version
102
102
  $ conda create -n sparknlp python=3.7 -y
103
103
  $ conda activate sparknlp
104
104
  # spark-nlp by default is based on pyspark 3.x
105
- $ pip install spark-nlp==6.1.0 pyspark==3.3.1
105
+ $ pip install spark-nlp==6.1.2 pyspark==3.3.1
106
106
  ```
107
107
 
108
108
  In Python console or Jupyter `Python3` kernel:
@@ -168,11 +168,11 @@ For a quick example of using pipelines and models take a look at our official [d
168
168
 
169
169
  ### Apache Spark Support
170
170
 
171
- Spark NLP *6.1.0* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
171
+ Spark NLP *6.1.2* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
172
172
 
173
173
  | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
174
174
  |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
175
- | 6.0.x | YES | YES | YES | YES | YES | YES | NO | NO |
175
+ | 6.x.x and up | YES | YES | YES | YES | YES | YES | NO | NO |
176
176
  | 5.5.x | YES | YES | YES | YES | YES | YES | NO | NO |
177
177
  | 5.4.x | YES | YES | YES | YES | YES | YES | NO | NO |
178
178
  | 5.3.x | YES | YES | YES | YES | YES | YES | NO | NO |
@@ -198,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http
198
198
 
199
199
  ### Databricks Support
200
200
 
201
- Spark NLP 6.1.0 has been tested and is compatible with the following runtimes:
201
+ Spark NLP 6.1.2 has been tested and is compatible with the following runtimes:
202
202
 
203
203
  | **CPU** | **GPU** |
204
204
  |--------------------|--------------------|
@@ -206,16 +206,17 @@ Spark NLP 6.1.0 has been tested and is compatible with the following runtimes:
206
206
  | 14.2 / 14.2 ML | 14.2 ML & GPU |
207
207
  | 14.3 / 14.3 ML | 14.3 ML & GPU |
208
208
  | 15.0 / 15.0 ML | 15.0 ML & GPU |
209
- | 15.1 / 15.0 ML | 15.1 ML & GPU |
210
- | 15.2 / 15.0 ML | 15.2 ML & GPU |
211
- | 15.3 / 15.0 ML | 15.3 ML & GPU |
212
- | 15.4 / 15.0 ML | 15.4 ML & GPU |
209
+ | 15.1 / 15.1 ML | 15.1 ML & GPU |
210
+ | 15.2 / 15.2 ML | 15.2 ML & GPU |
211
+ | 15.3 / 15.3 ML | 15.3 ML & GPU |
212
+ | 15.4 / 15.4 ML | 15.4 ML & GPU |
213
+ | 16.4 / 16.4 ML | 16.4 ML & GPU |
213
214
 
214
215
  We are compatible with older runtimes. For a full list check databricks support in our official [documentation](https://sparknlp.org/docs/en/install#databricks-support)
215
216
 
216
217
  ### EMR Support
217
218
 
218
- Spark NLP 6.1.0 has been tested and is compatible with the following EMR releases:
219
+ Spark NLP 6.1.2 has been tested and is compatible with the following EMR releases:
219
220
 
220
221
  | **EMR Release** |
221
222
  |--------------------|
@@ -19,7 +19,7 @@
19
19
 
20
20
  Spark NLP is a state-of-the-art Natural Language Processing library built on top of Apache Spark. It provides **simple**, **performant** & **accurate** NLP annotations for machine learning pipelines that **scale** easily in a distributed environment.
21
21
 
22
- Spark NLP comes with **83000+** pretrained **pipelines** and **models** in more than **200+** languages.
22
+ Spark NLP comes with **100000+** pretrained **pipelines** and **models** in more than **200+** languages.
23
23
  It also offers tasks such as **Tokenization**, **Word Segmentation**, **Part-of-Speech Tagging**, Word and Sentence **Embeddings**, **Named Entity Recognition**, **Dependency Parsing**, **Spell Checking**, **Text Classification**, **Sentiment Analysis**, **Token Classification**, **Machine Translation** (+180 languages), **Summarization**, **Question Answering**, **Table Question Answering**, **Text Generation**, **Image Classification**, **Image to Text (captioning)**, **Automatic Speech Recognition**, **Zero-Shot Learning**, and many more [NLP tasks](#features).
24
24
 
25
25
  **Spark NLP** is the only open-source NLP library in **production** that offers state-of-the-art transformers such as **BERT**, **CamemBERT**, **ALBERT**, **ELECTRA**, **XLNet**, **DistilBERT**, **RoBERTa**, **DeBERTa**, **XLM-RoBERTa**, **Longformer**, **ELMO**, **Universal Sentence Encoder**, **Llama-2**, **M2M100**, **BART**, **Instructor**, **E5**, **Google T5**, **MarianMT**, **OpenAI GPT2**, **Vision Transformers (ViT)**, **OpenAI Whisper**, **Llama**, **Mistral**, **Phi**, **Qwen2**, and many more not only to **Python** and **R**, but also to **JVM** ecosystem (**Java**, **Scala**, and **Kotlin**) at **scale** by extending **Apache Spark** natively.
@@ -63,7 +63,7 @@ $ java -version
63
63
  $ conda create -n sparknlp python=3.7 -y
64
64
  $ conda activate sparknlp
65
65
  # spark-nlp by default is based on pyspark 3.x
66
- $ pip install spark-nlp==6.1.0 pyspark==3.3.1
66
+ $ pip install spark-nlp==6.1.2 pyspark==3.3.1
67
67
  ```
68
68
 
69
69
  In Python console or Jupyter `Python3` kernel:
@@ -129,11 +129,11 @@ For a quick example of using pipelines and models take a look at our official [d
129
129
 
130
130
  ### Apache Spark Support
131
131
 
132
- Spark NLP *6.1.0* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
132
+ Spark NLP *6.1.2* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
133
133
 
134
134
  | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
135
135
  |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
136
- | 6.0.x | YES | YES | YES | YES | YES | YES | NO | NO |
136
+ | 6.x.x and up | YES | YES | YES | YES | YES | YES | NO | NO |
137
137
  | 5.5.x | YES | YES | YES | YES | YES | YES | NO | NO |
138
138
  | 5.4.x | YES | YES | YES | YES | YES | YES | NO | NO |
139
139
  | 5.3.x | YES | YES | YES | YES | YES | YES | NO | NO |
@@ -159,7 +159,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http
159
159
 
160
160
  ### Databricks Support
161
161
 
162
- Spark NLP 6.1.0 has been tested and is compatible with the following runtimes:
162
+ Spark NLP 6.1.2 has been tested and is compatible with the following runtimes:
163
163
 
164
164
  | **CPU** | **GPU** |
165
165
  |--------------------|--------------------|
@@ -167,16 +167,17 @@ Spark NLP 6.1.0 has been tested and is compatible with the following runtimes:
167
167
  | 14.2 / 14.2 ML | 14.2 ML & GPU |
168
168
  | 14.3 / 14.3 ML | 14.3 ML & GPU |
169
169
  | 15.0 / 15.0 ML | 15.0 ML & GPU |
170
- | 15.1 / 15.0 ML | 15.1 ML & GPU |
171
- | 15.2 / 15.0 ML | 15.2 ML & GPU |
172
- | 15.3 / 15.0 ML | 15.3 ML & GPU |
173
- | 15.4 / 15.0 ML | 15.4 ML & GPU |
170
+ | 15.1 / 15.1 ML | 15.1 ML & GPU |
171
+ | 15.2 / 15.2 ML | 15.2 ML & GPU |
172
+ | 15.3 / 15.3 ML | 15.3 ML & GPU |
173
+ | 15.4 / 15.4 ML | 15.4 ML & GPU |
174
+ | 16.4 / 16.4 ML | 16.4 ML & GPU |
174
175
 
175
176
  We are compatible with older runtimes. For a full list check databricks support in our official [documentation](https://sparknlp.org/docs/en/install#databricks-support)
176
177
 
177
178
  ### EMR Support
178
179
 
179
- Spark NLP 6.1.0 has been tested and is compatible with the following EMR releases:
180
+ Spark NLP 6.1.2 has been tested and is compatible with the following EMR releases:
180
181
 
181
182
  | **EMR Release** |
182
183
  |--------------------|
@@ -41,7 +41,7 @@ setup(
41
41
  # project code, see
42
42
  # https://packaging.python.org/en/latest/single_source_version.html
43
43
 
44
- version='6.1.0', # Required
44
+ version='6.1.2', # Required
45
45
 
46
46
  # This is a one-line description or tagline of what your project does. This
47
47
  # corresponds to the 'Summary' metadata field:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: spark-nlp
3
- Version: 6.1.0
3
+ Version: 6.1.2
4
4
  Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
5
5
  Home-page: https://github.com/JohnSnowLabs/spark-nlp
6
6
  Author: John Snow Labs
@@ -58,7 +58,7 @@ Dynamic: summary
58
58
 
59
59
  Spark NLP is a state-of-the-art Natural Language Processing library built on top of Apache Spark. It provides **simple**, **performant** & **accurate** NLP annotations for machine learning pipelines that **scale** easily in a distributed environment.
60
60
 
61
- Spark NLP comes with **83000+** pretrained **pipelines** and **models** in more than **200+** languages.
61
+ Spark NLP comes with **100000+** pretrained **pipelines** and **models** in more than **200+** languages.
62
62
  It also offers tasks such as **Tokenization**, **Word Segmentation**, **Part-of-Speech Tagging**, Word and Sentence **Embeddings**, **Named Entity Recognition**, **Dependency Parsing**, **Spell Checking**, **Text Classification**, **Sentiment Analysis**, **Token Classification**, **Machine Translation** (+180 languages), **Summarization**, **Question Answering**, **Table Question Answering**, **Text Generation**, **Image Classification**, **Image to Text (captioning)**, **Automatic Speech Recognition**, **Zero-Shot Learning**, and many more [NLP tasks](#features).
63
63
 
64
64
  **Spark NLP** is the only open-source NLP library in **production** that offers state-of-the-art transformers such as **BERT**, **CamemBERT**, **ALBERT**, **ELECTRA**, **XLNet**, **DistilBERT**, **RoBERTa**, **DeBERTa**, **XLM-RoBERTa**, **Longformer**, **ELMO**, **Universal Sentence Encoder**, **Llama-2**, **M2M100**, **BART**, **Instructor**, **E5**, **Google T5**, **MarianMT**, **OpenAI GPT2**, **Vision Transformers (ViT)**, **OpenAI Whisper**, **Llama**, **Mistral**, **Phi**, **Qwen2**, and many more not only to **Python** and **R**, but also to **JVM** ecosystem (**Java**, **Scala**, and **Kotlin**) at **scale** by extending **Apache Spark** natively.
@@ -102,7 +102,7 @@ $ java -version
102
102
  $ conda create -n sparknlp python=3.7 -y
103
103
  $ conda activate sparknlp
104
104
  # spark-nlp by default is based on pyspark 3.x
105
- $ pip install spark-nlp==6.1.0 pyspark==3.3.1
105
+ $ pip install spark-nlp==6.1.2 pyspark==3.3.1
106
106
  ```
107
107
 
108
108
  In Python console or Jupyter `Python3` kernel:
@@ -168,11 +168,11 @@ For a quick example of using pipelines and models take a look at our official [d
168
168
 
169
169
  ### Apache Spark Support
170
170
 
171
- Spark NLP *6.1.0* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
171
+ Spark NLP *6.1.2* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
172
172
 
173
173
  | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
174
174
  |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
175
- | 6.0.x | YES | YES | YES | YES | YES | YES | NO | NO |
175
+ | 6.x.x and up | YES | YES | YES | YES | YES | YES | NO | NO |
176
176
  | 5.5.x | YES | YES | YES | YES | YES | YES | NO | NO |
177
177
  | 5.4.x | YES | YES | YES | YES | YES | YES | NO | NO |
178
178
  | 5.3.x | YES | YES | YES | YES | YES | YES | NO | NO |
@@ -198,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http
198
198
 
199
199
  ### Databricks Support
200
200
 
201
- Spark NLP 6.1.0 has been tested and is compatible with the following runtimes:
201
+ Spark NLP 6.1.2 has been tested and is compatible with the following runtimes:
202
202
 
203
203
  | **CPU** | **GPU** |
204
204
  |--------------------|--------------------|
@@ -206,16 +206,17 @@ Spark NLP 6.1.0 has been tested and is compatible with the following runtimes:
206
206
  | 14.2 / 14.2 ML | 14.2 ML & GPU |
207
207
  | 14.3 / 14.3 ML | 14.3 ML & GPU |
208
208
  | 15.0 / 15.0 ML | 15.0 ML & GPU |
209
- | 15.1 / 15.0 ML | 15.1 ML & GPU |
210
- | 15.2 / 15.0 ML | 15.2 ML & GPU |
211
- | 15.3 / 15.0 ML | 15.3 ML & GPU |
212
- | 15.4 / 15.0 ML | 15.4 ML & GPU |
209
+ | 15.1 / 15.1 ML | 15.1 ML & GPU |
210
+ | 15.2 / 15.2 ML | 15.2 ML & GPU |
211
+ | 15.3 / 15.3 ML | 15.3 ML & GPU |
212
+ | 15.4 / 15.4 ML | 15.4 ML & GPU |
213
+ | 16.4 / 16.4 ML | 16.4 ML & GPU |
213
214
 
214
215
  We are compatible with older runtimes. For a full list check databricks support in our official [documentation](https://sparknlp.org/docs/en/install#databricks-support)
215
216
 
216
217
  ### EMR Support
217
218
 
218
- Spark NLP 6.1.0 has been tested and is compatible with the following EMR releases:
219
+ Spark NLP 6.1.2 has been tested and is compatible with the following EMR releases:
219
220
 
220
221
  | **EMR Release** |
221
222
  |--------------------|
@@ -176,6 +176,7 @@ sparknlp/annotator/sentiment/sentiment_detector.py
176
176
  sparknlp/annotator/sentiment/vivekn_sentiment.py
177
177
  sparknlp/annotator/seq2seq/__init__.py
178
178
  sparknlp/annotator/seq2seq/auto_gguf_model.py
179
+ sparknlp/annotator/seq2seq/auto_gguf_reranker.py
179
180
  sparknlp/annotator/seq2seq/auto_gguf_vision_model.py
180
181
  sparknlp/annotator/seq2seq/bart_transformer.py
181
182
  sparknlp/annotator/seq2seq/cohere_transformer.py
@@ -255,6 +256,7 @@ sparknlp/reader/__init__.py
255
256
  sparknlp/reader/enums.py
256
257
  sparknlp/reader/pdf_to_text.py
257
258
  sparknlp/reader/reader2doc.py
259
+ sparknlp/reader/reader2table.py
258
260
  sparknlp/reader/sparknlp_reader.py
259
261
  sparknlp/training/__init__.py
260
262
  sparknlp/training/conll.py
@@ -66,7 +66,7 @@ sys.modules['com.johnsnowlabs.ml.ai'] = annotator
66
66
  annotators = annotator
67
67
  embeddings = annotator
68
68
 
69
- __version__ = "6.1.0"
69
+ __version__ = "6.1.2"
70
70
 
71
71
 
72
72
  def start(gpu=False,
@@ -12,8 +12,6 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
  """Contains classes for the AutoGGUFEmbeddings."""
15
- from typing import List
16
-
17
15
  from sparknlp.common import *
18
16
 
19
17
 
@@ -32,7 +30,7 @@ class AutoGGUFEmbeddings(AnnotatorModel, HasBatchedAnnotate):
32
30
  ... .setInputCols(["document"]) \\
33
31
  ... .setOutputCol("embeddings")
34
32
 
35
- The default model is ``"Nomic_Embed_Text_v1.5.Q8_0.gguf"``, if no name is provided.
33
+ The default model is ``"Qwen3_Embedding_0.6B_Q8_0_gguf"``, if no name is provided.
36
34
 
37
35
  For extended examples of usage, see the
38
36
  `AutoGGUFEmbeddingsTest <https://github.com/JohnSnowLabs/spark-nlp/tree/master/src/test/scala/com/johnsnowlabs/nlp/embeddings/AutoGGUFEmbeddingsTest.scala>`__
@@ -313,12 +311,6 @@ class AutoGGUFEmbeddings(AnnotatorModel, HasBatchedAnnotate):
313
311
  "Set the pooling type for embeddings, use model default if unspecified",
314
312
  typeConverter=TypeConverters.toString,
315
313
  )
316
- embedding = Param(
317
- Params._dummy(),
318
- "embedding",
319
- "Whether to load model with embedding support",
320
- typeConverter=TypeConverters.toBoolean,
321
- )
322
314
  flashAttention = Param(
323
315
  Params._dummy(),
324
316
  "flashAttention",
@@ -489,10 +481,10 @@ class AutoGGUFEmbeddings(AnnotatorModel, HasBatchedAnnotate):
489
481
  classname=classname, java_model=java_model
490
482
  )
491
483
  self._setDefault(
492
- embedding=True,
493
484
  nCtx=4096,
494
485
  nBatch=512,
495
486
  poolingType="MEAN",
487
+ nGpuLayers=99,
496
488
  )
497
489
 
498
490
  @staticmethod
@@ -517,13 +509,13 @@ class AutoGGUFEmbeddings(AnnotatorModel, HasBatchedAnnotate):
517
509
  return AutoGGUFEmbeddings(java_model=jModel)
518
510
 
519
511
  @staticmethod
520
- def pretrained(name="Nomic_Embed_Text_v1.5.Q8_0.gguf", lang="en", remote_loc=None):
512
+ def pretrained(name="Qwen3_Embedding_0.6B_Q8_0_gguf", lang="en", remote_loc=None):
521
513
  """Downloads and loads a pretrained model.
522
514
 
523
515
  Parameters
524
516
  ----------
525
517
  name : str, optional
526
- Name of the pretrained model, by default "Nomic_Embed_Text_v1.5.Q8_0.gguf"
518
+ Name of the pretrained model, by default "Qwen3_Embedding_0.6B_Q8_0_gguf"
527
519
  lang : str, optional
528
520
  Language of the pretrained model, by default "en"
529
521
  remote_loc : str, optional
@@ -32,3 +32,4 @@ from sparknlp.annotator.seq2seq.llama3_transformer import *
32
32
  from sparknlp.annotator.seq2seq.cohere_transformer import *
33
33
  from sparknlp.annotator.seq2seq.olmo_transformer import *
34
34
  from sparknlp.annotator.seq2seq.phi4_transformer import *
35
+ from sparknlp.annotator.seq2seq.auto_gguf_reranker import *
@@ -37,7 +37,11 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties):
37
37
  ... .setInputCols(["document"]) \\
38
38
  ... .setOutputCol("completions")
39
39
 
40
- The default model is ``"phi3.5_mini_4k_instruct_q4_gguf"``, if no name is provided.
40
+ The default model is ``"Phi_4_mini_instruct_Q4_K_M_gguf"``, if no name is provided.
41
+
42
+ AutoGGUFModel is also able to load pretrained models from AutoGGUFVisionModel. Just
43
+ specify the same name for the pretrained method, and it will load the text-part of the
44
+ multimodal model automatically.
41
45
 
42
46
  For extended examples of usage, see the
43
47
  `AutoGGUFModelTest <https://github.com/JohnSnowLabs/spark-nlp/tree/master/src/test/scala/com/johnsnowlabs/nlp/annotators/seq2seq/AutoGGUFModelTest.scala>`__
@@ -120,8 +124,6 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties):
120
124
  Set path to static lookup cache to use for lookup decoding (not updated by generation)
121
125
  lookupCacheDynamicFilePath
122
126
  Set path to dynamic lookup cache to use for lookup decoding (updated by generation)
123
- embedding
124
- Whether to load model with embedding support
125
127
  flashAttention
126
128
  Whether to enable Flash Attention
127
129
  inputPrefixBos
@@ -252,20 +254,19 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties):
252
254
  useChatTemplate=True,
253
255
  nCtx=4096,
254
256
  nBatch=512,
255
- embedding=False,
256
257
  nPredict=100,
257
258
  nGpuLayers=99,
258
259
  systemPrompt="You are a helpful assistant."
259
260
  )
260
261
 
261
262
  @staticmethod
262
- def loadSavedModel(folder, spark_session):
263
+ def loadSavedModel(path, spark_session):
263
264
  """Loads a locally saved model.
264
265
 
265
266
  Parameters
266
267
  ----------
267
- folder : str
268
- Folder of the saved model
268
+ path : str
269
+ Path to the gguf model
269
270
  spark_session : pyspark.sql.SparkSession
270
271
  The current SparkSession
271
272
 
@@ -275,17 +276,17 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties):
275
276
  The restored model
276
277
  """
277
278
  from sparknlp.internal import _AutoGGUFLoader
278
- jModel = _AutoGGUFLoader(folder, spark_session._jsparkSession)._java_obj
279
+ jModel = _AutoGGUFLoader(path, spark_session._jsparkSession)._java_obj
279
280
  return AutoGGUFModel(java_model=jModel)
280
281
 
281
282
  @staticmethod
282
- def pretrained(name="phi3.5_mini_4k_instruct_q4_gguf", lang="en", remote_loc=None):
283
+ def pretrained(name="Phi_4_mini_instruct_Q4_K_M_gguf", lang="en", remote_loc=None):
283
284
  """Downloads and loads a pretrained model.
284
285
 
285
286
  Parameters
286
287
  ----------
287
288
  name : str, optional
288
- Name of the pretrained model, by default "phi3.5_mini_4k_instruct_q4_gguf"
289
+ Name of the pretrained model, by default "Phi_4_mini_instruct_Q4_K_M_gguf"
289
290
  lang : str, optional
290
291
  Language of the pretrained model, by default "en"
291
292
  remote_loc : str, optional