spark-nlp 6.0.5__tar.gz → 6.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of spark-nlp might be problematic. Click here for more details.

Files changed (290)
  1. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/PKG-INFO +12 -11
  2. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/README.md +11 -10
  3. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/setup.py +1 -1
  4. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/spark_nlp.egg-info/PKG-INFO +12 -11
  5. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/spark_nlp.egg-info/SOURCES.txt +3 -0
  6. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/__init__.py +1 -1
  7. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/auto_gguf_embeddings.py +4 -12
  8. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/__init__.py +1 -0
  9. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/auto_gguf_model.py +10 -7
  10. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +3 -3
  11. spark_nlp-6.1.1/sparknlp/annotator/seq2seq/phi4_transformer.py +387 -0
  12. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/common/properties.py +114 -85
  13. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/internal/__init__.py +9 -0
  14. spark_nlp-6.1.1/sparknlp/reader/reader2doc.py +210 -0
  15. spark_nlp-6.1.1/sparknlp/reader/reader2table.py +163 -0
  16. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/reader/sparknlp_reader.py +45 -0
  17. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/com/__init__.py +0 -0
  18. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/com/johnsnowlabs/__init__.py +0 -0
  19. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/com/johnsnowlabs/ml/__init__.py +0 -0
  20. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/com/johnsnowlabs/ml/ai/__init__.py +0 -0
  21. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/com/johnsnowlabs/nlp/__init__.py +0 -0
  22. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/setup.cfg +0 -0
  23. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/spark_nlp.egg-info/dependency_links.txt +0 -0
  24. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/spark_nlp.egg-info/top_level.txt +0 -0
  25. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotation.py +0 -0
  26. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotation_audio.py +0 -0
  27. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotation_image.py +0 -0
  28. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/__init__.py +0 -0
  29. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/audio/__init__.py +0 -0
  30. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/audio/hubert_for_ctc.py +0 -0
  31. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/audio/wav2vec2_for_ctc.py +0 -0
  32. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/audio/whisper_for_ctc.py +0 -0
  33. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/chunk2_doc.py +0 -0
  34. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/chunker.py +0 -0
  35. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/__init__.py +0 -0
  36. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +0 -0
  37. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/albert_for_question_answering.py +0 -0
  38. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py +0 -0
  39. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/albert_for_token_classification.py +0 -0
  40. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +0 -0
  41. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py +0 -0
  42. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +0 -0
  43. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/bert_for_question_answering.py +0 -0
  44. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py +0 -0
  45. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/bert_for_token_classification.py +0 -0
  46. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py +0 -0
  47. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/camembert_for_question_answering.py +0 -0
  48. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py +0 -0
  49. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/camembert_for_token_classification.py +0 -0
  50. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +0 -0
  51. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/classifier_dl.py +0 -0
  52. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/deberta_for_question_answering.py +0 -0
  53. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py +0 -0
  54. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/deberta_for_token_classification.py +0 -0
  55. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +0 -0
  56. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py +0 -0
  57. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py +0 -0
  58. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py +0 -0
  59. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/distil_bert_for_zero_shot_classification.py +0 -0
  60. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +0 -0
  61. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/longformer_for_question_answering.py +0 -0
  62. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/longformer_for_sequence_classification.py +0 -0
  63. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/longformer_for_token_classification.py +0 -0
  64. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py +0 -0
  65. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py +0 -0
  66. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py +0 -0
  67. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/multi_classifier_dl.py +0 -0
  68. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +0 -0
  69. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/roberta_for_question_answering.py +0 -0
  70. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py +0 -0
  71. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/roberta_for_token_classification.py +0 -0
  72. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py +0 -0
  73. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/sentiment_dl.py +0 -0
  74. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/tapas_for_question_answering.py +0 -0
  75. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +0 -0
  76. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_question_answering.py +0 -0
  77. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_sequence_classification.py +0 -0
  78. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_token_classification.py +0 -0
  79. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_zero_shot_classification.py +0 -0
  80. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/xlnet_for_sequence_classification.py +0 -0
  81. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/xlnet_for_token_classification.py +0 -0
  82. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cleaners/__init__.py +0 -0
  83. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cleaners/cleaner.py +0 -0
  84. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cleaners/extractor.py +0 -0
  85. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/coref/__init__.py +0 -0
  86. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/coref/spanbert_coref.py +0 -0
  87. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/__init__.py +0 -0
  88. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/blip_for_question_answering.py +0 -0
  89. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/clip_for_zero_shot_classification.py +0 -0
  90. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/convnext_for_image_classification.py +0 -0
  91. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/florence2_transformer.py +0 -0
  92. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/gemma3_for_multimodal.py +0 -0
  93. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/internvl_for_multimodal.py +0 -0
  94. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/janus_for_multimodal.py +0 -0
  95. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/llava_for_multimodal.py +0 -0
  96. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/mllama_for_multimodal.py +0 -0
  97. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/paligemma_for_multimodal.py +0 -0
  98. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/phi3_vision_for_multimodal.py +0 -0
  99. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/qwen2vl_transformer.py +0 -0
  100. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/smolvlm_transformer.py +0 -0
  101. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/swin_for_image_classification.py +0 -0
  102. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/vision_encoder_decoder_for_image_captioning.py +0 -0
  103. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/cv/vit_for_image_classification.py +0 -0
  104. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/dataframe_optimizer.py +0 -0
  105. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/date2_chunk.py +0 -0
  106. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/dependency/__init__.py +0 -0
  107. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/dependency/dependency_parser.py +0 -0
  108. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/dependency/typed_dependency_parser.py +0 -0
  109. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/document_character_text_splitter.py +0 -0
  110. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/document_normalizer.py +0 -0
  111. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/document_token_splitter.py +0 -0
  112. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/document_token_splitter_test.py +0 -0
  113. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/__init__.py +0 -0
  114. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/albert_embeddings.py +0 -0
  115. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/bert_embeddings.py +0 -0
  116. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/bert_sentence_embeddings.py +0 -0
  117. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/bge_embeddings.py +0 -0
  118. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/camembert_embeddings.py +0 -0
  119. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/chunk_embeddings.py +0 -0
  120. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/deberta_embeddings.py +0 -0
  121. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/distil_bert_embeddings.py +0 -0
  122. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/doc2vec.py +0 -0
  123. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/e5_embeddings.py +0 -0
  124. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/e5v_embeddings.py +0 -0
  125. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/elmo_embeddings.py +0 -0
  126. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/instructor_embeddings.py +0 -0
  127. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/longformer_embeddings.py +0 -0
  128. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/minilm_embeddings.py +0 -0
  129. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/mpnet_embeddings.py +0 -0
  130. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/mxbai_embeddings.py +0 -0
  131. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/nomic_embeddings.py +0 -0
  132. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/roberta_embeddings.py +0 -0
  133. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/roberta_sentence_embeddings.py +0 -0
  134. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/sentence_embeddings.py +0 -0
  135. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/snowflake_embeddings.py +0 -0
  136. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/uae_embeddings.py +0 -0
  137. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/universal_sentence_encoder.py +0 -0
  138. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/word2vec.py +0 -0
  139. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/word_embeddings.py +0 -0
  140. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/xlm_roberta_embeddings.py +0 -0
  141. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/xlm_roberta_sentence_embeddings.py +0 -0
  142. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/xlnet_embeddings.py +0 -0
  143. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/er/__init__.py +0 -0
  144. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/er/entity_ruler.py +0 -0
  145. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/graph_extraction.py +0 -0
  146. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/keyword_extraction/__init__.py +0 -0
  147. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/keyword_extraction/yake_keyword_extraction.py +0 -0
  148. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/ld_dl/__init__.py +0 -0
  149. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/ld_dl/language_detector_dl.py +0 -0
  150. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/lemmatizer.py +0 -0
  151. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/matcher/__init__.py +0 -0
  152. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/matcher/big_text_matcher.py +0 -0
  153. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/matcher/date_matcher.py +0 -0
  154. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/matcher/multi_date_matcher.py +0 -0
  155. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/matcher/regex_matcher.py +0 -0
  156. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/matcher/text_matcher.py +0 -0
  157. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/n_gram_generator.py +0 -0
  158. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/ner/__init__.py +0 -0
  159. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/ner/ner_approach.py +0 -0
  160. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/ner/ner_converter.py +0 -0
  161. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/ner/ner_crf.py +0 -0
  162. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/ner/ner_dl.py +0 -0
  163. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/ner/ner_overwriter.py +0 -0
  164. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/ner/zero_shot_ner_model.py +0 -0
  165. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/normalizer.py +0 -0
  166. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/openai/__init__.py +0 -0
  167. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/openai/openai_completion.py +0 -0
  168. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/openai/openai_embeddings.py +0 -0
  169. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/param/__init__.py +0 -0
  170. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/param/classifier_encoder.py +0 -0
  171. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/param/evaluation_dl_params.py +0 -0
  172. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/pos/__init__.py +0 -0
  173. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/pos/perceptron.py +0 -0
  174. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/sentence/__init__.py +0 -0
  175. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/sentence/sentence_detector.py +0 -0
  176. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/sentence/sentence_detector_dl.py +0 -0
  177. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/sentiment/__init__.py +0 -0
  178. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/sentiment/sentiment_detector.py +0 -0
  179. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/sentiment/vivekn_sentiment.py +0 -0
  180. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/bart_transformer.py +0 -0
  181. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/cohere_transformer.py +0 -0
  182. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/cpm_transformer.py +0 -0
  183. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/gpt2_transformer.py +0 -0
  184. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/llama2_transformer.py +0 -0
  185. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/llama3_transformer.py +0 -0
  186. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/m2m100_transformer.py +0 -0
  187. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/marian_transformer.py +0 -0
  188. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/mistral_transformer.py +0 -0
  189. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/nllb_transformer.py +0 -0
  190. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/olmo_transformer.py +0 -0
  191. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/phi2_transformer.py +0 -0
  192. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/phi3_transformer.py +0 -0
  193. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/qwen_transformer.py +0 -0
  194. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/starcoder_transformer.py +0 -0
  195. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/t5_transformer.py +0 -0
  196. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/similarity/__init__.py +0 -0
  197. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/similarity/document_similarity_ranker.py +0 -0
  198. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/spell_check/__init__.py +0 -0
  199. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/spell_check/context_spell_checker.py +0 -0
  200. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/spell_check/norvig_sweeting.py +0 -0
  201. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/spell_check/symmetric_delete.py +0 -0
  202. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/stemmer.py +0 -0
  203. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/stop_words_cleaner.py +0 -0
  204. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/tf_ner_dl_graph_builder.py +0 -0
  205. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/token/__init__.py +0 -0
  206. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/token/chunk_tokenizer.py +0 -0
  207. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/token/recursive_tokenizer.py +0 -0
  208. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/token/regex_tokenizer.py +0 -0
  209. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/token/tokenizer.py +0 -0
  210. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/token2_chunk.py +0 -0
  211. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/ws/__init__.py +0 -0
  212. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/annotator/ws/word_segmenter.py +0 -0
  213. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/__init__.py +0 -0
  214. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/audio_assembler.py +0 -0
  215. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/doc2_chunk.py +0 -0
  216. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/document_assembler.py +0 -0
  217. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/embeddings_finisher.py +0 -0
  218. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/finisher.py +0 -0
  219. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/graph_finisher.py +0 -0
  220. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/has_recursive_fit.py +0 -0
  221. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/has_recursive_transform.py +0 -0
  222. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/image_assembler.py +0 -0
  223. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/light_pipeline.py +0 -0
  224. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/multi_document_assembler.py +0 -0
  225. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/prompt_assembler.py +0 -0
  226. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/recursive_pipeline.py +0 -0
  227. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/table_assembler.py +0 -0
  228. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/base/token_assembler.py +0 -0
  229. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/common/__init__.py +0 -0
  230. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/common/annotator_approach.py +0 -0
  231. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/common/annotator_model.py +0 -0
  232. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/common/annotator_properties.py +0 -0
  233. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/common/annotator_type.py +0 -0
  234. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/common/coverage_result.py +0 -0
  235. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/common/match_strategy.py +0 -0
  236. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/common/read_as.py +0 -0
  237. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/common/recursive_annotator_approach.py +0 -0
  238. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/common/storage.py +0 -0
  239. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/common/utils.py +0 -0
  240. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/functions.py +0 -0
  241. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/internal/annotator_java_ml.py +0 -0
  242. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/internal/annotator_transformer.py +0 -0
  243. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/internal/extended_java_wrapper.py +0 -0
  244. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/internal/params_getters_setters.py +0 -0
  245. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/internal/recursive.py +0 -0
  246. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/logging/__init__.py +0 -0
  247. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/logging/comet.py +0 -0
  248. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/partition/__init__.py +0 -0
  249. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/partition/partition.py +0 -0
  250. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/partition/partition_properties.py +0 -0
  251. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/partition/partition_transformer.py +0 -0
  252. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/pretrained/__init__.py +0 -0
  253. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/pretrained/pretrained_pipeline.py +0 -0
  254. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/pretrained/resource_downloader.py +0 -0
  255. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/pretrained/utils.py +0 -0
  256. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/reader/__init__.py +0 -0
  257. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/reader/enums.py +0 -0
  258. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/reader/pdf_to_text.py +0 -0
  259. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/__init__.py +0 -0
  260. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/__init__.py +0 -0
  261. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/graph_builders.py +0 -0
  262. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/ner_dl/__init__.py +0 -0
  263. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/ner_dl/create_graph.py +0 -0
  264. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/ner_dl/dataset_encoder.py +0 -0
  265. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/ner_dl/ner_model.py +0 -0
  266. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/ner_dl/ner_model_saver.py +0 -0
  267. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/ner_dl/sentence_grouper.py +0 -0
  268. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/tf2contrib/__init__.py +0 -0
  269. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/tf2contrib/core_rnn_cell.py +0 -0
  270. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/tf2contrib/fused_rnn_cell.py +0 -0
  271. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/tf2contrib/gru_ops.py +0 -0
  272. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/tf2contrib/lstm_ops.py +0 -0
  273. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/tf2contrib/rnn.py +0 -0
  274. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/tf2contrib/rnn_cell.py +0 -0
  275. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/__init__.py +0 -0
  276. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/graph_builders.py +0 -0
  277. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/__init__.py +0 -0
  278. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/create_graph.py +0 -0
  279. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py +0 -0
  280. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py +0 -0
  281. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py +0 -0
  282. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py +0 -0
  283. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/conll.py +0 -0
  284. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/conllu.py +0 -0
  285. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/pos.py +0 -0
  286. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/pub_tator.py +0 -0
  287. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/spacy_to_annotation.py +0 -0
  288. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/training/tfgraphs.py +0 -0
  289. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/upload_to_hub.py +0 -0
  290. {spark_nlp-6.0.5 → spark_nlp-6.1.1}/sparknlp/util.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: spark-nlp
3
- Version: 6.0.5
3
+ Version: 6.1.1
4
4
  Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
5
5
  Home-page: https://github.com/JohnSnowLabs/spark-nlp
6
6
  Author: John Snow Labs
@@ -58,7 +58,7 @@ Dynamic: summary
58
58
 
59
59
  Spark NLP is a state-of-the-art Natural Language Processing library built on top of Apache Spark. It provides **simple**, **performant** & **accurate** NLP annotations for machine learning pipelines that **scale** easily in a distributed environment.
60
60
 
61
- Spark NLP comes with **83000+** pretrained **pipelines** and **models** in more than **200+** languages.
61
+ Spark NLP comes with **100000+** pretrained **pipelines** and **models** in more than **200+** languages.
62
62
  It also offers tasks such as **Tokenization**, **Word Segmentation**, **Part-of-Speech Tagging**, Word and Sentence **Embeddings**, **Named Entity Recognition**, **Dependency Parsing**, **Spell Checking**, **Text Classification**, **Sentiment Analysis**, **Token Classification**, **Machine Translation** (+180 languages), **Summarization**, **Question Answering**, **Table Question Answering**, **Text Generation**, **Image Classification**, **Image to Text (captioning)**, **Automatic Speech Recognition**, **Zero-Shot Learning**, and many more [NLP tasks](#features).
63
63
 
64
64
  **Spark NLP** is the only open-source NLP library in **production** that offers state-of-the-art transformers such as **BERT**, **CamemBERT**, **ALBERT**, **ELECTRA**, **XLNet**, **DistilBERT**, **RoBERTa**, **DeBERTa**, **XLM-RoBERTa**, **Longformer**, **ELMO**, **Universal Sentence Encoder**, **Llama-2**, **M2M100**, **BART**, **Instructor**, **E5**, **Google T5**, **MarianMT**, **OpenAI GPT2**, **Vision Transformers (ViT)**, **OpenAI Whisper**, **Llama**, **Mistral**, **Phi**, **Qwen2**, and many more not only to **Python** and **R**, but also to **JVM** ecosystem (**Java**, **Scala**, and **Kotlin**) at **scale** by extending **Apache Spark** natively.
@@ -102,7 +102,7 @@ $ java -version
102
102
  $ conda create -n sparknlp python=3.7 -y
103
103
  $ conda activate sparknlp
104
104
  # spark-nlp by default is based on pyspark 3.x
105
- $ pip install spark-nlp==6.0.5 pyspark==3.3.1
105
+ $ pip install spark-nlp==6.1.1 pyspark==3.3.1
106
106
  ```
107
107
 
108
108
  In Python console or Jupyter `Python3` kernel:
@@ -168,11 +168,11 @@ For a quick example of using pipelines and models take a look at our official [d
168
168
 
169
169
  ### Apache Spark Support
170
170
 
171
- Spark NLP *6.0.5* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
171
+ Spark NLP *6.1.1* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
172
172
 
173
173
  | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
174
174
  |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
175
- | 6.0.x | YES | YES | YES | YES | YES | YES | NO | NO |
175
+ | 6.x.x and up | YES | YES | YES | YES | YES | YES | NO | NO |
176
176
  | 5.5.x | YES | YES | YES | YES | YES | YES | NO | NO |
177
177
  | 5.4.x | YES | YES | YES | YES | YES | YES | NO | NO |
178
178
  | 5.3.x | YES | YES | YES | YES | YES | YES | NO | NO |
@@ -198,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http
198
198
 
199
199
  ### Databricks Support
200
200
 
201
- Spark NLP 6.0.5 has been tested and is compatible with the following runtimes:
201
+ Spark NLP 6.1.1 has been tested and is compatible with the following runtimes:
202
202
 
203
203
  | **CPU** | **GPU** |
204
204
  |--------------------|--------------------|
@@ -206,16 +206,17 @@ Spark NLP 6.0.5 has been tested and is compatible with the following runtimes:
206
206
  | 14.2 / 14.2 ML | 14.2 ML & GPU |
207
207
  | 14.3 / 14.3 ML | 14.3 ML & GPU |
208
208
  | 15.0 / 15.0 ML | 15.0 ML & GPU |
209
- | 15.1 / 15.0 ML | 15.1 ML & GPU |
210
- | 15.2 / 15.0 ML | 15.2 ML & GPU |
211
- | 15.3 / 15.0 ML | 15.3 ML & GPU |
212
- | 15.4 / 15.0 ML | 15.4 ML & GPU |
209
+ | 15.1 / 15.1 ML | 15.1 ML & GPU |
210
+ | 15.2 / 15.2 ML | 15.2 ML & GPU |
211
+ | 15.3 / 15.3 ML | 15.3 ML & GPU |
212
+ | 15.4 / 15.4 ML | 15.4 ML & GPU |
213
+ | 16.4 / 16.4 ML | 16.4 ML & GPU |
213
214
 
214
215
  We are compatible with older runtimes. For a full list check databricks support in our official [documentation](https://sparknlp.org/docs/en/install#databricks-support)
215
216
 
216
217
  ### EMR Support
217
218
 
218
- Spark NLP 6.0.5 has been tested and is compatible with the following EMR releases:
219
+ Spark NLP 6.1.1 has been tested and is compatible with the following EMR releases:
219
220
 
220
221
  | **EMR Release** |
221
222
  |--------------------|
@@ -19,7 +19,7 @@
19
19
 
20
20
  Spark NLP is a state-of-the-art Natural Language Processing library built on top of Apache Spark. It provides **simple**, **performant** & **accurate** NLP annotations for machine learning pipelines that **scale** easily in a distributed environment.
21
21
 
22
- Spark NLP comes with **83000+** pretrained **pipelines** and **models** in more than **200+** languages.
22
+ Spark NLP comes with **100000+** pretrained **pipelines** and **models** in more than **200** languages.
23
23
  It also offers tasks such as **Tokenization**, **Word Segmentation**, **Part-of-Speech Tagging**, Word and Sentence **Embeddings**, **Named Entity Recognition**, **Dependency Parsing**, **Spell Checking**, **Text Classification**, **Sentiment Analysis**, **Token Classification**, **Machine Translation** (+180 languages), **Summarization**, **Question Answering**, **Table Question Answering**, **Text Generation**, **Image Classification**, **Image to Text (captioning)**, **Automatic Speech Recognition**, **Zero-Shot Learning**, and many more [NLP tasks](#features).
24
24
 
25
25
  **Spark NLP** is the only open-source NLP library in **production** that offers state-of-the-art transformers such as **BERT**, **CamemBERT**, **ALBERT**, **ELECTRA**, **XLNet**, **DistilBERT**, **RoBERTa**, **DeBERTa**, **XLM-RoBERTa**, **Longformer**, **ELMO**, **Universal Sentence Encoder**, **Llama-2**, **M2M100**, **BART**, **Instructor**, **E5**, **Google T5**, **MarianMT**, **OpenAI GPT2**, **Vision Transformers (ViT)**, **OpenAI Whisper**, **Llama**, **Mistral**, **Phi**, **Qwen2**, and many more not only to **Python** and **R**, but also to **JVM** ecosystem (**Java**, **Scala**, and **Kotlin**) at **scale** by extending **Apache Spark** natively.
@@ -63,7 +63,7 @@ $ java -version
63
63
  $ conda create -n sparknlp python=3.7 -y
64
64
  $ conda activate sparknlp
65
65
  # spark-nlp by default is based on pyspark 3.x
66
- $ pip install spark-nlp==6.0.5 pyspark==3.3.1
66
+ $ pip install spark-nlp==6.1.1 pyspark==3.3.1
67
67
  ```
68
68
 
69
69
  In Python console or Jupyter `Python3` kernel:
@@ -129,11 +129,11 @@ For a quick example of using pipelines and models take a look at our official [d
129
129
 
130
130
  ### Apache Spark Support
131
131
 
132
- Spark NLP *6.0.5* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
132
+ Spark NLP *6.1.1* has been built on top of Apache Spark 3.4 while fully supporting Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
133
133
 
134
134
  | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
135
135
  |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
136
- | 6.0.x | YES | YES | YES | YES | YES | YES | NO | NO |
136
+ | 6.x.x and up | YES | YES | YES | YES | YES | YES | NO | NO |
137
137
  | 5.5.x | YES | YES | YES | YES | YES | YES | NO | NO |
138
138
  | 5.4.x | YES | YES | YES | YES | YES | YES | NO | NO |
139
139
  | 5.3.x | YES | YES | YES | YES | YES | YES | NO | NO |
@@ -159,7 +159,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http
159
159
 
160
160
  ### Databricks Support
161
161
 
162
- Spark NLP 6.0.5 has been tested and is compatible with the following runtimes:
162
+ Spark NLP 6.1.1 has been tested and is compatible with the following runtimes:
163
163
 
164
164
  | **CPU** | **GPU** |
165
165
  |--------------------|--------------------|
@@ -167,16 +167,17 @@ Spark NLP 6.0.5 has been tested and is compatible with the following runtimes:
167
167
  | 14.2 / 14.2 ML | 14.2 ML & GPU |
168
168
  | 14.3 / 14.3 ML | 14.3 ML & GPU |
169
169
  | 15.0 / 15.0 ML | 15.0 ML & GPU |
170
- | 15.1 / 15.0 ML | 15.1 ML & GPU |
171
- | 15.2 / 15.0 ML | 15.2 ML & GPU |
172
- | 15.3 / 15.0 ML | 15.3 ML & GPU |
173
- | 15.4 / 15.0 ML | 15.4 ML & GPU |
170
+ | 15.1 / 15.1 ML | 15.1 ML & GPU |
171
+ | 15.2 / 15.2 ML | 15.2 ML & GPU |
172
+ | 15.3 / 15.3 ML | 15.3 ML & GPU |
173
+ | 15.4 / 15.4 ML | 15.4 ML & GPU |
174
+ | 16.4 / 16.4 ML | 16.4 ML & GPU |
174
175
 
175
176
  We are compatible with older runtimes. For a full list check databricks support in our official [documentation](https://sparknlp.org/docs/en/install#databricks-support)
176
177
 
177
178
  ### EMR Support
178
179
 
179
- Spark NLP 6.0.5 has been tested and is compatible with the following EMR releases:
180
+ Spark NLP 6.1.1 has been tested and is compatible with the following EMR releases:
180
181
 
181
182
  | **EMR Release** |
182
183
  |--------------------|
@@ -41,7 +41,7 @@ setup(
41
41
  # project code, see
42
42
  # https://packaging.python.org/en/latest/single_source_version.html
43
43
 
44
- version='6.0.5', # Required
44
+ version='6.1.1', # Required
45
45
 
46
46
  # This is a one-line description or tagline of what your project does. This
47
47
  # corresponds to the 'Summary' metadata field:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: spark-nlp
3
- Version: 6.0.5
3
+ Version: 6.1.1
4
4
  Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
5
5
  Home-page: https://github.com/JohnSnowLabs/spark-nlp
6
6
  Author: John Snow Labs
@@ -58,7 +58,7 @@ Dynamic: summary
58
58
 
59
59
  Spark NLP is a state-of-the-art Natural Language Processing library built on top of Apache Spark. It provides **simple**, **performant** & **accurate** NLP annotations for machine learning pipelines that **scale** easily in a distributed environment.
60
60
 
61
- Spark NLP comes with **83000+** pretrained **pipelines** and **models** in more than **200+** languages.
61
+ Spark NLP comes with **100000+** pretrained **pipelines** and **models** in more than **200** languages.
62
62
  It also offers tasks such as **Tokenization**, **Word Segmentation**, **Part-of-Speech Tagging**, Word and Sentence **Embeddings**, **Named Entity Recognition**, **Dependency Parsing**, **Spell Checking**, **Text Classification**, **Sentiment Analysis**, **Token Classification**, **Machine Translation** (+180 languages), **Summarization**, **Question Answering**, **Table Question Answering**, **Text Generation**, **Image Classification**, **Image to Text (captioning)**, **Automatic Speech Recognition**, **Zero-Shot Learning**, and many more [NLP tasks](#features).
63
63
 
64
64
  **Spark NLP** is the only open-source NLP library in **production** that offers state-of-the-art transformers such as **BERT**, **CamemBERT**, **ALBERT**, **ELECTRA**, **XLNet**, **DistilBERT**, **RoBERTa**, **DeBERTa**, **XLM-RoBERTa**, **Longformer**, **ELMO**, **Universal Sentence Encoder**, **Llama-2**, **M2M100**, **BART**, **Instructor**, **E5**, **Google T5**, **MarianMT**, **OpenAI GPT2**, **Vision Transformers (ViT)**, **OpenAI Whisper**, **Llama**, **Mistral**, **Phi**, **Qwen2**, and many more not only to **Python** and **R**, but also to **JVM** ecosystem (**Java**, **Scala**, and **Kotlin**) at **scale** by extending **Apache Spark** natively.
@@ -102,7 +102,7 @@ $ java -version
102
102
  $ conda create -n sparknlp python=3.7 -y
103
103
  $ conda activate sparknlp
104
104
  # spark-nlp by default is based on pyspark 3.x
105
- $ pip install spark-nlp==6.0.5 pyspark==3.3.1
105
+ $ pip install spark-nlp==6.1.1 pyspark==3.3.1
106
106
  ```
107
107
 
108
108
  In Python console or Jupyter `Python3` kernel:
@@ -168,11 +168,11 @@ For a quick example of using pipelines and models take a look at our official [d
168
168
 
169
169
  ### Apache Spark Support
170
170
 
171
- Spark NLP *6.0.5* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
171
+ Spark NLP *6.1.1* has been built on top of Apache Spark 3.4 while fully supporting Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
172
172
 
173
173
  | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
174
174
  |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
175
- | 6.0.x | YES | YES | YES | YES | YES | YES | NO | NO |
175
+ | 6.x.x and up | YES | YES | YES | YES | YES | YES | NO | NO |
176
176
  | 5.5.x | YES | YES | YES | YES | YES | YES | NO | NO |
177
177
  | 5.4.x | YES | YES | YES | YES | YES | YES | NO | NO |
178
178
  | 5.3.x | YES | YES | YES | YES | YES | YES | NO | NO |
@@ -198,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http
198
198
 
199
199
  ### Databricks Support
200
200
 
201
- Spark NLP 6.0.5 has been tested and is compatible with the following runtimes:
201
+ Spark NLP 6.1.1 has been tested and is compatible with the following runtimes:
202
202
 
203
203
  | **CPU** | **GPU** |
204
204
  |--------------------|--------------------|
@@ -206,16 +206,17 @@ Spark NLP 6.0.5 has been tested and is compatible with the following runtimes:
206
206
  | 14.2 / 14.2 ML | 14.2 ML & GPU |
207
207
  | 14.3 / 14.3 ML | 14.3 ML & GPU |
208
208
  | 15.0 / 15.0 ML | 15.0 ML & GPU |
209
- | 15.1 / 15.0 ML | 15.1 ML & GPU |
210
- | 15.2 / 15.0 ML | 15.2 ML & GPU |
211
- | 15.3 / 15.0 ML | 15.3 ML & GPU |
212
- | 15.4 / 15.0 ML | 15.4 ML & GPU |
209
+ | 15.1 / 15.1 ML | 15.1 ML & GPU |
210
+ | 15.2 / 15.2 ML | 15.2 ML & GPU |
211
+ | 15.3 / 15.3 ML | 15.3 ML & GPU |
212
+ | 15.4 / 15.4 ML | 15.4 ML & GPU |
213
+ | 16.4 / 16.4 ML | 16.4 ML & GPU |
213
214
 
214
215
  We are compatible with older runtimes. For a full list check databricks support in our official [documentation](https://sparknlp.org/docs/en/install#databricks-support)
215
216
 
216
217
  ### EMR Support
217
218
 
218
- Spark NLP 6.0.5 has been tested and is compatible with the following EMR releases:
219
+ Spark NLP 6.1.1 has been tested and is compatible with the following EMR releases:
219
220
 
220
221
  | **EMR Release** |
221
222
  |--------------------|
@@ -190,6 +190,7 @@ sparknlp/annotator/seq2seq/nllb_transformer.py
190
190
  sparknlp/annotator/seq2seq/olmo_transformer.py
191
191
  sparknlp/annotator/seq2seq/phi2_transformer.py
192
192
  sparknlp/annotator/seq2seq/phi3_transformer.py
193
+ sparknlp/annotator/seq2seq/phi4_transformer.py
193
194
  sparknlp/annotator/seq2seq/qwen_transformer.py
194
195
  sparknlp/annotator/seq2seq/starcoder_transformer.py
195
196
  sparknlp/annotator/seq2seq/t5_transformer.py
@@ -253,6 +254,8 @@ sparknlp/pretrained/utils.py
253
254
  sparknlp/reader/__init__.py
254
255
  sparknlp/reader/enums.py
255
256
  sparknlp/reader/pdf_to_text.py
257
+ sparknlp/reader/reader2doc.py
258
+ sparknlp/reader/reader2table.py
256
259
  sparknlp/reader/sparknlp_reader.py
257
260
  sparknlp/training/__init__.py
258
261
  sparknlp/training/conll.py
@@ -66,7 +66,7 @@ sys.modules['com.johnsnowlabs.ml.ai'] = annotator
66
66
  annotators = annotator
67
67
  embeddings = annotator
68
68
 
69
- __version__ = "6.0.5"
69
+ __version__ = "6.1.1"
70
70
 
71
71
 
72
72
  def start(gpu=False,
@@ -12,8 +12,6 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
  """Contains classes for the AutoGGUFEmbeddings."""
15
- from typing import List
16
-
17
15
  from sparknlp.common import *
18
16
 
19
17
 
@@ -32,7 +30,7 @@ class AutoGGUFEmbeddings(AnnotatorModel, HasBatchedAnnotate):
32
30
  ... .setInputCols(["document"]) \\
33
31
  ... .setOutputCol("embeddings")
34
32
 
35
- The default model is ``"Nomic_Embed_Text_v1.5.Q8_0.gguf"``, if no name is provided.
33
+ The default model is ``"Qwen3_Embedding_0.6B_Q8_0_gguf"``, if no name is provided.
36
34
 
37
35
  For extended examples of usage, see the
38
36
  `AutoGGUFEmbeddingsTest <https://github.com/JohnSnowLabs/spark-nlp/tree/master/src/test/scala/com/johnsnowlabs/nlp/embeddings/AutoGGUFEmbeddingsTest.scala>`__
@@ -313,12 +311,6 @@ class AutoGGUFEmbeddings(AnnotatorModel, HasBatchedAnnotate):
313
311
  "Set the pooling type for embeddings, use model default if unspecified",
314
312
  typeConverter=TypeConverters.toString,
315
313
  )
316
- embedding = Param(
317
- Params._dummy(),
318
- "embedding",
319
- "Whether to load model with embedding support",
320
- typeConverter=TypeConverters.toBoolean,
321
- )
322
314
  flashAttention = Param(
323
315
  Params._dummy(),
324
316
  "flashAttention",
@@ -489,10 +481,10 @@ class AutoGGUFEmbeddings(AnnotatorModel, HasBatchedAnnotate):
489
481
  classname=classname, java_model=java_model
490
482
  )
491
483
  self._setDefault(
492
- embedding=True,
493
484
  nCtx=4096,
494
485
  nBatch=512,
495
486
  poolingType="MEAN",
487
+ nGpuLayers=99,
496
488
  )
497
489
 
498
490
  @staticmethod
@@ -517,13 +509,13 @@ class AutoGGUFEmbeddings(AnnotatorModel, HasBatchedAnnotate):
517
509
  return AutoGGUFEmbeddings(java_model=jModel)
518
510
 
519
511
  @staticmethod
520
- def pretrained(name="Nomic_Embed_Text_v1.5.Q8_0.gguf", lang="en", remote_loc=None):
512
+ def pretrained(name="Qwen3_Embedding_0.6B_Q8_0_gguf", lang="en", remote_loc=None):
521
513
  """Downloads and loads a pretrained model.
522
514
 
523
515
  Parameters
524
516
  ----------
525
517
  name : str, optional
526
- Name of the pretrained model, by default "Nomic_Embed_Text_v1.5.Q8_0.gguf"
518
+ Name of the pretrained model, by default "Qwen3_Embedding_0.6B_Q8_0_gguf"
527
519
  lang : str, optional
528
520
  Language of the pretrained model, by default "en"
529
521
  remote_loc : str, optional
@@ -31,3 +31,4 @@ from sparknlp.annotator.seq2seq.starcoder_transformer import *
31
31
  from sparknlp.annotator.seq2seq.llama3_transformer import *
32
32
  from sparknlp.annotator.seq2seq.cohere_transformer import *
33
33
  from sparknlp.annotator.seq2seq.olmo_transformer import *
34
+ from sparknlp.annotator.seq2seq.phi4_transformer import *
@@ -37,7 +37,11 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties):
37
37
  ... .setInputCols(["document"]) \\
38
38
  ... .setOutputCol("completions")
39
39
 
40
- The default model is ``"phi3.5_mini_4k_instruct_q4_gguf"``, if no name is provided.
40
+ The default model is ``"Phi_4_mini_instruct_Q4_K_M_gguf"``, if no name is provided.
41
+
42
+ AutoGGUFModel is also able to load pretrained models from AutoGGUFVisionModel. Just
43
+ specify the same name for the pretrained method, and it will load the text-part of the
44
+ multimodal model automatically.
41
45
 
42
46
  For extended examples of usage, see the
43
47
  `AutoGGUFModelTest <https://github.com/JohnSnowLabs/spark-nlp/tree/master/src/test/scala/com/johnsnowlabs/nlp/annotators/seq2seq/AutoGGUFModelTest.scala>`__
@@ -120,8 +124,6 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties):
120
124
  Set path to static lookup cache to use for lookup decoding (not updated by generation)
121
125
  lookupCacheDynamicFilePath
122
126
  Set path to dynamic lookup cache to use for lookup decoding (updated by generation)
123
- embedding
124
- Whether to load model with embedding support
125
127
  flashAttention
126
128
  Whether to enable Flash Attention
127
129
  inputPrefixBos
@@ -252,8 +254,9 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties):
252
254
  useChatTemplate=True,
253
255
  nCtx=4096,
254
256
  nBatch=512,
255
- embedding=False,
256
- nPredict=100
257
+ nPredict=100,
258
+ nGpuLayers=99,
259
+ systemPrompt="You are a helpful assistant."
257
260
  )
258
261
 
259
262
  @staticmethod
@@ -277,13 +280,13 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties):
277
280
  return AutoGGUFModel(java_model=jModel)
278
281
 
279
282
  @staticmethod
280
- def pretrained(name="phi3.5_mini_4k_instruct_q4_gguf", lang="en", remote_loc=None):
283
+ def pretrained(name="Phi_4_mini_instruct_Q4_K_M_gguf", lang="en", remote_loc=None):
281
284
  """Downloads and loads a pretrained model.
282
285
 
283
286
  Parameters
284
287
  ----------
285
288
  name : str, optional
286
- Name of the pretrained model, by default "phi3.5_mini_4k_instruct_q4_gguf"
289
+ Name of the pretrained model, by default "Phi_4_mini_instruct_Q4_K_M_gguf"
287
290
  lang : str, optional
288
291
  Language of the pretrained model, by default "en"
289
292
  remote_loc : str, optional
@@ -43,7 +43,7 @@ class AutoGGUFVisionModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppPropert
43
43
  .setOutputCol("completions")
44
44
 
45
45
 
46
- The default model is ``"llava_v1.5_7b_Q4_0_gguf"``, if no name is provided.
46
+ The default model is ``"Qwen2.5_VL_3B_Instruct_Q4_K_M_gguf"``, if no name is provided.
47
47
 
48
48
  For available pretrained models please see the `Models Hub <https://sparknlp.org/models>`__.
49
49
 
@@ -311,13 +311,13 @@ class AutoGGUFVisionModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppPropert
311
311
  return AutoGGUFVisionModel(java_model=jModel)
312
312
 
313
313
  @staticmethod
314
- def pretrained(name="llava_v1.5_7b_Q4_0_gguf", lang="en", remote_loc=None):
314
+ def pretrained(name="Qwen2.5_VL_3B_Instruct_Q4_K_M_gguf", lang="en", remote_loc=None):
315
315
  """Downloads and loads a pretrained model.
316
316
 
317
317
  Parameters
318
318
  ----------
319
319
  name : str, optional
320
- Name of the pretrained model, by default "llava_v1.5_7b_Q4_0_gguf"
320
+ Name of the pretrained model, by default "Qwen2.5_VL_3B_Instruct_Q4_K_M_gguf"
321
321
  lang : str, optional
322
322
  Language of the pretrained model, by default "en"
323
323
  remote_loc : str, optional