spark-nlp 6.0.1.tar.gz → 6.0.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of spark-nlp might be problematic.

Files changed (285)
  1. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/PKG-INFO +13 -6
  2. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/README.md +4 -4
  3. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/setup.py +1 -1
  4. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/spark_nlp.egg-info/PKG-INFO +13 -6
  5. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/spark_nlp.egg-info/SOURCES.txt +6 -0
  6. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/__init__.py +4 -2
  7. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/__init__.py +2 -0
  8. spark_nlp-6.0.2/sparknlp/annotator/cv/florence2_transformer.py +180 -0
  9. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/gemma3_for_multimodal.py +5 -10
  10. spark_nlp-6.0.2/sparknlp/annotator/cv/internvl_for_multimodal.py +280 -0
  11. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/janus_for_multimodal.py +8 -13
  12. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/llava_for_multimodal.py +1 -1
  13. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/paligemma_for_multimodal.py +7 -7
  14. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/phi3_vision_for_multimodal.py +1 -1
  15. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/qwen2vl_transformer.py +1 -1
  16. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/smolvlm_transformer.py +7 -13
  17. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/date2_chunk.py +1 -1
  18. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/document_character_text_splitter.py +8 -8
  19. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/document_token_splitter.py +7 -7
  20. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/bge_embeddings.py +21 -19
  21. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/snowflake_embeddings.py +15 -15
  22. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/openai/openai_completion.py +3 -4
  23. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/m2m100_transformer.py +1 -1
  24. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/mistral_transformer.py +2 -3
  25. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/nllb_transformer.py +1 -1
  26. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/qwen_transformer.py +26 -25
  27. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/spell_check/context_spell_checker.py +1 -1
  28. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/prompt_assembler.py +1 -1
  29. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/common/properties.py +7 -7
  30. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/internal/__init__.py +19 -0
  31. spark_nlp-6.0.2/sparknlp/partition/__init__.py +16 -0
  32. spark_nlp-6.0.2/sparknlp/partition/partition.py +244 -0
  33. spark_nlp-6.0.2/sparknlp/partition/partition_properties.py +257 -0
  34. spark_nlp-6.0.2/sparknlp/partition/partition_transformer.py +196 -0
  35. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/reader/pdf_to_text.py +50 -4
  36. spark_nlp-6.0.2/sparknlp/reader/sparknlp_reader.py +325 -0
  37. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/spacy_to_annotation.py +7 -7
  38. spark_nlp-6.0.1/sparknlp/reader/sparknlp_reader.py +0 -321
  39. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/com/__init__.py +0 -0
  40. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/com/johnsnowlabs/__init__.py +0 -0
  41. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/com/johnsnowlabs/ml/__init__.py +0 -0
  42. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/com/johnsnowlabs/ml/ai/__init__.py +0 -0
  43. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/com/johnsnowlabs/nlp/__init__.py +0 -0
  44. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/setup.cfg +0 -0
  45. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/spark_nlp.egg-info/dependency_links.txt +0 -0
  46. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/spark_nlp.egg-info/top_level.txt +0 -0
  47. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotation.py +0 -0
  48. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotation_audio.py +0 -0
  49. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotation_image.py +0 -0
  50. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/__init__.py +0 -0
  51. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/audio/__init__.py +0 -0
  52. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/audio/hubert_for_ctc.py +0 -0
  53. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/audio/wav2vec2_for_ctc.py +0 -0
  54. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/audio/whisper_for_ctc.py +0 -0
  55. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/chunk2_doc.py +0 -0
  56. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/chunker.py +0 -0
  57. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/__init__.py +0 -0
  58. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +0 -0
  59. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/albert_for_question_answering.py +0 -0
  60. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py +0 -0
  61. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/albert_for_token_classification.py +0 -0
  62. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +0 -0
  63. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py +0 -0
  64. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +0 -0
  65. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/bert_for_question_answering.py +0 -0
  66. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py +0 -0
  67. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/bert_for_token_classification.py +0 -0
  68. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py +0 -0
  69. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/camembert_for_question_answering.py +0 -0
  70. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py +0 -0
  71. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/camembert_for_token_classification.py +0 -0
  72. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +0 -0
  73. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/classifier_dl.py +0 -0
  74. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/deberta_for_question_answering.py +0 -0
  75. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py +0 -0
  76. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/deberta_for_token_classification.py +0 -0
  77. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +0 -0
  78. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py +0 -0
  79. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py +0 -0
  80. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py +0 -0
  81. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/distil_bert_for_zero_shot_classification.py +0 -0
  82. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +0 -0
  83. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/longformer_for_question_answering.py +0 -0
  84. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/longformer_for_sequence_classification.py +0 -0
  85. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/longformer_for_token_classification.py +0 -0
  86. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py +0 -0
  87. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py +0 -0
  88. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py +0 -0
  89. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/multi_classifier_dl.py +0 -0
  90. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +0 -0
  91. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/roberta_for_question_answering.py +0 -0
  92. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py +0 -0
  93. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/roberta_for_token_classification.py +0 -0
  94. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py +0 -0
  95. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/sentiment_dl.py +0 -0
  96. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/tapas_for_question_answering.py +0 -0
  97. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +0 -0
  98. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/xlm_roberta_for_question_answering.py +0 -0
  99. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/xlm_roberta_for_sequence_classification.py +0 -0
  100. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/xlm_roberta_for_token_classification.py +0 -0
  101. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/xlm_roberta_for_zero_shot_classification.py +0 -0
  102. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/xlnet_for_sequence_classification.py +0 -0
  103. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/classifier_dl/xlnet_for_token_classification.py +0 -0
  104. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cleaners/__init__.py +0 -0
  105. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cleaners/cleaner.py +0 -0
  106. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cleaners/extractor.py +0 -0
  107. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/coref/__init__.py +0 -0
  108. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/coref/spanbert_coref.py +0 -0
  109. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/blip_for_question_answering.py +0 -0
  110. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/clip_for_zero_shot_classification.py +0 -0
  111. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/convnext_for_image_classification.py +0 -0
  112. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/mllama_for_multimodal.py +0 -0
  113. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/swin_for_image_classification.py +0 -0
  114. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/vision_encoder_decoder_for_image_captioning.py +0 -0
  115. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/vit_for_image_classification.py +0 -0
  116. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/dependency/__init__.py +0 -0
  117. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/dependency/dependency_parser.py +0 -0
  118. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/dependency/typed_dependency_parser.py +0 -0
  119. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/document_normalizer.py +0 -0
  120. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/document_token_splitter_test.py +0 -0
  121. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/__init__.py +0 -0
  122. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/albert_embeddings.py +0 -0
  123. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/auto_gguf_embeddings.py +0 -0
  124. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/bert_embeddings.py +0 -0
  125. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/bert_sentence_embeddings.py +0 -0
  126. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/camembert_embeddings.py +0 -0
  127. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/chunk_embeddings.py +0 -0
  128. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/deberta_embeddings.py +0 -0
  129. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/distil_bert_embeddings.py +0 -0
  130. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/doc2vec.py +0 -0
  131. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/e5_embeddings.py +0 -0
  132. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/elmo_embeddings.py +0 -0
  133. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/instructor_embeddings.py +0 -0
  134. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/longformer_embeddings.py +0 -0
  135. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/mpnet_embeddings.py +0 -0
  136. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/mxbai_embeddings.py +0 -0
  137. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/nomic_embeddings.py +0 -0
  138. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/roberta_embeddings.py +0 -0
  139. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/roberta_sentence_embeddings.py +0 -0
  140. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/sentence_embeddings.py +0 -0
  141. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/uae_embeddings.py +0 -0
  142. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/universal_sentence_encoder.py +0 -0
  143. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/word2vec.py +0 -0
  144. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/word_embeddings.py +0 -0
  145. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/xlm_roberta_embeddings.py +0 -0
  146. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/xlm_roberta_sentence_embeddings.py +0 -0
  147. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/embeddings/xlnet_embeddings.py +0 -0
  148. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/er/__init__.py +0 -0
  149. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/er/entity_ruler.py +0 -0
  150. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/graph_extraction.py +0 -0
  151. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/keyword_extraction/__init__.py +0 -0
  152. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/keyword_extraction/yake_keyword_extraction.py +0 -0
  153. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/ld_dl/__init__.py +0 -0
  154. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/ld_dl/language_detector_dl.py +0 -0
  155. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/lemmatizer.py +0 -0
  156. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/matcher/__init__.py +0 -0
  157. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/matcher/big_text_matcher.py +0 -0
  158. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/matcher/date_matcher.py +0 -0
  159. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/matcher/multi_date_matcher.py +0 -0
  160. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/matcher/regex_matcher.py +0 -0
  161. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/matcher/text_matcher.py +0 -0
  162. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/n_gram_generator.py +0 -0
  163. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/ner/__init__.py +0 -0
  164. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/ner/ner_approach.py +0 -0
  165. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/ner/ner_converter.py +0 -0
  166. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/ner/ner_crf.py +0 -0
  167. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/ner/ner_dl.py +0 -0
  168. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/ner/ner_overwriter.py +0 -0
  169. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/ner/zero_shot_ner_model.py +0 -0
  170. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/normalizer.py +0 -0
  171. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/openai/__init__.py +0 -0
  172. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/openai/openai_embeddings.py +0 -0
  173. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/param/__init__.py +0 -0
  174. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/param/classifier_encoder.py +0 -0
  175. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/param/evaluation_dl_params.py +0 -0
  176. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/pos/__init__.py +0 -0
  177. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/pos/perceptron.py +0 -0
  178. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/sentence/__init__.py +0 -0
  179. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/sentence/sentence_detector.py +0 -0
  180. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/sentence/sentence_detector_dl.py +0 -0
  181. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/sentiment/__init__.py +0 -0
  182. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/sentiment/sentiment_detector.py +0 -0
  183. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/sentiment/vivekn_sentiment.py +0 -0
  184. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/__init__.py +0 -0
  185. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/auto_gguf_model.py +0 -0
  186. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +0 -0
  187. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/bart_transformer.py +0 -0
  188. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/cohere_transformer.py +0 -0
  189. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/cpm_transformer.py +0 -0
  190. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/gpt2_transformer.py +0 -0
  191. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/llama2_transformer.py +0 -0
  192. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/llama3_transformer.py +0 -0
  193. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/marian_transformer.py +0 -0
  194. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/olmo_transformer.py +0 -0
  195. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/phi2_transformer.py +0 -0
  196. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/phi3_transformer.py +0 -0
  197. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/starcoder_transformer.py +0 -0
  198. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/seq2seq/t5_transformer.py +0 -0
  199. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/similarity/__init__.py +0 -0
  200. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/similarity/document_similarity_ranker.py +0 -0
  201. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/spell_check/__init__.py +0 -0
  202. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/spell_check/norvig_sweeting.py +0 -0
  203. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/spell_check/symmetric_delete.py +0 -0
  204. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/stemmer.py +0 -0
  205. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/stop_words_cleaner.py +0 -0
  206. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/tf_ner_dl_graph_builder.py +0 -0
  207. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/token/__init__.py +0 -0
  208. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/token/chunk_tokenizer.py +0 -0
  209. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/token/recursive_tokenizer.py +0 -0
  210. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/token/regex_tokenizer.py +0 -0
  211. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/token/tokenizer.py +0 -0
  212. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/token2_chunk.py +0 -0
  213. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/ws/__init__.py +0 -0
  214. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/ws/word_segmenter.py +0 -0
  215. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/__init__.py +0 -0
  216. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/audio_assembler.py +0 -0
  217. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/doc2_chunk.py +0 -0
  218. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/document_assembler.py +0 -0
  219. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/embeddings_finisher.py +0 -0
  220. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/finisher.py +0 -0
  221. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/graph_finisher.py +0 -0
  222. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/has_recursive_fit.py +0 -0
  223. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/has_recursive_transform.py +0 -0
  224. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/image_assembler.py +0 -0
  225. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/light_pipeline.py +0 -0
  226. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/multi_document_assembler.py +0 -0
  227. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/recursive_pipeline.py +0 -0
  228. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/table_assembler.py +0 -0
  229. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/base/token_assembler.py +0 -0
  230. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/common/__init__.py +0 -0
  231. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/common/annotator_approach.py +0 -0
  232. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/common/annotator_model.py +0 -0
  233. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/common/annotator_properties.py +0 -0
  234. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/common/annotator_type.py +0 -0
  235. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/common/coverage_result.py +0 -0
  236. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/common/match_strategy.py +0 -0
  237. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/common/read_as.py +0 -0
  238. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/common/recursive_annotator_approach.py +0 -0
  239. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/common/storage.py +0 -0
  240. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/common/utils.py +0 -0
  241. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/functions.py +0 -0
  242. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/internal/annotator_java_ml.py +0 -0
  243. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/internal/annotator_transformer.py +0 -0
  244. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/internal/extended_java_wrapper.py +0 -0
  245. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/internal/params_getters_setters.py +0 -0
  246. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/internal/recursive.py +0 -0
  247. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/logging/__init__.py +0 -0
  248. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/logging/comet.py +0 -0
  249. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/pretrained/__init__.py +0 -0
  250. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/pretrained/pretrained_pipeline.py +0 -0
  251. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/pretrained/resource_downloader.py +0 -0
  252. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/pretrained/utils.py +0 -0
  253. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/reader/__init__.py +0 -0
  254. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/reader/enums.py +0 -0
  255. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/__init__.py +0 -0
  256. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/__init__.py +0 -0
  257. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/graph_builders.py +0 -0
  258. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/ner_dl/__init__.py +0 -0
  259. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/ner_dl/create_graph.py +0 -0
  260. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/ner_dl/dataset_encoder.py +0 -0
  261. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/ner_dl/ner_model.py +0 -0
  262. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/ner_dl/ner_model_saver.py +0 -0
  263. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/ner_dl/sentence_grouper.py +0 -0
  264. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/tf2contrib/__init__.py +0 -0
  265. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/tf2contrib/core_rnn_cell.py +0 -0
  266. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/tf2contrib/fused_rnn_cell.py +0 -0
  267. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/tf2contrib/gru_ops.py +0 -0
  268. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/tf2contrib/lstm_ops.py +0 -0
  269. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/tf2contrib/rnn.py +0 -0
  270. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders/tf2contrib/rnn_cell.py +0 -0
  271. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders_1x/__init__.py +0 -0
  272. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders_1x/graph_builders.py +0 -0
  273. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders_1x/ner_dl/__init__.py +0 -0
  274. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders_1x/ner_dl/create_graph.py +0 -0
  275. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py +0 -0
  276. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py +0 -0
  277. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py +0 -0
  278. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py +0 -0
  279. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/conll.py +0 -0
  280. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/conllu.py +0 -0
  281. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/pos.py +0 -0
  282. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/pub_tator.py +0 -0
  283. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/training/tfgraphs.py +0 -0
  284. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/upload_to_hub.py +0 -0
  285. {spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/util.py +0 -0

{spark_nlp-6.0.1 → spark_nlp-6.0.2}/PKG-INFO

@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.4
  Name: spark-nlp
- Version: 6.0.1
+ Version: 6.0.2
  Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
  Home-page: https://github.com/JohnSnowLabs/spark-nlp
  Author: John Snow Labs
@@ -29,6 +29,13 @@ Classifier: Topic :: Text Processing :: Linguistic
  Classifier: Topic :: Scientific/Engineering
  Classifier: Typing :: Typed
  Description-Content-Type: text/markdown
+ Dynamic: author
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: keywords
+ Dynamic: summary

  # Spark NLP: State-of-the-Art Natural Language Processing & LLMs Library

@@ -95,7 +102,7 @@ $ java -version
  $ conda create -n sparknlp python=3.7 -y
  $ conda activate sparknlp
  # spark-nlp by default is based on pyspark 3.x
- $ pip install spark-nlp==6.0.1 pyspark==3.3.1
+ $ pip install spark-nlp==6.0.2 pyspark==3.3.1
  ```

  In Python console or Jupyter `Python3` kernel:
@@ -161,7 +168,7 @@ For a quick example of using pipelines and models take a look at our official [d

  ### Apache Spark Support

- Spark NLP *6.0.1* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+ Spark NLP *6.0.2* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x

  | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
  |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
@@ -191,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http

  ### Databricks Support

- Spark NLP 6.0.1 has been tested and is compatible with the following runtimes:
+ Spark NLP 6.0.2 has been tested and is compatible with the following runtimes:

  | **CPU** | **GPU** |
  |--------------------|--------------------|
@@ -208,7 +215,7 @@ We are compatible with older runtimes. For a full list check databricks support

  ### EMR Support

- Spark NLP 6.0.1 has been tested and is compatible with the following EMR releases:
+ Spark NLP 6.0.2 has been tested and is compatible with the following EMR releases:

  | **EMR Release** |
  |--------------------|

{spark_nlp-6.0.1 → spark_nlp-6.0.2}/README.md

@@ -63,7 +63,7 @@ $ java -version
  $ conda create -n sparknlp python=3.7 -y
  $ conda activate sparknlp
  # spark-nlp by default is based on pyspark 3.x
- $ pip install spark-nlp==6.0.1 pyspark==3.3.1
+ $ pip install spark-nlp==6.0.2 pyspark==3.3.1
  ```

  In Python console or Jupyter `Python3` kernel:
@@ -129,7 +129,7 @@ For a quick example of using pipelines and models take a look at our official [d

  ### Apache Spark Support

- Spark NLP *6.0.1* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+ Spark NLP *6.0.2* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x

  | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
  |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
@@ -159,7 +159,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http

  ### Databricks Support

- Spark NLP 6.0.1 has been tested and is compatible with the following runtimes:
+ Spark NLP 6.0.2 has been tested and is compatible with the following runtimes:

  | **CPU** | **GPU** |
  |--------------------|--------------------|
@@ -176,7 +176,7 @@ We are compatible with older runtimes. For a full list check databricks support

  ### EMR Support

- Spark NLP 6.0.1 has been tested and is compatible with the following EMR releases:
+ Spark NLP 6.0.2 has been tested and is compatible with the following EMR releases:

  | **EMR Release** |
  |--------------------|

{spark_nlp-6.0.1 → spark_nlp-6.0.2}/setup.py

@@ -41,7 +41,7 @@ setup(
  # project code, see
  # https://packaging.python.org/en/latest/single_source_version.html

- version='6.0.1', # Required
+ version='6.0.2', # Required

  # This is a one-line description or tagline of what your project does. This
  # corresponds to the 'Summary' metadata field:

{spark_nlp-6.0.1 → spark_nlp-6.0.2}/spark_nlp.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.4
  Name: spark-nlp
- Version: 6.0.1
+ Version: 6.0.2
  Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
  Home-page: https://github.com/JohnSnowLabs/spark-nlp
  Author: John Snow Labs
@@ -29,6 +29,13 @@ Classifier: Topic :: Text Processing :: Linguistic
  Classifier: Topic :: Scientific/Engineering
  Classifier: Typing :: Typed
  Description-Content-Type: text/markdown
+ Dynamic: author
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: keywords
+ Dynamic: summary

  # Spark NLP: State-of-the-Art Natural Language Processing & LLMs Library

@@ -95,7 +102,7 @@ $ java -version
  $ conda create -n sparknlp python=3.7 -y
  $ conda activate sparknlp
  # spark-nlp by default is based on pyspark 3.x
- $ pip install spark-nlp==6.0.1 pyspark==3.3.1
+ $ pip install spark-nlp==6.0.2 pyspark==3.3.1
  ```

  In Python console or Jupyter `Python3` kernel:
@@ -161,7 +168,7 @@ For a quick example of using pipelines and models take a look at our official [d

  ### Apache Spark Support

- Spark NLP *6.0.1* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+ Spark NLP *6.0.2* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x

  | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
  |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
@@ -191,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http

  ### Databricks Support

- Spark NLP 6.0.1 has been tested and is compatible with the following runtimes:
+ Spark NLP 6.0.2 has been tested and is compatible with the following runtimes:

  | **CPU** | **GPU** |
  |--------------------|--------------------|
@@ -208,7 +215,7 @@ We are compatible with older runtimes. For a full list check databricks support

  ### EMR Support

- Spark NLP 6.0.1 has been tested and is compatible with the following EMR releases:
+ Spark NLP 6.0.2 has been tested and is compatible with the following EMR releases:

  | **EMR Release** |
  |--------------------|

{spark_nlp-6.0.1 → spark_nlp-6.0.2}/spark_nlp.egg-info/SOURCES.txt

@@ -93,7 +93,9 @@ sparknlp/annotator/cv/__init__.py
  sparknlp/annotator/cv/blip_for_question_answering.py
  sparknlp/annotator/cv/clip_for_zero_shot_classification.py
  sparknlp/annotator/cv/convnext_for_image_classification.py
+ sparknlp/annotator/cv/florence2_transformer.py
  sparknlp/annotator/cv/gemma3_for_multimodal.py
+ sparknlp/annotator/cv/internvl_for_multimodal.py
  sparknlp/annotator/cv/janus_for_multimodal.py
  sparknlp/annotator/cv/llava_for_multimodal.py
  sparknlp/annotator/cv/mllama_for_multimodal.py
@@ -237,6 +239,10 @@ sparknlp/internal/params_getters_setters.py
  sparknlp/internal/recursive.py
  sparknlp/logging/__init__.py
  sparknlp/logging/comet.py
+ sparknlp/partition/__init__.py
+ sparknlp/partition/partition.py
+ sparknlp/partition/partition_properties.py
+ sparknlp/partition/partition_transformer.py
  sparknlp/pretrained/__init__.py
  sparknlp/pretrained/pretrained_pipeline.py
  sparknlp/pretrained/resource_downloader.py

{spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/__init__.py

@@ -66,6 +66,8 @@ sys.modules['com.johnsnowlabs.ml.ai'] = annotator
  annotators = annotator
  embeddings = annotator

+ __version__ = "6.0.2"
+

  def start(gpu=False,
  apple_silicon=False,
@@ -132,7 +134,7 @@ def start(gpu=False,
  The initiated Spark session.

  """
- current_version = "6.0.1"
+ current_version = __version__

  if params is None:
  params = {}
@@ -316,4 +318,4 @@ def version():
  str
  The current Spark NLP version.
  """
- return '6.0.1'
+ return __version__
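
With the `sparknlp/__init__.py` change above, the release string lives in a single module-level `__version__` constant that both `start()` and `version()` read from. A minimal sketch of confirming the upgrade from a downstream script (assuming a working local Spark environment; the `gpu` flag is shown only as an example of the existing `start()` parameters):

```python
import sparknlp

# version() now returns the module-level __version__ constant ("6.0.2").
print(sparknlp.version())

# start() resolves the matching Spark NLP jar from the same constant.
spark = sparknlp.start(gpu=False)
print(spark.version)  # the underlying Apache Spark version
```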

{spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/__init__.py

@@ -25,3 +25,5 @@ from sparknlp.annotator.cv.phi3_vision_for_multimodal import *
  from sparknlp.annotator.cv.smolvlm_transformer import *
  from sparknlp.annotator.cv.paligemma_for_multimodal import *
  from sparknlp.annotator.cv.gemma3_for_multimodal import *
+ from sparknlp.annotator.cv.internvl_for_multimodal import *
+ from sparknlp.annotator.cv.florence2_transformer import *

spark_nlp-6.0.2/sparknlp/annotator/cv/florence2_transformer.py

@@ -0,0 +1,180 @@
+ # Copyright 2017-2024 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from sparknlp.common import *
+
+ class Florence2Transformer(AnnotatorModel,
+                            HasBatchedAnnotateImage,
+                            HasImageFeatureProperties,
+                            HasEngine):
+     """Florence2Transformer can load Florence-2 models for a variety of vision and vision-language tasks using prompt-based inference.
+
+     The model supports image captioning, object detection, segmentation, OCR, and more, using prompt tokens as described in the Florence-2 documentation.
+
+     Pretrained models can be loaded with :meth:`.pretrained` of the companion object:
+
+     >>> florence2 = Florence2Transformer.pretrained() \
+     ...     .setInputCols(["image_assembler"]) \
+     ...     .setOutputCol("answer")
+
+     The default model is ``"florence2_base_ft_int4"``, if no name is provided.
+
+     For available pretrained models please see the `Models Hub <https://sparknlp.org/models?task=Vision+Tasks>`__.
+
+     ====================== ======================
+     Input Annotation types Output Annotation type
+     ====================== ======================
+     ``IMAGE``              ``DOCUMENT``
+     ====================== ======================
+
+     Parameters
+     ----------
+     batchSize
+         Batch size. Large values allows faster processing but requires more memory, by default 2
+     maxOutputLength
+         Maximum length of output text, by default 200
+     minOutputLength
+         Minimum length of the sequence to be generated, by default 10
+     doSample
+         Whether or not to use sampling; use greedy decoding otherwise, by default False
+     temperature
+         The value used to module the next token probabilities, by default 1.0
+     topK
+         The number of highest probability vocabulary tokens to keep for top-k-filtering, by default 50
+     topP
+         If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or higher are kept for generation, by default 1.0
+     repetitionPenalty
+         The parameter for repetition penalty. 1.0 means no penalty, by default 1.0
+     noRepeatNgramSize
+         If set to int > 0, all ngrams of that size can only occur once, by default 3
+     ignoreTokenIds
+         A list of token ids which are ignored in the decoder's output, by default []
+     beamSize
+         The Number of beams for beam search, by default 1
+
+     Examples
+     --------
+     >>> import sparknlp
+     >>> from sparknlp.base import *
+     >>> from sparknlp.annotator import *
+     >>> from pyspark.ml import Pipeline
+     >>> image_df = spark.read.format("image").load(path=images_path)
+     >>> test_df = image_df.withColumn("text", lit("<OD>"))
+     >>> imageAssembler = ImageAssembler() \
+     ...     .setInputCol("image") \
+     ...     .setOutputCol("image_assembler")
+     >>> florence2 = Florence2Transformer.pretrained() \
+     ...     .setInputCols(["image_assembler"]) \
+     ...     .setOutputCol("answer")
+     >>> pipeline = Pipeline().setStages([
+     ...     imageAssembler,
+     ...     florence2
+     ... ])
+     >>> result = pipeline.fit(test_df).transform(test_df)
+     >>> result.select("image_assembler.origin", "answer.result").show(False)
+     """
+
+     name = "Florence2Transformer"
+
+     inputAnnotatorTypes = [AnnotatorType.IMAGE]
+     outputAnnotatorType = AnnotatorType.DOCUMENT
+
+     minOutputLength = Param(Params._dummy(), "minOutputLength", "Minimum length of the sequence to be generated", typeConverter=TypeConverters.toInt)
+     maxOutputLength = Param(Params._dummy(), "maxOutputLength", "Maximum length of output text", typeConverter=TypeConverters.toInt)
+     doSample = Param(Params._dummy(), "doSample", "Whether or not to use sampling; use greedy decoding otherwise", typeConverter=TypeConverters.toBoolean)
+     temperature = Param(Params._dummy(), "temperature", "The value used to module the next token probabilities", typeConverter=TypeConverters.toFloat)
+     topK = Param(Params._dummy(), "topK", "The number of highest probability vocabulary tokens to keep for top-k-filtering", typeConverter=TypeConverters.toInt)
+     topP = Param(Params._dummy(), "topP", "If set to float < 1, only the most probable tokens with probabilities that add up to top_p or higher are kept for generation", typeConverter=TypeConverters.toFloat)
+     repetitionPenalty = Param(Params._dummy(), "repetitionPenalty", "The parameter for repetition penalty. 1.0 means no penalty.", typeConverter=TypeConverters.toFloat)
+     noRepeatNgramSize = Param(Params._dummy(), "noRepeatNgramSize", "If set to int > 0, all ngrams of that size can only occur once", typeConverter=TypeConverters.toInt)
+     ignoreTokenIds = Param(Params._dummy(), "ignoreTokenIds", "A list of token ids which are ignored in the decoder's output", typeConverter=TypeConverters.toListInt)
+     beamSize = Param(Params._dummy(), "beamSize", "The Number of beams for beam search.", typeConverter=TypeConverters.toInt)
+     batchSize = Param(Params._dummy(), "batchSize", "Batch size. Large values allows faster processing but requires more memory", typeConverter=TypeConverters.toInt)
+
+     @keyword_only
+     def __init__(self, classname="com.johnsnowlabs.nlp.annotators.cv.Florence2Transformer", java_model=None):
+         super(Florence2Transformer, self).__init__(
+             classname=classname,
+             java_model=java_model
+         )
+         self._setDefault(
+             batchSize=2,
+             minOutputLength=10,
+             maxOutputLength=200,
+             doSample=False,
+             temperature=1.0,
+             topK=50,
+             topP=1.0,
+             repetitionPenalty=1.0,
+             noRepeatNgramSize=3,
+             ignoreTokenIds=[],
+             beamSize=1,
+         )
+
+     def setMinOutputLength(self, value):
+         """Sets minimum length of the sequence to be generated."""
+         return self._set(minOutputLength=value)
+
+     def setMaxOutputLength(self, value):
+         """Sets maximum length of output text."""
+         return self._set(maxOutputLength=value)
+
+     def setDoSample(self, value):
+         """Sets whether or not to use sampling; use greedy decoding otherwise."""
+         return self._set(doSample=value)
+
+     def setTemperature(self, value):
+         """Sets the value used to module the next token probabilities."""
+         return self._set(temperature=value)
+
+     def setTopK(self, value):
+         """Sets the number of highest probability vocabulary tokens to keep for top-k-filtering."""
+         return self._set(topK=value)
+
+     def setTopP(self, value):
+         """Sets the top cumulative probability for vocabulary tokens."""
+         return self._set(topP=value)
+
+     def setRepetitionPenalty(self, value):
+         """Sets the parameter for repetition penalty. 1.0 means no penalty."""
+         return self._set(repetitionPenalty=value)
+
+     def setNoRepeatNgramSize(self, value):
+         """Sets size of n-grams that can only occur once."""
+         return self._set(noRepeatNgramSize=value)
+
+     def setIgnoreTokenIds(self, value):
+         """A list of token ids which are ignored in the decoder's output."""
+         return self._set(ignoreTokenIds=value)
+
+     def setBeamSize(self, value):
+         """Sets the number of beams for beam search."""
+         return self._set(beamSize=value)
+
+     def setBatchSize(self, value):
+         """Sets the batch size."""
+         return self._set(batchSize=value)
+
+     @staticmethod
+     def loadSavedModel(folder, spark_session, use_openvino=False):
+         """Loads a locally saved model."""
+         from sparknlp.internal import _Florence2TransformerLoader
+         jModel = _Florence2TransformerLoader(folder, spark_session._jsparkSession, use_openvino)._java_obj
+         return Florence2Transformer(java_model=jModel)
+
+     @staticmethod
+     def pretrained(name="florence2_base_ft_int4", lang="en", remote_loc=None):
+         """Downloads and loads a pretrained model."""
+         from sparknlp.pretrained import ResourceDownloader
+         return ResourceDownloader.downloadModel(Florence2Transformer, name, lang, remote_loc)
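
Read as a plain script rather than a doctest, the docstring of the new `Florence2Transformer` above suggests a pipeline along these lines (a sketch only: the image directory path is a placeholder, and `<OD>` is the object-detection prompt token used in the docstring example):

```python
import sparknlp
from sparknlp.base import ImageAssembler
from sparknlp.annotator import Florence2Transformer
from pyspark.ml import Pipeline
from pyspark.sql.functions import lit

spark = sparknlp.start()

# Placeholder path to a folder of images.
image_df = spark.read.format("image").load("file:///tmp/images")
test_df = image_df.withColumn("text", lit("<OD>"))  # object-detection prompt

image_assembler = ImageAssembler() \
    .setInputCol("image") \
    .setOutputCol("image_assembler")

florence2 = Florence2Transformer.pretrained() \
    .setInputCols(["image_assembler"]) \
    .setOutputCol("answer")

pipeline = Pipeline().setStages([image_assembler, florence2])
result = pipeline.fit(test_df).transform(test_df)
result.select("image_assembler.origin", "answer.result").show(truncate=False)
```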

{spark_nlp-6.0.1 → spark_nlp-6.0.2}/sparknlp/annotator/cv/gemma3_for_multimodal.py

@@ -83,23 +83,18 @@ class Gemma3ForMultiModal(AnnotatorModel,
  >>> from sparknlp.annotator import *
  >>> from pyspark.ml import Pipeline
  >>> from pyspark.sql.functions import lit
- >>>
  >>> imageDF = spark.read.format("image").load(images_path)
- >>> testDF = imageDF.withColumn("text", lit("<bos><start_of_turn>user\nYou are a helpful assistant.\n\n<start_of_image>Describe this image in detail.<end_of_turn>\n<start_of_turn>model\n"))
- >>>
- >>> imageAssembler = ImageAssembler() \
- ... .setInputCol("image") \
+ >>> testDF = imageDF.withColumn("text", lit("<bos><start_of_turn>user\\nYou are a helpful assistant.\\n\\n<start_of_image>Describe this image in detail.<end_of_turn>\\n<start_of_turn>model\\n"))
+ >>> imageAssembler = ImageAssembler() \\
+ ... .setInputCol("image") \\
  ... .setOutputCol("image_assembler")
- >>>
- >>> visualQA = Gemma3ForMultiModal.pretrained() \
- ... .setInputCols("image_assembler") \
+ >>> visualQA = Gemma3ForMultiModal.pretrained() \\
+ ... .setInputCols("image_assembler") \\
  ... .setOutputCol("answer")
- >>>
  >>> pipeline = Pipeline().setStages([
  ... imageAssembler,
  ... visualQA
  ... ])
- >>>
  >>> result = pipeline.fit(testDF).transform(testDF)
  >>> result.select("image_assembler.origin", "answer.result").show(truncate=False)
  """