spark-nlp 6.1.0.tar.gz → 6.1.1.tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of spark-nlp might be problematic.

Files changed (290)
  1. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/PKG-INFO +12 -11
  2. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/README.md +11 -10
  3. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/setup.py +1 -1
  4. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/spark_nlp.egg-info/PKG-INFO +12 -11
  5. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/spark_nlp.egg-info/SOURCES.txt +1 -0
  6. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/__init__.py +1 -1
  7. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/auto_gguf_embeddings.py +4 -12
  8. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/auto_gguf_model.py +7 -6
  9. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +3 -3
  10. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/common/properties.py +25 -30
  11. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/reader/reader2doc.py +25 -9
  12. spark_nlp-6.1.1/sparknlp/reader/reader2table.py +163 -0
  13. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/com/__init__.py +0 -0
  14. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/com/johnsnowlabs/__init__.py +0 -0
  15. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/com/johnsnowlabs/ml/__init__.py +0 -0
  16. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/com/johnsnowlabs/ml/ai/__init__.py +0 -0
  17. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/com/johnsnowlabs/nlp/__init__.py +0 -0
  18. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/setup.cfg +0 -0
  19. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/spark_nlp.egg-info/dependency_links.txt +0 -0
  20. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/spark_nlp.egg-info/top_level.txt +0 -0
  21. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotation.py +0 -0
  22. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotation_audio.py +0 -0
  23. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotation_image.py +0 -0
  24. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/__init__.py +0 -0
  25. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/audio/__init__.py +0 -0
  26. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/audio/hubert_for_ctc.py +0 -0
  27. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/audio/wav2vec2_for_ctc.py +0 -0
  28. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/audio/whisper_for_ctc.py +0 -0
  29. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/chunk2_doc.py +0 -0
  30. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/chunker.py +0 -0
  31. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/__init__.py +0 -0
  32. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +0 -0
  33. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/albert_for_question_answering.py +0 -0
  34. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py +0 -0
  35. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/albert_for_token_classification.py +0 -0
  36. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +0 -0
  37. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py +0 -0
  38. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +0 -0
  39. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/bert_for_question_answering.py +0 -0
  40. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py +0 -0
  41. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/bert_for_token_classification.py +0 -0
  42. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py +0 -0
  43. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/camembert_for_question_answering.py +0 -0
  44. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py +0 -0
  45. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/camembert_for_token_classification.py +0 -0
  46. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +0 -0
  47. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/classifier_dl.py +0 -0
  48. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/deberta_for_question_answering.py +0 -0
  49. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py +0 -0
  50. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/deberta_for_token_classification.py +0 -0
  51. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +0 -0
  52. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py +0 -0
  53. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py +0 -0
  54. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py +0 -0
  55. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/distil_bert_for_zero_shot_classification.py +0 -0
  56. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +0 -0
  57. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/longformer_for_question_answering.py +0 -0
  58. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/longformer_for_sequence_classification.py +0 -0
  59. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/longformer_for_token_classification.py +0 -0
  60. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py +0 -0
  61. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py +0 -0
  62. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py +0 -0
  63. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/multi_classifier_dl.py +0 -0
  64. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +0 -0
  65. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/roberta_for_question_answering.py +0 -0
  66. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py +0 -0
  67. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/roberta_for_token_classification.py +0 -0
  68. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py +0 -0
  69. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/sentiment_dl.py +0 -0
  70. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/tapas_for_question_answering.py +0 -0
  71. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +0 -0
  72. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_question_answering.py +0 -0
  73. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_sequence_classification.py +0 -0
  74. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_token_classification.py +0 -0
  75. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/xlm_roberta_for_zero_shot_classification.py +0 -0
  76. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/xlnet_for_sequence_classification.py +0 -0
  77. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/classifier_dl/xlnet_for_token_classification.py +0 -0
  78. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cleaners/__init__.py +0 -0
  79. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cleaners/cleaner.py +0 -0
  80. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cleaners/extractor.py +0 -0
  81. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/coref/__init__.py +0 -0
  82. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/coref/spanbert_coref.py +0 -0
  83. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/__init__.py +0 -0
  84. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/blip_for_question_answering.py +0 -0
  85. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/clip_for_zero_shot_classification.py +0 -0
  86. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/convnext_for_image_classification.py +0 -0
  87. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/florence2_transformer.py +0 -0
  88. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/gemma3_for_multimodal.py +0 -0
  89. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/internvl_for_multimodal.py +0 -0
  90. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/janus_for_multimodal.py +0 -0
  91. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/llava_for_multimodal.py +0 -0
  92. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/mllama_for_multimodal.py +0 -0
  93. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/paligemma_for_multimodal.py +0 -0
  94. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/phi3_vision_for_multimodal.py +0 -0
  95. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/qwen2vl_transformer.py +0 -0
  96. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/smolvlm_transformer.py +0 -0
  97. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/swin_for_image_classification.py +0 -0
  98. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/vision_encoder_decoder_for_image_captioning.py +0 -0
  99. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/cv/vit_for_image_classification.py +0 -0
  100. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/dataframe_optimizer.py +0 -0
  101. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/date2_chunk.py +0 -0
  102. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/dependency/__init__.py +0 -0
  103. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/dependency/dependency_parser.py +0 -0
  104. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/dependency/typed_dependency_parser.py +0 -0
  105. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/document_character_text_splitter.py +0 -0
  106. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/document_normalizer.py +0 -0
  107. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/document_token_splitter.py +0 -0
  108. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/document_token_splitter_test.py +0 -0
  109. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/__init__.py +0 -0
  110. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/albert_embeddings.py +0 -0
  111. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/bert_embeddings.py +0 -0
  112. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/bert_sentence_embeddings.py +0 -0
  113. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/bge_embeddings.py +0 -0
  114. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/camembert_embeddings.py +0 -0
  115. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/chunk_embeddings.py +0 -0
  116. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/deberta_embeddings.py +0 -0
  117. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/distil_bert_embeddings.py +0 -0
  118. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/doc2vec.py +0 -0
  119. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/e5_embeddings.py +0 -0
  120. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/e5v_embeddings.py +0 -0
  121. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/elmo_embeddings.py +0 -0
  122. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/instructor_embeddings.py +0 -0
  123. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/longformer_embeddings.py +0 -0
  124. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/minilm_embeddings.py +0 -0
  125. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/mpnet_embeddings.py +0 -0
  126. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/mxbai_embeddings.py +0 -0
  127. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/nomic_embeddings.py +0 -0
  128. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/roberta_embeddings.py +0 -0
  129. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/roberta_sentence_embeddings.py +0 -0
  130. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/sentence_embeddings.py +0 -0
  131. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/snowflake_embeddings.py +0 -0
  132. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/uae_embeddings.py +0 -0
  133. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/universal_sentence_encoder.py +0 -0
  134. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/word2vec.py +0 -0
  135. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/word_embeddings.py +0 -0
  136. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/xlm_roberta_embeddings.py +0 -0
  137. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/xlm_roberta_sentence_embeddings.py +0 -0
  138. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/xlnet_embeddings.py +0 -0
  139. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/er/__init__.py +0 -0
  140. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/er/entity_ruler.py +0 -0
  141. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/graph_extraction.py +0 -0
  142. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/keyword_extraction/__init__.py +0 -0
  143. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/keyword_extraction/yake_keyword_extraction.py +0 -0
  144. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/ld_dl/__init__.py +0 -0
  145. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/ld_dl/language_detector_dl.py +0 -0
  146. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/lemmatizer.py +0 -0
  147. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/matcher/__init__.py +0 -0
  148. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/matcher/big_text_matcher.py +0 -0
  149. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/matcher/date_matcher.py +0 -0
  150. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/matcher/multi_date_matcher.py +0 -0
  151. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/matcher/regex_matcher.py +0 -0
  152. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/matcher/text_matcher.py +0 -0
  153. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/n_gram_generator.py +0 -0
  154. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/ner/__init__.py +0 -0
  155. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/ner/ner_approach.py +0 -0
  156. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/ner/ner_converter.py +0 -0
  157. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/ner/ner_crf.py +0 -0
  158. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/ner/ner_dl.py +0 -0
  159. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/ner/ner_overwriter.py +0 -0
  160. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/ner/zero_shot_ner_model.py +0 -0
  161. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/normalizer.py +0 -0
  162. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/openai/__init__.py +0 -0
  163. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/openai/openai_completion.py +0 -0
  164. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/openai/openai_embeddings.py +0 -0
  165. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/param/__init__.py +0 -0
  166. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/param/classifier_encoder.py +0 -0
  167. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/param/evaluation_dl_params.py +0 -0
  168. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/pos/__init__.py +0 -0
  169. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/pos/perceptron.py +0 -0
  170. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/sentence/__init__.py +0 -0
  171. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/sentence/sentence_detector.py +0 -0
  172. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/sentence/sentence_detector_dl.py +0 -0
  173. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/sentiment/__init__.py +0 -0
  174. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/sentiment/sentiment_detector.py +0 -0
  175. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/sentiment/vivekn_sentiment.py +0 -0
  176. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/__init__.py +0 -0
  177. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/bart_transformer.py +0 -0
  178. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/cohere_transformer.py +0 -0
  179. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/cpm_transformer.py +0 -0
  180. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/gpt2_transformer.py +0 -0
  181. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/llama2_transformer.py +0 -0
  182. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/llama3_transformer.py +0 -0
  183. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/m2m100_transformer.py +0 -0
  184. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/marian_transformer.py +0 -0
  185. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/mistral_transformer.py +0 -0
  186. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/nllb_transformer.py +0 -0
  187. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/olmo_transformer.py +0 -0
  188. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/phi2_transformer.py +0 -0
  189. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/phi3_transformer.py +0 -0
  190. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/phi4_transformer.py +0 -0
  191. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/qwen_transformer.py +0 -0
  192. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/starcoder_transformer.py +0 -0
  193. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/t5_transformer.py +0 -0
  194. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/similarity/__init__.py +0 -0
  195. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/similarity/document_similarity_ranker.py +0 -0
  196. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/spell_check/__init__.py +0 -0
  197. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/spell_check/context_spell_checker.py +0 -0
  198. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/spell_check/norvig_sweeting.py +0 -0
  199. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/spell_check/symmetric_delete.py +0 -0
  200. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/stemmer.py +0 -0
  201. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/stop_words_cleaner.py +0 -0
  202. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/tf_ner_dl_graph_builder.py +0 -0
  203. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/token/__init__.py +0 -0
  204. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/token/chunk_tokenizer.py +0 -0
  205. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/token/recursive_tokenizer.py +0 -0
  206. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/token/regex_tokenizer.py +0 -0
  207. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/token/tokenizer.py +0 -0
  208. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/token2_chunk.py +0 -0
  209. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/ws/__init__.py +0 -0
  210. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/ws/word_segmenter.py +0 -0
  211. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/__init__.py +0 -0
  212. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/audio_assembler.py +0 -0
  213. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/doc2_chunk.py +0 -0
  214. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/document_assembler.py +0 -0
  215. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/embeddings_finisher.py +0 -0
  216. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/finisher.py +0 -0
  217. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/graph_finisher.py +0 -0
  218. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/has_recursive_fit.py +0 -0
  219. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/has_recursive_transform.py +0 -0
  220. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/image_assembler.py +0 -0
  221. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/light_pipeline.py +0 -0
  222. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/multi_document_assembler.py +0 -0
  223. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/prompt_assembler.py +0 -0
  224. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/recursive_pipeline.py +0 -0
  225. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/table_assembler.py +0 -0
  226. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/base/token_assembler.py +0 -0
  227. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/common/__init__.py +0 -0
  228. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/common/annotator_approach.py +0 -0
  229. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/common/annotator_model.py +0 -0
  230. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/common/annotator_properties.py +0 -0
  231. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/common/annotator_type.py +0 -0
  232. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/common/coverage_result.py +0 -0
  233. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/common/match_strategy.py +0 -0
  234. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/common/read_as.py +0 -0
  235. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/common/recursive_annotator_approach.py +0 -0
  236. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/common/storage.py +0 -0
  237. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/common/utils.py +0 -0
  238. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/functions.py +0 -0
  239. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/internal/__init__.py +0 -0
  240. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/internal/annotator_java_ml.py +0 -0
  241. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/internal/annotator_transformer.py +0 -0
  242. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/internal/extended_java_wrapper.py +0 -0
  243. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/internal/params_getters_setters.py +0 -0
  244. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/internal/recursive.py +0 -0
  245. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/logging/__init__.py +0 -0
  246. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/logging/comet.py +0 -0
  247. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/partition/__init__.py +0 -0
  248. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/partition/partition.py +0 -0
  249. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/partition/partition_properties.py +0 -0
  250. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/partition/partition_transformer.py +0 -0
  251. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/pretrained/__init__.py +0 -0
  252. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/pretrained/pretrained_pipeline.py +0 -0
  253. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/pretrained/resource_downloader.py +0 -0
  254. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/pretrained/utils.py +0 -0
  255. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/reader/__init__.py +0 -0
  256. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/reader/enums.py +0 -0
  257. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/reader/pdf_to_text.py +0 -0
  258. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/reader/sparknlp_reader.py +0 -0
  259. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/__init__.py +0 -0
  260. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/__init__.py +0 -0
  261. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/graph_builders.py +0 -0
  262. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/ner_dl/__init__.py +0 -0
  263. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/ner_dl/create_graph.py +0 -0
  264. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/ner_dl/dataset_encoder.py +0 -0
  265. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/ner_dl/ner_model.py +0 -0
  266. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/ner_dl/ner_model_saver.py +0 -0
  267. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/ner_dl/sentence_grouper.py +0 -0
  268. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/tf2contrib/__init__.py +0 -0
  269. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/tf2contrib/core_rnn_cell.py +0 -0
  270. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/tf2contrib/fused_rnn_cell.py +0 -0
  271. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/tf2contrib/gru_ops.py +0 -0
  272. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/tf2contrib/lstm_ops.py +0 -0
  273. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/tf2contrib/rnn.py +0 -0
  274. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders/tf2contrib/rnn_cell.py +0 -0
  275. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/__init__.py +0 -0
  276. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/graph_builders.py +0 -0
  277. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/__init__.py +0 -0
  278. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/create_graph.py +0 -0
  279. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py +0 -0
  280. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py +0 -0
  281. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py +0 -0
  282. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py +0 -0
  283. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/conll.py +0 -0
  284. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/conllu.py +0 -0
  285. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/pos.py +0 -0
  286. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/pub_tator.py +0 -0
  287. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/spacy_to_annotation.py +0 -0
  288. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/training/tfgraphs.py +0 -0
  289. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/upload_to_hub.py +0 -0
  290. {spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/util.py +0 -0
{spark_nlp-6.1.0 → spark_nlp-6.1.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: spark-nlp
- Version: 6.1.0
+ Version: 6.1.1
  Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
  Home-page: https://github.com/JohnSnowLabs/spark-nlp
  Author: John Snow Labs
@@ -58,7 +58,7 @@ Dynamic: summary

  Spark NLP is a state-of-the-art Natural Language Processing library built on top of Apache Spark. It provides **simple**, **performant** & **accurate** NLP annotations for machine learning pipelines that **scale** easily in a distributed environment.

- Spark NLP comes with **83000+** pretrained **pipelines** and **models** in more than **200+** languages.
+ Spark NLP comes with **100000+** pretrained **pipelines** and **models** in more than **200+** languages.
  It also offers tasks such as **Tokenization**, **Word Segmentation**, **Part-of-Speech Tagging**, Word and Sentence **Embeddings**, **Named Entity Recognition**, **Dependency Parsing**, **Spell Checking**, **Text Classification**, **Sentiment Analysis**, **Token Classification**, **Machine Translation** (+180 languages), **Summarization**, **Question Answering**, **Table Question Answering**, **Text Generation**, **Image Classification**, **Image to Text (captioning)**, **Automatic Speech Recognition**, **Zero-Shot Learning**, and many more [NLP tasks](#features).

  **Spark NLP** is the only open-source NLP library in **production** that offers state-of-the-art transformers such as **BERT**, **CamemBERT**, **ALBERT**, **ELECTRA**, **XLNet**, **DistilBERT**, **RoBERTa**, **DeBERTa**, **XLM-RoBERTa**, **Longformer**, **ELMO**, **Universal Sentence Encoder**, **Llama-2**, **M2M100**, **BART**, **Instructor**, **E5**, **Google T5**, **MarianMT**, **OpenAI GPT2**, **Vision Transformers (ViT)**, **OpenAI Whisper**, **Llama**, **Mistral**, **Phi**, **Qwen2**, and many more not only to **Python** and **R**, but also to **JVM** ecosystem (**Java**, **Scala**, and **Kotlin**) at **scale** by extending **Apache Spark** natively.
@@ -102,7 +102,7 @@ $ java -version
  $ conda create -n sparknlp python=3.7 -y
  $ conda activate sparknlp
  # spark-nlp by default is based on pyspark 3.x
- $ pip install spark-nlp==6.1.0 pyspark==3.3.1
+ $ pip install spark-nlp==6.1.1 pyspark==3.3.1
  ```

  In Python console or Jupyter `Python3` kernel:
@@ -168,11 +168,11 @@ For a quick example of using pipelines and models take a look at our official [d

  ### Apache Spark Support

- Spark NLP *6.1.0* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+ Spark NLP *6.1.1* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x

  | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
  |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
- | 6.0.x | YES | YES | YES | YES | YES | YES | NO | NO |
+ | 6.x.x and up | YES | YES | YES | YES | YES | YES | NO | NO |
  | 5.5.x | YES | YES | YES | YES | YES | YES | NO | NO |
  | 5.4.x | YES | YES | YES | YES | YES | YES | NO | NO |
  | 5.3.x | YES | YES | YES | YES | YES | YES | NO | NO |
@@ -198,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http

  ### Databricks Support

- Spark NLP 6.1.0 has been tested and is compatible with the following runtimes:
+ Spark NLP 6.1.1 has been tested and is compatible with the following runtimes:

  | **CPU** | **GPU** |
  |--------------------|--------------------|
@@ -206,16 +206,17 @@ Spark NLP 6.1.0 has been tested and is compatible with the following runtimes:
  | 14.2 / 14.2 ML | 14.2 ML & GPU |
  | 14.3 / 14.3 ML | 14.3 ML & GPU |
  | 15.0 / 15.0 ML | 15.0 ML & GPU |
- | 15.1 / 15.0 ML | 15.1 ML & GPU |
- | 15.2 / 15.0 ML | 15.2 ML & GPU |
- | 15.3 / 15.0 ML | 15.3 ML & GPU |
- | 15.4 / 15.0 ML | 15.4 ML & GPU |
+ | 15.1 / 15.1 ML | 15.1 ML & GPU |
+ | 15.2 / 15.2 ML | 15.2 ML & GPU |
+ | 15.3 / 15.3 ML | 15.3 ML & GPU |
+ | 15.4 / 15.4 ML | 15.4 ML & GPU |
+ | 16.4 / 16.4 ML | 16.4 ML & GPU |

  We are compatible with older runtimes. For a full list check databricks support in our official [documentation](https://sparknlp.org/docs/en/install#databricks-support)

  ### EMR Support

- Spark NLP 6.1.0 has been tested and is compatible with the following EMR releases:
+ Spark NLP 6.1.1 has been tested and is compatible with the following EMR releases:

  | **EMR Release** |
  |--------------------|
{spark_nlp-6.1.0 → spark_nlp-6.1.1}/README.md
@@ -19,7 +19,7 @@

  Spark NLP is a state-of-the-art Natural Language Processing library built on top of Apache Spark. It provides **simple**, **performant** & **accurate** NLP annotations for machine learning pipelines that **scale** easily in a distributed environment.

- Spark NLP comes with **83000+** pretrained **pipelines** and **models** in more than **200+** languages.
+ Spark NLP comes with **100000+** pretrained **pipelines** and **models** in more than **200+** languages.
  It also offers tasks such as **Tokenization**, **Word Segmentation**, **Part-of-Speech Tagging**, Word and Sentence **Embeddings**, **Named Entity Recognition**, **Dependency Parsing**, **Spell Checking**, **Text Classification**, **Sentiment Analysis**, **Token Classification**, **Machine Translation** (+180 languages), **Summarization**, **Question Answering**, **Table Question Answering**, **Text Generation**, **Image Classification**, **Image to Text (captioning)**, **Automatic Speech Recognition**, **Zero-Shot Learning**, and many more [NLP tasks](#features).

  **Spark NLP** is the only open-source NLP library in **production** that offers state-of-the-art transformers such as **BERT**, **CamemBERT**, **ALBERT**, **ELECTRA**, **XLNet**, **DistilBERT**, **RoBERTa**, **DeBERTa**, **XLM-RoBERTa**, **Longformer**, **ELMO**, **Universal Sentence Encoder**, **Llama-2**, **M2M100**, **BART**, **Instructor**, **E5**, **Google T5**, **MarianMT**, **OpenAI GPT2**, **Vision Transformers (ViT)**, **OpenAI Whisper**, **Llama**, **Mistral**, **Phi**, **Qwen2**, and many more not only to **Python** and **R**, but also to **JVM** ecosystem (**Java**, **Scala**, and **Kotlin**) at **scale** by extending **Apache Spark** natively.
@@ -63,7 +63,7 @@ $ java -version
  $ conda create -n sparknlp python=3.7 -y
  $ conda activate sparknlp
  # spark-nlp by default is based on pyspark 3.x
- $ pip install spark-nlp==6.1.0 pyspark==3.3.1
+ $ pip install spark-nlp==6.1.1 pyspark==3.3.1
  ```

  In Python console or Jupyter `Python3` kernel:
@@ -129,11 +129,11 @@ For a quick example of using pipelines and models take a look at our official [d

  ### Apache Spark Support

- Spark NLP *6.1.0* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+ Spark NLP *6.1.1* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x

  | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
  |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
- | 6.0.x | YES | YES | YES | YES | YES | YES | NO | NO |
+ | 6.x.x and up | YES | YES | YES | YES | YES | YES | NO | NO |
  | 5.5.x | YES | YES | YES | YES | YES | YES | NO | NO |
  | 5.4.x | YES | YES | YES | YES | YES | YES | NO | NO |
  | 5.3.x | YES | YES | YES | YES | YES | YES | NO | NO |
@@ -159,7 +159,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http

  ### Databricks Support

- Spark NLP 6.1.0 has been tested and is compatible with the following runtimes:
+ Spark NLP 6.1.1 has been tested and is compatible with the following runtimes:

  | **CPU** | **GPU** |
  |--------------------|--------------------|
@@ -167,16 +167,17 @@ Spark NLP 6.1.0 has been tested and is compatible with the following runtimes:
  | 14.2 / 14.2 ML | 14.2 ML & GPU |
  | 14.3 / 14.3 ML | 14.3 ML & GPU |
  | 15.0 / 15.0 ML | 15.0 ML & GPU |
- | 15.1 / 15.0 ML | 15.1 ML & GPU |
- | 15.2 / 15.0 ML | 15.2 ML & GPU |
- | 15.3 / 15.0 ML | 15.3 ML & GPU |
- | 15.4 / 15.0 ML | 15.4 ML & GPU |
+ | 15.1 / 15.1 ML | 15.1 ML & GPU |
+ | 15.2 / 15.2 ML | 15.2 ML & GPU |
+ | 15.3 / 15.3 ML | 15.3 ML & GPU |
+ | 15.4 / 15.4 ML | 15.4 ML & GPU |
+ | 16.4 / 16.4 ML | 16.4 ML & GPU |

  We are compatible with older runtimes. For a full list check databricks support in our official [documentation](https://sparknlp.org/docs/en/install#databricks-support)

  ### EMR Support

- Spark NLP 6.1.0 has been tested and is compatible with the following EMR releases:
+ Spark NLP 6.1.1 has been tested and is compatible with the following EMR releases:

  | **EMR Release** |
  |--------------------|
{spark_nlp-6.1.0 → spark_nlp-6.1.1}/setup.py
@@ -41,7 +41,7 @@ setup(
  # project code, see
  # https://packaging.python.org/en/latest/single_source_version.html

- version='6.1.0', # Required
+ version='6.1.1', # Required

  # This is a one-line description or tagline of what your project does. This
  # corresponds to the 'Summary' metadata field:
{spark_nlp-6.1.0 → spark_nlp-6.1.1}/spark_nlp.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: spark-nlp
- Version: 6.1.0
+ Version: 6.1.1
  Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
  Home-page: https://github.com/JohnSnowLabs/spark-nlp
  Author: John Snow Labs
@@ -58,7 +58,7 @@ Dynamic: summary

  Spark NLP is a state-of-the-art Natural Language Processing library built on top of Apache Spark. It provides **simple**, **performant** & **accurate** NLP annotations for machine learning pipelines that **scale** easily in a distributed environment.

- Spark NLP comes with **83000+** pretrained **pipelines** and **models** in more than **200+** languages.
+ Spark NLP comes with **100000+** pretrained **pipelines** and **models** in more than **200+** languages.
  It also offers tasks such as **Tokenization**, **Word Segmentation**, **Part-of-Speech Tagging**, Word and Sentence **Embeddings**, **Named Entity Recognition**, **Dependency Parsing**, **Spell Checking**, **Text Classification**, **Sentiment Analysis**, **Token Classification**, **Machine Translation** (+180 languages), **Summarization**, **Question Answering**, **Table Question Answering**, **Text Generation**, **Image Classification**, **Image to Text (captioning)**, **Automatic Speech Recognition**, **Zero-Shot Learning**, and many more [NLP tasks](#features).

  **Spark NLP** is the only open-source NLP library in **production** that offers state-of-the-art transformers such as **BERT**, **CamemBERT**, **ALBERT**, **ELECTRA**, **XLNet**, **DistilBERT**, **RoBERTa**, **DeBERTa**, **XLM-RoBERTa**, **Longformer**, **ELMO**, **Universal Sentence Encoder**, **Llama-2**, **M2M100**, **BART**, **Instructor**, **E5**, **Google T5**, **MarianMT**, **OpenAI GPT2**, **Vision Transformers (ViT)**, **OpenAI Whisper**, **Llama**, **Mistral**, **Phi**, **Qwen2**, and many more not only to **Python** and **R**, but also to **JVM** ecosystem (**Java**, **Scala**, and **Kotlin**) at **scale** by extending **Apache Spark** natively.
@@ -102,7 +102,7 @@ $ java -version
  $ conda create -n sparknlp python=3.7 -y
  $ conda activate sparknlp
  # spark-nlp by default is based on pyspark 3.x
- $ pip install spark-nlp==6.1.0 pyspark==3.3.1
+ $ pip install spark-nlp==6.1.1 pyspark==3.3.1
  ```

  In Python console or Jupyter `Python3` kernel:
@@ -168,11 +168,11 @@ For a quick example of using pipelines and models take a look at our official [d

  ### Apache Spark Support

- Spark NLP *6.1.0* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+ Spark NLP *6.1.1* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x

  | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
  |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
- | 6.0.x | YES | YES | YES | YES | YES | YES | NO | NO |
+ | 6.x.x and up | YES | YES | YES | YES | YES | YES | NO | NO |
  | 5.5.x | YES | YES | YES | YES | YES | YES | NO | NO |
  | 5.4.x | YES | YES | YES | YES | YES | YES | NO | NO |
  | 5.3.x | YES | YES | YES | YES | YES | YES | NO | NO |
@@ -198,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http

  ### Databricks Support

- Spark NLP 6.1.0 has been tested and is compatible with the following runtimes:
+ Spark NLP 6.1.1 has been tested and is compatible with the following runtimes:

  | **CPU** | **GPU** |
  |--------------------|--------------------|
@@ -206,16 +206,17 @@ Spark NLP 6.1.0 has been tested and is compatible with the following runtimes:
  | 14.2 / 14.2 ML | 14.2 ML & GPU |
  | 14.3 / 14.3 ML | 14.3 ML & GPU |
  | 15.0 / 15.0 ML | 15.0 ML & GPU |
- | 15.1 / 15.0 ML | 15.1 ML & GPU |
- | 15.2 / 15.0 ML | 15.2 ML & GPU |
- | 15.3 / 15.0 ML | 15.3 ML & GPU |
- | 15.4 / 15.0 ML | 15.4 ML & GPU |
+ | 15.1 / 15.1 ML | 15.1 ML & GPU |
+ | 15.2 / 15.2 ML | 15.2 ML & GPU |
+ | 15.3 / 15.3 ML | 15.3 ML & GPU |
+ | 15.4 / 15.4 ML | 15.4 ML & GPU |
+ | 16.4 / 16.4 ML | 16.4 ML & GPU |

  We are compatible with older runtimes. For a full list check databricks support in our official [documentation](https://sparknlp.org/docs/en/install#databricks-support)

  ### EMR Support

- Spark NLP 6.1.0 has been tested and is compatible with the following EMR releases:
+ Spark NLP 6.1.1 has been tested and is compatible with the following EMR releases:

  | **EMR Release** |
  |--------------------|
{spark_nlp-6.1.0 → spark_nlp-6.1.1}/spark_nlp.egg-info/SOURCES.txt
@@ -255,6 +255,7 @@ sparknlp/reader/__init__.py
  sparknlp/reader/enums.py
  sparknlp/reader/pdf_to_text.py
  sparknlp/reader/reader2doc.py
+ sparknlp/reader/reader2table.py
  sparknlp/reader/sparknlp_reader.py
  sparknlp/training/__init__.py
  sparknlp/training/conll.py
{spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/__init__.py
@@ -66,7 +66,7 @@ sys.modules['com.johnsnowlabs.ml.ai'] = annotator
  annotators = annotator
  embeddings = annotator

- __version__ = "6.1.0"
+ __version__ = "6.1.1"


  def start(gpu=False,
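The only Python-source change in `sparknlp/__init__.py` is the `__version__` bump. A minimal sketch to confirm the installed version at runtime; `sparknlp.start()` and `sparknlp.version()` are the library's standard entry points:

```python
import sparknlp

# start() creates (or reuses) a SparkSession with the matching
# Spark NLP jar on the classpath
spark = sparknlp.start()

# version() returns the __version__ string bumped in this release
print(sparknlp.version())  # expected: 6.1.1
```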
{spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/embeddings/auto_gguf_embeddings.py
@@ -12,8 +12,6 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  """Contains classes for the AutoGGUFEmbeddings."""
- from typing import List
-
  from sparknlp.common import *


@@ -32,7 +30,7 @@ class AutoGGUFEmbeddings(AnnotatorModel, HasBatchedAnnotate):
  ... .setInputCols(["document"]) \\
  ... .setOutputCol("embeddings")

- The default model is ``"Nomic_Embed_Text_v1.5.Q8_0.gguf"``, if no name is provided.
+ The default model is ``"Qwen3_Embedding_0.6B_Q8_0_gguf"``, if no name is provided.

  For extended examples of usage, see the
  `AutoGGUFEmbeddingsTest <https://github.com/JohnSnowLabs/spark-nlp/tree/master/src/test/scala/com/johnsnowlabs/nlp/embeddings/AutoGGUFEmbeddingsTest.scala>`__
@@ -313,12 +311,6 @@ class AutoGGUFEmbeddings(AnnotatorModel, HasBatchedAnnotate):
  "Set the pooling type for embeddings, use model default if unspecified",
  typeConverter=TypeConverters.toString,
  )
- embedding = Param(
- Params._dummy(),
- "embedding",
- "Whether to load model with embedding support",
- typeConverter=TypeConverters.toBoolean,
- )

  flashAttention = Param(
  Params._dummy(),
  "flashAttention",
@@ -489,10 +481,10 @@ class AutoGGUFEmbeddings(AnnotatorModel, HasBatchedAnnotate):
  classname=classname, java_model=java_model
  )
  self._setDefault(
- embedding=True,
  nCtx=4096,
  nBatch=512,
  poolingType="MEAN",
+ nGpuLayers=99,
  )

  @staticmethod
@@ -517,13 +509,13 @@ class AutoGGUFEmbeddings(AnnotatorModel, HasBatchedAnnotate):
  return AutoGGUFEmbeddings(java_model=jModel)

  @staticmethod
- def pretrained(name="Nomic_Embed_Text_v1.5.Q8_0.gguf", lang="en", remote_loc=None):
+ def pretrained(name="Qwen3_Embedding_0.6B_Q8_0_gguf", lang="en", remote_loc=None):
  """Downloads and loads a pretrained model.

  Parameters
  ----------
  name : str, optional
- Name of the pretrained model, by default "Nomic_Embed_Text_v1.5.Q8_0.gguf"
+ Name of the pretrained model, by default "Qwen3_Embedding_0.6B_Q8_0_gguf"
  lang : str, optional
  Language of the pretrained model, by default "en"
  remote_loc : str, optional
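The embeddings annotator changes in three ways: the default checkpoint moves from Nomic to a Qwen3 GGUF build, the `embedding` Param disappears, and `nGpuLayers=99` becomes a default. A minimal pipeline sketch against the new defaults (column names are illustrative):

```python
from pyspark.ml import Pipeline
from sparknlp.base import DocumentAssembler
from sparknlp.annotator import AutoGGUFEmbeddings

document = DocumentAssembler() \
    .setInputCol("text") \
    .setOutputCol("document")

# pretrained() with no name now resolves to "Qwen3_Embedding_0.6B_Q8_0_gguf";
# no setEmbedding() call is needed (or available) anymore
embeddings = AutoGGUFEmbeddings.pretrained() \
    .setInputCols(["document"]) \
    .setOutputCol("embeddings")

pipeline = Pipeline(stages=[document, embeddings])
```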
{spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/auto_gguf_model.py
@@ -37,7 +37,11 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties):
  ... .setInputCols(["document"]) \\
  ... .setOutputCol("completions")

- The default model is ``"phi3.5_mini_4k_instruct_q4_gguf"``, if no name is provided.
+ The default model is ``"Phi_4_mini_instruct_Q4_K_M_gguf"``, if no name is provided.
+
+ AutoGGUFModel is also able to load pretrained models from AutoGGUFVisionModel. Just
+ specify the same name for the pretrained method, and it will load the text-part of the
+ multimodal model automatically.

  For extended examples of usage, see the
  `AutoGGUFModelTest <https://github.com/JohnSnowLabs/spark-nlp/tree/master/src/test/scala/com/johnsnowlabs/nlp/annotators/seq2seq/AutoGGUFModelTest.scala>`__
@@ -120,8 +124,6 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties):
  Set path to static lookup cache to use for lookup decoding (not updated by generation)
  lookupCacheDynamicFilePath
  Set path to dynamic lookup cache to use for lookup decoding (updated by generation)
- embedding
- Whether to load model with embedding support
  flashAttention
  Whether to enable Flash Attention
  inputPrefixBos
@@ -252,7 +254,6 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties):
  useChatTemplate=True,
  nCtx=4096,
  nBatch=512,
- embedding=False,
  nPredict=100,
  nGpuLayers=99,
  systemPrompt="You are a helpful assistant."
@@ -279,13 +280,13 @@ class AutoGGUFModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppProperties):
  return AutoGGUFModel(java_model=jModel)

  @staticmethod
- def pretrained(name="phi3.5_mini_4k_instruct_q4_gguf", lang="en", remote_loc=None):
+ def pretrained(name="Phi_4_mini_instruct_Q4_K_M_gguf", lang="en", remote_loc=None):
  """Downloads and loads a pretrained model.

  Parameters
  ----------
  name : str, optional
- Name of the pretrained model, by default "phi3.5_mini_4k_instruct_q4_gguf"
+ Name of the pretrained model, by default "Phi_4_mini_instruct_Q4_K_M_gguf"
  lang : str, optional
  Language of the pretrained model, by default "en"
  remote_loc : str, optional
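Besides the new default checkpoint, the added docstring carries the behavioral note worth showing: an `AutoGGUFModel` can now load the text part of an `AutoGGUFVisionModel` checkpoint by name. A hedged sketch of both uses, with the model names taken from the defaults in this diff:

```python
from sparknlp.annotator import AutoGGUFModel

# Default text model in 6.1.1: Phi_4_mini_instruct_Q4_K_M_gguf
llm = AutoGGUFModel.pretrained() \
    .setInputCols(["document"]) \
    .setOutputCol("completions")

# Per the new docstring, passing an AutoGGUFVisionModel name loads
# just the text part of that multimodal checkpoint
text_only = AutoGGUFModel.pretrained("Qwen2.5_VL_3B_Instruct_Q4_K_M_gguf") \
    .setInputCols(["document"]) \
    .setOutputCol("completions")
```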
{spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/annotator/seq2seq/auto_gguf_vision_model.py
@@ -43,7 +43,7 @@ class AutoGGUFVisionModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppPropert
  .setOutputCol("completions")


- The default model is ``"llava_v1.5_7b_Q4_0_gguf"``, if no name is provided.
+ The default model is ``"Qwen2.5_VL_3B_Instruct_Q4_K_M_gguf"``, if no name is provided.

  For available pretrained models please see the `Models Hub <https://sparknlp.org/models>`__.

@@ -311,13 +311,13 @@ class AutoGGUFVisionModel(AnnotatorModel, HasBatchedAnnotate, HasLlamaCppPropert
  return AutoGGUFVisionModel(java_model=jModel)

  @staticmethod
- def pretrained(name="llava_v1.5_7b_Q4_0_gguf", lang="en", remote_loc=None):
+ def pretrained(name="Qwen2.5_VL_3B_Instruct_Q4_K_M_gguf", lang="en", remote_loc=None):
  """Downloads and loads a pretrained model.

  Parameters
  ----------
  name : str, optional
- Name of the pretrained model, by default "llava_v1.5_7b_Q4_0_gguf"
+ Name of the pretrained model, by default "Qwen2.5_VL_3B_Instruct_Q4_K_M_gguf"
  lang : str, optional
  Language of the pretrained model, by default "en"
  remote_loc : str, optional
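The vision default likewise moves from LLaVA 1.5 to a Qwen2.5-VL checkpoint. A loading sketch; the input column names follow the annotator's usual caption/image convention and are an assumption here, since they are not shown in this diff:

```python
from sparknlp.annotator import AutoGGUFVisionModel

# Default multimodal model in 6.1.1: Qwen2.5_VL_3B_Instruct_Q4_K_M_gguf
# ("caption_document" and "image_assembler" are assumed column names)
vision = AutoGGUFVisionModel.pretrained() \
    .setInputCols(["caption_document", "image_assembler"]) \
    .setOutputCol("completions")
```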
{spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/common/properties.py
@@ -628,7 +628,6 @@ class HasGeneratorProperties:
  "The number of sequences to return from the beam search.",
  typeConverter=TypeConverters.toInt)

-
  def setTask(self, value):
  """Sets the transformer's task, e.g. ``summarize:``.

@@ -639,7 +638,6 @@ class HasGeneratorProperties:
  """
  return self._set(task=value)

-
  def setMinOutputLength(self, value):
  """Sets minimum length of the sequence to be generated.

@@ -650,7 +648,6 @@ class HasGeneratorProperties:
  """
  return self._set(minOutputLength=value)

-
  def setMaxOutputLength(self, value):
  """Sets maximum length of output text.

@@ -661,7 +658,6 @@ class HasGeneratorProperties:
  """
  return self._set(maxOutputLength=value)

-
  def setDoSample(self, value):
  """Sets whether or not to use sampling, use greedy decoding otherwise.

@@ -672,7 +668,6 @@ class HasGeneratorProperties:
  """
  return self._set(doSample=value)

-
  def setTemperature(self, value):
  """Sets the value used to module the next token probabilities.

@@ -683,7 +678,6 @@ class HasGeneratorProperties:
  """
  return self._set(temperature=value)

-
  def setTopK(self, value):
  """Sets the number of highest probability vocabulary tokens to keep for
  top-k-filtering.
@@ -695,7 +689,6 @@ class HasGeneratorProperties:
  """
  return self._set(topK=value)

-
  def setTopP(self, value):
  """Sets the top cumulative probability for vocabulary tokens.

@@ -709,7 +702,6 @@ class HasGeneratorProperties:
  """
  return self._set(topP=value)

-
  def setRepetitionPenalty(self, value):
  """Sets the parameter for repetition penalty. 1.0 means no penalty.

@@ -725,7 +717,6 @@ class HasGeneratorProperties:
  """
  return self._set(repetitionPenalty=value)

-
  def setNoRepeatNgramSize(self, value):
  """Sets size of n-grams that can only occur once.

@@ -738,7 +729,6 @@ class HasGeneratorProperties:
  """
  return self._set(noRepeatNgramSize=value)

-
  def setBeamSize(self, value):
  """Sets the number of beam size for beam search.

@@ -749,7 +739,6 @@ class HasGeneratorProperties:
  """
  return self._set(beamSize=value)

-
  def setNReturnSequences(self, value):
  """Sets the number of sequences to return from the beam search.

@@ -845,11 +834,10 @@ class HasLlamaCppProperties:
  typeConverter=TypeConverters.toString)
  # Set the pooling type for embeddings, use model default if unspecified
  #
- # - 0 NONE: Don't use any pooling
- # - 1 MEAN: Mean Pooling
- # - 2 CLS: CLS Pooling
- # - 3 LAST: Last token pooling
- # - 4 RANK: For reranked models
+ # - MEAN: Mean Pooling
+ # - CLS: CLS Pooling
+ # - LAST: Last token pooling
+ # - RANK: For reranked models
  poolingType = Param(Params._dummy(), "poolingType",
  "Set the pooling type for embeddings, use model default if unspecified",
  typeConverter=TypeConverters.toString)
@@ -882,6 +870,10 @@ class HasLlamaCppProperties:
  typeConverter=TypeConverters.toString)
  chatTemplate = Param(Params._dummy(), "chatTemplate", "The chat template to use",
  typeConverter=TypeConverters.toString)
+ logVerbosity = Param(Params._dummy(), "logVerbosity", "Set the log verbosity level",
+ typeConverter=TypeConverters.toInt)
+ disableLog = Param(Params._dummy(), "disableLog", "Whether to disable logging",
+ typeConverter=TypeConverters.toBoolean)

  # -------- INFERENCE PARAMETERS --------
  inputPrefix = Param(Params._dummy(), "inputPrefix", "Set the prompt to start generation with",
@@ -1082,10 +1074,10 @@ class HasLlamaCppProperties:
  ropeScalingTypeUpper = ropeScalingType.upper()
  ropeScalingTypes = ["NONE", "LINEAR", "YARN"]
  if ropeScalingTypeUpper not in ropeScalingTypes:
- raise ValueError(
- f"Invalid RoPE scaling type: {ropeScalingType}. "
- + f"Valid values are: {ropeScalingTypes}"
- )
+ raise ValueError(
+ f"Invalid RoPE scaling type: {ropeScalingType}. "
+ + f"Valid values are: {ropeScalingTypes}"
+ )
  return self._set(ropeScalingType=ropeScalingTypeUpper)

  def setPoolingType(self, poolingType: str):
@@ -1093,11 +1085,10 @@ class HasLlamaCppProperties:


  Possible values:
- - 0 NONE: Don't use any pooling
- - 1 MEAN: Mean Pooling
- - 2 CLS: CLS Pooling
- - 3 LAST: Last token pooling
- - 4 RANK: For reranked models
+ - MEAN: Mean Pooling
+ - CLS: CLS Pooling
+ - LAST: Last token pooling
+ - RANK: For reranked models
  """
  poolingTypeUpper = poolingType.upper()
  poolingTypes = ["NONE", "MEAN", "CLS", "LAST", "RANK"]
@@ -1124,10 +1115,6 @@ class HasLlamaCppProperties:
  # """Set path to dynamic lookup cache to use for lookup decoding (updated by generation)"""
  # return self._set(lookupCacheDynamicFilePath=lookupCacheDynamicFilePath)

- def setEmbedding(self, embedding: bool):
- """Whether to load model with embedding support"""
- return self._set(embedding=embedding)
-
  def setFlashAttention(self, flashAttention: bool):
  """Whether to enable Flash Attention"""
  return self._set(flashAttention=flashAttention)
@@ -1280,11 +1267,19 @@ class HasLlamaCppProperties:
  def setUseChatTemplate(self, useChatTemplate: bool):
  """Set whether generate should apply a chat template"""
  return self._set(useChatTemplate=useChatTemplate)
-
+
  def setNParallel(self, nParallel: int):
  """Sets the number of parallel processes for decoding. This is an alias for `setBatchSize`."""
  return self.setBatchSize(nParallel)

+ def setLogVerbosity(self, logVerbosity: int):
+ """Set the log verbosity level"""
+ return self._set(logVerbosity=logVerbosity)
+
+ def setDisableLog(self, disableLog: bool):
+ """Whether to disable logging"""
+ return self._set(disableLog=disableLog)
+
  # -------- JAVA SETTERS --------
  def setTokenIdBias(self, tokenIdBias: Dict[int, float]):
  """Set token id bias"""
{spark_nlp-6.1.0 → spark_nlp-6.1.1}/sparknlp/reader/reader2doc.py
@@ -25,7 +25,7 @@ class Reader2Doc(
  HasExcelReaderProperties,
  HasHTMLReaderProperties,
  HasPowerPointProperties,
- HasTextReaderProperties,
+ HasTextReaderProperties
  ):
  """
  The Reader2Doc annotator allows you to use reading files more smoothly within existing
@@ -36,7 +36,7 @@ class Reader2Doc(
  output as a structured Spark DataFrame.

  Supported formats include:
-
+
  - Plain text
  - HTML
  - Word (.doc/.docx)
@@ -77,42 +77,49 @@ class Reader2Doc(
  Params._dummy(),
  "contentPath",
  "contentPath path to files to read",
- typeConverter=TypeConverters.toString,
+ typeConverter=TypeConverters.toString
  )

  outputCol = Param(
  Params._dummy(),
  "outputCol",
  "output column name",
- typeConverter=TypeConverters.toString,
+ typeConverter=TypeConverters.toString
  )

  contentType = Param(
  Params._dummy(),
  "contentType",
  "Set the content type to load following MIME specification",
- typeConverter=TypeConverters.toString,
+ typeConverter=TypeConverters.toString
  )

  explodeDocs = Param(
  Params._dummy(),
  "explodeDocs",
  "whether to explode the documents into separate rows",
- typeConverter=TypeConverters.toBoolean,
+ typeConverter=TypeConverters.toBoolean
  )

  flattenOutput = Param(
  Params._dummy(),
  "flattenOutput",
  "If true, output is flattened to plain text with minimal metadata",
- typeConverter=TypeConverters.toBoolean,
+ typeConverter=TypeConverters.toBoolean
  )

  titleThreshold = Param(
  Params._dummy(),
  "titleThreshold",
  "Minimum font size threshold for title detection in PDF docs",
- typeConverter=TypeConverters.toFloat,
+ typeConverter=TypeConverters.toFloat
+ )
+
+ outputFormat = Param(
+ Params._dummy(),
+ "outputFormat",
+ "Output format for the table content. Options are 'plain-text' or 'html-table'. Default is 'json-table'.",
+ typeConverter=TypeConverters.toString
  )

  @keyword_only
@@ -126,7 +133,6 @@ class Reader2Doc(
  titleThreshold=18
  )
  @keyword_only
-
  def setParams(self):
  kwargs = self._input_kwargs
  return self._set(**kwargs)
@@ -192,3 +198,13 @@ class Reader2Doc(
  Minimum font size threshold for title detection in PDF docs
  """
  return self._set(titleThreshold=value)
+
+ def setOutputFormat(self, value):
+ """Sets the output format for the table content.
+
+ Parameters
+ ----------
+ value : str
+ Output format for the table content. Options are 'plain-text' or 'html-table'. Default is 'json-table'.
+ """
+ return self._set(outputFormat=value)
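`Reader2Doc` gains an `outputFormat` Param, a companion to the new `reader2table.py` module added in SOURCES.txt (whose body is not shown in this diff). A hedged sketch of configuring it; the content path is a placeholder, and the other setters are assumed to mirror the Params shown above:

```python
from pyspark.ml import Pipeline
from sparknlp.reader.reader2doc import Reader2Doc

# "/data/html-files" is a hypothetical location; contentType follows
# the MIME convention described by the contentType Param
reader = Reader2Doc() \
    .setContentType("text/html") \
    .setContentPath("/data/html-files") \
    .setOutputCol("document") \
    .setOutputFormat("html-table")  # or "plain-text"; default is "json-table"

pipeline = Pipeline(stages=[reader])
```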