spark-nlp 4.2.6__py2.py3-none-any.whl → 6.2.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (221)
  1. com/johnsnowlabs/ml/__init__.py +0 -0
  2. com/johnsnowlabs/ml/ai/__init__.py +10 -0
  3. spark_nlp-6.2.1.dist-info/METADATA +362 -0
  4. spark_nlp-6.2.1.dist-info/RECORD +292 -0
  5. {spark_nlp-4.2.6.dist-info → spark_nlp-6.2.1.dist-info}/WHEEL +1 -1
  6. sparknlp/__init__.py +81 -28
  7. sparknlp/annotation.py +3 -2
  8. sparknlp/annotator/__init__.py +6 -0
  9. sparknlp/annotator/audio/__init__.py +2 -0
  10. sparknlp/annotator/audio/hubert_for_ctc.py +188 -0
  11. sparknlp/annotator/audio/wav2vec2_for_ctc.py +14 -14
  12. sparknlp/annotator/audio/whisper_for_ctc.py +251 -0
  13. sparknlp/{base → annotator}/chunk2_doc.py +4 -7
  14. sparknlp/annotator/chunker.py +1 -2
  15. sparknlp/annotator/classifier_dl/__init__.py +17 -0
  16. sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +161 -0
  17. sparknlp/annotator/classifier_dl/albert_for_question_answering.py +3 -15
  18. sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py +4 -18
  19. sparknlp/annotator/classifier_dl/albert_for_token_classification.py +3 -17
  20. sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +211 -0
  21. sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py +225 -0
  22. sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +161 -0
  23. sparknlp/annotator/classifier_dl/bert_for_question_answering.py +6 -20
  24. sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py +3 -17
  25. sparknlp/annotator/classifier_dl/bert_for_token_classification.py +3 -17
  26. sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py +212 -0
  27. sparknlp/annotator/classifier_dl/camembert_for_question_answering.py +168 -0
  28. sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py +5 -19
  29. sparknlp/annotator/classifier_dl/camembert_for_token_classification.py +5 -19
  30. sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +202 -0
  31. sparknlp/annotator/classifier_dl/classifier_dl.py +4 -4
  32. sparknlp/annotator/classifier_dl/deberta_for_question_answering.py +3 -17
  33. sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py +4 -19
  34. sparknlp/annotator/classifier_dl/deberta_for_token_classification.py +5 -21
  35. sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +193 -0
  36. sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py +3 -17
  37. sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py +4 -18
  38. sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py +3 -17
  39. sparknlp/annotator/classifier_dl/distil_bert_for_zero_shot_classification.py +211 -0
  40. sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +161 -0
  41. sparknlp/annotator/classifier_dl/longformer_for_question_answering.py +3 -17
  42. sparknlp/annotator/classifier_dl/longformer_for_sequence_classification.py +4 -18
  43. sparknlp/annotator/classifier_dl/longformer_for_token_classification.py +3 -17
  44. sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py +148 -0
  45. sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py +188 -0
  46. sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py +173 -0
  47. sparknlp/annotator/classifier_dl/multi_classifier_dl.py +3 -3
  48. sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +161 -0
  49. sparknlp/annotator/classifier_dl/roberta_for_question_answering.py +3 -17
  50. sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py +4 -18
  51. sparknlp/annotator/classifier_dl/roberta_for_token_classification.py +1 -1
  52. sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py +225 -0
  53. sparknlp/annotator/classifier_dl/sentiment_dl.py +4 -4
  54. sparknlp/annotator/classifier_dl/tapas_for_question_answering.py +2 -2
  55. sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +149 -0
  56. sparknlp/annotator/classifier_dl/xlm_roberta_for_question_answering.py +3 -17
  57. sparknlp/annotator/classifier_dl/xlm_roberta_for_sequence_classification.py +4 -18
  58. sparknlp/annotator/classifier_dl/xlm_roberta_for_token_classification.py +6 -20
  59. sparknlp/annotator/classifier_dl/xlm_roberta_for_zero_shot_classification.py +225 -0
  60. sparknlp/annotator/classifier_dl/xlnet_for_sequence_classification.py +4 -18
  61. sparknlp/annotator/classifier_dl/xlnet_for_token_classification.py +3 -17
  62. sparknlp/annotator/cleaners/__init__.py +15 -0
  63. sparknlp/annotator/cleaners/cleaner.py +202 -0
  64. sparknlp/annotator/cleaners/extractor.py +191 -0
  65. sparknlp/annotator/coref/spanbert_coref.py +4 -18
  66. sparknlp/annotator/cv/__init__.py +15 -0
  67. sparknlp/annotator/cv/blip_for_question_answering.py +172 -0
  68. sparknlp/annotator/cv/clip_for_zero_shot_classification.py +193 -0
  69. sparknlp/annotator/cv/convnext_for_image_classification.py +269 -0
  70. sparknlp/annotator/cv/florence2_transformer.py +180 -0
  71. sparknlp/annotator/cv/gemma3_for_multimodal.py +346 -0
  72. sparknlp/annotator/cv/internvl_for_multimodal.py +280 -0
  73. sparknlp/annotator/cv/janus_for_multimodal.py +351 -0
  74. sparknlp/annotator/cv/llava_for_multimodal.py +328 -0
  75. sparknlp/annotator/cv/mllama_for_multimodal.py +340 -0
  76. sparknlp/annotator/cv/paligemma_for_multimodal.py +308 -0
  77. sparknlp/annotator/cv/phi3_vision_for_multimodal.py +328 -0
  78. sparknlp/annotator/cv/qwen2vl_transformer.py +332 -0
  79. sparknlp/annotator/cv/smolvlm_transformer.py +426 -0
  80. sparknlp/annotator/cv/swin_for_image_classification.py +242 -0
  81. sparknlp/annotator/cv/vision_encoder_decoder_for_image_captioning.py +240 -0
  82. sparknlp/annotator/cv/vit_for_image_classification.py +36 -4
  83. sparknlp/annotator/dataframe_optimizer.py +216 -0
  84. sparknlp/annotator/date2_chunk.py +88 -0
  85. sparknlp/annotator/dependency/dependency_parser.py +2 -3
  86. sparknlp/annotator/dependency/typed_dependency_parser.py +3 -4
  87. sparknlp/annotator/document_character_text_splitter.py +228 -0
  88. sparknlp/annotator/document_normalizer.py +37 -1
  89. sparknlp/annotator/document_token_splitter.py +175 -0
  90. sparknlp/annotator/document_token_splitter_test.py +85 -0
  91. sparknlp/annotator/embeddings/__init__.py +11 -0
  92. sparknlp/annotator/embeddings/albert_embeddings.py +4 -18
  93. sparknlp/annotator/embeddings/auto_gguf_embeddings.py +539 -0
  94. sparknlp/annotator/embeddings/bert_embeddings.py +9 -22
  95. sparknlp/annotator/embeddings/bert_sentence_embeddings.py +12 -24
  96. sparknlp/annotator/embeddings/bge_embeddings.py +199 -0
  97. sparknlp/annotator/embeddings/camembert_embeddings.py +4 -20
  98. sparknlp/annotator/embeddings/chunk_embeddings.py +1 -2
  99. sparknlp/annotator/embeddings/deberta_embeddings.py +2 -16
  100. sparknlp/annotator/embeddings/distil_bert_embeddings.py +5 -19
  101. sparknlp/annotator/embeddings/doc2vec.py +7 -1
  102. sparknlp/annotator/embeddings/e5_embeddings.py +195 -0
  103. sparknlp/annotator/embeddings/e5v_embeddings.py +138 -0
  104. sparknlp/annotator/embeddings/elmo_embeddings.py +2 -2
  105. sparknlp/annotator/embeddings/instructor_embeddings.py +204 -0
  106. sparknlp/annotator/embeddings/longformer_embeddings.py +3 -17
  107. sparknlp/annotator/embeddings/minilm_embeddings.py +189 -0
  108. sparknlp/annotator/embeddings/mpnet_embeddings.py +192 -0
  109. sparknlp/annotator/embeddings/mxbai_embeddings.py +184 -0
  110. sparknlp/annotator/embeddings/nomic_embeddings.py +181 -0
  111. sparknlp/annotator/embeddings/roberta_embeddings.py +9 -21
  112. sparknlp/annotator/embeddings/roberta_sentence_embeddings.py +7 -21
  113. sparknlp/annotator/embeddings/sentence_embeddings.py +2 -3
  114. sparknlp/annotator/embeddings/snowflake_embeddings.py +202 -0
  115. sparknlp/annotator/embeddings/uae_embeddings.py +211 -0
  116. sparknlp/annotator/embeddings/universal_sentence_encoder.py +3 -3
  117. sparknlp/annotator/embeddings/word2vec.py +7 -1
  118. sparknlp/annotator/embeddings/word_embeddings.py +4 -5
  119. sparknlp/annotator/embeddings/xlm_roberta_embeddings.py +9 -21
  120. sparknlp/annotator/embeddings/xlm_roberta_sentence_embeddings.py +7 -21
  121. sparknlp/annotator/embeddings/xlnet_embeddings.py +4 -18
  122. sparknlp/annotator/er/entity_ruler.py +37 -23
  123. sparknlp/annotator/keyword_extraction/yake_keyword_extraction.py +2 -3
  124. sparknlp/annotator/ld_dl/language_detector_dl.py +2 -2
  125. sparknlp/annotator/lemmatizer.py +3 -4
  126. sparknlp/annotator/matcher/date_matcher.py +35 -3
  127. sparknlp/annotator/matcher/multi_date_matcher.py +1 -2
  128. sparknlp/annotator/matcher/regex_matcher.py +3 -3
  129. sparknlp/annotator/matcher/text_matcher.py +2 -3
  130. sparknlp/annotator/n_gram_generator.py +1 -2
  131. sparknlp/annotator/ner/__init__.py +3 -1
  132. sparknlp/annotator/ner/ner_converter.py +18 -0
  133. sparknlp/annotator/ner/ner_crf.py +4 -5
  134. sparknlp/annotator/ner/ner_dl.py +10 -5
  135. sparknlp/annotator/ner/ner_dl_graph_checker.py +293 -0
  136. sparknlp/annotator/ner/ner_overwriter.py +2 -2
  137. sparknlp/annotator/ner/zero_shot_ner_model.py +173 -0
  138. sparknlp/annotator/normalizer.py +2 -2
  139. sparknlp/annotator/openai/__init__.py +16 -0
  140. sparknlp/annotator/openai/openai_completion.py +349 -0
  141. sparknlp/annotator/openai/openai_embeddings.py +106 -0
  142. sparknlp/annotator/pos/perceptron.py +6 -7
  143. sparknlp/annotator/sentence/sentence_detector.py +2 -2
  144. sparknlp/annotator/sentence/sentence_detector_dl.py +3 -3
  145. sparknlp/annotator/sentiment/sentiment_detector.py +4 -5
  146. sparknlp/annotator/sentiment/vivekn_sentiment.py +4 -5
  147. sparknlp/annotator/seq2seq/__init__.py +17 -0
  148. sparknlp/annotator/seq2seq/auto_gguf_model.py +304 -0
  149. sparknlp/annotator/seq2seq/auto_gguf_reranker.py +334 -0
  150. sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +336 -0
  151. sparknlp/annotator/seq2seq/bart_transformer.py +420 -0
  152. sparknlp/annotator/seq2seq/cohere_transformer.py +357 -0
  153. sparknlp/annotator/seq2seq/cpm_transformer.py +321 -0
  154. sparknlp/annotator/seq2seq/gpt2_transformer.py +1 -1
  155. sparknlp/annotator/seq2seq/llama2_transformer.py +343 -0
  156. sparknlp/annotator/seq2seq/llama3_transformer.py +381 -0
  157. sparknlp/annotator/seq2seq/m2m100_transformer.py +392 -0
  158. sparknlp/annotator/seq2seq/marian_transformer.py +124 -3
  159. sparknlp/annotator/seq2seq/mistral_transformer.py +348 -0
  160. sparknlp/annotator/seq2seq/nllb_transformer.py +420 -0
  161. sparknlp/annotator/seq2seq/olmo_transformer.py +326 -0
  162. sparknlp/annotator/seq2seq/phi2_transformer.py +326 -0
  163. sparknlp/annotator/seq2seq/phi3_transformer.py +330 -0
  164. sparknlp/annotator/seq2seq/phi4_transformer.py +387 -0
  165. sparknlp/annotator/seq2seq/qwen_transformer.py +340 -0
  166. sparknlp/annotator/seq2seq/starcoder_transformer.py +335 -0
  167. sparknlp/annotator/seq2seq/t5_transformer.py +54 -4
  168. sparknlp/annotator/similarity/__init__.py +0 -0
  169. sparknlp/annotator/similarity/document_similarity_ranker.py +379 -0
  170. sparknlp/annotator/spell_check/context_spell_checker.py +116 -17
  171. sparknlp/annotator/spell_check/norvig_sweeting.py +3 -6
  172. sparknlp/annotator/spell_check/symmetric_delete.py +1 -1
  173. sparknlp/annotator/stemmer.py +2 -3
  174. sparknlp/annotator/stop_words_cleaner.py +3 -4
  175. sparknlp/annotator/tf_ner_dl_graph_builder.py +1 -1
  176. sparknlp/annotator/token/__init__.py +0 -1
  177. sparknlp/annotator/token/recursive_tokenizer.py +2 -3
  178. sparknlp/annotator/token/tokenizer.py +2 -3
  179. sparknlp/annotator/ws/word_segmenter.py +35 -10
  180. sparknlp/base/__init__.py +2 -3
  181. sparknlp/base/doc2_chunk.py +0 -3
  182. sparknlp/base/document_assembler.py +5 -5
  183. sparknlp/base/embeddings_finisher.py +14 -2
  184. sparknlp/base/finisher.py +15 -4
  185. sparknlp/base/gguf_ranking_finisher.py +234 -0
  186. sparknlp/base/image_assembler.py +69 -0
  187. sparknlp/base/light_pipeline.py +53 -21
  188. sparknlp/base/multi_document_assembler.py +9 -13
  189. sparknlp/base/prompt_assembler.py +207 -0
  190. sparknlp/base/token_assembler.py +1 -2
  191. sparknlp/common/__init__.py +2 -0
  192. sparknlp/common/annotator_type.py +1 -0
  193. sparknlp/common/completion_post_processing.py +37 -0
  194. sparknlp/common/match_strategy.py +33 -0
  195. sparknlp/common/properties.py +914 -9
  196. sparknlp/internal/__init__.py +841 -116
  197. sparknlp/internal/annotator_java_ml.py +1 -1
  198. sparknlp/internal/annotator_transformer.py +3 -0
  199. sparknlp/logging/comet.py +2 -2
  200. sparknlp/partition/__init__.py +16 -0
  201. sparknlp/partition/partition.py +244 -0
  202. sparknlp/partition/partition_properties.py +902 -0
  203. sparknlp/partition/partition_transformer.py +200 -0
  204. sparknlp/pretrained/pretrained_pipeline.py +1 -1
  205. sparknlp/pretrained/resource_downloader.py +126 -2
  206. sparknlp/reader/__init__.py +15 -0
  207. sparknlp/reader/enums.py +19 -0
  208. sparknlp/reader/pdf_to_text.py +190 -0
  209. sparknlp/reader/reader2doc.py +124 -0
  210. sparknlp/reader/reader2image.py +136 -0
  211. sparknlp/reader/reader2table.py +44 -0
  212. sparknlp/reader/reader_assembler.py +159 -0
  213. sparknlp/reader/sparknlp_reader.py +461 -0
  214. sparknlp/training/__init__.py +1 -0
  215. sparknlp/training/conll.py +8 -2
  216. sparknlp/training/spacy_to_annotation.py +57 -0
  217. sparknlp/util.py +26 -0
  218. spark_nlp-4.2.6.dist-info/METADATA +0 -1256
  219. spark_nlp-4.2.6.dist-info/RECORD +0 -196
  220. {spark_nlp-4.2.6.dist-info → spark_nlp-6.2.1.dist-info}/top_level.txt +0 -0
  221. /sparknlp/annotator/{token/token2_chunk.py → token2_chunk.py} +0 -0
sparknlp/reader/reader2doc.py
@@ -0,0 +1,124 @@
+ # Copyright 2017-2025 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from pyspark import keyword_only
+
+ from sparknlp.common import AnnotatorType
+ from sparknlp.internal import AnnotatorTransformer
+ from sparknlp.partition.partition_properties import *
+
+
+ class Reader2Doc(
+     AnnotatorTransformer,
+     HasReaderProperties,
+     HasHTMLReaderProperties,
+     HasEmailReaderProperties,
+     HasExcelReaderProperties,
+     HasPowerPointProperties,
+     HasTextReaderProperties
+ ):
+     """
+     The Reader2Doc annotator integrates file reading directly into existing Spark NLP
+     workflows, enabling seamless reuse of your pipelines.
+
+     Reader2Doc can be used for extracting structured content from various document types
+     using Spark NLP readers. It supports reading from many file types and returns parsed
+     output as a structured Spark DataFrame.
+
+     Supported formats include:
+
+     - Plain text
+     - HTML
+     - Word (.doc/.docx)
+     - Excel (.xls/.xlsx)
+     - PowerPoint (.ppt/.pptx)
+     - Email files (.eml, .msg)
+     - PDFs
+
+     Examples
+     --------
+     >>> from johnsnowlabs.reader import Reader2Doc
+     >>> from johnsnowlabs.nlp.base import DocumentAssembler
+     >>> from pyspark.ml import Pipeline
+     >>> # Initialize Reader2Doc for PDF files
+     >>> reader2doc = Reader2Doc() \\
+     ...     .setContentType("application/pdf") \\
+     ...     .setContentPath(f"{pdf_directory}/")
+     >>> # Build the pipeline with the Reader2Doc stage
+     >>> pipeline = Pipeline(stages=[reader2doc])
+     >>> # Fit the pipeline to an empty DataFrame
+     >>> pipeline_model = pipeline.fit(empty_data_set)
+     >>> result_df = pipeline_model.transform(empty_data_set)
+     >>> # Show the resulting DataFrame
+     >>> result_df.show()
+     +--------------------------------------------------------------------------------------------------------------------------------------------+
+     |document                                                                                                                                    |
+     +--------------------------------------------------------------------------------------------------------------------------------------------+
+     |[{'document', 0, 14, 'This is a Title', {'pageNumber': 1, 'elementType': 'Title', 'fileName': 'pdf-title.pdf'}, []}]                        |
+     |[{'document', 15, 38, 'This is a narrative text', {'pageNumber': 1, 'elementType': 'NarrativeText', 'fileName': 'pdf-title.pdf'}, []}]      |
+     |[{'document', 39, 68, 'This is another narrative text', {'pageNumber': 1, 'elementType': 'NarrativeText', 'fileName': 'pdf-title.pdf'}, []}]|
+     +--------------------------------------------------------------------------------------------------------------------------------------------+
+     """
+
+     name = "Reader2Doc"
+
+     outputAnnotatorType = AnnotatorType.DOCUMENT
+
+     excludeNonText = Param(
+         Params._dummy(),
+         "excludeNonText",
+         "Whether to exclude non-text content from the output. Default is False.",
+         typeConverter=TypeConverters.toBoolean
+     )
+
+     def setExcludeNonText(self, value):
+         """Sets whether to exclude non-text content from the output.
+
+         Parameters
+         ----------
+         value : bool
+             Whether to exclude non-text content from the output. Default is False.
+         """
+         return self._set(excludeNonText=value)
+
+     joinString = Param(
+         Params._dummy(),
+         "joinString",
+         "If outputAsDocument is true, specifies the string used to join elements into a single document.",
+         typeConverter=TypeConverters.toString
+     )
+
+     def setJoinString(self, value):
+         """
+         If outputAsDocument is true, sets the string used to join elements into a single document.
+         """
+         return self._set(joinString=value)
+
+     @keyword_only
+     def __init__(self):
+         super(Reader2Doc, self).__init__(classname="com.johnsnowlabs.reader.Reader2Doc")
+         self._setDefault(
+             outputCol="document",
+             explodeDocs=False,
+             contentType="",
+             flattenOutput=False,
+             outputAsDocument=True,
+             outputFormat="plain-text",
+             excludeNonText=False,
+             joinString="\n"
+         )
+
+     @keyword_only
+     def setParams(self):
+         kwargs = self._input_kwargs
+         return self._set(**kwargs)
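
For context, a minimal usage sketch combining the new excludeNonText and joinString parameters defined above. The import path follows the module location shown in this diff; the input directory and the empty bootstrap DataFrame are placeholder assumptions:

    import sparknlp
    from pyspark.ml import Pipeline
    from sparknlp.reader.reader2doc import Reader2Doc  # module path as added in this diff

    spark = sparknlp.start()
    empty_df = spark.createDataFrame([[""]]).toDF("text")  # placeholder to fit the pipeline

    # Join every extracted element into one document per file, separated by blank lines,
    # and drop non-text elements such as tables and images.
    reader2doc = (
        Reader2Doc()
        .setContentType("text/html")
        .setContentPath("/data/html/")  # hypothetical input directory
        .setExcludeNonText(True)
        .setJoinString("\n\n")
    )

    result_df = Pipeline(stages=[reader2doc]).fit(empty_df).transform(empty_df)
    result_df.select("document.result").show(truncate=False)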
sparknlp/reader/reader2image.py
@@ -0,0 +1,136 @@
+ # Copyright 2017-2025 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from pyspark import keyword_only
+ from pyspark.ml.param import TypeConverters, Params, Param
+
+ from sparknlp.common import AnnotatorType
+ from sparknlp.internal import AnnotatorTransformer
+ from sparknlp.partition.partition_properties import *
+
+ class Reader2Image(
+     AnnotatorTransformer,
+     HasReaderProperties,
+     HasHTMLReaderProperties,
+     HasPdfProperties
+ ):
+     """
+     The Reader2Image annotator integrates reading of image-bearing files directly into existing
+     Spark NLP workflows, enabling seamless reuse of your pipelines. Reader2Image can be used for
+     extracting structured image content from various document types using Spark NLP readers. It
+     supports reading from many file types and returns parsed output as a structured Spark DataFrame.
+
+     Supported formats include HTML and Markdown.
+
+     == Example ==
+     This example demonstrates how to load HTML files with images and process them into a structured
+     Spark DataFrame using Reader2Image.
+
+     Expected output:
+     +-------------------+--------------------+
+     |           fileName|               image|
+     +-------------------+--------------------+
+     |example-images.html|[{image, example-...|
+     |example-images.html|[{image, example-...|
+     +-------------------+--------------------+
+
+     Schema:
+     root
+      |-- fileName: string (nullable = true)
+      |-- image: array (nullable = false)
+      |    |-- element: struct (containsNull = true)
+      |    |    |-- annotatorType: string (nullable = true)
+      |    |    |-- origin: string (nullable = true)
+      |    |    |-- height: integer (nullable = false)
+      |    |    |-- width: integer (nullable = false)
+      |    |    |-- nChannels: integer (nullable = false)
+      |    |    |-- mode: integer (nullable = false)
+      |    |    |-- result: binary (nullable = true)
+      |    |    |-- metadata: map (nullable = true)
+      |    |    |    |-- key: string
+      |    |    |    |-- value: string (valueContainsNull = true)
+      |    |    |-- text: string (nullable = true)
+     """
+
+     name = "Reader2Image"
+     outputAnnotatorType = AnnotatorType.IMAGE
+
+     userMessage = Param(
+         Params._dummy(),
+         "userMessage",
+         "Custom user message.",
+         typeConverter=TypeConverters.toString
+     )
+
+     promptTemplate = Param(
+         Params._dummy(),
+         "promptTemplate",
+         "Format of the output prompt.",
+         typeConverter=TypeConverters.toString
+     )
+
+     customPromptTemplate = Param(
+         Params._dummy(),
+         "customPromptTemplate",
+         "Custom prompt template for image models.",
+         typeConverter=TypeConverters.toString
+     )
+
+     @keyword_only
+     def __init__(self):
+         super(Reader2Image, self).__init__(classname="com.johnsnowlabs.reader.Reader2Image")
+         self._setDefault(
+             contentType="",
+             outputFormat="image",
+             explodeDocs=True,
+             userMessage="Describe this image",
+             promptTemplate="qwen2vl-chat",
+             readAsImage=True,
+             customPromptTemplate="",
+             ignoreExceptions=True
+         )
+
+     @keyword_only
+     def setParams(self):
+         kwargs = self._input_kwargs
+         return self._set(**kwargs)
+
+     def setUserMessage(self, value: str):
+         """Sets custom user message.
+
+         Parameters
+         ----------
+         value : str
+             Custom user message to include.
+         """
+         return self._set(userMessage=value)
+
+     def setPromptTemplate(self, value: str):
+         """Sets format of the output prompt.
+
+         Parameters
+         ----------
+         value : str
+             Prompt template format.
+         """
+         return self._set(promptTemplate=value)
+
+     def setCustomPromptTemplate(self, value: str):
+         """Sets custom prompt template for image models.
+
+         Parameters
+         ----------
+         value : str
+             Custom prompt template string.
+         """
+         return self._set(customPromptTemplate=value)
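
The docstring above shows the expected output but not the example code itself. A minimal sketch of what that usage might look like, assuming setContentType and setContentPath are inherited from HasReaderProperties as in the sibling readers; the input path is a placeholder:

    import sparknlp
    from pyspark.ml import Pipeline
    from sparknlp.reader.reader2image import Reader2Image  # module path as added in this diff

    spark = sparknlp.start()
    empty_df = spark.createDataFrame([[""]]).toDF("text")  # placeholder to fit the pipeline

    # Extract images from HTML files and attach a prompt for downstream multimodal models.
    reader2image = (
        Reader2Image()
        .setContentType("text/html")
        .setContentPath("/data/html-with-images/")  # hypothetical input directory
        .setUserMessage("Describe this image")
    )

    result_df = Pipeline(stages=[reader2image]).fit(empty_df).transform(empty_df)
    result_df.select("fileName", "image").show()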
sparknlp/reader/reader2table.py
@@ -0,0 +1,44 @@
+ # Copyright 2017-2025 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pyspark import keyword_only
+
+ from sparknlp.common import AnnotatorType
+ from sparknlp.internal import AnnotatorTransformer
+ from sparknlp.partition.partition_properties import *
+
+
+ class Reader2Table(
+     AnnotatorTransformer,
+     HasReaderProperties,
+     HasEmailReaderProperties,
+     HasExcelReaderProperties,
+     HasHTMLReaderProperties,
+     HasPowerPointProperties,
+     HasTextReaderProperties
+ ):
+     name = 'Reader2Table'
+
+     outputAnnotatorType = AnnotatorType.DOCUMENT
+
+     @keyword_only
+     def __init__(self):
+         super(Reader2Table, self).__init__(classname="com.johnsnowlabs.reader.Reader2Table")
+         self._setDefault(outputCol="document", outputFormat="json-table", inferTableStructure=True,
+                          outputAsDocument=False)
+
+     @keyword_only
+     def setParams(self):
+         kwargs = self._input_kwargs
+         return self._set(**kwargs)
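
Reader2Table ships without a docstring example. A minimal sketch under the defaults set above (json-table output with inferTableStructure=True), assuming the content-type and content-path setters from HasReaderProperties as the sibling readers use; the input path is a placeholder:

    import sparknlp
    from pyspark.ml import Pipeline
    from sparknlp.reader.reader2table import Reader2Table  # module path as added in this diff

    spark = sparknlp.start()
    empty_df = spark.createDataFrame([[""]]).toDF("text")  # placeholder to fit the pipeline

    # With the defaults above, each detected table is emitted as a JSON document
    # ("json-table") whose structure is inferred from the source markup.
    reader2table = (
        Reader2Table()
        .setContentType("text/html")
        .setContentPath("/data/html-with-tables/")  # hypothetical input directory
    )

    result_df = Pipeline(stages=[reader2table]).fit(empty_df).transform(empty_df)
    result_df.select("document.result").show(truncate=False)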
sparknlp/reader/reader_assembler.py
@@ -0,0 +1,159 @@
+ # Copyright 2017-2025 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pyspark import keyword_only
+
+ from sparknlp.common import AnnotatorType
+ from sparknlp.internal import AnnotatorTransformer
+ from sparknlp.partition.partition_properties import *
+
+ class ReaderAssembler(
+     AnnotatorTransformer,
+     HasReaderProperties,
+     HasHTMLReaderProperties,
+     HasEmailReaderProperties,
+     HasExcelReaderProperties,
+     HasPowerPointProperties,
+     HasTextReaderProperties,
+     HasPdfProperties
+ ):
+     """
+     The ReaderAssembler annotator provides a unified interface for combining multiple Spark NLP
+     readers (such as Reader2Doc, Reader2Table, and Reader2Image) into a single, configurable
+     component. It automatically orchestrates the execution of different readers based on input type,
+     configured priorities, and fallback strategies, allowing you to handle diverse content formats
+     without manually chaining multiple readers in your pipeline.
+
+     ReaderAssembler simplifies the process of building flexible pipelines capable of ingesting and
+     processing documents, tables, and images in a consistent way. It handles reader selection,
+     ordering, and fault tolerance internally, ensuring that pipelines remain concise, robust, and
+     easy to maintain.
+
+     Examples
+     --------
+     >>> from johnsnowlabs.reader import ReaderAssembler
+     >>> from pyspark.ml import Pipeline
+     >>>
+     >>> reader_assembler = ReaderAssembler() \\
+     ...     .setContentType("text/html") \\
+     ...     .setContentPath("/table-image.html") \\
+     ...     .setOutputCol("document")
+     >>>
+     >>> pipeline = Pipeline(stages=[reader_assembler])
+     >>> pipeline_model = pipeline.fit(empty_data_set)
+     >>> result_df = pipeline_model.transform(empty_data_set)
+     >>>
+     >>> result_df.show()
+     +--------+----------------------+----------------------+----------------------+---------+
+     |fileName|         document_text|        document_table|        document_image|exception|
+     +--------+----------------------+----------------------+----------------------+---------+
+     |    null|[{'document', 0, 26...|[{'document', 0, 50...|[{'image', , 5, 5, ...|     null|
+     +--------+----------------------+----------------------+----------------------+---------+
+
+     This annotator is especially useful when working with heterogeneous input data, for example
+     when a dataset includes PDFs, spreadsheets, and images, since it lets Spark NLP automatically
+     invoke the appropriate reader for each file type while preserving a unified schema in the output.
+     """
+
+     name = 'ReaderAssembler'
+
+     outputAnnotatorType = AnnotatorType.DOCUMENT
+
+     excludeNonText = Param(
+         Params._dummy(),
+         "excludeNonText",
+         "Whether to exclude non-text content from the output. Default is False.",
+         typeConverter=TypeConverters.toBoolean
+     )
+
+     userMessage = Param(
+         Params._dummy(),
+         "userMessage",
+         "Custom user message.",
+         typeConverter=TypeConverters.toString
+     )
+
+     promptTemplate = Param(
+         Params._dummy(),
+         "promptTemplate",
+         "Format of the output prompt.",
+         typeConverter=TypeConverters.toString
+     )
+
+     customPromptTemplate = Param(
+         Params._dummy(),
+         "customPromptTemplate",
+         "Custom prompt template for image models.",
+         typeConverter=TypeConverters.toString
+     )
+
+     @keyword_only
+     def __init__(self):
+         super(ReaderAssembler, self).__init__(classname="com.johnsnowlabs.reader.ReaderAssembler")
+         self._setDefault(contentType="",
+                          explodeDocs=False,
+                          userMessage="Describe this image",
+                          promptTemplate="qwen2vl-chat",
+                          readAsImage=True,
+                          customPromptTemplate="",
+                          ignoreExceptions=True,
+                          flattenOutput=False,
+                          titleThreshold=18)
+
+     @keyword_only
+     def setParams(self):
+         kwargs = self._input_kwargs
+         return self._set(**kwargs)
+
+     def setExcludeNonText(self, value):
+         """Sets whether to exclude non-text content from the output.
+
+         Parameters
+         ----------
+         value : bool
+             Whether to exclude non-text content from the output. Default is False.
+         """
+         return self._set(excludeNonText=value)
+
+     def setUserMessage(self, value: str):
+         """Sets custom user message.
+
+         Parameters
+         ----------
+         value : str
+             Custom user message to include.
+         """
+         return self._set(userMessage=value)
+
+     def setPromptTemplate(self, value: str):
+         """Sets format of the output prompt.
+
+         Parameters
+         ----------
+         value : str
+             Prompt template format.
+         """
+         return self._set(promptTemplate=value)
+
+     def setCustomPromptTemplate(self, value: str):
+         """Sets custom prompt template for image models.
+
+         Parameters
+         ----------
+         value : str
+             Custom prompt template string.
+         """
+         return self._set(customPromptTemplate=value)
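
A minimal sketch of overriding the image-prompt defaults that __init__ sets above (userMessage, promptTemplate). The output columns follow the docstring example; the input path and the empty bootstrap DataFrame are placeholder assumptions:

    import sparknlp
    from pyspark.ml import Pipeline
    from sparknlp.reader.reader_assembler import ReaderAssembler  # module path as added in this diff

    spark = sparknlp.start()
    empty_df = spark.createDataFrame([[""]]).toDF("text")  # placeholder to fit the pipeline

    # Override the image-prompt defaults while letting ReaderAssembler pick the
    # text/table/image reader for each input file.
    assembler = (
        ReaderAssembler()
        .setContentType("text/html")
        .setContentPath("/data/mixed-content/")  # hypothetical input directory
        .setOutputCol("document")
        .setUserMessage("List any objects visible in this image")
    )

    result_df = Pipeline(stages=[assembler]).fit(empty_df).transform(empty_df)
    result_df.select("document_text", "document_table", "document_image").show()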