spark-nlp 5.5.3__py2.py3-none-any.whl → 6.0.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of spark-nlp might be problematic. Click here for more details.

Files changed (33)
  1. {spark_nlp-5.5.3.dist-info → spark_nlp-6.0.0.dist-info}/METADATA +20 -11
  2. {spark_nlp-5.5.3.dist-info → spark_nlp-6.0.0.dist-info}/RECORD +32 -17
  3. sparknlp/__init__.py +2 -2
  4. sparknlp/annotator/classifier_dl/__init__.py +4 -0
  5. sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +161 -0
  6. sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +2 -2
  7. sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +161 -0
  8. sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +161 -0
  9. sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +149 -0
  10. sparknlp/annotator/cleaners/__init__.py +15 -0
  11. sparknlp/annotator/cleaners/cleaner.py +202 -0
  12. sparknlp/annotator/cleaners/extractor.py +191 -0
  13. sparknlp/annotator/cv/__init__.py +6 -1
  14. sparknlp/annotator/cv/janus_for_multimodal.py +356 -0
  15. sparknlp/annotator/cv/llava_for_multimodal.py +328 -0
  16. sparknlp/annotator/cv/mllama_for_multimodal.py +340 -0
  17. sparknlp/annotator/cv/phi3_vision_for_multimodal.py +328 -0
  18. sparknlp/annotator/cv/qwen2vl_transformer.py +332 -0
  19. sparknlp/annotator/embeddings/auto_gguf_embeddings.py +10 -6
  20. sparknlp/annotator/seq2seq/__init__.py +3 -0
  21. sparknlp/annotator/seq2seq/auto_gguf_model.py +8 -503
  22. sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +333 -0
  23. sparknlp/annotator/seq2seq/cohere_transformer.py +357 -0
  24. sparknlp/annotator/seq2seq/llama3_transformer.py +4 -4
  25. sparknlp/annotator/seq2seq/olmo_transformer.py +326 -0
  26. sparknlp/base/image_assembler.py +58 -0
  27. sparknlp/common/properties.py +605 -96
  28. sparknlp/internal/__init__.py +100 -2
  29. sparknlp/reader/pdf_to_text.py +65 -0
  30. sparknlp/reader/sparknlp_reader.py +222 -14
  31. spark_nlp-5.5.3.dist-info/.uuid +0 -1
  32. {spark_nlp-5.5.3.dist-info → spark_nlp-6.0.0.dist-info}/WHEEL +0 -0
  33. {spark_nlp-5.5.3.dist-info → spark_nlp-6.0.0.dist-info}/top_level.txt +0 -0
@@ -67,6 +67,15 @@ class _AlbertForZeroShotClassificationLoader(ExtendedJavaWrapper):
67
67
  )
68
68
 
69
69
 
70
class _AlbertMultipleChoiceLoader(ExtendedJavaWrapper):
    """Loads a saved ALBERT multiple-choice model via the JVM entry point.

    Parameters
    ----------
    path : str
        Path to the exported saved model.
    jspark : SparkSession
        The active Spark session (JVM side).
    """

    def __init__(self, path, jspark):
        # Forward straight to the Scala loadSavedModel static method.
        entry_point = "com.johnsnowlabs.nlp.annotators.classifier.dl.AlbertForMultipleChoice.loadSavedModel"
        super(_AlbertMultipleChoiceLoader, self).__init__(entry_point, path, jspark)
70
79
  class _BertLoader(ExtendedJavaWrapper):
71
80
  def __init__(self, path, jspark, use_openvino=False):
72
81
  super(_BertLoader, self).__init__(
@@ -121,6 +130,15 @@ class _BertMultipleChoiceLoader(ExtendedJavaWrapper):
121
130
  jspark,
122
131
  )
123
132
 
133
class _CoHereLoader(ExtendedJavaWrapper):
    """Loads a saved CoHere transformer model via the JVM entry point.

    Parameters
    ----------
    path : str
        Path to the exported saved model.
    jspark : SparkSession
        The active Spark session (JVM side).
    use_openvino : bool, optional
        Whether to load an OpenVINO export, by default False.
    """

    def __init__(self, path, jspark, use_openvino=False):
        entry_point = "com.johnsnowlabs.nlp.annotators.seq2seq.CoHereTransformer.loadSavedModel"
        super(_CoHereLoader, self).__init__(entry_point, path, jspark, use_openvino)
124
142
  class _DeBERTaLoader(ExtendedJavaWrapper):
125
143
  def __init__(self, path, jspark):
126
144
  super(_DeBERTaLoader, self).__init__(
@@ -211,6 +229,15 @@ class _DistilBertQuestionAnsweringLoader(ExtendedJavaWrapper):
211
229
  )
212
230
 
213
231
 
232
class _DistilBertMultipleChoiceLoader(ExtendedJavaWrapper):
    """Loads a saved DistilBERT multiple-choice model via the JVM entry point.

    Parameters
    ----------
    path : str
        Path to the exported saved model.
    jspark : SparkSession
        The active Spark session (JVM side).
    """

    def __init__(self, path, jspark):
        entry_point = "com.johnsnowlabs.nlp.annotators.classifier.dl.DistilBertForMultipleChoice.loadSavedModel"
        super(_DistilBertMultipleChoiceLoader, self).__init__(entry_point, path, jspark)
214
241
  class _ElmoLoader(ExtendedJavaWrapper):
215
242
  def __init__(self, path, jspark):
216
243
  super(_ElmoLoader, self).__init__(
@@ -245,6 +272,14 @@ class _GPT2Loader(ExtendedJavaWrapper):
245
272
  jspark,
246
273
  )
247
274
 
275
class _JanusForMultiModalLoader(ExtendedJavaWrapper):
    """Loads a saved Janus multimodal model via the JVM entry point.

    Parameters
    ----------
    path : str
        Path to the exported saved model.
    jspark : SparkSession
        The active Spark session (JVM side).
    use_openvino : bool, optional
        Whether to load an OpenVINO export, by default False.
    """

    def __init__(self, path, jspark, use_openvino=False):
        entry_point = "com.johnsnowlabs.nlp.annotators.cv.JanusForMultiModal.loadSavedModel"
        super(_JanusForMultiModalLoader, self).__init__(entry_point, path, jspark, use_openvino)
248
283
 
249
284
  class _LLAMA2Loader(ExtendedJavaWrapper):
250
285
  def __init__(self, path, jspark, use_openvino=False):
@@ -299,6 +334,14 @@ class _LongformerQuestionAnsweringLoader(ExtendedJavaWrapper):
299
334
  jspark,
300
335
  )
301
336
 
337
class _LLAVAForMultiModalLoader(ExtendedJavaWrapper):
    """Loads a saved LLaVA multimodal model via the JVM entry point.

    Parameters
    ----------
    path : str
        Path to the exported saved model.
    jspark : SparkSession
        The active Spark session (JVM side).
    use_openvino : bool, optional
        Whether to load an OpenVINO export, by default False.
    """

    def __init__(self, path, jspark, use_openvino=False):
        entry_point = "com.johnsnowlabs.nlp.annotators.cv.LLAVAForMultiModal.loadSavedModel"
        super(_LLAVAForMultiModalLoader, self).__init__(entry_point, path, jspark, use_openvino)
302
345
 
303
346
  class _M2M100Loader(ExtendedJavaWrapper):
304
347
  def __init__(self, path, jspark, use_openvino=False):
@@ -318,6 +361,14 @@ class _MistralLoader(ExtendedJavaWrapper):
318
361
  use_openvino,
319
362
  )
320
363
 
364
class _MLLamaForMultimodalLoader(ExtendedJavaWrapper):
    """Loads a saved MLLama multimodal model via the JVM entry point.

    Parameters
    ----------
    path : str
        Path to the exported saved model.
    jspark : SparkSession
        The active Spark session (JVM side).
    use_openvino : bool, optional
        Whether to load an OpenVINO export, by default False.
    """

    def __init__(self, path, jspark, use_openvino=False):
        entry_point = "com.johnsnowlabs.nlp.annotators.cv.MLLamaForMultimodal.loadSavedModel"
        super(_MLLamaForMultimodalLoader, self).__init__(entry_point, path, jspark, use_openvino)
321
372
 
322
373
  class _NLLBLoader(ExtendedJavaWrapper):
323
374
  def __init__(self, path, jspark, use_openvino=False):
@@ -345,6 +396,10 @@ class _MPNetLoader(ExtendedJavaWrapper):
345
396
  )
346
397
 
347
398
 
399
class _OLMoLoader(ExtendedJavaWrapper):
    """Loads a saved OLMo transformer model via the JVM entry point.

    Parameters
    ----------
    path : str
        Path to the exported saved model.
    jspark : SparkSession
        The active Spark session (JVM side).
    """

    def __init__(self, path, jspark):
        entry_point = "com.johnsnowlabs.nlp.annotators.seq2seq.OLMoTransformer.loadSavedModel"
        super(_OLMoLoader, self).__init__(entry_point, path, jspark)
348
403
  class _Phi2Loader(ExtendedJavaWrapper):
349
404
  def __init__(self, path, jspark, use_openvino=False):
350
405
  super(_Phi2Loader, self).__init__(
@@ -363,6 +418,15 @@ class _Phi3Loader(ExtendedJavaWrapper):
363
418
  use_openvino,
364
419
  )
365
420
 
421
class _Phi3VisionLoader(ExtendedJavaWrapper):
    """Loads a saved Phi-3 Vision model via the JVM entry point.

    Parameters
    ----------
    path : str
        Path to the exported saved model.
    jspark : SparkSession
        The active Spark session (JVM side).
    use_openvino : bool, optional
        Whether to load an OpenVINO export, by default False.
    """

    def __init__(self, path, jspark, use_openvino=False):
        entry_point = "com.johnsnowlabs.nlp.annotators.cv.Phi3Vision.loadSavedModel"
        super(_Phi3VisionLoader, self).__init__(entry_point, path, jspark, use_openvino)
429
+
366
430
  class _RoBertaLoader(ExtendedJavaWrapper):
367
431
  def __init__(self, path, jspark, use_openvino=False):
368
432
  super(_RoBertaLoader, self).__init__(
@@ -409,6 +473,15 @@ class _RoBertaQuestionAnsweringLoader(ExtendedJavaWrapper):
409
473
  )
410
474
 
411
475
 
476
class _RoBertaMultipleChoiceLoader(ExtendedJavaWrapper):
    """Loads a saved RoBERTa multiple-choice model via the JVM entry point.

    Parameters
    ----------
    path : str
        Path to the exported saved model.
    jspark : SparkSession
        The active Spark session (JVM side).
    """

    def __init__(self, path, jspark):
        entry_point = "com.johnsnowlabs.nlp.annotators.classifier.dl.RoBertaForMultipleChoice.loadSavedModel"
        super(_RoBertaMultipleChoiceLoader, self).__init__(entry_point, path, jspark)
483
+
484
+
412
485
  class _StarCoderLoader(ExtendedJavaWrapper):
413
486
  def __init__(self, path, jspark, use_openvino=False):
414
487
  super(_StarCoderLoader, self).__init__(
@@ -504,6 +577,15 @@ class _XlmRoBertaQuestionAnsweringLoader(ExtendedJavaWrapper):
504
577
  )
505
578
 
506
579
 
580
class _XlmRoBertaMultipleChoiceLoader(ExtendedJavaWrapper):
    """Loads a saved XLM-RoBERTa multiple-choice model via the JVM entry point.

    Parameters
    ----------
    path : str
        Path to the exported saved model.
    jspark : SparkSession
        The active Spark session (JVM side).
    """

    def __init__(self, path, jspark):
        entry_point = "com.johnsnowlabs.nlp.annotators.classifier.dl.XlmRoBertaForMultipleChoice.loadSavedModel"
        super(_XlmRoBertaMultipleChoiceLoader, self).__init__(entry_point, path, jspark)
587
+
588
+
507
589
  class _XlnetLoader(ExtendedJavaWrapper):
508
590
  def __init__(self, path, jspark):
509
591
  super(_XlnetLoader, self).__init__(
@@ -992,8 +1074,8 @@ class _AutoGGUFLoader(ExtendedJavaWrapper):
992
1074
  def __init__(self, path, jspark):
993
1075
  super(_AutoGGUFLoader, self).__init__(
994
1076
  "com.johnsnowlabs.nlp.annotators.seq2seq.AutoGGUFModel.loadSavedModel", path, jspark)
995
-
996
-
1077
+
1078
+
997
1079
  class _MxbaiEmbeddingsLoader(ExtendedJavaWrapper):
998
1080
  def __init__(self, path, jspark):
999
1081
  super(_MxbaiEmbeddingsLoader, self).__init__(
@@ -1021,3 +1103,19 @@ class _BLIPForQuestionAnswering(ExtendedJavaWrapper):
1021
1103
  path,
1022
1104
  jspark,
1023
1105
  )
1106
+
1107
+
1108
class _AutoGGUFVisionLoader(ExtendedJavaWrapper):
    """Loads a saved AutoGGUF vision model via the JVM entry point.

    Parameters
    ----------
    modelPath : str
        Path to the GGUF language model file.
    mmprojPath : str
        Path to the multimodal projector (mmproj) file.
    jspark : SparkSession
        The active Spark session (JVM side).
    """

    def __init__(self, modelPath, mmprojPath, jspark):
        entry_point = "com.johnsnowlabs.nlp.annotators.seq2seq.AutoGGUFVisionModel.loadSavedModel"
        super(_AutoGGUFVisionLoader, self).__init__(entry_point, modelPath, mmprojPath, jspark)
1112
+
1113
+
1114
class _Qwen2VLTransformerLoader(ExtendedJavaWrapper):
    """Loads a saved Qwen2-VL transformer model via the JVM entry point.

    Parameters
    ----------
    path : str
        Path to the exported saved model.
    jspark : SparkSession
        The active Spark session (JVM side).
    use_openvino : bool, optional
        Whether to load an OpenVINO export, by default False.
    """

    def __init__(self, path, jspark, use_openvino=False):
        entry_point = "com.johnsnowlabs.nlp.annotators.cv.Qwen2VLTransformer.loadSavedModel"
        super(_Qwen2VLTransformerLoader, self).__init__(entry_point, path, jspark, use_openvino)
@@ -0,0 +1,65 @@
1
+ from pyspark import keyword_only
2
+ from pyspark.ml.param import Param, Params, TypeConverters
3
+ from pyspark.ml.param.shared import HasInputCol, HasOutputCol
4
+ from pyspark.ml.util import JavaMLReadable, JavaMLWritable
5
+ from pyspark.ml.wrapper import JavaTransformer
6
+
7
+
8
class PdfToText(JavaTransformer, HasInputCol, HasOutputCol,
                JavaMLReadable, JavaMLWritable):
    """Extract text from a PDF document into one string, or one string per page.

    The input column holds the binary representation of a PDF document; the
    output contains the extracted text together with the page number. When
    page splitting is enabled, each page is exploded into its own row.
    """

    # Output column that will carry the page number of each extracted chunk.
    pageNumCol = Param(Params._dummy(), "pageNumCol",
                       "Page number output column name.",
                       typeConverter=TypeConverters.toString)

    # Number of partitions for the resulting DataFrame.
    partitionNum = Param(Params._dummy(), "partitionNum",
                         "Number of partitions.",
                         typeConverter=TypeConverters.toInt)

    # Whether to keep the per-page PDF binaries after splitting.
    storeSplittedPdf = Param(Params._dummy(), "storeSplittedPdf",
                             "Force to store splitted pdf.",
                             typeConverter=TypeConverters.toBoolean)

    @keyword_only
    def __init__(self):
        """__init__(self)

        Creates the transformer and its backing JVM object.
        """
        super(PdfToText, self).__init__()
        self._java_obj = self._new_java_obj("com.johnsnowlabs.reader.PdfToText", self.uid)

    def setInputCol(self, value):
        """Sets the value of :py:attr:`inputCol`."""
        return self._set(inputCol=value)

    def setOutputCol(self, value):
        """Sets the value of :py:attr:`outputCol`."""
        return self._set(outputCol=value)

    def setPageNumCol(self, value):
        """Sets the value of :py:attr:`pageNumCol`."""
        return self._set(pageNumCol=value)

    def setPartitionNum(self, value):
        """Sets the value of :py:attr:`partitionNum`."""
        return self._set(partitionNum=value)

    def setStoreSplittedPdf(self, value):
        """Sets the value of :py:attr:`storeSplittedPdf`."""
        return self._set(storeSplittedPdf=value)
@@ -15,19 +15,39 @@ from sparknlp.internal import ExtendedJavaWrapper
15
15
 
16
16
 
17
17
  class SparkNLPReader(ExtendedJavaWrapper):
18
- """Instantiates class to read HTML, email, and document files.
19
-
20
- Two types of input paths are supported:
21
-
22
- - `htmlPath`: A path to a directory of HTML files or a single HTML file (e.g., `"path/html/files"`).
23
- - `url`: A single URL or a set of URLs (e.g., `"https://www.wikipedia.org"`).
18
+ """Instantiates class to read documents in various formats.
24
19
 
25
20
  Parameters
26
21
  ----------
27
- spark : SparkSession
28
- The active Spark session.
22
+ params : spark
23
+ Spark session
29
24
  params : dict, optional
30
- A dictionary with custom configurations.
25
+ Parameter with custom configuration
26
+
27
+ Notes
28
+ -----
29
+ This class can read HTML, email, PDF, MS Word, Excel, PowerPoint, and text files.
30
+
31
+ Examples
32
+ --------
33
+ >>> from sparknlp.reader import SparkNLPReader
34
+ >>> reader = SparkNLPReader(spark)
35
+
36
+ # Reading HTML
37
+ >>> html_df = reader.html("https://www.wikipedia.org")
38
+ >>> # Or with shorthand
39
+ >>> import sparknlp
40
+ >>> html_df = sparknlp.read().html("https://www.wikipedia.org")
41
+
42
+ # Reading PDF
43
+ >>> pdf_df = reader.pdf("home/user/pdfs-directory")
44
+ >>> # Or with shorthand
45
+ >>> pdf_df = sparknlp.read().pdf("home/user/pdfs-directory")
46
+
47
+ # Reading Email
48
+ >>> email_df = reader.email("home/user/emails-directory")
49
+ >>> # Or with shorthand
50
+ >>> email_df = sparknlp.read().email("home/user/emails-directory")
31
51
  """
32
52
 
33
53
  def __init__(self, spark, params=None):
@@ -59,11 +79,29 @@ class SparkNLPReader(ExtendedJavaWrapper):
59
79
  >>> import sparknlp
60
80
  >>> html_df = sparknlp.read().html("https://www.wikipedia.org")
61
81
  >>> html_df.show(truncate=False)
82
+
83
+ +--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
84
+ |url |html |
85
+ +--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
86
+ |https://example.com/|[{Title, Example Domain, {pageNumber -> 1}}, {NarrativeText, 0, This domain is for use in illustrative examples in documents. You may use this domain in literature without prior coordination or asking for permission., {pageNumber -> 1}}, {NarrativeText, 0, More information... More information..., {pageNumber -> 1}}] |
87
+ +--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
88
+ >>> html_df.printSchema()
89
+
90
+ root
91
+ |-- url: string (nullable = true)
92
+ |-- html: array (nullable = true)
93
+ | |-- element: struct (containsNull = true)
94
+ | | |-- elementType: string (nullable = true)
95
+ | | |-- content: string (nullable = true)
96
+ | | |-- metadata: map (nullable = true)
97
+ | | | |-- key: string
98
+ | | | |-- value: string (valueContainsNull = true)
62
99
  """
63
100
  if not isinstance(htmlPath, (str, list)) or (isinstance(htmlPath, list) and not all(isinstance(item, str) for item in htmlPath)):
64
101
  raise TypeError("htmlPath must be a string or a list of strings")
65
102
  jdf = self._java_obj.html(htmlPath)
66
- return self.getDataFrame(self.spark, jdf)
103
+ dataframe = self.getDataFrame(self.spark, jdf)
104
+ return dataframe
67
105
 
68
106
  def email(self, filePath):
69
107
  """Reads email files and returns a Spark DataFrame.
@@ -83,31 +121,201 @@ class SparkNLPReader(ExtendedJavaWrapper):
83
121
  >>> from sparknlp.reader import SparkNLPReader
84
122
  >>> email_df = SparkNLPReader(spark).email("home/user/emails-directory")
85
123
 
86
- Using SparkNLP:
124
+ You can also use SparkNLP to simplify the process:
87
125
 
88
126
  >>> import sparknlp
89
127
  >>> email_df = sparknlp.read().email("home/user/emails-directory")
90
128
  >>> email_df.show(truncate=False)
129
+
130
+ +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
131
+ |email |
132
+ +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
133
+ |[{Title, Email Text Attachments, {sent_to -> Danilo Burbano <danilo@johnsnowlabs.com>, sent_from -> Danilo Burbano <danilo@johnsnowlabs.com>}}, {NarrativeText, Email test with two text attachments\r\n\r\nCheers,\r\n\r\n, {sent_to -> Danilo Burbano <danilo@johnsnowlabs.com>, sent_from -> Danilo Burbano <danilo@johnsnowlabs.com>, mimeType -> text/plain}}, {NarrativeText, <html>\r\n<head>\r\n<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">\r\n<style type="text/css" style="display:none;"> P {margin-top:0;margin-bottom:0;} </style>\r\n</head>\r\n<body dir="ltr">\r\n<span style="font-family: Aptos, Aptos_EmbeddedFont, Aptos_MSFontService, Calibri, Helvetica, sans-serif; font-size: 12pt; color: rgb(0, 0, 0);">Email&nbsp; test with two text attachments</span>\r\n<div class="elementToProof" style="font-family: Aptos, Aptos_EmbeddedFont, Aptos_MSFontService, Calibri, Helvetica, sans-serif; font-size: 12pt; color: rgb(0, 0, 0);">\r\n<br>\r\n</div>\r\n<div class="elementToProof" style="font-family: Aptos, Aptos_EmbeddedFont, Aptos_MSFontService, Calibri, Helvetica, sans-serif; font-size: 12pt; color: rgb(0, 0, 0);">\r\nCheers,</div>\r\n<div class="elementToProof" style="font-family: Aptos, Aptos_EmbeddedFont, Aptos_MSFontService, Calibri, Helvetica, sans-serif; font-size: 12pt; color: rgb(0, 0, 0);">\r\n<br>\r\n</div>\r\n</body>\r\n</html>\r\n, {sent_to -> Danilo Burbano <danilo@johnsnowlabs.com>, sent_from -> Danilo Burbano <danilo@johnsnowlabs.com>, mimeType -> text/html}}, {Attachment, filename.txt, {sent_to -> Danilo Burbano <danilo@johnsnowlabs.com>, sent_from -> Danilo Burbano <danilo@johnsnowlabs.com>, contentType -> text/plain; name="filename.txt"}}, {NarrativeText, This is the content of the file.\n, {sent_to -> Danilo Burbano <danilo@johnsnowlabs.com>, sent_from -> Danilo Burbano <danilo@johnsnowlabs.com>, mimeType -> text/plain}}, {Attachment, filename2.txt, {sent_to -> Danilo Burbano <danilo@johnsnowlabs.com>, sent_from -> Danilo Burbano 
<danilo@johnsnowlabs.com>, contentType -> text/plain; name="filename2.txt"}}, {NarrativeText, This is an additional content file.\n, {sent_to -> Danilo Burbano <danilo@johnsnowlabs.com>, sent_from -> Danilo Burbano <danilo@johnsnowlabs.com>, mimeType -> text/plain}}]|
134
+ +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
135
+ >>> email_df.printSchema()
136
+ root
137
+ |-- path: string (nullable = true)
138
+ |-- content: array (nullable = true)
139
+ |-- email: array (nullable = true)
140
+ | |-- element: struct (containsNull = true)
141
+ | | |-- elementType: string (nullable = true)
142
+ | | |-- content: string (nullable = true)
143
+ | | |-- metadata: map (nullable = true)
144
+ | | | |-- key: string
145
+ | | | |-- value: string (valueContainsNull = true)
146
+
91
147
  """
92
148
  if not isinstance(filePath, str):
93
149
  raise TypeError("filePath must be a string")
94
150
  jdf = self._java_obj.email(filePath)
95
- return self.getDataFrame(self.spark, jdf)
151
+ dataframe = self.getDataFrame(self.spark, jdf)
152
+ return dataframe
96
153
 
97
154
  def doc(self, docPath):
98
- """Reads document files and returns a Spark DataFrame.
155
+ """Reads word document files and returns a Spark DataFrame.
99
156
 
100
157
  Parameters
101
158
  ----------
102
159
  docPath : str
103
- Path to a document file.
160
+ Path to a word document file.
104
161
 
105
162
  Returns
106
163
  -------
107
164
  pyspark.sql.DataFrame
108
165
  A DataFrame containing parsed document content.
166
+
167
+ Examples
168
+ --------
169
+ >>> from sparknlp.reader import SparkNLPReader
170
+ >>> doc_df = SparkNLPReader().doc(spark, "home/user/word-directory")
171
+
172
+ You can use SparkNLP for one line of code
173
+ >>> import sparknlp
174
+ >>> doc_df = sparknlp.read().doc("home/user/word-directory")
175
+ >>> doc_df.show(truncate=False)
176
+
177
+ +----------------------------------------------------------------------------------------------------------------------------------------------------+
178
+ |doc | |
179
+ +----------------------------------------------------------------------------------------------------------------------------------------------------+
180
+ |[{Table, Header Col 1, {}}, {Table, Header Col 2, {}}, {Table, Lorem ipsum, {}}, {Table, A Link example, {}}, {NarrativeText, Dolor sit amet, {}}] |
181
+ +----------------------------------------------------------------------------------------------------------------------------------------------------+
182
+ >>> docsDf.printSchema()
183
+ root
184
+ |-- path: string (nullable = true)
185
+ |-- content: array (nullable = true)
186
+ |-- doc: array (nullable = true)
187
+ | |-- element: struct (containsNull = true)
188
+ | | |-- elementType: string (nullable = true)
189
+ | | |-- content: string (nullable = true)
190
+ | | |-- metadata: map (nullable = true)
191
+ | | | |-- key: string
192
+ | | | |-- value: string (valueContainsNull = true)
193
+
109
194
  """
110
195
  if not isinstance(docPath, str):
111
196
  raise TypeError("docPath must be a string")
112
197
  jdf = self._java_obj.doc(docPath)
198
+ dataframe = self.getDataFrame(self.spark, jdf)
199
+ return dataframe
200
+
201
+ def pdf(self, pdfPath):
202
+ if not isinstance(pdfPath, str):
203
+ raise TypeError("docPath must be a string")
204
+ jdf = self._java_obj.pdf(pdfPath)
205
+ dataframe = self.getDataFrame(self.spark, jdf)
206
+ return dataframe
207
+
208
+ def xls(self, docPath):
209
+ """Reads excel document files and returns a Spark DataFrame.
210
+
211
+ Parameters
212
+ ----------
213
+ docPath : str
214
+ Path to an excel document file.
215
+
216
+ Returns
217
+ -------
218
+ pyspark.sql.DataFrame
219
+ A DataFrame containing parsed document content.
220
+
221
+ Examples
222
+ --------
223
+ >>> from sparknlp.reader import SparkNLPReader
224
+ >>> xlsDf = SparkNLPReader().xls(spark, "home/user/excel-directory")
225
+
226
+ You can use SparkNLP for one line of code
227
+ >>> import sparknlp
228
+ >>> xlsDf = sparknlp.read().xls("home/user/excel-directory")
229
+ >>> xlsDf.show(truncate=False)
230
+
231
+ +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
232
+ |xls |
233
+ +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
234
+ |[{Title, Financial performance, {SheetName -> Index}}, {Title, Topic\tPeriod\t\t\tPage, {SheetName -> Index}}, {NarrativeText, Quarterly revenue\tNine quarters to 30 June 2023\t\t\t1.0, {SheetName -> Index}}, {NarrativeText, Group financial performance\tFY 22\tFY 23\t\t2.0, {SheetName -> Index}}, {NarrativeText, Segmental results\tFY 22\tFY 23\t\t3.0, {SheetName -> Index}}, {NarrativeText, Segmental analysis\tFY 22\tFY 23\t\t4.0, {SheetName -> Index}}, {NarrativeText, Cash flow\tFY 22\tFY 23\t\t5.0, {SheetName -> Index}}, {Title, Operational metrics, {SheetName -> Index}}, {Title, Topic\tPeriod\t\t\tPage, {SheetName -> Index}}, {NarrativeText, Mobile customers\tNine quarters to 30 June 2023\t\t\t6.0, {SheetName -> Index}}, {NarrativeText, Fixed broadband customers\tNine quarters to 30 June 2023\t\t\t7.0, {SheetName -> Index}}, {NarrativeText, Marketable homes passed\tNine quarters to 30 June 2023\t\t\t8.0, {SheetName -> Index}}, {NarrativeText, TV customers\tNine quarters to 30 June 2023\t\t\t9.0, {SheetName -> Index}}, {NarrativeText, Converged customers\tNine quarters to 30 June 2023\t\t\t10.0, {SheetName -> Index}}, {NarrativeText, Mobile churn\tNine quarters to 30 June 2023\t\t\t11.0, {SheetName -> Index}}, {NarrativeText, Mobile data usage\tNine quarters to 30 June 2023\t\t\t12.0, {SheetName -> Index}}, {NarrativeText, Mobile ARPU\tNine quarters to 30 June 2023\t\t\t13.0, {SheetName -> Index}}, {Title, Other, {SheetName -> Index}}, {Title, Topic\tPeriod\t\t\tPage, {SheetName -> Index}}, {NarrativeText, Average foreign exchange rates\tNine quarters to 30 June 2023\t\t\t14.0, {SheetName -> Index}}, {NarrativeText, Guidance rates\tFY 23/24\t\t\t14.0, {SheetName -> Index}}]|
235
+ +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
236
+
237
+ >>> xlsDf.printSchema()
238
+ root
239
+ |-- path: string (nullable = true)
240
+ |-- content: binary (nullable = true)
241
+ |-- xls: array (nullable = true)
242
+ | |-- element: struct (containsNull = true)
243
+ | | |-- elementType: string (nullable = true)
244
+ | | |-- content: string (nullable = true)
245
+ | | |-- metadata: map (nullable = true)
246
+ | | | |-- key: string
247
+ | | | |-- value: string (valueContainsNull = true)
248
+ """
249
+ if not isinstance(docPath, str):
250
+ raise TypeError("docPath must be a string")
251
+ jdf = self._java_obj.xls(docPath)
252
+ dataframe = self.getDataFrame(self.spark, jdf)
253
+ return dataframe
254
+
255
+ def ppt(self, docPath):
256
+ """
257
+ Reads power point document files and returns a Spark DataFrame.
258
+
259
+ Parameters
260
+ ----------
261
+ docPath : str
262
+ Path to an excel document file.
263
+
264
+ Returns
265
+ -------
266
+ pyspark.sql.DataFrame
267
+ A DataFrame containing parsed document content.
268
+
269
+ Examples
270
+ --------
271
+ >>> from sparknlp.reader import SparkNLPReader
272
+ >>> pptDf = SparkNLPReader().ppt(spark, "home/user/powerpoint-directory")
273
+
274
+ You can use SparkNLP for one line of code
275
+ >>> import sparknlp
276
+ >>> pptDf = sparknlp.read().ppt("home/user/powerpoint-directory")
277
+ >>> pptDf.show(truncate=False)
278
+ +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
279
+ |ppt |
280
+ +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
281
+ |[{Title, Adding a Bullet Slide, {}}, {ListItem, • Find the bullet slide layout, {}}, {ListItem, – Use _TextFrame.text for first bullet, {}}, {ListItem, • Use _TextFrame.add_paragraph() for subsequent bullets, {}}, {NarrativeText, Here is a lot of text!, {}}, {NarrativeText, Here is some text in a text box!, {}}]|
282
+ +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
283
+ """
284
+ if not isinstance(docPath, str):
285
+ raise TypeError("docPath must be a string")
286
+ jdf = self._java_obj.ppt(docPath)
287
+ dataframe = self.getDataFrame(self.spark, jdf)
288
+ return dataframe
289
+
290
+ def txt(self, docPath):
291
+ """Reads TXT files and returns a Spark DataFrame.
292
+
293
+ Parameters
294
+ ----------
295
+ docPath : str
296
+ Path to a TXT file.
297
+
298
+ Returns
299
+ -------
300
+ pyspark.sql.DataFrame
301
+ A DataFrame containing parsed document content.
302
+
303
+ Examples
304
+ --------
305
+ >>> from sparknlp.reader import SparkNLPReader
306
+ >>> txtDf = SparkNLPReader().txt(spark, "home/user/txt/files")
307
+
308
+ You can use SparkNLP for one line of code
309
+ >>> import sparknlp
310
+ >>> txtDf = sparknlp.read().txt("home/user/txt/files")
311
+ >>> txtDf.show(truncate=False)
312
+ +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
313
+ |txt |
314
+ +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
315
+ |[{Title, BIG DATA ANALYTICS, {paragraph -> 0}}, {NarrativeText, Apache Spark is a fast and general-purpose cluster computing system.\nIt provides high-level APIs in Java, Scala, Python, and R., {paragraph -> 0}}, {Title, MACHINE LEARNING, {paragraph -> 1}}, {NarrativeText, Spark's MLlib provides scalable machine learning algorithms.\nIt includes tools for classification, regression, clustering, and more., {paragraph -> 1}}]|
316
+ +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
317
+ """
318
+ if not isinstance(docPath, str):
319
+ raise TypeError("docPath must be a string")
320
+ jdf = self._java_obj.txt(docPath)
113
321
  return self.getDataFrame(self.spark, jdf)
@@ -1 +0,0 @@
1
- 90f78083-0ee0-43e9-8240-7263731b6707