spark-nlp 5.5.1__py2.py3-none-any.whl → 5.5.3__py2.py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release.
This version of spark-nlp might be problematic.
- {spark_nlp-5.5.1.dist-info → spark_nlp-5.5.3.dist-info}/METADATA +8 -8
- {spark_nlp-5.5.1.dist-info → spark_nlp-5.5.3.dist-info}/RECORD +23 -19
- sparknlp/__init__.py +12 -6
- sparknlp/annotator/cv/__init__.py +1 -0
- sparknlp/annotator/cv/blip_for_question_answering.py +172 -0
- sparknlp/annotator/embeddings/__init__.py +1 -0
- sparknlp/annotator/embeddings/auto_gguf_embeddings.py +538 -0
- sparknlp/annotator/embeddings/bge_embeddings.py +7 -3
- sparknlp/annotator/embeddings/nomic_embeddings.py +3 -3
- sparknlp/annotator/seq2seq/auto_gguf_model.py +14 -24
- sparknlp/annotator/seq2seq/cpm_transformer.py +5 -5
- sparknlp/annotator/seq2seq/nllb_transformer.py +4 -4
- sparknlp/annotator/seq2seq/phi3_transformer.py +4 -4
- sparknlp/annotator/seq2seq/qwen_transformer.py +3 -3
- sparknlp/base/image_assembler.py +11 -0
- sparknlp/base/light_pipeline.py +20 -9
- sparknlp/common/properties.py +27 -0
- sparknlp/internal/__init__.py +15 -0
- sparknlp/reader/__init__.py +15 -0
- sparknlp/reader/sparknlp_reader.py +113 -0
- {spark_nlp-5.5.1.dist-info → spark_nlp-5.5.3.dist-info}/.uuid +0 -0
- {spark_nlp-5.5.1.dist-info → spark_nlp-5.5.3.dist-info}/WHEEL +0 -0
- {spark_nlp-5.5.1.dist-info → spark_nlp-5.5.3.dist-info}/top_level.txt +0 -0
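Among the additions, blip_for_question_answering.py introduces a visual question answering annotator. Below is a minimal sketch of how it presumably plugs into a pipeline, assuming the column wiring implied by the ImageAssembler and LightPipeline changes further down; the no-argument pretrained() default is not confirmed by this diff.

from pyspark.ml import Pipeline
from sparknlp.base import ImageAssembler
from sparknlp.annotator import BLIPForQuestionAnswering

# Hedged sketch: column names and the pretrained() default are assumptions.
imageAssembler = ImageAssembler() \
    .setInputCol("image") \
    .setTextCol("text") \
    .setOutputCol("image_assembler")

blip = BLIPForQuestionAnswering.pretrained() \
    .setInputCols(["image_assembler"]) \
    .setOutputCol("answer")

pipeline = Pipeline().setStages([imageAssembler, blip])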
sparknlp/annotator/seq2seq/cpm_transformer.py
CHANGED

@@ -44,7 +44,7 @@ class CPMTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
     ... .setOutputCol("generation")


-    The default model is ``"
+    The default model is ``"mini_cpm_2b_8bit"``, if no name is provided. For available
     pretrained models please see the `Models Hub
     <https://sparknlp.org/models?q=cpm>`__.

@@ -104,7 +104,7 @@ class CPMTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
     >>> documentAssembler = DocumentAssembler() \\
     ... .setInputCol("text") \\
     ... .setOutputCol("documents")
-    >>> cpm = CPMTransformer.pretrained("
+    >>> cpm = CPMTransformer.pretrained("mini_cpm_2b_8bit","xx") \\
     ... .setInputCols(["documents"]) \\
     ... .setMaxOutputLength(50) \\
     ... .setOutputCol("generation")
@@ -299,15 +299,15 @@ class CPMTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
         return CPMTransformer(java_model=jModel)

     @staticmethod
-    def pretrained(name="
+    def pretrained(name="mini_cpm_2b_8bit", lang="xx", remote_loc=None):
         """Downloads and loads a pretrained model.

         Parameters
         ----------
         name : str, optional
-            Name of the pretrained model, by default "
+            Name of the pretrained model, by default "mini_cpm_2b_8bit"
         lang : str, optional
-            Language of the pretrained model, by default "
+            Language of the pretrained model, by default "xx"
         remote_loc : str, optional
             Optional remote address of the resource, by default None. Will use
             Spark NLPs repositories otherwise.
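For reference, a minimal usage sketch of the restored default (assuming "mini_cpm_2b_8bit" resolves on the Models Hub):

import sparknlp
from pyspark.ml import Pipeline
from sparknlp.base import DocumentAssembler
from sparknlp.annotator import CPMTransformer

spark = sparknlp.start()

documentAssembler = DocumentAssembler() \
    .setInputCol("text") \
    .setOutputCol("documents")

# With no arguments, pretrained() now resolves to "mini_cpm_2b_8bit" (lang "xx").
cpm = CPMTransformer.pretrained() \
    .setInputCols(["documents"]) \
    .setMaxOutputLength(50) \
    .setOutputCol("generation")

pipeline = Pipeline().setStages([documentAssembler, cpm])

The same default-model-name fix repeats for NLLBTransformer, Phi3Transformer, and QwenTransformer below.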
sparknlp/annotator/seq2seq/nllb_transformer.py
CHANGED

@@ -32,7 +32,7 @@ class NLLBTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
     ... .setOutputCol("generation")


-    The default model is ``"
+    The default model is ``"nllb_distilled_600M_8int"``, if no name is provided. For available
     pretrained models please see the `Models Hub
     <https://sparknlp.org/models?q=nllb>`__.

@@ -164,7 +164,7 @@ class NLLBTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
     >>> documentAssembler = DocumentAssembler() \\
     ... .setInputCol("text") \\
     ... .setOutputCol("documents")
-    >>> nllb = NLLBTransformer.pretrained("
+    >>> nllb = NLLBTransformer.pretrained("nllb_distilled_600M_8int") \\
     ... .setInputCols(["documents"]) \\
     ... .setMaxOutputLength(50) \\
     ... .setOutputCol("generation") \\
@@ -398,13 +398,13 @@ class NLLBTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
         return NLLBTransformer(java_model=jModel)

     @staticmethod
-    def pretrained(name="
+    def pretrained(name="nllb_distilled_600M_8int", lang="xx", remote_loc=None):
         """Downloads and loads a pretrained model.

         Parameters
         ----------
         name : str, optional
-            Name of the pretrained model, by default "
+            Name of the pretrained model, by default "nllb_distilled_600M_8int"
         lang : str, optional
             Language of the pretrained model, by default "en"
         remote_loc : str, optional
sparknlp/annotator/seq2seq/phi3_transformer.py
CHANGED

@@ -37,7 +37,7 @@ class Phi3Transformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
     ... .setOutputCol("generation")


-    The default model is ``
+    The default model is ``phi_3_mini_128k_instruct``, if no name is provided. For available
     pretrained models please see the `Models Hub
     <https://sparknlp.org/models?q=phi3>`__.

@@ -112,7 +112,7 @@ class Phi3Transformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
     >>> documentAssembler = DocumentAssembler() \\
     ... .setInputCol("text") \\
     ... .setOutputCol("documents")
-    >>> phi3 = Phi3Transformer.pretrained(
+    >>> phi3 = Phi3Transformer.pretrained(phi_3_mini_128k_instruct) \\
     ... .setInputCols(["documents"]) \\
     ... .setMaxOutputLength(50) \\
     ... .setOutputCol("generation")
@@ -308,13 +308,13 @@ class Phi3Transformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
         return Phi3Transformer(java_model=jModel)

     @staticmethod
-    def pretrained(name="
+    def pretrained(name="phi_3_mini_128k_instruct", lang="en", remote_loc=None):
         """Downloads and loads a pretrained model.

         Parameters
         ----------
         name : str, optional
-            Name of the pretrained model, by default
+            Name of the pretrained model, by default phi_3_mini_128k_instruct
         lang : str, optional
             Language of the pretrained model, by default "en"
         remote_loc : str, optional
sparknlp/annotator/seq2seq/qwen_transformer.py
CHANGED

@@ -121,7 +121,7 @@ class QwenTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
     >>> documentAssembler = DocumentAssembler() \\
     ... .setInputCol("text") \\
     ... .setOutputCol("documents")
-    >>> qwen = QwenTransformer.pretrained("
+    >>> qwen = QwenTransformer.pretrained("qwen_7.5b_chat") \\
     ... .setInputCols(["documents"]) \\
     ... .setMaxOutputLength(50) \\
     ... .setOutputCol("generation")
@@ -317,13 +317,13 @@ class QwenTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
         return QwenTransformer(java_model=jModel)

     @staticmethod
-    def pretrained(name="
+    def pretrained(name="qwen_7.5b_chat", lang="en", remote_loc=None):
         """Downloads and loads a pretrained model.

         Parameters
         ----------
         name : str, optional
-            Name of the pretrained model, by default "
+            Name of the pretrained model, by default "qwen_7.5b_chat"
         lang : str, optional
             Language of the pretrained model, by default "en"
         remote_loc : str, optional
sparknlp/base/image_assembler.py
CHANGED
@@ -65,6 +65,7 @@ class ImageAssembler(AnnotatorTransformer):
     outputAnnotatorType = AnnotatorType.IMAGE

     inputCol = Param(Params._dummy(), "inputCol", "input column name", typeConverter=TypeConverters.toString)
+    textCol = Param(Params._dummy(), "textCol", "text column name", typeConverter=TypeConverters.toString)
     outputCol = Param(Params._dummy(), "outputCol", "output column name", typeConverter=TypeConverters.toString)
     name = 'ImageAssembler'

@@ -101,3 +102,13 @@ class ImageAssembler(AnnotatorTransformer):
     def getOutputCol(self):
        """Gets output column name of annotations."""
        return self.getOrDefault(self.outputCol)
+
+    def setTextCol(self, value):
+        """Sets an optional text column name.
+
+        Parameters
+        ----------
+        value : str
+            Name of an optional input text column
+        """
+        return self._set(inputCol=value)
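A minimal sketch of the new parameter in use ("caption" is a placeholder column name). Note that the added setTextCol body assigns inputCol rather than textCol, so this sketch assumes the intended behavior rather than the literal implementation:

from sparknlp.base import ImageAssembler

# Hedged sketch: attach an optional text column next to the image column.
imageAssembler = ImageAssembler() \
    .setInputCol("image") \
    .setTextCol("caption") \
    .setOutputCol("image_assembler")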
sparknlp/base/light_pipeline.py
CHANGED
@@ -277,7 +277,7 @@ class LightPipeline:

         return result

-    def fullAnnotateImage(self, path_to_image):
+    def fullAnnotateImage(self, path_to_image, text=None):
         """Annotates the data provided into `Annotation` type results.

         The data should be either a list or a str.
@@ -287,27 +287,38 @@ class LightPipeline:
         path_to_image : list or str
             Source path of image, list of paths to images

+        text: list or str, optional
+            Optional list or str of texts. If None, defaults to empty list if path_to_image is a list, or empty string if path_to_image is a string.
+
         Returns
         -------
         List[AnnotationImage]
             The result of the annotation
         """
+        if not isinstance(path_to_image, (str, list)):
+            raise TypeError("argument for path_to_image must be 'str' or 'list[str]'")
+
+        if text is None:
+            text = "" if isinstance(path_to_image, str) else []
+
+        if type(path_to_image) != type(text):
+            raise ValueError("`path_to_image` and `text` must be of the same type")
+
         stages = self.pipeline_model.stages
         if not self._skipPipelineValidation(stages):
             self._validateStagesInputCols(stages)

-        if
+        if isinstance(path_to_image, str):
             path_to_image = [path_to_image]
+            text = [text]

-
-        result = []
+        result = []

-
-
+        for image_result in self._lightPipeline.fullAnnotateImageJava(path_to_image, text):
+            result.append(self.__buildStages(image_result))
+
+        return result

-        return result
-        else:
-            raise TypeError("argument for annotation may be 'str' or list[str]")

     def __buildStages(self, annotations_result):
         stages = {}
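Based on the new signature and validation logic above, a usage sketch (pipeline_model and the image paths are placeholders):

from sparknlp.base import LightPipeline

# Hedged sketch: pipeline_model is a fitted PipelineModel from your own pipeline.
light = LightPipeline(pipeline_model)

# Single image with a single question:
single = light.fullAnnotateImage("images/bluetick.jpg", "What breed is this dog?")

# Batched: both arguments must be the same type, here list and list.
batch = light.fullAnnotateImage(
    ["images/a.jpg", "images/b.jpg"],
    ["What is shown?", "How many people are there?"],
)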
sparknlp/common/properties.py
CHANGED
@@ -67,6 +67,33 @@ class HasCaseSensitiveProperties:
         return self.getOrDefault(self.caseSensitive)


+class HasClsTokenProperties:
+    useCLSToken = Param(Params._dummy(),
+                        "useCLSToken",
+                        "Whether to use CLS token for pooling (true) or attention-based average pooling (false)",
+                        typeConverter=TypeConverters.toBoolean)
+
+    def setUseCLSToken(self, value):
+        """Sets whether to ignore case in tokens for embeddings matching.
+
+        Parameters
+        ----------
+        value : bool
+            Whether to use CLS token for pooling (true) or attention-based average pooling (false)
+        """
+        return self._set(useCLSToken=value)
+
+    def getUseCLSToken(self):
+        """Gets whether to use CLS token for pooling (true) or attention-based average pooling (false)
+
+        Returns
+        -------
+        bool
+            Whether to use CLS token for pooling (true) or attention-based average pooling (false)
+        """
+        return self.getOrDefault(self.useCLSToken)
+
+
 class HasClassifierActivationProperties:
     activation = Param(Params._dummy(),
                        "activation",
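A sketch of the new mixin in use. BGEEmbeddings is an assumption here; its file is among the changed files listed at the top, so it plausibly gains this property:

from sparknlp.annotator import BGEEmbeddings

# Hedged sketch: toggle CLS-token pooling on an annotator assumed to mix in
# HasClsTokenProperties.
embeddings = BGEEmbeddings.pretrained() \
    .setInputCols(["document"]) \
    .setOutputCol("embeddings") \
    .setUseCLSToken(True)  # True: CLS pooling; False: attention-based average pooling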
sparknlp/internal/__init__.py
CHANGED
@@ -1006,3 +1006,18 @@ class _SnowFlakeEmbeddingsLoader(ExtendedJavaWrapper):
         super(_SnowFlakeEmbeddingsLoader, self).__init__(
             "com.johnsnowlabs.nlp.embeddings.SnowFlakeEmbeddings.loadSavedModel", path, jspark
         )
+
+
+class _AutoGGUFEmbeddingsLoader(ExtendedJavaWrapper):
+    def __init__(self, path, jspark):
+        super(_AutoGGUFEmbeddingsLoader, self).__init__(
+            "com.johnsnowlabs.nlp.embeddings.AutoGGUFEmbeddings.loadSavedModel", path, jspark)
+
+
+class _BLIPForQuestionAnswering(ExtendedJavaWrapper):
+    def __init__(self, path, jspark):
+        super(_BLIPForQuestionAnswering, self).__init__(
+            "com.johnsnowlabs.nlp.annotators.cv.BLIPForQuestionAnswering.loadSavedModel",
+            path,
+            jspark,
+        )
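These private wrappers typically back a public loadSavedModel entry point on the corresponding annotators. A hedged sketch of that assumed usage (the export path is a placeholder, and the loadSavedModel(path, spark) signature is inferred from the wrapper above rather than confirmed by this diff):

import sparknlp
from sparknlp.annotator import AutoGGUFEmbeddings

spark = sparknlp.start()

# Hedged sketch: load a locally exported GGUF embeddings model.
embeddings = AutoGGUFEmbeddings.loadSavedModel("exported/gguf_model", spark) \
    .setInputCols(["document"]) \
    .setOutputCol("embeddings")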
sparknlp/reader/__init__.py
ADDED

@@ -0,0 +1,15 @@
+# Copyright 2017-2022 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Module for reading different files types."""
+from sparknlp.reader.sparknlp_reader import *
sparknlp/reader/sparknlp_reader.py
ADDED

@@ -0,0 +1,113 @@
+# Copyright 2017-2024 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from sparknlp.internal import ExtendedJavaWrapper
+
+
+class SparkNLPReader(ExtendedJavaWrapper):
+    """Instantiates class to read HTML, email, and document files.
+
+    Two types of input paths are supported:
+
+    - `htmlPath`: A path to a directory of HTML files or a single HTML file (e.g., `"path/html/files"`).
+    - `url`: A single URL or a set of URLs (e.g., `"https://www.wikipedia.org"`).
+
+    Parameters
+    ----------
+    spark : SparkSession
+        The active Spark session.
+    params : dict, optional
+        A dictionary with custom configurations.
+    """
+
+    def __init__(self, spark, params=None):
+        if params is None:
+            params = {}
+        super(SparkNLPReader, self).__init__("com.johnsnowlabs.reader.SparkNLPReader", params)
+        self.spark = spark
+
+    def html(self, htmlPath):
+        """Reads HTML files or URLs and returns a Spark DataFrame.
+
+        Parameters
+        ----------
+        htmlPath : str or list of str
+            Path(s) to HTML file(s) or a list of URLs.
+
+        Returns
+        -------
+        pyspark.sql.DataFrame
+            A DataFrame containing the parsed HTML content.
+
+        Examples
+        --------
+        >>> from sparknlp.reader import SparkNLPReader
+        >>> html_df = SparkNLPReader(spark).html("https://www.wikipedia.org")
+
+        You can also use SparkNLP to simplify the process:
+
+        >>> import sparknlp
+        >>> html_df = sparknlp.read().html("https://www.wikipedia.org")
+        >>> html_df.show(truncate=False)
+        """
+        if not isinstance(htmlPath, (str, list)) or (isinstance(htmlPath, list) and not all(isinstance(item, str) for item in htmlPath)):
+            raise TypeError("htmlPath must be a string or a list of strings")
+        jdf = self._java_obj.html(htmlPath)
+        return self.getDataFrame(self.spark, jdf)
+
+    def email(self, filePath):
+        """Reads email files and returns a Spark DataFrame.
+
+        Parameters
+        ----------
+        filePath : str
+            Path to an email file or a directory containing emails.
+
+        Returns
+        -------
+        pyspark.sql.DataFrame
+            A DataFrame containing parsed email data.
+
+        Examples
+        --------
+        >>> from sparknlp.reader import SparkNLPReader
+        >>> email_df = SparkNLPReader(spark).email("home/user/emails-directory")
+
+        Using SparkNLP:
+
+        >>> import sparknlp
+        >>> email_df = sparknlp.read().email("home/user/emails-directory")
+        >>> email_df.show(truncate=False)
+        """
+        if not isinstance(filePath, str):
+            raise TypeError("filePath must be a string")
+        jdf = self._java_obj.email(filePath)
+        return self.getDataFrame(self.spark, jdf)
+
+    def doc(self, docPath):
+        """Reads document files and returns a Spark DataFrame.
+
+        Parameters
+        ----------
+        docPath : str
+            Path to a document file.
+
+        Returns
+        -------
+        pyspark.sql.DataFrame
+            A DataFrame containing parsed document content.
+        """
+        if not isinstance(docPath, str):
+            raise TypeError("docPath must be a string")
+        jdf = self._java_obj.doc(docPath)
+        return self.getDataFrame(self.spark, jdf)
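Putting the new reader together, following the docstring examples above (the URL and directory are placeholders):

import sparknlp

spark = sparknlp.start()

# HTML from a URL, as in the html() docstring:
html_df = sparknlp.read().html("https://www.wikipedia.org")
html_df.show(truncate=False)

# Emails from a local directory, as in the email() docstring:
email_df = sparknlp.read().email("home/user/emails-directory")
email_df.printSchema()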
{spark_nlp-5.5.1.dist-info → spark_nlp-5.5.3.dist-info}/.uuid
File without changes

{spark_nlp-5.5.1.dist-info → spark_nlp-5.5.3.dist-info}/WHEEL
File without changes

{spark_nlp-5.5.1.dist-info → spark_nlp-5.5.3.dist-info}/top_level.txt
File without changes