spark-nlp 6.0.3__py2.py3-none-any.whl → 6.0.5__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {spark_nlp-6.0.3.dist-info → spark_nlp-6.0.5.dist-info}/METADATA +5 -5
- {spark_nlp-6.0.3.dist-info → spark_nlp-6.0.5.dist-info}/RECORD +11 -9
- sparknlp/__init__.py +1 -1
- sparknlp/annotator/dataframe_optimizer.py +216 -0
- sparknlp/annotator/embeddings/__init__.py +1 -0
- sparknlp/annotator/embeddings/minilm_embeddings.py +189 -0
- sparknlp/internal/__init__.py +10 -0
- sparknlp/reader/pdf_to_text.py +34 -1
- sparknlp/reader/sparknlp_reader.py +46 -0
- {spark_nlp-6.0.3.dist-info → spark_nlp-6.0.5.dist-info}/WHEEL +0 -0
- {spark_nlp-6.0.3.dist-info → spark_nlp-6.0.5.dist-info}/top_level.txt +0 -0
{spark_nlp-6.0.3.dist-info → spark_nlp-6.0.5.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: spark-nlp
-Version: 6.0.3
+Version: 6.0.5
 Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
 Home-page: https://github.com/JohnSnowLabs/spark-nlp
 Author: John Snow Labs
@@ -102,7 +102,7 @@ $ java -version
 $ conda create -n sparknlp python=3.7 -y
 $ conda activate sparknlp
 # spark-nlp by default is based on pyspark 3.x
-$ pip install spark-nlp==6.0.3 pyspark==3.3.1
+$ pip install spark-nlp==6.0.5 pyspark==3.3.1
 ```

 In Python console or Jupyter `Python3` kernel:
@@ -168,7 +168,7 @@ For a quick example of using pipelines and models take a look at our official [d

 ### Apache Spark Support

-Spark NLP *6.0.3* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+Spark NLP *6.0.5* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x

 | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
 |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
@@ -198,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http

 ### Databricks Support

-Spark NLP 6.0.3 has been tested and is compatible with the following runtimes:
+Spark NLP 6.0.5 has been tested and is compatible with the following runtimes:

 | **CPU** | **GPU** |
 |--------------------|--------------------|
@@ -215,7 +215,7 @@ We are compatible with older runtimes. For a full list check databricks support

 ### EMR Support

-Spark NLP 6.0.3 has been tested and is compatible with the following EMR releases:
+Spark NLP 6.0.5 has been tested and is compatible with the following EMR releases:

 | **EMR Release** |
 |--------------------|
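The METADATA change above is a version bump plus the matching install instructions. A minimal post-upgrade sanity check, assuming a local Java 8+/PySpark setup as described in the README section of the package, could look like this; it only relies on the library's standard `start()` and `version()` entry points:

# Minimal sketch: start a Spark NLP session and confirm the installed
# library version matches the bumped METADATA above.
import sparknlp

spark = sparknlp.start()
print(sparknlp.version())   # expected to print 6.0.5 after upgrading the wheel
print(spark.version)        # the underlying Apache Spark version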
{spark_nlp-6.0.3.dist-info → spark_nlp-6.0.5.dist-info}/RECORD
CHANGED

@@ -3,7 +3,7 @@ com/johnsnowlabs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
 com/johnsnowlabs/ml/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 com/johnsnowlabs/ml/ai/__init__.py,sha256=YQiK2M7U4d8y5irPy_HB8ae0mSpqS9583MH44pnKJXc,295
 com/johnsnowlabs/nlp/__init__.py,sha256=DPIVXtONO5xXyOk-HB0-sNiHAcco17NN13zPS_6Uw8c,294
-sparknlp/__init__.py,sha256=
+sparknlp/__init__.py,sha256=peVwWLyO7M5yWlQdRGzOBql6cvaWcdQyy5pNhGR8-sg,13814
 sparknlp/annotation.py,sha256=I5zOxG5vV2RfPZfqN9enT1i4mo6oBcn3Lrzs37QiOiA,5635
 sparknlp/annotation_audio.py,sha256=iRV_InSVhgvAwSRe9NTbUH9v6OGvTM-FPCpSAKVu0mE,1917
 sparknlp/annotation_image.py,sha256=xhCe8Ko-77XqWVuuYHFrjKqF6zPd8Z-RY_rmZXNwCXU,2547
@@ -13,6 +13,7 @@ sparknlp/util.py,sha256=2Z499Psal-NuEJ4CHQNgHnAJrS73QQNyCzKPo1MavU8,2279
 sparknlp/annotator/__init__.py,sha256=G746SY8dRM_AOf-gaoSKlh7D-2TKGpqqHhGr4XF-b2A,3534
 sparknlp/annotator/chunk2_doc.py,sha256=IJ3_vQHvzjqono90AZUzZ67QSYjwquuMYbN9_HSOVcg,3141
 sparknlp/annotator/chunker.py,sha256=8nz9B7R_mxKxcfJRfKvz2x_T29W3u4izE9k0wfYPzgE,5174
+sparknlp/annotator/dataframe_optimizer.py,sha256=P4GySLzz1lRCZX0UBRF9_IDuXlRS1XvRWz-B2L0zqMA,7771
 sparknlp/annotator/date2_chunk.py,sha256=tW3m_LExmhx8LMFWOGXqMyfNRXSr2dnoEHD-6DrnpXI,3153
 sparknlp/annotator/document_character_text_splitter.py,sha256=oNrOKJAKO2h1wr0bEuSqYrrltIU_Y6J6cTHy70yKy6s,9877
 sparknlp/annotator/document_normalizer.py,sha256=hU2fG6vaPfdngQapoeSu-_zS_LiBZNp2tcVBGl6eTpk,10973
@@ -102,7 +103,7 @@ sparknlp/annotator/cv/vit_for_image_classification.py,sha256=D2V3pxAd3rBi1817lxV
 sparknlp/annotator/dependency/__init__.py,sha256=eV43oXAGaYl2N1XKIEAAZJLNP8gpHm8VxuXDeDlQzR4,774
 sparknlp/annotator/dependency/dependency_parser.py,sha256=SxyvHPp8Hs1Xnm5X1nLTMi095XoQMtfL8pbys15mYAI,11212
 sparknlp/annotator/dependency/typed_dependency_parser.py,sha256=60vPdYkbFk9MPGegg3m9Uik9cMXpMZd8tBvXG39gNww,12456
-sparknlp/annotator/embeddings/__init__.py,sha256=
+sparknlp/annotator/embeddings/__init__.py,sha256=Aw1oaP5DI0OS6259c0TEZZ6j3VFSvYFEerah5a-udVw,2528
 sparknlp/annotator/embeddings/albert_embeddings.py,sha256=6Rd1LIn8oFIpq_ALcJh-RUjPEO7Ht8wsHY6JHSFyMkw,9995
 sparknlp/annotator/embeddings/auto_gguf_embeddings.py,sha256=IlqkPGOH2lmZvxEyDSGX-G90DtTFOe2Rvujfbg5zvlU,20185
 sparknlp/annotator/embeddings/bert_embeddings.py,sha256=HVUjkg56kBcpGZCo-fmPG5uatMDF3swW_lnbpy1SgSI,8463
@@ -118,6 +119,7 @@ sparknlp/annotator/embeddings/e5v_embeddings.py,sha256=NFHO2nxDcgVzyKQ6yz1BWyqtj
 sparknlp/annotator/embeddings/elmo_embeddings.py,sha256=KV-KPs0Pq_OpPaHsnqBz2k_S7VdzyFZ4632IeFNKqJ8,9858
 sparknlp/annotator/embeddings/instructor_embeddings.py,sha256=CTKmbuBOx_KBM4JM-Y1U5LyR-6rrnpoBGbgGE_axS1c,8670
 sparknlp/annotator/embeddings/longformer_embeddings.py,sha256=jS4fxB5O0-d9ta9VKv8ai-17n5YHt5rML8QxUw7K4Io,8754
+sparknlp/annotator/embeddings/minilm_embeddings.py,sha256=iKO3FPA6qkGjJAyPlhYpAWssnrNvlYjYxZaZEC0QDhc,7436
 sparknlp/annotator/embeddings/mpnet_embeddings.py,sha256=7d6E4lS7jjkppDPvty1UHNNrbykkriFiysrxZ_RzL0U,7875
 sparknlp/annotator/embeddings/mxbai_embeddings.py,sha256=kCaYcM3lLYJjhElLK5isdxzJqIvoGZlUKKNkySMUkE8,6017
 sparknlp/annotator/embeddings/nomic_embeddings.py,sha256=WTllH3htx9wDD2Le8pZgKVPM_U8XNmroJb6f4PeVeP8,7347
@@ -225,7 +227,7 @@ sparknlp/common/read_as.py,sha256=imxPGwV7jr4Li_acbo0OAHHRGCBbYv-akzEGaBWEfcY,12
 sparknlp/common/recursive_annotator_approach.py,sha256=vqugBw22cE3Ff7PIpRlnYFuOlchgL0nM26D8j-NdpqU,1449
 sparknlp/common/storage.py,sha256=D91H3p8EIjNspjqAYu6ephRpCUtdcAir4_PrAbkIQWE,4842
 sparknlp/common/utils.py,sha256=Yne6yYcwKxhOZC-U4qfYoDhWUP_6BIaAjI5X_P_df1E,1306
-sparknlp/internal/__init__.py,sha256=
+sparknlp/internal/__init__.py,sha256=wvC7ovDfII5GiYSwNpA1HHttnlXjbFgpYAGV68NsiQo,40446
 sparknlp/internal/annotator_java_ml.py,sha256=UGPoThG0rGXUOXGSQnDzEDW81Mu1s5RPF29v7DFyE3c,1187
 sparknlp/internal/annotator_transformer.py,sha256=fXmc2IWXGybqZpbEU9obmbdBYPc798y42zvSB4tqV9U,1448
 sparknlp/internal/extended_java_wrapper.py,sha256=hwP0133-hDiDf5sBF-P3MtUsuuDj1PpQbtGZQIRwzfk,2240
@@ -243,8 +245,8 @@ sparknlp/pretrained/resource_downloader.py,sha256=8_-rpvO2LsX_Lq4wMPif2ca3RlJZWE
 sparknlp/pretrained/utils.py,sha256=T1MrvW_DaWk_jcOjVLOea0NMFE9w8fe0ZT_5urZ_nEY,1099
 sparknlp/reader/__init__.py,sha256=-Toj3AIBki-zXPpV8ezFTI2LX1yP_rK2bhpoa8nBkTw,685
 sparknlp/reader/enums.py,sha256=MNGug9oJ1BBLM1Pbske13kAabalDzHa2kucF5xzFpHs,770
-sparknlp/reader/pdf_to_text.py,sha256=
-sparknlp/reader/sparknlp_reader.py,sha256=
+sparknlp/reader/pdf_to_text.py,sha256=eWw-cwjosmcSZ9eHso0F5QQoeGBBnwsOhzhCXXvMjZA,7169
+sparknlp/reader/sparknlp_reader.py,sha256=IG0_wYKT1cIIU3EibzOVBZ-GhvX50mC5meXYv0WsYKs,18524
 sparknlp/training/__init__.py,sha256=qREi9u-5Vc2VjpL6-XZsyvu5jSEIdIhowW7_kKaqMqo,852
 sparknlp/training/conll.py,sha256=wKBiSTrjc6mjsl7Nyt6B8f4yXsDJkZb-sn8iOjix9cE,6961
 sparknlp/training/conllu.py,sha256=8r3i-tmyrLsyk1DtZ9uo2mMDCWb1yw2Y5W6UsV13MkY,4953
@@ -275,7 +277,7 @@ sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py,sha256=R4yHFN3
 sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py,sha256=EoCSdcIjqQ3wv13MAuuWrKV8wyVBP0SbOEW41omHlR0,23189
 sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py,sha256=k5CQ7gKV6HZbZMB8cKLUJuZxoZWlP_DFWdZ--aIDwsc,2356
 sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py,sha256=pAxjWhjazSX8Vg0MFqJiuRVw1IbnQNSs-8Xp26L4nko,870
-spark_nlp-6.0.3.dist-info/METADATA,sha256=
-spark_nlp-6.0.3.dist-info/WHEEL,sha256=
-spark_nlp-6.0.3.dist-info/top_level.txt,sha256=
-spark_nlp-6.0.3.dist-info/RECORD,,
+spark_nlp-6.0.5.dist-info/METADATA,sha256=BL1PeMYps-L3LAkmNpwxMkrGUw_KwO164VZ5AoqDZLg,19722
+spark_nlp-6.0.5.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
+spark_nlp-6.0.5.dist-info/top_level.txt,sha256=uuytur4pyMRw2H_txNY2ZkaucZHUs22QF8-R03ch_-E,13
+spark_nlp-6.0.5.dist-info/RECORD,,
sparknlp/__init__.py
CHANGED (+1 -1)

sparknlp/annotator/dataframe_optimizer.py
ADDED

@@ -0,0 +1,216 @@
# Copyright 2017-2025 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark.ml import Transformer
from pyspark.ml.param.shared import *
from pyspark.sql import DataFrame
from typing import Any


# Custom converter for string-to-string dictionaries
def toStringDict(value):
    if not isinstance(value, dict):
        raise TypeError("Expected a dictionary of strings.")
    return {str(k): str(v) for k, v in value.items()}


class DataFrameOptimizer(Transformer):
    """
    Optimizes a Spark DataFrame by repartitioning, optionally caching, and persisting it to disk.

    This transformer is intended to improve performance for Spark NLP pipelines or when preparing
    data for export. It allows partition tuning via `numPartitions` directly, or indirectly using
    `executorCores` and `numWorkers`. The DataFrame can also be persisted in a specified format
    (`csv`, `json`, or `parquet`) with additional writer options.

    Parameters
    ----------
    executorCores : int, optional
        Number of cores per Spark executor (used to compute number of partitions if `numPartitions` is not set).
    numWorkers : int, optional
        Number of executor nodes (used to compute number of partitions if `numPartitions` is not set).
    numPartitions : int, optional
        Target number of partitions for the DataFrame (overrides calculation via cores × workers).
    doCache : bool, default False
        Whether to cache the DataFrame after repartitioning.
    persistPath : str, optional
        Path to save the DataFrame output (if persistence is enabled).
    persistFormat : str, optional
        Format to persist the DataFrame in: one of `'csv'`, `'json'`, or `'parquet'`.
    outputOptions : dict, optional
        Dictionary of options for the DataFrameWriter (e.g., `{"compression": "snappy"}` for parquet).

    Examples
    --------
    >>> optimizer = DataFrameOptimizer() \\
    ...     .setExecutorCores(4) \\
    ...     .setNumWorkers(5) \\
    ...     .setDoCache(True) \\
    ...     .setPersistPath("/tmp/out") \\
    ...     .setPersistFormat("parquet") \\
    ...     .setOutputOptions({"compression": "snappy"})

    >>> optimized_df = optimizer.transform(input_df)

    Notes
    -----
    - You must specify either `numPartitions`, or both `executorCores` and `numWorkers`.
    - Schema is preserved; no columns are modified or removed.
    """

    executorCores = Param(
        Params._dummy(),
        "executorCores",
        "Number of cores per executor",
        typeConverter=TypeConverters.toInt
    )
    numWorkers = Param(
        Params._dummy(),
        "numWorkers",
        "Number of Spark workers",
        typeConverter=TypeConverters.toInt
    )
    numPartitions = Param(
        Params._dummy(),
        "numPartitions",
        "Total number of partitions (overrides executorCores * numWorkers)",
        typeConverter=TypeConverters.toInt
    )
    doCache = Param(
        Params._dummy(),
        "doCache",
        "Whether to cache the DataFrame",
        typeConverter=TypeConverters.toBoolean
    )

    persistPath = Param(
        Params._dummy(),
        "persistPath",
        "Optional path to persist the DataFrame",
        typeConverter=TypeConverters.toString
    )
    persistFormat = Param(
        Params._dummy(),
        "persistFormat",
        "Format to persist: parquet, json, csv",
        typeConverter=TypeConverters.toString
    )

    outputOptions = Param(
        Params._dummy(),
        "outputOptions",
        "Additional writer options",
        typeConverter=toStringDict
    )

    def __init__(self):
        super().__init__()
        self._setDefault(
            doCache=False,
            persistFormat="none",
            numPartitions=1,
            executorCores=1,
            numWorkers=1
        )

    # Parameter setters
    def setExecutorCores(self, value: int):
        """Set the number of executor cores."""
        return self._set(executorCores=value)

    def setNumWorkers(self, value: int):
        """Set the number of Spark workers."""
        return self._set(numWorkers=value)

    def setNumPartitions(self, value: int):
        """Set the total number of partitions (overrides cores * workers)."""
        return self._set(numPartitions=value)

    def setDoCache(self, value: bool):
        """Set whether to cache the DataFrame."""
        return self._set(doCache=value)

    def setPersistPath(self, value: str):
        """Set the path where the DataFrame should be persisted."""
        return self._set(persistPath=value)

    def setPersistFormat(self, value: str):
        """Set the format to persist the DataFrame (parquet, json, csv)."""
        return self._set(persistFormat=value)

    def setOutputOptions(self, value: dict):
        """Set additional writer options (e.g. for csv headers)."""
        return self._set(outputOptions=value)

    # Optional bulk setter
    def setParams(self, **kwargs: Any):
        for param, value in kwargs.items():
            self._set(**{param: value})
        return self

    def _transform(self, dataset: DataFrame) -> DataFrame:
        self._validate_params()
        part_count = self.getOrDefault(self.numPartitions)
        cores = self.getOrDefault(self.executorCores)
        workers = self.getOrDefault(self.numWorkers)
        if cores is None or workers is None:
            raise ValueError("Provide either numPartitions or both executorCores and numWorkers")
        if part_count == 1:
            part_count = cores * workers

        optimized_df = dataset.repartition(part_count)

        if self.getOrDefault(self.doCache):
            optimized_df = optimized_df.cache()

        format = self.getOrDefault(self.persistFormat).lower()
        if format != "none":
            path = self.getOrDefault(self.persistPath)
            if not path:
                raise ValueError("persistPath must be set when persistFormat is not 'none'")
            writer = optimized_df.write.mode("overwrite")
            if self.isDefined(self.outputOptions):
                writer = writer.options(**self.getOrDefault(self.outputOptions))
            if format == "parquet":
                writer.parquet(path)
            elif format == "json":
                writer.json(path)
            elif format == "csv":
                writer.csv(path)
            else:
                raise ValueError(f"Unsupported format: {format}")

        return optimized_df

    def _validate_params(self):
        if self.isDefined(self.executorCores):
            val = self.getOrDefault(self.executorCores)
            if val <= 0:
                raise ValueError("executorCores must be > 0")

        if self.isDefined(self.numWorkers):
            val = self.getOrDefault(self.numWorkers)
            if val <= 0:
                raise ValueError("numWorkers must be > 0")

        if self.isDefined(self.numPartitions):
            val = self.getOrDefault(self.numPartitions)
            if val <= 0:
                raise ValueError("numPartitions must be > 0")

        if self.isDefined(self.persistPath) and not self.isDefined(self.persistFormat):
            raise ValueError("persistFormat must be defined when persistPath is set")
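Based on the docstring and the setters shown in the new file above, a hedged usage sketch follows; the cluster sizing (4 cores, 5 workers) and the input DataFrame are illustrative assumptions, and the import uses the module path added to RECORD since the package-level annotator exports are unchanged in this release:

# Illustrative only: repartition and cache a small text DataFrame before
# running annotators over it. With the defaults above (numPartitions=1),
# the partition count falls back to executorCores * numWorkers.
import sparknlp
from sparknlp.annotator.dataframe_optimizer import DataFrameOptimizer

spark = sparknlp.start()
input_df = spark.createDataFrame([("Some text",), ("More text",)], ["text"])

optimizer = DataFrameOptimizer() \
    .setExecutorCores(4) \
    .setNumWorkers(5) \
    .setDoCache(True)

optimized_df = optimizer.transform(input_df)      # 4 * 5 = 20 partitions, cached
print(optimized_df.rdd.getNumPartitions())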
sparknlp/annotator/embeddings/__init__.py
CHANGED

@@ -25,6 +25,7 @@ from sparknlp.annotator.embeddings.elmo_embeddings import *
 from sparknlp.annotator.embeddings.e5_embeddings import *
 from sparknlp.annotator.embeddings.instructor_embeddings import *
 from sparknlp.annotator.embeddings.longformer_embeddings import *
+from sparknlp.annotator.embeddings.minilm_embeddings import *
 from sparknlp.annotator.embeddings.mpnet_embeddings import *
 from sparknlp.annotator.embeddings.roberta_embeddings import *
 from sparknlp.annotator.embeddings.roberta_sentence_embeddings import *
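With this wildcard import in place, and assuming `sparknlp.annotator` re-exports the embeddings subpackage the same way it does for the existing annotators listed here, the new class should be reachable from both paths (a small sketch):

# Sanity check (illustrative): both import paths resolve to the same class.
from sparknlp.annotator import MiniLMEmbeddings
from sparknlp.annotator.embeddings.minilm_embeddings import MiniLMEmbeddings as DirectImport

assert MiniLMEmbeddings is DirectImport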
sparknlp/annotator/embeddings/minilm_embeddings.py
ADDED

@@ -0,0 +1,189 @@
# Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes for MiniLMEmbeddings."""

from sparknlp.common import *


class MiniLMEmbeddings(AnnotatorModel,
                       HasEmbeddingsProperties,
                       HasCaseSensitiveProperties,
                       HasStorageRef,
                       HasBatchedAnnotate,
                       HasMaxSentenceLengthLimit):
    """Sentence embeddings using MiniLM.

    MiniLM, a lightweight and efficient sentence embedding model that can generate text embeddings for various NLP tasks (e.g., classification, retrieval, clustering, text evaluation, etc.)
    Note that this annotator is only supported for Spark Versions 3.4 and up.

    Pretrained models can be loaded with :meth:`.pretrained` of the companion
    object:

    >>> embeddings = MiniLMEmbeddings.pretrained() \\
    ...     .setInputCols(["document"]) \\
    ...     .setOutputCol("minilm_embeddings")


    The default model is ``"minilm_l6_v2"``, if no name is provided.

    For available pretrained models please see the
    `Models Hub <https://sparknlp.org/models?q=MiniLM>`__.


    ====================== ======================
    Input Annotation types Output Annotation type
    ====================== ======================
    ``DOCUMENT``           ``SENTENCE_EMBEDDINGS``
    ====================== ======================

    Parameters
    ----------
    batchSize
        Size of every batch, by default 8
    dimension
        Number of embedding dimensions, by default 384
    caseSensitive
        Whether to ignore case in tokens for embeddings matching, by default False
    maxSentenceLength
        Max sentence length to process, by default 512
    configProtoBytes
        ConfigProto from tensorflow, serialized into byte array.

    References
    ----------
    `MiniLM: Deep Self-Attention Distillation for Task-Agnostic Compression of Pre-Trained Transformers <https://arxiv.org/abs/2002.10957>`__

    `MiniLM Github Repository <https://github.com/microsoft/unilm/tree/master/minilm>`__

    **Paper abstract**

    *We present a simple and effective approach to compress large pre-trained Transformer models
    by distilling the self-attention module of the last Transformer layer. The compressed model
    (called MiniLM) can be trained with task-agnostic distillation and then fine-tuned on various
    downstream tasks. We evaluate MiniLM on the GLUE benchmark and show that it achieves comparable
    results with BERT-base while being 4.3x smaller and 5.5x faster. We also show that MiniLM can
    be further compressed to 22x smaller and 12x faster than BERT-base while maintaining comparable
    performance.*

    Examples
    --------
    >>> import sparknlp
    >>> from sparknlp.base import *
    >>> from sparknlp.annotator import *
    >>> from pyspark.ml import Pipeline
    >>> documentAssembler = DocumentAssembler() \\
    ...     .setInputCol("text") \\
    ...     .setOutputCol("document")
    >>> embeddings = MiniLMEmbeddings.pretrained() \\
    ...     .setInputCols(["document"]) \\
    ...     .setOutputCol("minilm_embeddings")
    >>> embeddingsFinisher = EmbeddingsFinisher() \\
    ...     .setInputCols(["minilm_embeddings"]) \\
    ...     .setOutputCols("finished_embeddings") \\
    ...     .setOutputAsVector(True)
    >>> pipeline = Pipeline().setStages([
    ...     documentAssembler,
    ...     embeddings,
    ...     embeddingsFinisher
    ... ])
    >>> data = spark.createDataFrame([["This is a sample sentence for embedding generation.",
    ...     "Another example sentence to demonstrate MiniLM embeddings.",
    ... ]]).toDF("text")
    >>> result = pipeline.fit(data).transform(data)
    >>> result.selectExpr("explode(finished_embeddings) as result").show(5, 80)
    +--------------------------------------------------------------------------------+
    |                                                                          result|
    +--------------------------------------------------------------------------------+
    |[[0.1234567, -0.2345678, 0.3456789, -0.4567890, 0.5678901, -0.6789012...|
    |[[0.2345678, -0.3456789, 0.4567890, -0.5678901, 0.6789012, -0.7890123...|
    +--------------------------------------------------------------------------------+
    """

    name = "MiniLMEmbeddings"

    inputAnnotatorTypes = [AnnotatorType.DOCUMENT]

    outputAnnotatorType = AnnotatorType.SENTENCE_EMBEDDINGS
    configProtoBytes = Param(Params._dummy(),
                             "configProtoBytes",
                             "ConfigProto from tensorflow, serialized into byte array. Get with config_proto.SerializeToString()",
                             TypeConverters.toListInt)

    def setConfigProtoBytes(self, b):
        """Sets configProto from tensorflow, serialized into byte array.

        Parameters
        ----------
        b : List[int]
            ConfigProto from tensorflow, serialized into byte array
        """
        return self._set(configProtoBytes=b)

    @keyword_only
    def __init__(self, classname="com.johnsnowlabs.nlp.embeddings.MiniLMEmbeddings", java_model=None):
        super(MiniLMEmbeddings, self).__init__(
            classname=classname,
            java_model=java_model
        )
        self._setDefault(
            dimension=384,
            batchSize=8,
            maxSentenceLength=512,
            caseSensitive=False,
        )

    @staticmethod
    def loadSavedModel(folder, spark_session, use_openvino=False):
        """Loads a locally saved model.

        Parameters
        ----------
        folder : str
            Folder of the saved model
        spark_session : pyspark.sql.SparkSession
            The current SparkSession
        use_openvino : bool
            Use OpenVINO backend

        Returns
        -------
        MiniLMEmbeddings
            The restored model
        """
        from sparknlp.internal import _MiniLMLoader
        jModel = _MiniLMLoader(folder, spark_session._jsparkSession, use_openvino)._java_obj
        return MiniLMEmbeddings(java_model=jModel)

    @staticmethod
    def pretrained(name="minilm_l6_v2", lang="en", remote_loc=None):
        """Downloads and loads a pretrained model.

        Parameters
        ----------
        name : str, optional
            Name of the pretrained model, by default "minilm_l6_v2"
        lang : str, optional
            Language of the pretrained model, by default "en"
        remote_loc : str, optional
            Optional remote address of the resource, by default None. Will use
            Spark NLPs repositories otherwise.

        Returns
        -------
        MiniLMEmbeddings
            The restored model
        """
        from sparknlp.pretrained import ResourceDownloader
        return ResourceDownloader.downloadModel(MiniLMEmbeddings, name, lang, remote_loc)
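Beyond the pipeline example in the docstring above, the documented parameters can be set through the usual mixin setters (batch size, sentence length, and case sensitivity come from HasBatchedAnnotate, HasMaxSentenceLengthLimit, and HasCaseSensitiveProperties). A short configuration sketch, using the default model name and the default values listed above:

# Configure the annotator with its documented parameters; the values shown
# are the defaults from the docstring, so this is equivalent to pretrained().
from sparknlp.annotator.embeddings.minilm_embeddings import MiniLMEmbeddings

embeddings = MiniLMEmbeddings.pretrained("minilm_l6_v2", "en") \
    .setInputCols(["document"]) \
    .setOutputCol("minilm_embeddings") \
    .setBatchSize(8) \
    .setMaxSentenceLength(512) \
    .setCaseSensitive(False)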
sparknlp/internal/__init__.py
CHANGED

@@ -257,6 +257,16 @@ class _E5Loader(ExtendedJavaWrapper):
         )


+class _MiniLMLoader(ExtendedJavaWrapper):
+    def __init__(self, path, jspark, use_openvino=False):
+        super(_MiniLMLoader, self).__init__(
+            "com.johnsnowlabs.nlp.embeddings.MiniLMEmbeddings.loadSavedModel",
+            path,
+            jspark,
+            use_openvino,
+        )
+
+
 class _BGELoader(ExtendedJavaWrapper):
     def __init__(self, path, jspark):
         super(_BGELoader, self).__init__(
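This loader is what `MiniLMEmbeddings.loadSavedModel` delegates to. A hedged sketch of importing a locally exported model; the export folder is a placeholder path, and `use_openvino` stays at its default unless an OpenVINO export is actually present:

# Import a locally exported MiniLM model through the new loader path.
# "/models/minilm_l6_v2_export" is a placeholder; replace with a real folder.
import sparknlp
from sparknlp.annotator.embeddings.minilm_embeddings import MiniLMEmbeddings

spark = sparknlp.start()
loaded = MiniLMEmbeddings.loadSavedModel("/models/minilm_l6_v2_export", spark) \
    .setInputCols(["document"]) \
    .setOutputCol("minilm_embeddings")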
sparknlp/reader/pdf_to_text.py
CHANGED

@@ -1,3 +1,16 @@
+# Copyright 2017-2025 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 from pyspark import keyword_only
 from pyspark.ml.param import Param, Params, TypeConverters
 from pyspark.ml.param.shared import HasInputCol, HasOutputCol
@@ -89,6 +102,14 @@ class PdfToText(JavaTransformer, HasInputCol, HasOutputCol,
                          "Force to extract only number of pages",
                          typeConverter=TypeConverters.toBoolean)

+    extractCoordinates = Param(Params._dummy(), "extractCoordinates",
+                               "Force extract coordinates of text.",
+                               typeConverter=TypeConverters.toBoolean)
+
+    normalizeLigatures = Param(Params._dummy(), "normalizeLigatures",
+                               "Whether to convert ligature chars such as 'fl' into its corresponding chars (e.g., {'f', 'l'}).",
+                               typeConverter=TypeConverters.toBoolean)
+
     @keyword_only
     def __init__(self):
         """
@@ -154,4 +175,16 @@ class PdfToText(JavaTransformer, HasInputCol, HasOutputCol,
         """
         Sets the value of :py:attr:`sort`.
         """
-        return self._set(sort=value)
+        return self._set(sort=value)
+
+    def setExtractCoordinates(self, value):
+        """
+        Sets the value of :py:attr:`extractCoordinates`.
+        """
+        return self._set(extractCoordinates=value)
+
+    def setNormalizeLigatures(self, value):
+        """
+        Sets the value of :py:attr:`normalizeLigatures`.
+        """
+        return self._set(normalizeLigatures=value)
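A hedged sketch of enabling the two new options; only the two setters come from the diff above. The PDF path is a placeholder, and the input/output column handling is assumed to follow this transformer's HasInputCol/HasOutputCol defaults, which are not shown in this diff:

# Illustrative only: read PDFs as binary files and run PdfToText with the
# options added in this release. Paths and column defaults are assumptions.
import sparknlp
from sparknlp.reader.pdf_to_text import PdfToText

spark = sparknlp.start()
pdf_df = spark.read.format("binaryFile").load("/data/pdfs")   # placeholder path

pdf_to_text = PdfToText() \
    .setExtractCoordinates(True) \
    .setNormalizeLigatures(True)

text_df = pdf_to_text.transform(pdf_df)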
sparknlp/reader/sparknlp_reader.py
CHANGED

@@ -367,4 +367,50 @@ class SparkNLPReader(ExtendedJavaWrapper):
         if not isinstance(docPath, str):
             raise TypeError("docPath must be a string")
         jdf = self._java_obj.xml(docPath)
+        return self.getDataFrame(self.spark, jdf)
+
+
+    def md(self, filePath):
+        """Reads Markdown files and returns a Spark DataFrame.
+
+        Parameters
+        ----------
+        filePath : str
+            Path to a Markdown file or a directory containing Markdown files.
+
+        Returns
+        -------
+        pyspark.sql.DataFrame
+            A DataFrame containing parsed Markdown content.
+
+        Examples
+        --------
+        >>> from sparknlp.reader import SparkNLPReader
+        >>> md_df = SparkNLPReader(spark).md("home/user/markdown-directory")
+
+        You can use SparkNLP for one line of code
+
+        >>> import sparknlp
+        >>> md_df = sparknlp.read().md("home/user/markdown-directory")
+        >>> md_df.show(truncate=False)
+        +-----------------------------------------------------------+
+        |md                                                         |
+        +-----------------------------------------------------------+
+        |[{Title, Sample Markdown Document, {elementId -> ..., tag -> title}}]|
+        +-----------------------------------------------------------+
+
+        >>> md_df.printSchema()
+        root
+         |-- path: string (nullable = true)
+         |-- md: array (nullable = true)
+         |    |-- element: struct (containsNull = true)
+         |    |    |-- elementType: string (nullable = true)
+         |    |    |-- content: string (nullable = true)
+         |    |    |-- metadata: map (nullable = true)
+         |    |    |    |-- key: string
+         |    |    |    |-- value: string (valueContainsNull = true)
+        """
+        if not isinstance(filePath, str):
+            raise TypeError("filePath must be a string")
+        jdf = self._java_obj.md(filePath)
         return self.getDataFrame(self.spark, jdf)
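Following the schema printed in the docstring above, a small sketch of flattening the parsed Markdown elements into plain columns; the directory path is the same placeholder used in the example:

# Explode the parsed Markdown elements and keep just their type and content,
# per the md array-of-struct schema shown above.
import sparknlp

md_df = sparknlp.read().md("home/user/markdown-directory")
elements = md_df.selectExpr("explode(md) as element") \
    .select("element.elementType", "element.content")
elements.show(truncate=False)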
{spark_nlp-6.0.3.dist-info → spark_nlp-6.0.5.dist-info}/WHEEL
File without changes

{spark_nlp-6.0.3.dist-info → spark_nlp-6.0.5.dist-info}/top_level.txt
File without changes