spark-nlp 6.0.2__py2.py3-none-any.whl → 6.0.4__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {spark_nlp-6.0.2.dist-info → spark_nlp-6.0.4.dist-info}/METADATA +5 -5
- {spark_nlp-6.0.2.dist-info → spark_nlp-6.0.4.dist-info}/RECORD +15 -12
- sparknlp/__init__.py +1 -1
- sparknlp/annotator/dataframe_optimizer.py +216 -0
- sparknlp/annotator/embeddings/__init__.py +2 -0
- sparknlp/annotator/embeddings/e5v_embeddings.py +138 -0
- sparknlp/annotator/embeddings/minilm_embeddings.py +189 -0
- sparknlp/internal/__init__.py +18 -0
- sparknlp/partition/partition_properties.py +63 -1
- sparknlp/partition/partition_transformer.py +11 -7
- sparknlp/reader/pdf_to_text.py +34 -1
- sparknlp/reader/sparknlp_reader.py +45 -0
- sparknlp/util.py +26 -0
- {spark_nlp-6.0.2.dist-info → spark_nlp-6.0.4.dist-info}/WHEEL +0 -0
- {spark_nlp-6.0.2.dist-info → spark_nlp-6.0.4.dist-info}/top_level.txt +0 -0
{spark_nlp-6.0.2.dist-info → spark_nlp-6.0.4.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: spark-nlp
-Version: 6.0.2
+Version: 6.0.4
 Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
 Home-page: https://github.com/JohnSnowLabs/spark-nlp
 Author: John Snow Labs

@@ -102,7 +102,7 @@ $ java -version
 $ conda create -n sparknlp python=3.7 -y
 $ conda activate sparknlp
 # spark-nlp by default is based on pyspark 3.x
-$ pip install spark-nlp==6.0.2 pyspark==3.3.1
+$ pip install spark-nlp==6.0.4 pyspark==3.3.1
 ```
 
 In Python console or Jupyter `Python3` kernel:

@@ -168,7 +168,7 @@ For a quick example of using pipelines and models take a look at our official [d
 
 ### Apache Spark Support
 
-Spark NLP *6.0.2* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
+Spark NLP *6.0.4* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x
 
 | Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
 |-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|

@@ -198,7 +198,7 @@ Find out more about 4.x `SparkNLP` versions in our official [documentation](http
 
 ### Databricks Support
 
-Spark NLP 6.0.2 has been tested and is compatible with the following runtimes:
+Spark NLP 6.0.4 has been tested and is compatible with the following runtimes:
 
 | **CPU** | **GPU** |
 |--------------------|--------------------|

@@ -215,7 +215,7 @@ We are compatible with older runtimes. For a full list check databricks support
 
 ### EMR Support
 
-Spark NLP 6.0.2 has been tested and is compatible with the following EMR releases:
+Spark NLP 6.0.4 has been tested and is compatible with the following EMR releases:
 
 | **EMR Release** |
 |--------------------|
{spark_nlp-6.0.2.dist-info → spark_nlp-6.0.4.dist-info}/RECORD

@@ -3,16 +3,17 @@ com/johnsnowlabs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
 com/johnsnowlabs/ml/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 com/johnsnowlabs/ml/ai/__init__.py,sha256=YQiK2M7U4d8y5irPy_HB8ae0mSpqS9583MH44pnKJXc,295
 com/johnsnowlabs/nlp/__init__.py,sha256=DPIVXtONO5xXyOk-HB0-sNiHAcco17NN13zPS_6Uw8c,294
-sparknlp/__init__.py,sha256=
+sparknlp/__init__.py,sha256=4IcJQhg7fuPsgeY0yoV96ZJPK_LVPdXnH3cl_azT7eU,13814
 sparknlp/annotation.py,sha256=I5zOxG5vV2RfPZfqN9enT1i4mo6oBcn3Lrzs37QiOiA,5635
 sparknlp/annotation_audio.py,sha256=iRV_InSVhgvAwSRe9NTbUH9v6OGvTM-FPCpSAKVu0mE,1917
 sparknlp/annotation_image.py,sha256=xhCe8Ko-77XqWVuuYHFrjKqF6zPd8Z-RY_rmZXNwCXU,2547
 sparknlp/functions.py,sha256=4cVRyBjlF1YttcMNs5z7gf9NPW7q9qzGb5KOf44Phgs,12120
 sparknlp/upload_to_hub.py,sha256=toULNLeXK3MmTpmza9dR8R8od2QJEw1eTgBDM-O9_I0,6018
-sparknlp/util.py,sha256=
+sparknlp/util.py,sha256=2Z499Psal-NuEJ4CHQNgHnAJrS73QQNyCzKPo1MavU8,2279
 sparknlp/annotator/__init__.py,sha256=G746SY8dRM_AOf-gaoSKlh7D-2TKGpqqHhGr4XF-b2A,3534
 sparknlp/annotator/chunk2_doc.py,sha256=IJ3_vQHvzjqono90AZUzZ67QSYjwquuMYbN9_HSOVcg,3141
 sparknlp/annotator/chunker.py,sha256=8nz9B7R_mxKxcfJRfKvz2x_T29W3u4izE9k0wfYPzgE,5174
+sparknlp/annotator/dataframe_optimizer.py,sha256=P4GySLzz1lRCZX0UBRF9_IDuXlRS1XvRWz-B2L0zqMA,7771
 sparknlp/annotator/date2_chunk.py,sha256=tW3m_LExmhx8LMFWOGXqMyfNRXSr2dnoEHD-6DrnpXI,3153
 sparknlp/annotator/document_character_text_splitter.py,sha256=oNrOKJAKO2h1wr0bEuSqYrrltIU_Y6J6cTHy70yKy6s,9877
 sparknlp/annotator/document_normalizer.py,sha256=hU2fG6vaPfdngQapoeSu-_zS_LiBZNp2tcVBGl6eTpk,10973

@@ -102,7 +103,7 @@ sparknlp/annotator/cv/vit_for_image_classification.py,sha256=D2V3pxAd3rBi1817lxV
 sparknlp/annotator/dependency/__init__.py,sha256=eV43oXAGaYl2N1XKIEAAZJLNP8gpHm8VxuXDeDlQzR4,774
 sparknlp/annotator/dependency/dependency_parser.py,sha256=SxyvHPp8Hs1Xnm5X1nLTMi095XoQMtfL8pbys15mYAI,11212
 sparknlp/annotator/dependency/typed_dependency_parser.py,sha256=60vPdYkbFk9MPGegg3m9Uik9cMXpMZd8tBvXG39gNww,12456
-sparknlp/annotator/embeddings/__init__.py,sha256=
+sparknlp/annotator/embeddings/__init__.py,sha256=Aw1oaP5DI0OS6259c0TEZZ6j3VFSvYFEerah5a-udVw,2528
 sparknlp/annotator/embeddings/albert_embeddings.py,sha256=6Rd1LIn8oFIpq_ALcJh-RUjPEO7Ht8wsHY6JHSFyMkw,9995
 sparknlp/annotator/embeddings/auto_gguf_embeddings.py,sha256=IlqkPGOH2lmZvxEyDSGX-G90DtTFOe2Rvujfbg5zvlU,20185
 sparknlp/annotator/embeddings/bert_embeddings.py,sha256=HVUjkg56kBcpGZCo-fmPG5uatMDF3swW_lnbpy1SgSI,8463

@@ -114,9 +115,11 @@ sparknlp/annotator/embeddings/deberta_embeddings.py,sha256=_b5nzLb7heFQNN-uT2oBN
 sparknlp/annotator/embeddings/distil_bert_embeddings.py,sha256=4pyMCsbvvXYeTGIMVUir9wCDKR_1f_HKtXZrTDO1Thc,9275
 sparknlp/annotator/embeddings/doc2vec.py,sha256=Xk3MdEkXatX9lRgbFbAdnIDrLgIxzUIGWFBZeo9BTq0,13226
 sparknlp/annotator/embeddings/e5_embeddings.py,sha256=Esuvrq9JlogGaSSzFVVDkOFMwgYwFwr17I62ZiCDm0k,7858
+sparknlp/annotator/embeddings/e5v_embeddings.py,sha256=NFHO2nxDcgVzyKQ6yz1BWyqtjwt9QHwlkKbBXFwhsO8,5951
 sparknlp/annotator/embeddings/elmo_embeddings.py,sha256=KV-KPs0Pq_OpPaHsnqBz2k_S7VdzyFZ4632IeFNKqJ8,9858
 sparknlp/annotator/embeddings/instructor_embeddings.py,sha256=CTKmbuBOx_KBM4JM-Y1U5LyR-6rrnpoBGbgGE_axS1c,8670
 sparknlp/annotator/embeddings/longformer_embeddings.py,sha256=jS4fxB5O0-d9ta9VKv8ai-17n5YHt5rML8QxUw7K4Io,8754
+sparknlp/annotator/embeddings/minilm_embeddings.py,sha256=iKO3FPA6qkGjJAyPlhYpAWssnrNvlYjYxZaZEC0QDhc,7436
 sparknlp/annotator/embeddings/mpnet_embeddings.py,sha256=7d6E4lS7jjkppDPvty1UHNNrbykkriFiysrxZ_RzL0U,7875
 sparknlp/annotator/embeddings/mxbai_embeddings.py,sha256=kCaYcM3lLYJjhElLK5isdxzJqIvoGZlUKKNkySMUkE8,6017
 sparknlp/annotator/embeddings/nomic_embeddings.py,sha256=WTllH3htx9wDD2Le8pZgKVPM_U8XNmroJb6f4PeVeP8,7347

@@ -224,7 +227,7 @@ sparknlp/common/read_as.py,sha256=imxPGwV7jr4Li_acbo0OAHHRGCBbYv-akzEGaBWEfcY,12
 sparknlp/common/recursive_annotator_approach.py,sha256=vqugBw22cE3Ff7PIpRlnYFuOlchgL0nM26D8j-NdpqU,1449
 sparknlp/common/storage.py,sha256=D91H3p8EIjNspjqAYu6ephRpCUtdcAir4_PrAbkIQWE,4842
 sparknlp/common/utils.py,sha256=Yne6yYcwKxhOZC-U4qfYoDhWUP_6BIaAjI5X_P_df1E,1306
-sparknlp/internal/__init__.py,sha256=
+sparknlp/internal/__init__.py,sha256=wvC7ovDfII5GiYSwNpA1HHttnlXjbFgpYAGV68NsiQo,40446
 sparknlp/internal/annotator_java_ml.py,sha256=UGPoThG0rGXUOXGSQnDzEDW81Mu1s5RPF29v7DFyE3c,1187
 sparknlp/internal/annotator_transformer.py,sha256=fXmc2IWXGybqZpbEU9obmbdBYPc798y42zvSB4tqV9U,1448
 sparknlp/internal/extended_java_wrapper.py,sha256=hwP0133-hDiDf5sBF-P3MtUsuuDj1PpQbtGZQIRwzfk,2240

@@ -234,16 +237,16 @@ sparknlp/logging/__init__.py,sha256=DoROFF5KLZe4t4Q-OHxqk1nhqbw9NQ-wb64y8icNwgw,
 sparknlp/logging/comet.py,sha256=_ZBi9-hlilCAnd4lvdYMWiq4Vqsppv8kow3k0cf-NG4,15958
 sparknlp/partition/__init__.py,sha256=L0w-yv_HnnvoKlSX5MzI2GKHW3RLLfGyq8bgWYVeKjU,749
 sparknlp/partition/partition.py,sha256=GXEAUvOea04Vc_JK0z112cAKFrJ4AEpjLJ8xlzZt6Kw,8551
-sparknlp/partition/partition_properties.py,sha256=
-sparknlp/partition/partition_transformer.py,sha256=
+sparknlp/partition/partition_properties.py,sha256=xhAMhlsTBg-WS6KWDyVbRPwO7IzpowVVhJNR-ZGhvdo,9520
+sparknlp/partition/partition_transformer.py,sha256=lRR1h-IMlHR8M0VeB50SbU39GHHF5PgMaJ42qOriS6A,6855
 sparknlp/pretrained/__init__.py,sha256=GV-x9UBK8F2_IR6zYatrzFcVJtkSUIMbxqWsxRUePmQ,793
 sparknlp/pretrained/pretrained_pipeline.py,sha256=lquxiaABuA68Rmu7csamJPqBoRJqMUO0oNHsmEZDAIs,5740
 sparknlp/pretrained/resource_downloader.py,sha256=8_-rpvO2LsX_Lq4wMPif2ca3RlJZWEabt8pDm2xymiI,7806
 sparknlp/pretrained/utils.py,sha256=T1MrvW_DaWk_jcOjVLOea0NMFE9w8fe0ZT_5urZ_nEY,1099
 sparknlp/reader/__init__.py,sha256=-Toj3AIBki-zXPpV8ezFTI2LX1yP_rK2bhpoa8nBkTw,685
 sparknlp/reader/enums.py,sha256=MNGug9oJ1BBLM1Pbske13kAabalDzHa2kucF5xzFpHs,770
-sparknlp/reader/pdf_to_text.py,sha256=
-sparknlp/reader/sparknlp_reader.py,sha256=
+sparknlp/reader/pdf_to_text.py,sha256=eWw-cwjosmcSZ9eHso0F5QQoeGBBnwsOhzhCXXvMjZA,7169
+sparknlp/reader/sparknlp_reader.py,sha256=ybnMlwJaBOVbjDw7ng39jcrshlQzexwq98_PTwVeM8g,16779
 sparknlp/training/__init__.py,sha256=qREi9u-5Vc2VjpL6-XZsyvu5jSEIdIhowW7_kKaqMqo,852
 sparknlp/training/conll.py,sha256=wKBiSTrjc6mjsl7Nyt6B8f4yXsDJkZb-sn8iOjix9cE,6961
 sparknlp/training/conllu.py,sha256=8r3i-tmyrLsyk1DtZ9uo2mMDCWb1yw2Y5W6UsV13MkY,4953

@@ -274,7 +277,7 @@ sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py,sha256=R4yHFN3
 sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py,sha256=EoCSdcIjqQ3wv13MAuuWrKV8wyVBP0SbOEW41omHlR0,23189
 sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py,sha256=k5CQ7gKV6HZbZMB8cKLUJuZxoZWlP_DFWdZ--aIDwsc,2356
 sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py,sha256=pAxjWhjazSX8Vg0MFqJiuRVw1IbnQNSs-8Xp26L4nko,870
-spark_nlp-6.0.2.dist-info/METADATA,sha256=
-spark_nlp-6.0.2.dist-info/WHEEL,sha256=
-spark_nlp-6.0.2.dist-info/top_level.txt,sha256=
-spark_nlp-6.0.2.dist-info/RECORD,,
+spark_nlp-6.0.4.dist-info/METADATA,sha256=xU_AVvIsdTMbYdmEsyU-05YDMQizz-l26J6zKkoC1C8,19722
+spark_nlp-6.0.4.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
+spark_nlp-6.0.4.dist-info/top_level.txt,sha256=uuytur4pyMRw2H_txNY2ZkaucZHUs22QF8-R03ch_-E,13
+spark_nlp-6.0.4.dist-info/RECORD,,
sparknlp/__init__.py CHANGED

sparknlp/annotator/dataframe_optimizer.py ADDED

@@ -0,0 +1,216 @@
+# Copyright 2017-2025 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from pyspark.ml import Transformer
+from pyspark.ml.param.shared import *
+from pyspark.sql import DataFrame
+from typing import Any
+
+# Custom converter for string-to-string dictionaries
+def toStringDict(value):
+    if not isinstance(value, dict):
+        raise TypeError("Expected a dictionary of strings.")
+    return {str(k): str(v) for k, v in value.items()}
+
+class DataFrameOptimizer(Transformer):
+    """
+    Optimizes a Spark DataFrame by repartitioning, optionally caching, and persisting it to disk.
+
+    This transformer is intended to improve performance for Spark NLP pipelines or when preparing
+    data for export. It allows partition tuning via `numPartitions` directly, or indirectly using
+    `executorCores` and `numWorkers`. The DataFrame can also be persisted in a specified format
+    (`csv`, `json`, or `parquet`) with additional writer options.
+
+    Parameters
+    ----------
+    executorCores : int, optional
+        Number of cores per Spark executor (used to compute number of partitions if `numPartitions` is not set).
+
+    numWorkers : int, optional
+        Number of executor nodes (used to compute number of partitions if `numPartitions` is not set).
+
+    numPartitions : int, optional
+        Target number of partitions for the DataFrame (overrides calculation via cores × workers).
+
+    doCache : bool, default False
+        Whether to cache the DataFrame after repartitioning.
+
+    persistPath : str, optional
+        Path to save the DataFrame output (if persistence is enabled).
+
+    persistFormat : str, optional
+        Format to persist the DataFrame in: one of `'csv'`, `'json'`, or `'parquet'`.
+
+    outputOptions : dict, optional
+        Dictionary of options for the DataFrameWriter (e.g., `{"compression": "snappy"}` for parquet).
+
+    Examples
+    --------
+    >>> optimizer = DataFrameOptimizer() \\
+    ...     .setExecutorCores(4) \\
+    ...     .setNumWorkers(5) \\
+    ...     .setDoCache(True) \\
+    ...     .setPersistPath("/tmp/out") \\
+    ...     .setPersistFormat("parquet") \\
+    ...     .setOutputOptions({"compression": "snappy"})
+
+    >>> optimized_df = optimizer.transform(input_df)
+
+    Notes
+    -----
+    - You must specify either `numPartitions`, or both `executorCores` and `numWorkers`.
+    - Schema is preserved; no columns are modified or removed.
+    """
+
+    executorCores = Param(
+        Params._dummy(),
+        "executorCores",
+        "Number of cores per executor",
+        typeConverter=TypeConverters.toInt
+    )
+    numWorkers = Param(
+        Params._dummy(),
+        "numWorkers",
+        "Number of Spark workers",
+        typeConverter=TypeConverters.toInt
+    )
+    numPartitions = Param(
+        Params._dummy(),
+        "numPartitions",
+        "Total number of partitions (overrides executorCores * numWorkers)",
+        typeConverter=TypeConverters.toInt
+    )
+    doCache = Param(
+        Params._dummy(),
+        "doCache",
+        "Whether to cache the DataFrame",
+        typeConverter=TypeConverters.toBoolean
+    )
+
+    persistPath = Param(
+        Params._dummy(),
+        "persistPath",
+        "Optional path to persist the DataFrame",
+        typeConverter=TypeConverters.toString
+    )
+    persistFormat = Param(
+        Params._dummy(),
+        "persistFormat",
+        "Format to persist: parquet, json, csv",
+        typeConverter=TypeConverters.toString
+    )
+
+    outputOptions = Param(
+        Params._dummy(),
+        "outputOptions",
+        "Additional writer options",
+        typeConverter=toStringDict
+    )
+
+    def __init__(self):
+        super().__init__()
+        self._setDefault(
+            doCache=False,
+            persistFormat="none",
+            numPartitions=1,
+            executorCores=1,
+            numWorkers=1
+        )
+
+    # Parameter setters
+    def setExecutorCores(self, value: int):
+        """Set the number of executor cores."""
+        return self._set(executorCores=value)
+
+    def setNumWorkers(self, value: int):
+        """Set the number of Spark workers."""
+        return self._set(numWorkers=value)
+
+    def setNumPartitions(self, value: int):
+        """Set the total number of partitions (overrides cores * workers)."""
+        return self._set(numPartitions=value)
+
+    def setDoCache(self, value: bool):
+        """Set whether to cache the DataFrame."""
+        return self._set(doCache=value)
+
+    def setPersistPath(self, value: str):
+        """Set the path where the DataFrame should be persisted."""
+        return self._set(persistPath=value)
+
+    def setPersistFormat(self, value: str):
+        """Set the format to persist the DataFrame (parquet, json, csv)."""
+        return self._set(persistFormat=value)
+
+    def setOutputOptions(self, value: dict):
+        """Set additional writer options (e.g. for csv headers)."""
+        return self._set(outputOptions=value)
+
+    # Optional bulk setter
+    def setParams(self, **kwargs: Any):
+        for param, value in kwargs.items():
+            self._set(**{param: value})
+        return self
+
+    def _transform(self, dataset: DataFrame) -> DataFrame:
+        self._validate_params()
+        part_count = self.getOrDefault(self.numPartitions)
+        cores = self.getOrDefault(self.executorCores)
+        workers = self.getOrDefault(self.numWorkers)
+        if cores is None or workers is None:
+            raise ValueError("Provide either numPartitions or both executorCores and numWorkers")
+        if part_count == 1:
+            part_count = cores * workers
+
+        optimized_df = dataset.repartition(part_count)
+
+        if self.getOrDefault(self.doCache):
+            optimized_df = optimized_df.cache()
+
+        format = self.getOrDefault(self.persistFormat).lower()
+        if format != "none":
+            path = self.getOrDefault(self.persistPath)
+            if not path:
+                raise ValueError("persistPath must be set when persistFormat is not 'none'")
+            writer = optimized_df.write.mode("overwrite")
+            if self.isDefined(self.outputOptions):
+                writer = writer.options(**self.getOrDefault(self.outputOptions))
+            if format == "parquet":
+                writer.parquet(path)
+            elif format == "json":
+                writer.json(path)
+            elif format == "csv":
+                writer.csv(path)
+            else:
+                raise ValueError(f"Unsupported format: {format}")
+
+        return optimized_df
+
+    def _validate_params(self):
+        if self.isDefined(self.executorCores):
+            val = self.getOrDefault(self.executorCores)
+            if val <= 0:
+                raise ValueError("executorCores must be > 0")
+
+        if self.isDefined(self.numWorkers):
+            val = self.getOrDefault(self.numWorkers)
+            if val <= 0:
+                raise ValueError("numWorkers must be > 0")
+
+        if self.isDefined(self.numPartitions):
+            val = self.getOrDefault(self.numPartitions)
+            if val <= 0:
+                raise ValueError("numPartitions must be > 0")
+
+        if self.isDefined(self.persistPath) and not self.isDefined(self.persistFormat):
+            raise ValueError("persistFormat must be defined when persistPath is set")
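Worth noting from `_transform` above: `numPartitions` defaults to 1, and a value of 1 falls back to `executorCores * numWorkers`, so an explicit `setNumPartitions(1)` is indistinguishable from leaving it unset. A minimal usage sketch; `input_df` and the cluster sizing values are assumptions:

```python
# Minimal sketch of DataFrameOptimizer; input_df is any Spark DataFrame.
from sparknlp.annotator.dataframe_optimizer import DataFrameOptimizer

optimizer = (DataFrameOptimizer()
             .setExecutorCores(4)   # partitions resolve to 4 * 5 = 20
             .setNumWorkers(5))
df20 = optimizer.transform(input_df)
print(df20.rdd.getNumPartitions())  # 20

# An explicit count other than 1 takes precedence over cores * workers:
df8 = DataFrameOptimizer().setNumPartitions(8).transform(input_df)
print(df8.rdd.getNumPartitions())   # 8
```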
sparknlp/annotator/embeddings/__init__.py CHANGED

@@ -25,6 +25,7 @@ from sparknlp.annotator.embeddings.elmo_embeddings import *
 from sparknlp.annotator.embeddings.e5_embeddings import *
 from sparknlp.annotator.embeddings.instructor_embeddings import *
 from sparknlp.annotator.embeddings.longformer_embeddings import *
+from sparknlp.annotator.embeddings.minilm_embeddings import *
 from sparknlp.annotator.embeddings.mpnet_embeddings import *
 from sparknlp.annotator.embeddings.roberta_embeddings import *
 from sparknlp.annotator.embeddings.roberta_sentence_embeddings import *

@@ -41,3 +42,4 @@ from sparknlp.annotator.embeddings.mxbai_embeddings import *
 from sparknlp.annotator.embeddings.snowflake_embeddings import *
 from sparknlp.annotator.embeddings.nomic_embeddings import *
 from sparknlp.annotator.embeddings.auto_gguf_embeddings import *
+from sparknlp.annotator.embeddings.e5v_embeddings import *
sparknlp/annotator/embeddings/e5v_embeddings.py ADDED

@@ -0,0 +1,138 @@
+# Copyright 2017-2024 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from sparknlp.common import *
+
+class E5VEmbeddings(AnnotatorModel,
+                    HasBatchedAnnotateImage,
+                    HasImageFeatureProperties,
+                    HasEngine,
+                    HasRescaleFactor):
+    """Universal multimodal embeddings using the E5-V model (see https://huggingface.co/royokong/e5-v).
+
+    E5-V bridges the modality gap between different input types (text, image) and demonstrates strong performance in multimodal embeddings, even without fine-tuning. It also supports a single-modality training approach, where the model is trained exclusively on text pairs, often yielding better performance than multimodal training.
+
+    Pretrained models can be loaded with :meth:`.pretrained` of the companion object:
+
+    >>> e5vEmbeddings = E5VEmbeddings.pretrained() \
+    ...     .setInputCols(["image_assembler"]) \
+    ...     .setOutputCol("e5v")
+
+    The default model is ``"e5v_int4"``, if no name is provided.
+
+    For available pretrained models please see the `Models Hub <https://sparknlp.org/models?task=Question+Answering>`__.
+
+    ====================== ======================
+    Input Annotation types Output Annotation type
+    ====================== ======================
+    ``IMAGE``              ``SENTENCE_EMBEDDINGS``
+    ====================== ======================
+
+    Examples
+    --------
+    Image + Text Embedding:
+    >>> import sparknlp
+    >>> from sparknlp.base import *
+    >>> from sparknlp.annotator import *
+    >>> from pyspark.ml import Pipeline
+    >>> image_df = spark.read.format("image").option("dropInvalid", value = True).load(imageFolder)
+    >>> imagePrompt = "<|start_header_id|>user<|end_header_id|>\n\n<image>\\nSummary above image in one word: <|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n \n"
+    >>> test_df = image_df.withColumn("text", lit(imagePrompt))
+    >>> imageAssembler = ImageAssembler() \
+    ...     .setInputCol("image") \
+    ...     .setOutputCol("image_assembler")
+    >>> e5vEmbeddings = E5VEmbeddings.pretrained() \
+    ...     .setInputCols(["image_assembler"]) \
+    ...     .setOutputCol("e5v")
+    >>> pipeline = Pipeline().setStages([
+    ...     imageAssembler,
+    ...     e5vEmbeddings
+    ... ])
+    >>> result = pipeline.fit(test_df).transform(test_df)
+    >>> result.select("e5v.embeddings").show(truncate = False)
+
+    Text-Only Embedding:
+    >>> from sparknlp.util import EmbeddingsDataFrameUtils
+    >>> textPrompt = "<|start_header_id|>user<|end_header_id|>\n\n<sent>\\nSummary above sentence in one word: <|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n \n"
+    >>> textDesc = "A cat sitting in a box."
+    >>> nullImageDF = spark.createDataFrame(spark.sparkContext.parallelize([EmbeddingsDataFrameUtils.emptyImageRow]), EmbeddingsDataFrameUtils.imageSchema)
+    >>> textDF = nullImageDF.withColumn("text", lit(textPrompt.replace("<sent>", textDesc)))
+    >>> e5vEmbeddings = E5VEmbeddings.pretrained() \
+    ...     .setInputCols(["image"]) \
+    ...     .setOutputCol("e5v")
+    >>> result = e5vEmbeddings.transform(textDF)
+    >>> result.select("e5v.embeddings").show(truncate = False)
+    """
+
+    name = "E5VEmbeddings"
+
+    inputAnnotatorTypes = [AnnotatorType.IMAGE]
+    outputAnnotatorType = AnnotatorType.SENTENCE_EMBEDDINGS
+
+    @keyword_only
+    def __init__(self, classname="com.johnsnowlabs.nlp.embeddings.E5VEmbeddings", java_model=None):
+        """Initializes the E5VEmbeddings annotator.
+
+        Parameters
+        ----------
+        classname : str, optional
+            The Java class name of the annotator, by default "com.johnsnowlabs.nlp.annotators.embeddings.E5VEmbeddings"
+        java_model : Optional[java.lang.Object], optional
+            A pre-initialized Java model, by default None
+        """
+        super(E5VEmbeddings, self).__init__(classname=classname, java_model=java_model)
+        self._setDefault()
+
+    @staticmethod
+    def loadSavedModel(folder, spark_session, use_openvino=False):
+        """Loads a locally saved model.
+
+        Parameters
+        ----------
+        folder : str
+            Folder of the saved model
+        spark_session : pyspark.sql.SparkSession
+            The current SparkSession
+        use_openvino : bool, optional
+            Whether to use OpenVINO engine, by default False
+
+        Returns
+        -------
+        E5VEmbeddings
+            The restored model
+        """
+        from sparknlp.internal import _E5VEmbeddingsLoader
+        jModel = _E5VEmbeddingsLoader(folder, spark_session._jsparkSession, use_openvino)._java_obj
+        return E5VEmbeddings(java_model=jModel)
+
+    @staticmethod
+    def pretrained(name="e5v_int4", lang="en", remote_loc=None):
+        """Downloads and loads a pretrained model.
+
+        Parameters
+        ----------
+        name : str, optional
+            Name of the pretrained model, by default "e5v_int4"
+        lang : str, optional
+            Language of the pretrained model, by default "en"
+        remote_loc : str, optional
+            Optional remote address of the resource, by default None. Will use Spark NLPs repositories otherwise.
+
+        Returns
+        -------
+        E5VEmbeddings
+            The restored model
+        """
+        from sparknlp.pretrained import ResourceDownloader
+        return ResourceDownloader.downloadModel(E5VEmbeddings, name, lang, remote_loc)
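Note that the docstring examples above use `lit` and an `imageFolder` variable without defining them. A self-contained sketch of the image+text flow, with the missing import made explicit; the folder path is a placeholder and downloading `e5v_int4` is assumed to succeed:

```python
# Self-contained sketch of E5VEmbeddings image+text embedding; the image
# folder path is hypothetical and the pretrained model is downloaded.
import sparknlp
from sparknlp.base import ImageAssembler
from sparknlp.annotator import E5VEmbeddings
from pyspark.ml import Pipeline
from pyspark.sql.functions import lit  # missing from the docstring example

spark = sparknlp.start()
image_df = spark.read.format("image").option("dropInvalid", True).load("file:///tmp/images")

prompt = ("<|start_header_id|>user<|end_header_id|>\n\n<image>\n"
          "Summary above image in one word: <|eot_id|>"
          "<|start_header_id|>assistant<|end_header_id|>\n\n \n")
test_df = image_df.withColumn("text", lit(prompt))

pipeline = Pipeline().setStages([
    ImageAssembler().setInputCol("image").setOutputCol("image_assembler"),
    E5VEmbeddings.pretrained().setInputCols(["image_assembler"]).setOutputCol("e5v"),
])
pipeline.fit(test_df).transform(test_df).select("e5v.embeddings").show(truncate=False)
```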
sparknlp/annotator/embeddings/minilm_embeddings.py ADDED

@@ -0,0 +1,189 @@
+# Copyright 2017-2022 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains classes for MiniLMEmbeddings."""
+
+from sparknlp.common import *
+
+
+class MiniLMEmbeddings(AnnotatorModel,
+                       HasEmbeddingsProperties,
+                       HasCaseSensitiveProperties,
+                       HasStorageRef,
+                       HasBatchedAnnotate,
+                       HasMaxSentenceLengthLimit):
+    """Sentence embeddings using MiniLM.
+
+    MiniLM, a lightweight and efficient sentence embedding model that can generate text embeddings for various NLP tasks (e.g., classification, retrieval, clustering, text evaluation, etc.)
+    Note that this annotator is only supported for Spark Versions 3.4 and up.
+
+    Pretrained models can be loaded with :meth:`.pretrained` of the companion
+    object:
+
+    >>> embeddings = MiniLMEmbeddings.pretrained() \\
+    ...     .setInputCols(["document"]) \\
+    ...     .setOutputCol("minilm_embeddings")
+
+
+    The default model is ``"minilm_l6_v2"``, if no name is provided.
+
+    For available pretrained models please see the
+    `Models Hub <https://sparknlp.org/models?q=MiniLM>`__.
+
+
+    ====================== ======================
+    Input Annotation types Output Annotation type
+    ====================== ======================
+    ``DOCUMENT``           ``SENTENCE_EMBEDDINGS``
+    ====================== ======================
+
+    Parameters
+    ----------
+    batchSize
+        Size of every batch , by default 8
+    dimension
+        Number of embedding dimensions, by default 384
+    caseSensitive
+        Whether to ignore case in tokens for embeddings matching, by default False
+    maxSentenceLength
+        Max sentence length to process, by default 512
+    configProtoBytes
+        ConfigProto from tensorflow, serialized into byte array.
+
+    References
+    ----------
+    `MiniLM: Deep Self-Attention Distillation for Task-Agnostic Compression of Pre-Trained Transformers <https://arxiv.org/abs/2002.10957>`__
+
+    `MiniLM Github Repository <https://github.com/microsoft/unilm/tree/master/minilm>`__
+
+    **Paper abstract**
+
+    *We present a simple and effective approach to compress large pre-trained Transformer models
+    by distilling the self-attention module of the last Transformer layer. The compressed model
+    (called MiniLM) can be trained with task-agnostic distillation and then fine-tuned on various
+    downstream tasks. We evaluate MiniLM on the GLUE benchmark and show that it achieves comparable
+    results with BERT-base while being 4.3x smaller and 5.5x faster. We also show that MiniLM can
+    be further compressed to 22x smaller and 12x faster than BERT-base while maintaining comparable
+    performance.*
+
+    Examples
+    --------
+    >>> import sparknlp
+    >>> from sparknlp.base import *
+    >>> from sparknlp.annotator import *
+    >>> from pyspark.ml import Pipeline
+    >>> documentAssembler = DocumentAssembler() \\
+    ...     .setInputCol("text") \\
+    ...     .setOutputCol("document")
+    >>> embeddings = MiniLMEmbeddings.pretrained() \\
+    ...     .setInputCols(["document"]) \\
+    ...     .setOutputCol("minilm_embeddings")
+    >>> embeddingsFinisher = EmbeddingsFinisher() \\
+    ...     .setInputCols(["minilm_embeddings"]) \\
+    ...     .setOutputCols("finished_embeddings") \\
+    ...     .setOutputAsVector(True)
+    >>> pipeline = Pipeline().setStages([
+    ...     documentAssembler,
+    ...     embeddings,
+    ...     embeddingsFinisher
+    ... ])
+    >>> data = spark.createDataFrame([["This is a sample sentence for embedding generation.",
+    ...     "Another example sentence to demonstrate MiniLM embeddings.",
+    ... ]]).toDF("text")
+    >>> result = pipeline.fit(data).transform(data)
+    >>> result.selectExpr("explode(finished_embeddings) as result").show(5, 80)
+    +--------------------------------------------------------------------------------+
+    |                                                                          result|
+    +--------------------------------------------------------------------------------+
+    |[[0.1234567, -0.2345678, 0.3456789, -0.4567890, 0.5678901, -0.6789012...|
+    |[[0.2345678, -0.3456789, 0.4567890, -0.5678901, 0.6789012, -0.7890123...|
+    +--------------------------------------------------------------------------------+
+    """
+
+    name = "MiniLMEmbeddings"
+
+    inputAnnotatorTypes = [AnnotatorType.DOCUMENT]
+
+    outputAnnotatorType = AnnotatorType.SENTENCE_EMBEDDINGS
+    configProtoBytes = Param(Params._dummy(),
+                             "configProtoBytes",
+                             "ConfigProto from tensorflow, serialized into byte array. Get with config_proto.SerializeToString()",
+                             TypeConverters.toListInt)
+
+
+    def setConfigProtoBytes(self, b):
+        """Sets configProto from tensorflow, serialized into byte array.
+
+        Parameters
+        ----------
+        b : List[int]
+            ConfigProto from tensorflow, serialized into byte array
+        """
+        return self._set(configProtoBytes=b)
+
+    @keyword_only
+    def __init__(self, classname="com.johnsnowlabs.nlp.embeddings.MiniLMEmbeddings", java_model=None):
+        super(MiniLMEmbeddings, self).__init__(
+            classname=classname,
+            java_model=java_model
+        )
+        self._setDefault(
+            dimension=384,
+            batchSize=8,
+            maxSentenceLength=512,
+            caseSensitive=False,
+        )
+
+    @staticmethod
+    def loadSavedModel(folder, spark_session, use_openvino=False):
+        """Loads a locally saved model.
+
+        Parameters
+        ----------
+        folder : str
+            Folder of the saved model
+        spark_session : pyspark.sql.SparkSession
+            The current SparkSession
+        use_openvino : bool
+            Use OpenVINO backend
+
+        Returns
+        -------
+        MiniLMEmbeddings
+            The restored model
+        """
+        from sparknlp.internal import _MiniLMLoader
+        jModel = _MiniLMLoader(folder, spark_session._jsparkSession, use_openvino)._java_obj
+        return MiniLMEmbeddings(java_model=jModel)
+
+    @staticmethod
+    def pretrained(name="minilm_l6_v2", lang="en", remote_loc=None):
+        """Downloads and loads a pretrained model.
+
+        Parameters
+        ----------
+        name : str, optional
+            Name of the pretrained model, by default "minilm_l6_v2"
+        lang : str, optional
+            Language of the pretrained model, by default "en"
+        remote_loc : str, optional
+            Optional remote address of the resource, by default None. Will use
+            Spark NLPs repositories otherwise.
+
+        Returns
+        -------
+        MiniLMEmbeddings
+            The restored model
+        """
+        from sparknlp.pretrained import ResourceDownloader
+        return ResourceDownloader.downloadModel(MiniLMEmbeddings, name, lang, remote_loc)
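One caveat in the docstring example above: `createDataFrame([[a, b]])` builds a single two-column row, which `.toDF("text")` cannot name with one column. Each sentence needs its own row; a corrected sketch (model download assumed):

```python
# Corrected data construction for the MiniLMEmbeddings example: one
# sentence per row so .toDF("text") yields a single string column.
import sparknlp
from sparknlp.base import DocumentAssembler
from sparknlp.annotator import MiniLMEmbeddings
from pyspark.ml import Pipeline

spark = sparknlp.start()
data = spark.createDataFrame([
    ["This is a sample sentence for embedding generation."],
    ["Another example sentence to demonstrate MiniLM embeddings."],
]).toDF("text")

pipeline = Pipeline().setStages([
    DocumentAssembler().setInputCol("text").setOutputCol("document"),
    MiniLMEmbeddings.pretrained().setInputCols(["document"]).setOutputCol("minilm_embeddings"),
])
pipeline.fit(data).transform(data).select("minilm_embeddings.embeddings").show()
```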
sparknlp/internal/__init__.py CHANGED

@@ -257,6 +257,16 @@ class _E5Loader(ExtendedJavaWrapper):
         )
 
 
+class _MiniLMLoader(ExtendedJavaWrapper):
+    def __init__(self, path, jspark, use_openvino=False):
+        super(_MiniLMLoader, self).__init__(
+            "com.johnsnowlabs.nlp.embeddings.MiniLMEmbeddings.loadSavedModel",
+            path,
+            jspark,
+            use_openvino,
+        )
+
+
 class _BGELoader(ExtendedJavaWrapper):
     def __init__(self, path, jspark):
         super(_BGELoader, self).__init__(

@@ -1165,3 +1175,11 @@ class _Florence2TransformerLoader(ExtendedJavaWrapper):
             jspark,
             use_openvino,
         )
+class _E5VEmbeddingsLoader(ExtendedJavaWrapper):
+    def __init__(self, path, jspark, use_openvino=False):
+        super(_E5VEmbeddingsLoader, self).__init__(
+            "com.johnsnowlabs.nlp.embeddings.E5VEmbeddings.loadSavedModel",
+            path,
+            jspark,
+            use_openvino
+        )
sparknlp/partition/partition_properties.py CHANGED

@@ -254,4 +254,66 @@ class HasTextReaderProperties(Params):
         return self._set(threshold=value)
 
     def getThreshold(self):
-        return self.getOrDefault(self.threshold)
+        return self.getOrDefault(self.threshold)
+
+class HasChunkerProperties(Params):
+
+    chunkingStrategy = Param(
+        Params._dummy(),
+        "chunkingStrategy",
+        "Set the chunking strategy",
+        typeConverter=TypeConverters.toString
+    )
+
+    def setChunkingStrategy(self, value):
+        return self._set(chunkingStrategy=value)
+
+    maxCharacters = Param(
+        Params._dummy(),
+        "maxCharacters",
+        "Set the maximum number of characters",
+        typeConverter=TypeConverters.toInt
+    )
+
+    def setMaxCharacters(self, value):
+        return self._set(maxCharacters=value)
+
+    newAfterNChars = Param(
+        Params._dummy(),
+        "newAfterNChars",
+        "Insert a new chunk after N characters",
+        typeConverter=TypeConverters.toInt
+    )
+
+    def setNewAfterNChars(self, value):
+        return self._set(newAfterNChars=value)
+
+    overlap = Param(
+        Params._dummy(),
+        "overlap",
+        "Set the number of overlapping characters between chunks",
+        typeConverter=TypeConverters.toInt
+    )
+
+    def setOverlap(self, value):
+        return self._set(overlap=value)
+
+    combineTextUnderNChars = Param(
+        Params._dummy(),
+        "combineTextUnderNChars",
+        "Threshold to merge adjacent small sections",
+        typeConverter=TypeConverters.toInt
+    )
+
+    def setCombineTextUnderNChars(self, value):
+        return self._set(combineTextUnderNChars=value)
+
+    overlapAll = Param(
+        Params._dummy(),
+        "overlapAll",
+        "Apply overlap context between all sections, not just split chunks",
+        typeConverter=TypeConverters.toBoolean
+    )
+
+    def setOverlapAll(self, value):
+        return self._set(overlapAll=value)
sparknlp/partition/partition_transformer.py CHANGED

@@ -15,13 +15,15 @@
 from sparknlp.common import *
 from sparknlp.partition.partition_properties import *
 
+
 class PartitionTransformer(
     AnnotatorModel,
     HasEmailReaderProperties,
     HasExcelReaderProperties,
     HasHTMLReaderProperties,
     HasPowerPointProperties,
-    HasTextReaderProperties
+    HasTextReaderProperties,
+    HasChunkerProperties
 ):
     """
     The PartitionTransformer annotator allows you to use the Partition feature more smoothly

@@ -162,10 +164,6 @@ class PartitionTransformer(
     def getIncludePageBreaks(self):
         return self.getOrDefault(self.includePageBreaks)
 
-    # def setHeaders(self, headers: Dict[str, str]):
-    #     self._call_java("setHeadersPython", headers)
-    #     return self
-
     @keyword_only
     def __init__(self, classname="com.johnsnowlabs.partition.PartitionTransformer",
                  java_model=None):

@@ -192,5 +190,11 @@ class PartitionTransformer(
             paragraphSplit=DOUBLE_PARAGRAPH_PATTERN,
             shortLineWordThreshold=5,
             maxLineCount=2000,
-            threshold=0.1
-        )
+            threshold=0.1,
+            chunkingStrategy="",
+            maxCharacters=100,
+            newAfterNChars=-1,
+            overlap=0,
+            combineTextUnderNChars=0,
+            overlapAll=False
+        )
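With `HasChunkerProperties` mixed in, chunking can now be configured directly on the transformer. A sketch under assumptions: the accepted `chunkingStrategy` names and the column wiring are not shown in this diff, so both are placeholders; the defaults visible above are `chunkingStrategy=""`, `maxCharacters=100`, `newAfterNChars=-1`, `overlap=0`, `combineTextUnderNChars=0`, `overlapAll=False`.

```python
# Sketch of the new chunking knobs; strategy name and columns are assumptions.
from sparknlp.partition.partition_transformer import PartitionTransformer

partition = (PartitionTransformer()
             .setInputCols(["document"])      # assumed input column
             .setOutputCol("chunks")
             .setChunkingStrategy("basic")    # assumed strategy name
             .setMaxCharacters(512)           # split sections above 512 chars
             .setOverlap(32)                  # carry 32 chars between chunks
             .setOverlapAll(True))            # overlap across all sections
```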
sparknlp/reader/pdf_to_text.py CHANGED

@@ -1,3 +1,16 @@
+# Copyright 2017-2025 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 from pyspark import keyword_only
 from pyspark.ml.param import Param, Params, TypeConverters
 from pyspark.ml.param.shared import HasInputCol, HasOutputCol

@@ -89,6 +102,14 @@ class PdfToText(JavaTransformer, HasInputCol, HasOutputCol,
                           "Force to extract only number of pages",
                           typeConverter=TypeConverters.toBoolean)
 
+    extractCoordinates = Param(Params._dummy(), "extractCoordinates",
+                               "Force extract coordinates of text.",
+                               typeConverter=TypeConverters.toBoolean)
+
+    normalizeLigatures = Param(Params._dummy(), "normalizeLigatures",
+                               "Whether to convert ligature chars such as 'ﬂ' into its corresponding chars (e.g., {'f', 'l'}).",
+                               typeConverter=TypeConverters.toBoolean)
+
     @keyword_only
     def __init__(self):
         """

@@ -154,4 +175,16 @@ class PdfToText(JavaTransformer, HasInputCol, HasOutputCol,
         """
         Sets the value of :py:attr:`sort`.
         """
-        return self._set(sort=value)
+        return self._set(sort=value)
+
+    def setExtractCoordinates(self, value):
+        """
+        Sets the value of :py:attr:`extractCoordinates`.
+        """
+        return self._set(extractCoordinates=value)
+
+    def setNormalizeLigatures(self, value):
+        """
+        Sets the value of :py:attr:`normalizeLigatures`.
+        """
+        return self._set(normalizeLigatures=value)
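A sketch of the two new flags in use; loading PDFs through Spark's `binaryFile` source with a `content` input column follows common Spark NLP usage but is an assumption here, as is the path:

```python
# Sketch: enable coordinate extraction and ligature normalization.
from sparknlp.reader.pdf_to_text import PdfToText

pdf_df = spark.read.format("binaryFile").load("file:///tmp/pdfs")  # hypothetical path

pdf_to_text = (PdfToText()
               .setInputCol("content")        # binaryFile exposes bytes as "content"
               .setOutputCol("text")
               .setExtractCoordinates(True)   # attach text position metadata
               .setNormalizeLigatures(True))  # map 'ﬂ' to 'f' + 'l', etc.

text_df = pdf_to_text.transform(pdf_df)
```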
sparknlp/reader/sparknlp_reader.py CHANGED

@@ -322,4 +322,49 @@ class SparkNLPReader(ExtendedJavaWrapper):
         if not isinstance(docPath, str):
             raise TypeError("docPath must be a string")
         jdf = self._java_obj.txt(docPath)
-        return self.getDataFrame(self.spark, jdf)
+        return self.getDataFrame(self.spark, jdf)
+
+    def xml(self, docPath):
+        """Reads XML files and returns a Spark DataFrame.
+
+        Parameters
+        ----------
+        docPath : str
+            Path to an XML file or a directory containing XML files.
+
+        Returns
+        -------
+        pyspark.sql.DataFrame
+            A DataFrame containing parsed XML content.
+
+        Examples
+        --------
+        >>> from sparknlp.reader import SparkNLPReader
+        >>> xml_df = SparkNLPReader(spark).xml("home/user/xml-directory")
+
+        You can use SparkNLP for one line of code
+
+        >>> import sparknlp
+        >>> xml_df = sparknlp.read().xml("home/user/xml-directory")
+        >>> xml_df.show(truncate=False)
+        +-----------------------------------------------------------+
+        |xml                                                        |
+        +-----------------------------------------------------------+
+        |[{Title, John Smith, {elementId -> ..., tag -> title}}]    |
+        +-----------------------------------------------------------+
+
+        >>> xml_df.printSchema()
+        root
+         |-- path: string (nullable = true)
+         |-- xml: array (nullable = true)
+         |    |-- element: struct (containsNull = true)
+         |    |    |-- elementType: string (nullable = true)
+         |    |    |-- content: string (nullable = true)
+         |    |    |-- metadata: map (nullable = true)
+         |    |    |    |-- key: string
+         |    |    |    |-- value: string (valueContainsNull = true)
+        """
+        if not isinstance(docPath, str):
+            raise TypeError("docPath must be a string")
+        jdf = self._java_obj.xml(docPath)
         return self.getDataFrame(self.spark, jdf)
sparknlp/util.py CHANGED

@@ -15,6 +15,9 @@
 
 
 import sparknlp.internal as _internal
+import numpy as np
+from pyspark.sql import Row
+from pyspark.sql.types import StructType, StructField, StringType, IntegerType, BinaryType
 
 
 def get_config_path():

@@ -33,3 +36,26 @@ class CoNLLGenerator:
             _internal._CoNLLGeneratorExportFromTargetAndPipeline(*args).apply()
         else:
             raise NotImplementedError(f"No exportConllFiles alternative takes {num_args} parameters")
+
+
+class EmbeddingsDataFrameUtils:
+    """
+    Utility for creating DataFrames compatible with multimodal embedding models (e.g., E5VEmbeddings) for text-only scenarios.
+    Provides:
+      - imageSchema: the expected schema for Spark image DataFrames
+      - emptyImageRow: a dummy image row for text-only embedding
+    """
+    imageSchema = StructType([
+        StructField(
+            "image",
+            StructType([
+                StructField("origin", StringType(), True),
+                StructField("height", IntegerType(), True),
+                StructField("width", IntegerType(), True),
+                StructField("nChannels", IntegerType(), True),
+                StructField("mode", IntegerType(), True),
+                StructField("data", BinaryType(), True),
+            ]),
+        )
+    ])
+    emptyImageRow = Row(Row("", 0, 0, 0, 0, bytes()))
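The schema above mirrors Spark's built-in `image` data source, which is what lets a text-only DataFrame pass through annotators that expect image input. A small sketch (an active SparkSession is assumed):

```python
# Build a one-row "image" DataFrame whose dummy row satisfies image-typed
# annotators, then attach the text to embed.
from pyspark.sql.functions import lit
from sparknlp.util import EmbeddingsDataFrameUtils

text_only_df = spark.createDataFrame(
    [EmbeddingsDataFrameUtils.emptyImageRow],
    EmbeddingsDataFrameUtils.imageSchema,
).withColumn("text", lit("A cat sitting in a box."))
text_only_df.printSchema()  # matches spark.read.format("image") output
```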
{spark_nlp-6.0.2.dist-info → spark_nlp-6.0.4.dist-info}/WHEEL: file without changes

{spark_nlp-6.0.2.dist-info → spark_nlp-6.0.4.dist-info}/top_level.txt: file without changes