spark-nlp 5.4.1__py2.py3-none-any.whl → 5.5.0__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of spark-nlp might be problematic.
- spark_nlp-5.5.0.dist-info/METADATA +345 -0
- {spark_nlp-5.4.1.dist-info → spark_nlp-5.5.0.dist-info}/RECORD +25 -13
- sparknlp/__init__.py +2 -2
- sparknlp/annotator/classifier_dl/__init__.py +4 -1
- sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +211 -0
- sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +202 -0
- sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +2 -15
- sparknlp/annotator/embeddings/__init__.py +3 -0
- sparknlp/annotator/embeddings/mxbai_embeddings.py +184 -0
- sparknlp/annotator/embeddings/nomic_embeddings.py +181 -0
- sparknlp/annotator/embeddings/snowflake_embeddings.py +202 -0
- sparknlp/annotator/matcher/date_matcher.py +15 -0
- sparknlp/annotator/seq2seq/__init__.py +7 -0
- sparknlp/annotator/seq2seq/auto_gguf_model.py +804 -0
- sparknlp/annotator/seq2seq/cpm_transformer.py +321 -0
- sparknlp/annotator/seq2seq/llama3_transformer.py +381 -0
- sparknlp/annotator/seq2seq/nllb_transformer.py +420 -0
- sparknlp/annotator/seq2seq/phi3_transformer.py +330 -0
- sparknlp/annotator/seq2seq/qwen_transformer.py +339 -0
- sparknlp/annotator/seq2seq/starcoder_transformer.py +335 -0
- sparknlp/annotator/similarity/document_similarity_ranker.py +22 -0
- sparknlp/internal/__init__.py +89 -0
- spark_nlp-5.4.1.dist-info/METADATA +0 -1357
- {spark_nlp-5.4.1.dist-info → spark_nlp-5.5.0.dist-info}/.uuid +0 -0
- {spark_nlp-5.4.1.dist-info → spark_nlp-5.5.0.dist-info}/WHEEL +0 -0
- {spark_nlp-5.4.1.dist-info → spark_nlp-5.5.0.dist-info}/top_level.txt +0 -0
spark_nlp-5.5.0.dist-info/METADATA
ADDED
@@ -0,0 +1,345 @@
Metadata-Version: 2.1
Name: spark-nlp
Version: 5.5.0
Summary: John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.
Home-page: https://github.com/JohnSnowLabs/spark-nlp
Author: John Snow Labs
Keywords: NLP spark vision speech deep learning transformer tensorflow BERT GPT-2 Wav2Vec2 ViT
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Classifier: Operating System :: POSIX :: Linux
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Operating System :: Microsoft :: Windows
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Topic :: Scientific/Engineering :: Information Analysis
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Software Development :: Internationalization
Classifier: Topic :: Software Development :: Localization
Classifier: Topic :: Software Development :: Build Tools
Classifier: Topic :: Text Processing :: Linguistic
Classifier: Topic :: Scientific/Engineering
Classifier: Typing :: Typed
Description-Content-Type: text/markdown
# Spark NLP: State-of-the-Art Natural Language Processing & LLMs Library

<p align="center">
    <a href="https://github.com/JohnSnowLabs/spark-nlp/actions" alt="build">
        <img src="https://github.com/JohnSnowLabs/spark-nlp/workflows/build/badge.svg" /></a>
    <a href="https://github.com/JohnSnowLabs/spark-nlp/releases" alt="Current Release Version">
        <img src="https://img.shields.io/github/v/release/JohnSnowLabs/spark-nlp.svg?style=flat-square&logo=github" /></a>
    <a href="https://search.maven.org/artifact/com.johnsnowlabs.nlp/spark-nlp_2.12" alt="Maven Central">
        <img src="https://maven-badges.herokuapp.com/maven-central/com.johnsnowlabs.nlp/spark-nlp_2.12/badge.svg" /></a>
    <a href="https://badge.fury.io/py/spark-nlp" alt="PyPI version">
        <img src="https://badge.fury.io/py/spark-nlp.svg" /></a>
    <a href="https://anaconda.org/JohnSnowLabs/spark-nlp" alt="Anaconda-Cloud">
        <img src="https://anaconda.org/johnsnowlabs/spark-nlp/badges/version.svg" /></a>
    <a href="https://github.com/JohnSnowLabs/spark-nlp/blob/master/LICENSE" alt="License">
        <img src="https://img.shields.io/badge/License-Apache%202.0-blue.svg" /></a>
    <a href="https://pypi.org/project/spark-nlp/" alt="PyPi downloads">
        <img src="https://static.pepy.tech/personalized-badge/spark-nlp?period=total&units=international_system&left_color=grey&right_color=orange&left_text=pip%20downloads" /></a>
</p>

Spark NLP is a state-of-the-art Natural Language Processing library built on top of Apache Spark. It provides **simple**, **performant** & **accurate** NLP annotations for machine learning pipelines that **scale** easily in a distributed environment.

Spark NLP comes with **83000+** pretrained **pipelines** and **models** in more than **200** languages.
It also offers tasks such as **Tokenization**, **Word Segmentation**, **Part-of-Speech Tagging**, Word and Sentence **Embeddings**, **Named Entity Recognition**, **Dependency Parsing**, **Spell Checking**, **Text Classification**, **Sentiment Analysis**, **Token Classification**, **Machine Translation** (180+ languages), **Summarization**, **Question Answering**, **Table Question Answering**, **Text Generation**, **Image Classification**, **Image to Text (captioning)**, **Automatic Speech Recognition**, **Zero-Shot Learning**, and many more [NLP tasks](#features).

**Spark NLP** is the only open-source NLP library in **production** that offers state-of-the-art transformers such as **BERT**, **CamemBERT**, **ALBERT**, **ELECTRA**, **XLNet**, **DistilBERT**, **RoBERTa**, **DeBERTa**, **XLM-RoBERTa**, **Longformer**, **ELMO**, **Universal Sentence Encoder**, **Llama-2**, **M2M100**, **BART**, **Instructor**, **E5**, **Google T5**, **MarianMT**, **OpenAI GPT2**, **Vision Transformers (ViT)**, **OpenAI Whisper**, **Llama**, **Mistral**, **Phi**, **Qwen2**, and many more, not only in **Python** and **R** but also in the **JVM** ecosystem (**Java**, **Scala**, and **Kotlin**), at **scale** by extending **Apache Spark** natively.

## Model Importing Support

Spark NLP makes it easy to import models from several popular frameworks:

- **TensorFlow**
- **ONNX**
- **OpenVINO**
- **Llama.cpp (GGUF)**

This wide range of support allows you to seamlessly integrate models from different sources into your Spark NLP workflows, enhancing flexibility and compatibility with existing machine learning ecosystems.
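For example, an embeddings model exported from another framework (e.g., a Hugging Face BERT checkpoint saved as a TensorFlow SavedModel or ONNX export) can typically be brought in through the annotator's `loadSavedModel` method. A minimal sketch, where the export path and column names are illustrative:

```python
import sparknlp
from sparknlp.annotator import BertEmbeddings

spark = sparknlp.start()

# Import a model that was exported outside of Spark NLP (path is illustrative)
bert = (
    BertEmbeddings.loadSavedModel("/tmp/exported_bert_model", spark)
    .setInputCols(["document", "token"])
    .setOutputCol("embeddings")
)

# Save it in Spark NLP's own format so it can later be reused with .load()
bert.write().overwrite().save("/tmp/bert_spark_nlp")
```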
## Project's website

Take a look at our official Spark NLP page, [https://sparknlp.org/](https://sparknlp.org/), for user documentation and examples.

## Features

- [Text Preprocessing](https://sparknlp.org/docs/en/features#text-preproccesing)
- [Parsing and Analysis](https://sparknlp.org/docs/en/features#parsing-and-analysis)
- [Sentiment and Classification](https://sparknlp.org/docs/en/features#sentiment-and-classification)
- [Embeddings](https://sparknlp.org/docs/en/features#embeddings)
- [Classification and Question Answering](https://sparknlp.org/docs/en/features#classification-and-question-answering-models)
- [Machine Translation and Generation](https://sparknlp.org/docs/en/features#machine-translation-and-generation)
- [Image and Speech](https://sparknlp.org/docs/en/features#image-and-speech)
- [Integration and Interoperability (ONNX, OpenVINO)](https://sparknlp.org/docs/en/features#integration-and-interoperability)
- [Pre-trained Models (36000+ in 200+ languages)](https://sparknlp.org/docs/en/features#pre-trained-models)
- [Multi-lingual Support](https://sparknlp.org/docs/en/features#multi-lingual-support)
## Quick Start

This is a quick example of how to use a Spark NLP pre-trained pipeline in Python and PySpark:

```sh
$ java -version
# should be Java 8 or 11 (Oracle or OpenJDK)
$ conda create -n sparknlp python=3.7 -y
$ conda activate sparknlp
# spark-nlp by default is based on pyspark 3.x
$ pip install spark-nlp==5.5.0 pyspark==3.3.1
```

In a Python console or a Jupyter `Python3` kernel:

```python
# Import Spark NLP
from sparknlp.base import *
from sparknlp.annotator import *
from sparknlp.pretrained import PretrainedPipeline
import sparknlp

# Start SparkSession with Spark NLP
# start() has parameters such as gpu, apple_silicon, and memory:
# sparknlp.start(gpu=True) will start the session with GPU support
# sparknlp.start(apple_silicon=True) will start the session with macOS M1 & M2 support
# sparknlp.start(memory="16G") changes the default driver memory of the SparkSession
spark = sparknlp.start()

# Download a pre-trained pipeline
pipeline = PretrainedPipeline('explain_document_dl', lang='en')

# Your testing dataset
text = """
The Mona Lisa is a 16th century oil painting created by Leonardo.
It's held at the Louvre in Paris.
"""

# Annotate your testing dataset
result = pipeline.annotate(text)

# What's in the pipeline
list(result.keys())
# Output: ['entities', 'stem', 'checked', 'lemma', 'document',
#          'pos', 'token', 'ner', 'embeddings', 'sentence']

# Check the results
result['entities']
# Output: ['Mona Lisa', 'Leonardo', 'Louvre', 'Paris']
```
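Pretrained pipelines are the fastest way in, but annotators also compose into a regular Spark ML `Pipeline`. A minimal sketch that reuses the `spark` session started above (column names are illustrative):

```python
from pyspark.ml import Pipeline
from sparknlp.base import DocumentAssembler
from sparknlp.annotator import Tokenizer

# Convert raw text into Spark NLP's internal document annotation
document_assembler = DocumentAssembler() \
    .setInputCol("text") \
    .setOutputCol("document")

# Split each document into tokens
tokenizer = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token")

pipeline = Pipeline(stages=[document_assembler, tokenizer])

data = spark.createDataFrame([["Spark NLP annotators behave like any other Spark ML stage."]], ["text"])
model = pipeline.fit(data)
model.transform(data).selectExpr("token.result").show(truncate=False)
```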
For more examples, visit our dedicated [examples](https://github.com/JohnSnowLabs/spark-nlp/tree/master/examples) repository, which showcases all Spark NLP use cases!
### Packages Cheatsheet

This is a cheatsheet mapping the Spark NLP Maven package to each Apache Spark / PySpark major version:

| Apache Spark            | Spark NLP on CPU   | Spark NLP on GPU           | Spark NLP on AArch64 (Linux)   | Spark NLP on Apple Silicon           |
|-------------------------|--------------------|----------------------------|--------------------------------|--------------------------------------|
| 3.0/3.1/3.2/3.3/3.4/3.5 | `spark-nlp`        | `spark-nlp-gpu`            | `spark-nlp-aarch64`            | `spark-nlp-silicon`                  |
| Start Function          | `sparknlp.start()` | `sparknlp.start(gpu=True)` | `sparknlp.start(aarch64=True)` | `sparknlp.start(apple_silicon=True)` |

NOTE: `M1/M2` and `AArch64` support is `experimental`. Community access to these architectures is limited, and we had to build most of the dependencies ourselves to make them compatible. We support both architectures, but they may not work in some environments.
## Pipelines and Models

For a quick example of using pipelines and models, take a look at our official [documentation](https://sparknlp.org/docs/en/install#pipelines-and-models).

#### Please check out our Models Hub for the full list of [pre-trained models](https://sparknlp.org/models) with examples, demos, benchmarks, and more

## Platform and Ecosystem Support

### Apache Spark Support

Spark NLP *5.5.0* has been built on top of Apache Spark 3.4 and fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, 3.4.x, and 3.5.x:

| Spark NLP | Apache Spark 3.5.x | Apache Spark 3.4.x | Apache Spark 3.3.x | Apache Spark 3.2.x | Apache Spark 3.1.x | Apache Spark 3.0.x | Apache Spark 2.4.x | Apache Spark 2.3.x |
|-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
| 5.5.x     | YES                | YES                | YES                | YES                | YES                | YES                | NO                 | NO                 |
| 5.4.x     | YES                | YES                | YES                | YES                | YES                | YES                | NO                 | NO                 |
| 5.3.x     | YES                | YES                | YES                | YES                | YES                | YES                | NO                 | NO                 |
| 5.2.x     | YES                | YES                | YES                | YES                | YES                | YES                | NO                 | NO                 |
| 5.1.x     | Partially          | YES                | YES                | YES                | YES                | YES                | NO                 | NO                 |
| 5.0.x     | YES                | YES                | YES                | YES                | YES                | YES                | NO                 | NO                 |

Find out more about `Spark NLP` versions from our [release notes](https://github.com/JohnSnowLabs/spark-nlp/releases).

### Scala and Python Support

| Spark NLP | Python 3.6 | Python 3.7 | Python 3.8 | Python 3.9 | Python 3.10 | Scala 2.11 | Scala 2.12 |
|-----------|------------|------------|------------|------------|-------------|------------|------------|
| 5.5.x     | NO         | YES        | YES        | YES        | YES         | NO         | YES        |
| 5.4.x     | NO         | YES        | YES        | YES        | YES         | NO         | YES        |
| 5.3.x     | NO         | YES        | YES        | YES        | YES         | NO         | YES        |
| 5.2.x     | NO         | YES        | YES        | YES        | YES         | NO         | YES        |
| 5.1.x     | NO         | YES        | YES        | YES        | YES         | NO         | YES        |
| 5.0.x     | NO         | YES        | YES        | YES        | YES         | NO         | YES        |

Find out more about `Spark NLP` 4.x versions in our official [documentation](https://sparknlp.org/docs/en/install#apache-spark-support).

### Databricks Support

Spark NLP 5.5.0 has been tested and is compatible with the following Databricks runtimes:

| **CPU**        | **GPU**       |
|----------------|---------------|
| 14.1 / 14.1 ML | 14.1 ML & GPU |
| 14.2 / 14.2 ML | 14.2 ML & GPU |
| 14.3 / 14.3 ML | 14.3 ML & GPU |
| 15.0 / 15.0 ML | 15.0 ML & GPU |
| 15.1 / 15.1 ML | 15.1 ML & GPU |
| 15.2 / 15.2 ML | 15.2 ML & GPU |
| 15.3 / 15.3 ML | 15.3 ML & GPU |
| 15.4 / 15.4 ML | 15.4 ML & GPU |

We are also compatible with older runtimes. For the full list, check Databricks support in our official [documentation](https://sparknlp.org/docs/en/install#databricks-support).

### EMR Support

Spark NLP 5.5.0 has been tested and is compatible with the following EMR releases:

| **EMR Release** |
|-----------------|
| emr-6.13.0      |
| emr-6.14.0      |
| emr-6.15.0      |
| emr-7.0.0       |
| emr-7.1.0       |
| emr-7.2.0       |

We are also compatible with older EMR releases. For the full list, check EMR support in our official [documentation](https://sparknlp.org/docs/en/install#emr-support).

Full list of [Amazon EMR 6.x releases](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-release-6x.html)
Full list of [Amazon EMR 7.x releases](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-release-7x.html)

NOTE: EMR 6.1.0 and 6.1.1 are not supported.
## Installation

### Command line (requires internet connection)

To install spark-nlp packages through the command line, follow [these instructions](https://sparknlp.org/docs/en/install#command-line) from our official documentation.

### Scala

Spark NLP supports Scala 2.12.15 if you are using Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, or 3.4.x. Our packages are
deployed to Maven Central. To add any of our packages as a dependency in your application, you can follow [these instructions](https://sparknlp.org/docs/en/install#scala-and-java)
from our official documentation.

If you are interested, there is a simple SBT project to guide you on how to use Spark NLP in your
projects: [Spark NLP SBT Starter](https://github.com/maziyarpanahi/spark-nlp-starter).

### Python

Spark NLP supports Python 3.7.x and above, depending on your major PySpark version.
Check all available installation options for Python in our official [documentation](https://sparknlp.org/docs/en/install#python).
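If you prefer to create the SparkSession yourself rather than call `sparknlp.start()`, the Maven coordinates from the cheatsheet above can be pulled in directly. A minimal sketch (swap the artifact for `spark-nlp-gpu`, `spark-nlp-silicon`, or `spark-nlp-aarch64` as needed; the memory and serializer settings are illustrative):

```python
from pyspark.sql import SparkSession

# Start a Spark session that fetches the Spark NLP jar from Maven Central
spark = (
    SparkSession.builder
    .appName("Spark NLP")
    .master("local[*]")
    .config("spark.driver.memory", "16G")
    .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    .config("spark.jars.packages", "com.johnsnowlabs.nlp:spark-nlp_2.12:5.5.0")
    .getOrCreate()
)
```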
### Compiled JARs

To compile the jars from source, follow [these instructions](https://sparknlp.org/docs/en/compiled#jars) from our official documentation.

## Platform-Specific Instructions

For detailed instructions on how to use Spark NLP on supported platforms, please refer to our official documentation:

| Platform | Supported Language(s) |
|-------------------------|-----------------------|
| [Apache Zeppelin](https://sparknlp.org/docs/en/install#apache-zeppelin) | Scala, Python |
| [Jupyter Notebook](https://sparknlp.org/docs/en/install#jupter-notebook) | Python |
| [Google Colab Notebook](https://sparknlp.org/docs/en/install#google-colab-notebook) | Python |
| [Kaggle Kernel](https://sparknlp.org/docs/en/install#kaggle-kernel) | Python |
| [Databricks Cluster](https://sparknlp.org/docs/en/install#databricks-cluster) | Scala, Python |
| [EMR Cluster](https://sparknlp.org/docs/en/install#emr-cluster) | Scala, Python |
| [GCP Dataproc Cluster](https://sparknlp.org/docs/en/install#gcp-dataproc) | Scala, Python |

### Offline

The Spark NLP library and all pre-trained models/pipelines can be used entirely offline, with no access to the Internet.
Please check [these instructions](https://sparknlp.org/docs/en/install#s3-integration) from our official documentation
to use Spark NLP offline.
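Offline usage comes down to loading models and pipelines from a local path instead of downloading them. A minimal sketch, assuming the artifacts were downloaded and unpacked beforehand (the paths are illustrative):

```python
from pyspark.ml import PipelineModel
from sparknlp.annotator import NerDLModel

# A pre-downloaded pipeline is simply a saved Spark ML PipelineModel on disk
offline_pipeline = PipelineModel.load("/opt/models/explain_document_dl_en")

# Individual annotator models are Spark ML models too, so load() accepts a local folder
ner = NerDLModel.load("/opt/models/ner_dl_en") \
    .setInputCols(["sentence", "token", "embeddings"]) \
    .setOutputCol("ner")
```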
## Advanced Settings

You can change Spark NLP configurations via Spark properties.
Please check [these instructions](https://sparknlp.org/docs/en/install#sparknlp-properties) from our official documentation.
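These properties are set on the SparkSession like any other Spark configuration. A minimal sketch that only relocates the pretrained-model cache; the folder path is illustrative, and the full list of `spark.jsl.settings.*` properties is in the linked documentation:

```python
from pyspark.sql import SparkSession

spark = (
    SparkSession.builder
    .appName("Spark NLP with custom settings")
    .config("spark.jars.packages", "com.johnsnowlabs.nlp:spark-nlp_2.12:5.5.0")
    # Cache downloaded pretrained models and pipelines in a custom folder
    .config("spark.jsl.settings.pretrained.cache_folder", "/opt/sparknlp_cache")
    .getOrCreate()
)
```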
### S3 Integration

In Spark NLP we can define S3 locations to:

- Export log files of training models
- Store TensorFlow graphs used in `NerDLApproach`

Please check [these instructions](https://sparknlp.org/docs/en/install#s3-integration) from our official documentation.
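For example, training-log export is configured per annotator. A minimal sketch for `NerDLApproach`, assuming the S3 credentials are already available to your Spark/Hadoop configuration (the bucket name and columns are illustrative):

```python
from sparknlp.annotator import NerDLApproach

# Write NerDL training logs to an S3 location instead of the local filesystem
ner_approach = NerDLApproach() \
    .setInputCols(["sentence", "token", "embeddings"]) \
    .setLabelColumn("label") \
    .setOutputCol("ner") \
    .setMaxEpochs(10) \
    .setEnableOutputLogs(True) \
    .setOutputLogsPath("s3://my-bucket/sparknlp/ner_logs")
```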
## Documentation

### Examples

Need more **examples**? Check out our dedicated [Spark NLP Examples](https://github.com/JohnSnowLabs/spark-nlp/tree/master/examples)
repository, which showcases all Spark NLP use cases!

Also, don't forget to check [Spark NLP in Action](https://sparknlp.org/demo), built with Streamlit.

#### All examples: [spark-nlp/examples](https://github.com/JohnSnowLabs/spark-nlp/tree/master/examples)

### FAQ

[Check our Articles and Videos page here](https://sparknlp.org/learn)

### Citation

We have published a [paper](https://www.sciencedirect.com/science/article/pii/S2665963821000063) that you can cite for
the Spark NLP library:

```bibtex
@article{KOCAMAN2021100058,
    title = {Spark NLP: Natural language understanding at scale},
    journal = {Software Impacts},
    pages = {100058},
    year = {2021},
    issn = {2665-9638},
    doi = {https://doi.org/10.1016/j.simpa.2021.100058},
    url = {https://www.sciencedirect.com/science/article/pii/S2665963821000063},
    author = {Veysel Kocaman and David Talby},
    keywords = {Spark, Natural language processing, Deep learning, Tensorflow, Cluster},
    abstract = {Spark NLP is a Natural Language Processing (NLP) library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines that can scale easily in a distributed environment. Spark NLP comes with 1100+ pretrained pipelines and models in more than 192+ languages. It supports nearly all the NLP tasks and modules that can be used seamlessly in a cluster. Downloaded more than 2.7 million times and experiencing 9x growth since January 2020, Spark NLP is used by 54% of healthcare organizations as the world's most widely used NLP library in the enterprise.}
}
```

## Community support

- [Slack](https://join.slack.com/t/spark-nlp/shared_invite/zt-198dipu77-L3UWNe_AJ8xqDk0ivmih5Q) For live discussion with the Spark NLP community and the team
- [GitHub](https://github.com/JohnSnowLabs/spark-nlp) Bug reports, feature requests, and contributions
- [Discussions](https://github.com/JohnSnowLabs/spark-nlp/discussions) Engage with other community members, share ideas,
  and show off how you use Spark NLP!
- [Medium](https://medium.com/spark-nlp) Spark NLP articles
- [YouTube](https://www.youtube.com/channel/UCmFOjlpYEhxf_wJUDuz6xxQ/videos) Spark NLP video tutorials

## Contributing

We appreciate any sort of contribution:

- ideas
- feedback
- documentation
- bug reports
- NLP training and testing corpora
- development and testing

Clone the repo and submit your pull requests! Or create issues directly in this repo.

## John Snow Labs

[http://johnsnowlabs.com](http://johnsnowlabs.com)
{spark_nlp-5.4.1.dist-info → spark_nlp-5.5.0.dist-info}/RECORD
CHANGED
@@ -3,7 +3,7 @@ com/johnsnowlabs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
 com/johnsnowlabs/ml/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 com/johnsnowlabs/ml/ai/__init__.py,sha256=YQiK2M7U4d8y5irPy_HB8ae0mSpqS9583MH44pnKJXc,295
 com/johnsnowlabs/nlp/__init__.py,sha256=DPIVXtONO5xXyOk-HB0-sNiHAcco17NN13zPS_6Uw8c,294
-sparknlp/__init__.py,sha256=
+sparknlp/__init__.py,sha256=AKjfuQ0s3OwAuC0XQj4FjGRISLl5RY8bO3WdLpZpQaA,13638
 sparknlp/annotation.py,sha256=I5zOxG5vV2RfPZfqN9enT1i4mo6oBcn3Lrzs37QiOiA,5635
 sparknlp/annotation_audio.py,sha256=iRV_InSVhgvAwSRe9NTbUH9v6OGvTM-FPCpSAKVu0mE,1917
 sparknlp/annotation_image.py,sha256=xhCe8Ko-77XqWVuuYHFrjKqF6zPd8Z-RY_rmZXNwCXU,2547
@@ -30,10 +30,11 @@ sparknlp/annotator/audio/__init__.py,sha256=dXjtvi5c0aTZFq1Q_JciUd1uFTBVSJoUdcq0
 sparknlp/annotator/audio/hubert_for_ctc.py,sha256=76PfwPZZvOHU5kfDqLueCFbmqa4W8pMNRGoCvOqjsEA,7859
 sparknlp/annotator/audio/wav2vec2_for_ctc.py,sha256=K78P1U6vA4O1UufsLYzy0H7arsKNmwPcIV7kzDFsA5Q,6210
 sparknlp/annotator/audio/whisper_for_ctc.py,sha256=uII51umuohqwnAW0Q7VdxEFyr_j5LMnfpcRlf8TbetA,9800
-sparknlp/annotator/classifier_dl/__init__.py,sha256=
+sparknlp/annotator/classifier_dl/__init__.py,sha256=74WL0W2zBfx6v0tJpx1DcRfZENs86n9JxizDDBEE41A,3934
 sparknlp/annotator/classifier_dl/albert_for_question_answering.py,sha256=LG2dL6Fky1T35yXTUZBfIihIIGnkRFQ7ECQ3HRXXEG8,6517
 sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py,sha256=kWx7f9pcKE2qw319gn8FN0Md5dX38gbmfeoY9gWCLNk,7842
 sparknlp/annotator/classifier_dl/albert_for_token_classification.py,sha256=5rdsjWnsAVmtP-idU7ATKJ8lkH2rtlKZLnpi4Mq27eI,6839
+sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py,sha256=_TgV6EiIOiD_djA3fxfoz-o37mzMeKbn6iL2kZ6GzO0,8366
 sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py,sha256=yqQeDdpLbNOKuSZejZjSAjT8ydYyxsTVf2aFDgSSDfc,8767
 sparknlp/annotator/classifier_dl/bert_for_question_answering.py,sha256=2euY_RAdMPA4IHJXZAd5MkQojFOtFNhB_hSc1iVQ5DQ,6433
 sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py,sha256=AzD3RQcRuQc0DDTbL6vGiacTtHlZnbAqksNvRQq7EQE,7800
@@ -42,11 +43,12 @@ sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py,sha256=mli
 sparknlp/annotator/classifier_dl/camembert_for_question_answering.py,sha256=BeE-62tFkXMoyiy3PtcnwgT2-wqzTFo5VZHrWUqsWmM,6510
 sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py,sha256=06bkwhNBcmNS5gR_JrMjBDW3jAdjEI5YL4SuV16Va7E,7962
 sparknlp/annotator/classifier_dl/camembert_for_token_classification.py,sha256=vjwDE_kZiBupENaYvUZOTTqVOb3KCsGse-QX3QOutz4,6522
+sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py,sha256=YUfohQ-qIG3jntfYgrjx8bOFxGTTMrpB-Sj49PNAEEU,8360
 sparknlp/annotator/classifier_dl/classifier_dl.py,sha256=Dj-T5ByCgzgFpah7LVz_07QKBB0qNdqXB6tkvPE-nsQ,12672
 sparknlp/annotator/classifier_dl/deberta_for_question_answering.py,sha256=oikVBeVohsSR9HPV_yq_0U7zHps94UO4lXbYu9G7MF0,6486
 sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py,sha256=H2LDT8ttD9hxfFDrymsyCq0EwCuWl5FE2-XVqT9LcRQ,7773
 sparknlp/annotator/classifier_dl/deberta_for_token_classification.py,sha256=jj5hB9AV-0Of505E6z62lYPIWmsqNeTX0vRRq3_7T9I,6807
-sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py,sha256=
+sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py,sha256=R7eVMChBY_wm7oM2j1Y18ZJ9dcIm5ysq8XBSIiVxZKw,8280
 sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py,sha256=yA4LrI4RN4f44wbIrdpwqderTJBhAkjAHpUxcCeCROE,6552
 sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py,sha256=Cax3LcVLppiHs1dyahsBSq_TLHSwI2-K7LGCZHZNs1I,7926
 sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py,sha256=y9S83LW0Mfn4fRzopRXFj8l2gb-Nrm1rr9zRftOckJU,6832
@@ -81,7 +83,7 @@ sparknlp/annotator/cv/vit_for_image_classification.py,sha256=D2V3pxAd3rBi1817lxV
 sparknlp/annotator/dependency/__init__.py,sha256=eV43oXAGaYl2N1XKIEAAZJLNP8gpHm8VxuXDeDlQzR4,774
 sparknlp/annotator/dependency/dependency_parser.py,sha256=SxyvHPp8Hs1Xnm5X1nLTMi095XoQMtfL8pbys15mYAI,11212
 sparknlp/annotator/dependency/typed_dependency_parser.py,sha256=60vPdYkbFk9MPGegg3m9Uik9cMXpMZd8tBvXG39gNww,12456
-sparknlp/annotator/embeddings/__init__.py,sha256=
+sparknlp/annotator/embeddings/__init__.py,sha256=WifjEILUN6lZs4_WIX80xl5hrrsSrBK-4oVaLh-tONc,2343
 sparknlp/annotator/embeddings/albert_embeddings.py,sha256=6Rd1LIn8oFIpq_ALcJh-RUjPEO7Ht8wsHY6JHSFyMkw,9995
 sparknlp/annotator/embeddings/bert_embeddings.py,sha256=HVUjkg56kBcpGZCo-fmPG5uatMDF3swW_lnbpy1SgSI,8463
 sparknlp/annotator/embeddings/bert_sentence_embeddings.py,sha256=NQy9KuXT9aKsTpYCR5RAeoFWI2YqEGorbdYrf_0KKmw,9148
@@ -96,9 +98,12 @@ sparknlp/annotator/embeddings/elmo_embeddings.py,sha256=KV-KPs0Pq_OpPaHsnqBz2k_S
 sparknlp/annotator/embeddings/instructor_embeddings.py,sha256=CTKmbuBOx_KBM4JM-Y1U5LyR-6rrnpoBGbgGE_axS1c,8670
 sparknlp/annotator/embeddings/longformer_embeddings.py,sha256=jS4fxB5O0-d9ta9VKv8ai-17n5YHt5rML8QxUw7K4Io,8754
 sparknlp/annotator/embeddings/mpnet_embeddings.py,sha256=7d6E4lS7jjkppDPvty1UHNNrbykkriFiysrxZ_RzL0U,7875
+sparknlp/annotator/embeddings/mxbai_embeddings.py,sha256=kCaYcM3lLYJjhElLK5isdxzJqIvoGZlUKKNkySMUkE8,6017
+sparknlp/annotator/embeddings/nomic_embeddings.py,sha256=SfiTTpx0MqeHGC_nyoFNxJbfEQL4v-PrNH6hAOFsd8c,7338
 sparknlp/annotator/embeddings/roberta_embeddings.py,sha256=q_WHby2lDcPc5bVHkGc6X_GwT3qyDUBLUVz5ZW4HCSY,9229
 sparknlp/annotator/embeddings/roberta_sentence_embeddings.py,sha256=KVrD4z_tIU-sphK6dmbbnHBBt8-Y89C_BFQAkN99kZo,8181
 sparknlp/annotator/embeddings/sentence_embeddings.py,sha256=azuA1FKMtTJ9suwJqTEHeWHumT6kYdfURTe_1fsqcB8,5402
+sparknlp/annotator/embeddings/snowflake_embeddings.py,sha256=2k7oxSSgu2Y8U2fkfZCCneG9PQjXzmJsl41BDsD0hfE,7260
 sparknlp/annotator/embeddings/uae_embeddings.py,sha256=sqTT67vcegVxcyoATISLPJSmOnA6J_otB6iREKOb6e4,8794
 sparknlp/annotator/embeddings/universal_sentence_encoder.py,sha256=_fTo-K78RjxiIKptpsI32mpW87RFCdXM16epHv4RVQY,8571
 sparknlp/annotator/embeddings/word2vec.py,sha256=UBhA4qUczQOx1t82Eu51lxx1-wJ_RLnCb__ncowSNhk,13229
@@ -114,7 +119,7 @@ sparknlp/annotator/ld_dl/__init__.py,sha256=gWNGOaozABT83J4Mn7JmNQsXzm27s3PHpMQm
 sparknlp/annotator/ld_dl/language_detector_dl.py,sha256=Z1ej9cdIZueWQ71pdZ1BaogBNZifZiUNGXs4BnnVS2Q,8079
 sparknlp/annotator/matcher/__init__.py,sha256=S-GGVeU1Lf6fZUgJKiFHKvHOjAGrP4TlgdGEK_85KaI,920
 sparknlp/annotator/matcher/big_text_matcher.py,sha256=UeGBP6ibEmYt1Bi-HwnoPotiCosMrg-IHZ3ZNCqQvvg,9785
-sparknlp/annotator/matcher/date_matcher.py,sha256=
+sparknlp/annotator/matcher/date_matcher.py,sha256=FrjTVoNBq1Z7E4qSJKvfV5rC7Mlm9R1gdohPEQswCNI,11737
 sparknlp/annotator/matcher/multi_date_matcher.py,sha256=-zCp4HugIpSN6U4-c1uN_dGn7x69xYy6SUoQz6dY34s,4475
 sparknlp/annotator/matcher/regex_matcher.py,sha256=GYhJNjBnCvfeEMv7d2_kDnqjfHtC7Hts--Ttlkjdozs,8380
 sparknlp/annotator/matcher/text_matcher.py,sha256=kpP1-5BEFEW9kantoHvjhcCbX1qsGm4gEDtFHATWOwA,10636
@@ -139,17 +144,24 @@ sparknlp/annotator/sentence/sentence_detector_dl.py,sha256=-Osj9Bm9KyZRTAWkOsK9c
 sparknlp/annotator/sentiment/__init__.py,sha256=Lq3vKaZS1YATLMg0VNXSVtkWL5q5G9taGBvdrvSwnfg,766
 sparknlp/annotator/sentiment/sentiment_detector.py,sha256=m545NGU0Xzg_PO6_qIfpli1uZj7JQcyFgqe9R6wAPFI,8154
 sparknlp/annotator/sentiment/vivekn_sentiment.py,sha256=4rpXWDgzU6ddnbrSCp9VdLb2epCc9oZ3c6XcqxEw8nk,9655
-sparknlp/annotator/seq2seq/__init__.py,sha256=
+sparknlp/annotator/seq2seq/__init__.py,sha256=Fdz1zsxpB6vM2a0sKuGCSMD1ZgqeVqAez0-AtppMGB4,1541
+sparknlp/annotator/seq2seq/auto_gguf_model.py,sha256=IrhVFUh8SFodJBl5k_V6_7qCu_Ib1l223t3dzp3HsZI,39402
 sparknlp/annotator/seq2seq/bart_transformer.py,sha256=I1flM4yeCzEAKOdQllBC30XuedxVJ7ferkFhZ6gwEbE,18481
+sparknlp/annotator/seq2seq/cpm_transformer.py,sha256=zRbw_xAsaRnhuyYAW8UAGhz7mIyhpqk09nCsJb8-7rg,13298
 sparknlp/annotator/seq2seq/gpt2_transformer.py,sha256=Oz95R_NRR4tWHu_bW6Ak2832ZILXycp3ify7LfRSi8o,15310
 sparknlp/annotator/seq2seq/llama2_transformer.py,sha256=3LzTR0VerFdFmOizsrs2Q7HTnjELJ5WtfUgx5XnOqGM,13898
+sparknlp/annotator/seq2seq/llama3_transformer.py,sha256=dA3rIEVOLmlnJwhqkYmL_GrrcRVpoUY_i7QIyA5N2jM,14920
 sparknlp/annotator/seq2seq/m2m100_transformer.py,sha256=uIL9RZuuryTIdAy9TbJf9wbz6RekhW8S079bJhaB6i4,16116
 sparknlp/annotator/seq2seq/marian_transformer.py,sha256=mQ4Ylh7ZzXAOue8f-x0gqzfS3vAz3XUdD7eQ2XhcEs4,13781
 sparknlp/annotator/seq2seq/mistral_transformer.py,sha256=PJegrSQts_58rkt96xaHlqU1fKIaz8hxt7DTPkGS10A,14254
+sparknlp/annotator/seq2seq/nllb_transformer.py,sha256=hbE2k5YDAZUWPk0qyx6-5xIZi3nBFeFqLcr6lEU9LZ8,19474
 sparknlp/annotator/seq2seq/phi2_transformer.py,sha256=WwKCUOH8qGFv62YF63HjuT7bMVldh06gHvaZH3tbSDk,13787
+sparknlp/annotator/seq2seq/phi3_transformer.py,sha256=rIFSS0sit9kUazUvMvwGFSRRsIuFNqpLH28bVKkFzx4,14219
+sparknlp/annotator/seq2seq/qwen_transformer.py,sha256=UmxF84gQsqFVyofuki9TxaPTS_fP71WTj-ylCMLKsYY,14624
+sparknlp/annotator/seq2seq/starcoder_transformer.py,sha256=BTXbSMRpXnDvrfh-6iFS5k6g6EcPV9zBl4U-SSC19wA,14293
 sparknlp/annotator/seq2seq/t5_transformer.py,sha256=wDVxNLluIU1HGZFqaKKc4YTt4l-elPlAtQ7EEa0f5tg,17308
 sparknlp/annotator/similarity/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sparknlp/annotator/similarity/document_similarity_ranker.py,sha256=
+sparknlp/annotator/similarity/document_similarity_ranker.py,sha256=BHV2XWA18YvBn_OKOVvR0TmPPnHSgiAgpZpaPz7ar_s,15826
 sparknlp/annotator/spell_check/__init__.py,sha256=sdnPR3f3Q9mHiv-n4g_O7KpRWPRPweyATSF6Tth_Niw,830
 sparknlp/annotator/spell_check/context_spell_checker.py,sha256=OtjN51K3TyQpFmZrhPrvxZwCJsENFwTkeNKQYWrP-Gw,31992
 sparknlp/annotator/spell_check/norvig_sweeting.py,sha256=6ET9KnAqXIQDJ5U9px1ixUbC6R63ln_ljruvh_oLiwA,13197
@@ -188,7 +200,7 @@ sparknlp/common/read_as.py,sha256=imxPGwV7jr4Li_acbo0OAHHRGCBbYv-akzEGaBWEfcY,12
 sparknlp/common/recursive_annotator_approach.py,sha256=vqugBw22cE3Ff7PIpRlnYFuOlchgL0nM26D8j-NdpqU,1449
 sparknlp/common/storage.py,sha256=D91H3p8EIjNspjqAYu6ephRpCUtdcAir4_PrAbkIQWE,4842
 sparknlp/common/utils.py,sha256=Yne6yYcwKxhOZC-U4qfYoDhWUP_6BIaAjI5X_P_df1E,1306
-sparknlp/internal/__init__.py,sha256=
+sparknlp/internal/__init__.py,sha256=nK-9lncAVRXmyP8ATbiMwRnLJVe4IEd_r5Z3gEqDK3g,33672
 sparknlp/internal/annotator_java_ml.py,sha256=UGPoThG0rGXUOXGSQnDzEDW81Mu1s5RPF29v7DFyE3c,1187
 sparknlp/internal/annotator_transformer.py,sha256=fXmc2IWXGybqZpbEU9obmbdBYPc798y42zvSB4tqV9U,1448
 sparknlp/internal/extended_java_wrapper.py,sha256=hwP0133-hDiDf5sBF-P3MtUsuuDj1PpQbtGZQIRwzfk,2240
@@ -230,8 +242,8 @@ sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py,sha256=R4yHFN3
 sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py,sha256=EoCSdcIjqQ3wv13MAuuWrKV8wyVBP0SbOEW41omHlR0,23189
 sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py,sha256=k5CQ7gKV6HZbZMB8cKLUJuZxoZWlP_DFWdZ--aIDwsc,2356
 sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py,sha256=pAxjWhjazSX8Vg0MFqJiuRVw1IbnQNSs-8Xp26L4nko,870
-spark_nlp-5.
-spark_nlp-5.
-spark_nlp-5.
-spark_nlp-5.
-spark_nlp-5.
+spark_nlp-5.5.0.dist-info/.uuid,sha256=1f6hF51aIuv9yCvh31NU9lOpS34NE-h3a0Et7R9yR6A,36
+spark_nlp-5.5.0.dist-info/METADATA,sha256=FccpjBJS2ERU0kJM5kPn_bUo4VyX4l8tHLImemv6czo,19156
+spark_nlp-5.5.0.dist-info/WHEEL,sha256=bb2Ot9scclHKMOLDEHY6B2sicWOgugjFKaJsT7vwMQo,110
+spark_nlp-5.5.0.dist-info/top_level.txt,sha256=uuytur4pyMRw2H_txNY2ZkaucZHUs22QF8-R03ch_-E,13
+spark_nlp-5.5.0.dist-info/RECORD,,
sparknlp/__init__.py
CHANGED
@@ -129,7 +129,7 @@ def start(gpu=False,
         The initiated Spark session.
 
     """
-    current_version = "5.
+    current_version = "5.5.0"
 
     if params is None:
         params = {}
@@ -310,4 +310,4 @@ def version():
     str
         The current Spark NLP version.
     """
-    return '5.
+    return '5.5.0'
sparknlp/annotator/classifier_dl/__init__.py
CHANGED
@@ -51,4 +51,7 @@ from sparknlp.annotator.classifier_dl.bart_for_zero_shot_classification import *
 from sparknlp.annotator.classifier_dl.deberta_for_zero_shot_classification import *
 from sparknlp.annotator.classifier_dl.mpnet_for_sequence_classification import *
 from sparknlp.annotator.classifier_dl.mpnet_for_question_answering import *
-from sparknlp.annotator.classifier_dl.mpnet_for_token_classification import *
+from sparknlp.annotator.classifier_dl.mpnet_for_token_classification import *
+from sparknlp.annotator.classifier_dl.albert_for_zero_shot_classification import *
+from sparknlp.annotator.classifier_dl.camembert_for_zero_shot_classification import *
+