spark-nlp 5.2.2__py2.py3-none-any.whl → 5.3.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of spark-nlp has been flagged as a potentially problematic release.

@@ -41,7 +41,7 @@ class BertForZeroShotClassification(AnnotatorModel,
  ... .setInputCols(["token", "document"]) \\
  ... .setOutputCol("label")
 
- The default model is ``"bert_base_cased_zero_shot_classifier_xnli"``, if no name is
+ The default model is ``"bert_zero_shot_classifier_mnli"``, if no name is
  provided.
 
  For available pretrained models please see the `Models Hub
@@ -189,14 +189,14 @@ class BertForZeroShotClassification(AnnotatorModel,
  return BertForZeroShotClassification(java_model=jModel)
 
  @staticmethod
- def pretrained(name="bert_base_cased_zero_shot_classifier_xnli", lang="en", remote_loc=None):
+ def pretrained(name="bert_zero_shot_classifier_mnli", lang="xx", remote_loc=None):
  """Downloads and loads a pretrained model.
 
  Parameters
  ----------
  name : str, optional
  Name of the pretrained model, by default
- "bert_base_cased_zero_shot_classifier_xnli"
+ "bert_zero_shot_classifier_mnli"
  lang : str, optional
  Language of the pretrained model, by default "en"
  remote_loc : str, optional
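
The hunk above swaps the default zero-shot checkpoint to "bert_zero_shot_classifier_mnli" and its default language to "xx". A minimal sketch of a pipeline that loads it with the name and language spelled out explicitly (values taken from the diff; the DocumentAssembler/Tokenizer stages and the candidate labels are illustrative assumptions, not part of this diff):

import sparknlp
from sparknlp.base import DocumentAssembler
from sparknlp.annotator import Tokenizer, BertForZeroShotClassification
from pyspark.ml import Pipeline

spark = sparknlp.start()

documentAssembler = DocumentAssembler() \
    .setInputCol("text") \
    .setOutputCol("document")

tokenizer = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token")

# Explicit name/lang mirror the new defaults; a bare .pretrained() should resolve to the same model.
zeroShotClassifier = BertForZeroShotClassification.pretrained("bert_zero_shot_classifier_mnli", "xx") \
    .setInputCols(["token", "document"]) \
    .setOutputCol("label") \
    .setCandidateLabels(["urgent", "not urgent"])  # hypothetical labels; zero-shot labels are chosen at runtime

pipeline = Pipeline(stages=[documentAssembler, tokenizer, zeroShotClassifier])
data = spark.createDataFrame([["The server is down and customers cannot log in."]]).toDF("text")
pipeline.fit(data).transform(data).select("label.result").show(truncate=False)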
@@ -0,0 +1,206 @@
+ # Copyright 2017-2023 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains classes for DeBertaForZeroShotClassification."""
+
+ from sparknlp.common import *
+
+
+ class DeBertaForZeroShotClassification(AnnotatorModel,
+ HasCaseSensitiveProperties,
+ HasBatchedAnnotate,
+ HasClassifierActivationProperties,
+ HasCandidateLabelsProperties,
+ HasEngine):
+ """DeBertaForZeroShotClassification using a `ModelForSequenceClassification` trained on NLI (natural language
+ inference) tasks. Equivalent of `DeBertaForSequenceClassification` models, but these models don't require a hardcoded
+ number of potential classes, they can be chosen at runtime. It usually means it's slower but it is much more
+ flexible.
+ Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis
+ pair and passed to the pretrained model.
+ Pretrained models can be loaded with :meth:`.pretrained` of the companion
+ object:
+ >>> sequenceClassifier = DeBertaForZeroShotClassification.pretrained() \\
+ ... .setInputCols(["token", "document"]) \\
+ ... .setOutputCol("label")
+ The default model is ``"deberta_base_zero_shot_classifier_mnli_anli_v3"``, if no name is
+ provided.
+ For available pretrained models please see the `Models Hub
+ <https://sparknlp.org/models?task=Text+Classification>`__.
+ To see which models are compatible and how to import them see
+ `Import Transformers into Spark NLP 🚀
+ <https://github.com/JohnSnowLabs/spark-nlp/discussions/5669>`_.
+ ====================== ======================
+ Input Annotation types Output Annotation type
+ ====================== ======================
+ ``DOCUMENT, TOKEN`` ``CATEGORY``
+ ====================== ======================
+ Parameters
+ ----------
+ batchSize
+ Batch size. Large values allows faster processing but requires more
+ memory, by default 8
+ caseSensitive
+ Whether to ignore case in tokens for embeddings matching, by default
+ True
+ configProtoBytes
+ ConfigProto from tensorflow, serialized into byte array.
+ maxSentenceLength
+ Max sentence length to process, by default 128
+ coalesceSentences
+ Instead of 1 class per sentence (if inputCols is `sentence`) output 1
+ class per document by averaging probabilities in all sentences, by
+ default False
+ activation
+ Whether to calculate logits via Softmax or Sigmoid, by default
+ `"softmax"`.
+ Examples
+ --------
+ >>> import sparknlp
+ >>> from sparknlp.base import *
+ >>> from sparknlp.annotator import *
+ >>> from pyspark.ml import Pipeline
+ >>> documentAssembler = DocumentAssembler() \\
+ ... .setInputCol("text") \\
+ ... .setOutputCol("document")
+ >>> tokenizer = Tokenizer() \\
+ ... .setInputCols(["document"]) \\
+ ... .setOutputCol("token")
+ >>> sequenceClassifier = DeBertaForZeroShotClassification.pretrained() \\
+ ... .setInputCols(["token", "document"]) \\
+ ... .setOutputCol("label") \\
+ ... .setCaseSensitive(True)
+ >>> pipeline = Pipeline().setStages([
+ ... documentAssembler,
+ ... tokenizer,
+ ... sequenceClassifier
+ ... ])
+ >>> data = spark.createDataFrame([["I loved this movie when I was a child."], ["It was pretty boring."]]).toDF("text")
+ >>> result = pipeline.fit(data).transform(data)
+ >>> result.select("label.result").show(truncate=False)
+ +------+
+ |result|
+ +------+
+ |[pos] |
+ |[neg] |
+ +------+
+ """
+ name = "DeBertaForZeroShotClassification"
+
+ inputAnnotatorTypes = [AnnotatorType.DOCUMENT, AnnotatorType.TOKEN]
+
+ outputAnnotatorType = AnnotatorType.CATEGORY
+
+ maxSentenceLength = Param(Params._dummy(),
+ "maxSentenceLength",
+ "Max sentence length to process",
+ typeConverter=TypeConverters.toInt)
+
+ configProtoBytes = Param(Params._dummy(),
+ "configProtoBytes",
+ "ConfigProto from tensorflow, serialized into byte array. Get with config_proto.SerializeToString()",
+ TypeConverters.toListInt)
+
+ coalesceSentences = Param(Params._dummy(), "coalesceSentences",
+ "Instead of 1 class per sentence (if inputCols is '''sentence''') output 1 class per document by averaging probabilities in all sentences.",
+ TypeConverters.toBoolean)
+
+ def getClasses(self):
+ """
+ Returns labels used to train this model
+ """
+ return self._call_java("getClasses")
+
+ def setConfigProtoBytes(self, b):
+ """Sets configProto from tensorflow, serialized into byte array.
+ Parameters
+ ----------
+ b : List[int]
+ ConfigProto from tensorflow, serialized into byte array
+ """
+ return self._set(configProtoBytes=b)
+
+ def setMaxSentenceLength(self, value):
+ """Sets max sentence length to process, by default 128.
+ Parameters
+ ----------
+ value : int
+ Max sentence length to process
+ """
+ return self._set(maxSentenceLength=value)
+
+ def setCoalesceSentences(self, value):
+ """Instead of 1 class per sentence (if inputCols is '''sentence''') output 1 class per document by averaging
+ probabilities in all sentences. Due to max sequence length limit in almost all transformer models such as DeBerta
+ (512 tokens), this parameter helps to feed all the sentences into the model and averaging all the probabilities
+ for the entire document instead of probabilities per sentence. (Default: False)
+ Parameters
+ ----------
+ value : bool
+ If the output of all sentences will be averaged to one output
+ """
+ return self._set(coalesceSentences=value)
+
+ @keyword_only
+ def __init__(self, classname="com.johnsnowlabs.nlp.annotators.classifier.dl.DeBertaForZeroShotClassification",
+ java_model=None):
+ super(DeBertaForZeroShotClassification, self).__init__(
+ classname=classname,
+ java_model=java_model
+ )
+ self._setDefault(
+ batchSize=8,
+ maxSentenceLength=128,
+ caseSensitive=True,
+ coalesceSentences=False,
+ activation="softmax"
+ )
+
+ @staticmethod
+ def loadSavedModel(folder, spark_session):
+ """Loads a locally saved model.
+ Parameters
+ ----------
+ folder : str
+ Folder of the saved model
+ spark_session : pyspark.sql.SparkSession
+ The current SparkSession
+ Returns
+ -------
+ DeBertaForZeroShotClassification
+ The restored model
+ """
+ from sparknlp.internal import _DeBertaForZeroShotClassification
+ jModel = _DeBertaForZeroShotClassification(folder, spark_session._jsparkSession)._java_obj
+ return DeBertaForZeroShotClassification(java_model=jModel)
+
+ @staticmethod
+ def pretrained(name="deberta_base_zero_shot_classifier_mnli_anli_v3", lang="en", remote_loc=None):
+ """Downloads and loads a pretrained model.
+ Parameters
+ ----------
+ name : str, optional
+ Name of the pretrained model, by default
+ "deberta_base_zero_shot_classifier_mnli_anli_v3"
+ lang : str, optional
+ Language of the pretrained model, by default "en"
+ remote_loc : str, optional
+ Optional remote address of the resource, by default None. Will use
+ Spark NLPs repositories otherwise.
+ Returns
+ -------
+ DeBertaForZeroShotClassification
+ The restored model
+ """
+ from sparknlp.pretrained import ResourceDownloader
+ return ResourceDownloader.downloadModel(DeBertaForZeroShotClassification, name, lang, remote_loc)
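
The new DeBertaForZeroShotClassification mixes in HasCandidateLabelsProperties, but the docstring example above never sets the labels to classify against. A minimal sketch of supplying them at runtime, assuming the setCandidateLabels setter that trait conventionally provides; the label strings are made up:

from sparknlp.annotator import DeBertaForZeroShotClassification

# Loads the default "deberta_base_zero_shot_classifier_mnli_anli_v3" checkpoint.
zeroShotClassifier = DeBertaForZeroShotClassification.pretrained() \
    .setInputCols(["token", "document"]) \
    .setOutputCol("label") \
    .setCaseSensitive(True) \
    .setCandidateLabels(["sports", "politics", "technology"])  # hypothetical runtime labels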
@@ -0,0 +1,148 @@
+ # Copyright 2017-2022 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from sparknlp.common import *
+
+
+ class MPNetForQuestionAnswering(AnnotatorModel,
+ HasCaseSensitiveProperties,
+ HasBatchedAnnotate,
+ HasEngine,
+ HasMaxSentenceLengthLimit):
+ """MPNetForQuestionAnswering can load MPNet Models with a span classification head on top for extractive
+ question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute span start
+ logits and span end logits).
+
+ Pretrained models can be loaded with :meth:`.pretrained` of the companion
+ object:
+
+ >>> spanClassifier = MPNetForQuestionAnswering.pretrained() \\
+ ... .setInputCols(["document_question", "document_context"]) \\
+ ... .setOutputCol("answer")
+
+ The default model is ``"mpnet_base_question_answering_squad2"``, if no name is
+ provided.
+
+ For available pretrained models please see the `Models Hub
+ <https://sparknlp.org/models?task=Question+Answering>`__.
+
+ To see which models are compatible and how to import them see
+ `Import Transformers into Spark NLP 🚀
+ <https://github.com/JohnSnowLabs/spark-nlp/discussions/5669>`_.
+
+ ====================== ======================
+ Input Annotation types Output Annotation type
+ ====================== ======================
+ ``DOCUMENT, DOCUMENT`` ``CHUNK``
+ ====================== ======================
+
+ Parameters
+ ----------
+ batchSize
+ Batch size. Large values allows faster processing but requires more
+ memory, by default 8
+ caseSensitive
+ Whether to ignore case in tokens for embeddings matching, by default
+ False
+ maxSentenceLength
+ Max sentence length to process, by default 384
+
+ Examples
+ --------
+ >>> import sparknlp
+ >>> from sparknlp.base import *
+ >>> from sparknlp.annotator import *
+ >>> from pyspark.ml import Pipeline
+ >>> documentAssembler = MultiDocumentAssembler() \\
+ ... .setInputCols(["question", "context"]) \\
+ ... .setOutputCols(["document_question", "document_context"])
+ >>> spanClassifier = MPNetForQuestionAnswering.pretrained() \\
+ ... .setInputCols(["document_question", "document_context"]) \\
+ ... .setOutputCol("answer") \\
+ ... .setCaseSensitive(False)
+ >>> pipeline = Pipeline().setStages([
+ ... documentAssembler,
+ ... spanClassifier
+ ... ])
+ >>> data = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+ >>> result = pipeline.fit(data).transform(data)
+ >>> result.select("answer.result").show(truncate=False)
+ +--------------------+
+ |result |
+ +--------------------+
+ |[Clara] |
+ +--------------------+
+ """
+ name = "MPNetForQuestionAnswering"
+
+ inputAnnotatorTypes = [AnnotatorType.DOCUMENT, AnnotatorType.DOCUMENT]
+
+ outputAnnotatorType = AnnotatorType.CHUNK
+
+
+ @keyword_only
+ def __init__(self, classname="com.johnsnowlabs.nlp.annotators.classifier.dl.MPNetForQuestionAnswering",
+ java_model=None):
+ super(MPNetForQuestionAnswering, self).__init__(
+ classname=classname,
+ java_model=java_model
+ )
+ self._setDefault(
+ batchSize=8,
+ maxSentenceLength=384,
+ caseSensitive=False
+ )
+
+ @staticmethod
+ def loadSavedModel(folder, spark_session):
+ """Loads a locally saved model.
+
+ Parameters
+ ----------
+ folder : str
+ Folder of the saved model
+ spark_session : pyspark.sql.SparkSession
+ The current SparkSession
+
+ Returns
+ -------
+ MPNetForQuestionAnswering
+ The restored model
+ """
+ from sparknlp.internal import _MPNetForQuestionAnsweringLoader
+ jModel = _MPNetForQuestionAnsweringLoader(folder, spark_session._jsparkSession)._java_obj
+ return MPNetForQuestionAnswering(java_model=jModel)
+
+ @staticmethod
+ def pretrained(name="mpnet_base_question_answering_squad2", lang="en", remote_loc=None):
+ """Downloads and loads a pretrained model.
+
+ Parameters
+ ----------
+ name : str, optional
+ Name of the pretrained model, by default
+ "mpnet_base_question_answering_squad2"
+ lang : str, optional
+ Language of the pretrained model, by default "en"
+ remote_loc : str, optional
+ Optional remote address of the resource, by default None. Will use
+ Spark NLPs repositories otherwise.
+
+ Returns
+ -------
+ MPNetForQuestionAnswering
+ The restored model
+ """
+ from sparknlp.pretrained import ResourceDownloader
+ return ResourceDownloader.downloadModel(MPNetForQuestionAnswering, name, lang, remote_loc)
@@ -0,0 +1,188 @@
+ # Copyright 2017-2022 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains classes for MPNetForSequenceClassification."""
+
+ from sparknlp.common import *
+
+
+ class MPNetForSequenceClassification(AnnotatorModel,
+ HasCaseSensitiveProperties,
+ HasBatchedAnnotate,
+ HasClassifierActivationProperties,
+ HasEngine,
+ HasMaxSentenceLengthLimit):
+ """MPNetForSequenceClassification can load MPNet Models with sequence classification/regression head on
+ top (a linear layer on top of the pooled output) e.g. for multi-class document classification tasks.
+
+ Pretrained models can be loaded with :meth:`.pretrained` of the companion
+ object:
+
+ >>> sequenceClassifier = MPNetForSequenceClassification.pretrained() \\
+ ... .setInputCols(["token", "document"]) \\
+ ... .setOutputCol("label")
+
+ The default model is ``"mpnet_sequence_classifier_ukr_message"``, if no name is
+ provided.
+
+ For available pretrained models please see the `Models Hub
+ <https://sparknlp.org/models?task=Text+Classification>`__.
+
+ To see which models are compatible and how to import them see
+ `Import Transformers into Spark NLP 🚀
+ <https://github.com/JohnSnowLabs/spark-nlp/discussions/5669>`_.
+
+ ====================== ======================
+ Input Annotation types Output Annotation type
+ ====================== ======================
+ ``DOCUMENT, TOKEN`` ``CATEGORY``
+ ====================== ======================
+
+ Parameters
+ ----------
+ batchSize
+ Batch size. Large values allows faster processing but requires more
+ memory, by default 8
+ caseSensitive
+ Whether to ignore case in tokens for embeddings matching, by default
+ True
+ maxSentenceLength
+ Max sentence length to process, by default 128
+ coalesceSentences
+ Instead of 1 class per sentence (if inputCols is `sentence`) output
+ 1 class per document by averaging probabilities in all sentences, by
+ default False.
+ activation
+ Whether to calculate logits via Softmax or Sigmoid, by default
+ `"softmax"`.
+
+ Examples
+ --------
+ >>> import sparknlp
+ >>> from sparknlp.base import *
+ >>> from sparknlp.annotator import *
+ >>> from pyspark.ml import Pipeline
+ >>> document = DocumentAssembler() \\
+ ... .setInputCol("text") \\
+ ... .setOutputCol("document")
+ >>> tokenizer = Tokenizer() \\
+ ... .setInputCols(["document"]) \\
+ ... .setOutputCol("token")
+ >>> sequenceClassifier = MPNetForSequenceClassification \\
+ ... .pretrained() \\
+ ... .setInputCols(["document", "token"]) \\
+ ... .setOutputCol("label")
+ >>> data = spark.createDataFrame([
+ ... ["I love driving my car."],
+ ... ["The next bus will arrive in 20 minutes."],
+ ... ["pineapple on pizza is the worst 🤮"],
+ ... ]).toDF("text")
+ >>> pipeline = Pipeline().setStages([document, tokenizer, sequenceClassifier])
+ >>> pipelineModel = pipeline.fit(data)
+ >>> results = pipelineModel.transform(data)
+ >>> results.select("label.result").show()
+ +--------------------+
+ | result|
+ +--------------------+
+ | [TRANSPORT/CAR]|
+ |[TRANSPORT/MOVEMENT]|
+ | [FOOD]|
+ +--------------------+
+ """
+ name = "MPNetForSequenceClassification"
+
+ inputAnnotatorTypes = [AnnotatorType.DOCUMENT, AnnotatorType.TOKEN]
+
+ outputAnnotatorType = AnnotatorType.CATEGORY
+
+
+ coalesceSentences = Param(Params._dummy(), "coalesceSentences",
+ "Instead of 1 class per sentence (if inputCols is '''sentence''') output 1 class per document by averaging probabilities in all sentences.",
+ TypeConverters.toBoolean)
+
+ def getClasses(self):
+ """
+ Returns labels used to train this model
+ """
+ return self._call_java("getClasses")
+
+
+ def setCoalesceSentences(self, value):
+ """Instead of 1 class per sentence (if inputCols is '''sentence''') output 1 class per document by averaging probabilities in all sentences.
+ Due to max sequence length limit in almost all transformer models such as BERT (512 tokens), this parameter helps feeding all the sentences
+ into the model and averaging all the probabilities for the entire document instead of probabilities per sentence. (Default: False)
+
+ Parameters
+ ----------
+ value : bool
+ If the output of all sentences will be averaged to one output
+ """
+ return self._set(coalesceSentences=value)
+
+ @keyword_only
+ def __init__(self, classname="com.johnsnowlabs.nlp.annotators.classifier.dl.MPNetForSequenceClassification",
+ java_model=None):
+ super(MPNetForSequenceClassification, self).__init__(
+ classname=classname,
+ java_model=java_model
+ )
+ self._setDefault(
+ batchSize=8,
+ maxSentenceLength=128,
+ caseSensitive=True,
+ coalesceSentences=False,
+ activation="softmax"
+ )
+
+ @staticmethod
+ def loadSavedModel(folder, spark_session):
+ """Loads a locally saved model.
+
+ Parameters
+ ----------
+ folder : str
+ Folder of the saved model
+ spark_session : pyspark.sql.SparkSession
+ The current SparkSession
+
+ Returns
+ -------
+ MPNetForSequenceClassification
+ The restored model
+ """
+ from sparknlp.internal import _MPNetForSequenceClassificationLoader
+ jModel = _MPNetForSequenceClassificationLoader(folder, spark_session._jsparkSession)._java_obj
+ return MPNetForSequenceClassification(java_model=jModel)
+
+ @staticmethod
+ def pretrained(name="mpnet_sequence_classifier_ukr_message", lang="en", remote_loc=None):
+ """Downloads and loads a pretrained model.
+
+ Parameters
+ ----------
+ name : str, optional
+ Name of the pretrained model, by default
+ "MPNet_base_sequence_classifier_imdb"
+ lang : str, optional
+ Language of the pretrained model, by default "en"
+ remote_loc : str, optional
+ Optional remote address of the resource, by default None. Will use
+ Spark NLPs repositories otherwise.
+
+ Returns
+ -------
+ MPNetForSequenceClassification
+ The restored model
+ """
+ from sparknlp.pretrained import ResourceDownloader
+ return ResourceDownloader.downloadModel(MPNetForSequenceClassification, name, lang, remote_loc)
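
The class exposes two loading paths: pretrained() for the Models Hub and loadSavedModel() for locally exported weights. A minimal sketch of the import-and-persist workflow, with a made-up folder path and assuming the standard Spark ML write()/save()/load() persistence available on Spark NLP annotators:

from sparknlp.annotator import MPNetForSequenceClassification

# "spark" is the active SparkSession (e.g. from sparknlp.start());
# "/tmp/exported_mpnet" is a placeholder for a folder produced by an external export step.
sequenceClassifier = MPNetForSequenceClassification.loadSavedModel("/tmp/exported_mpnet", spark) \
    .setInputCols(["document", "token"]) \
    .setOutputCol("label")

# Persist once so later runs can reload without re-importing.
sequenceClassifier.write().overwrite().save("/tmp/mpnet_sequence_classifier_spark_nlp")

# Reload the persisted annotator in a later session.
restored = MPNetForSequenceClassification.load("/tmp/mpnet_sequence_classifier_spark_nlp")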
@@ -228,5 +228,5 @@ class EntityRulerModel(AnnotatorModel, HasStorageModel):
 
  @staticmethod
  def loadStorage(path, spark, storage_ref):
- HasStorageModel.loadStorages(path, spark, storage_ref, EntityRulerModel.databases)
+ HasStorageModel.loadStorages(path, spark, storage_ref, EntityRulerModel.database)
 
@@ -17,3 +17,5 @@ from sparknlp.annotator.seq2seq.gpt2_transformer import *
  from sparknlp.annotator.seq2seq.marian_transformer import *
  from sparknlp.annotator.seq2seq.t5_transformer import *
  from sparknlp.annotator.seq2seq.bart_transformer import *
+ from sparknlp.annotator.seq2seq.llama2_transformer import *
+ from sparknlp.annotator.seq2seq.m2m100_transformer import *
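
The two new imports expose the LLAMA2Transformer and M2M100Transformer seq2seq annotators added in 5.3.0. A minimal sketch of a translation pipeline with M2M100Transformer, assuming it follows the same pretrained()/setInputCols/setOutputCol pattern as the existing seq2seq annotators (BartTransformer, T5Transformer); the source/target language setters and the default checkpoint are assumptions, since neither appears in this diff:

import sparknlp
from sparknlp.base import DocumentAssembler
from sparknlp.annotator import M2M100Transformer
from pyspark.ml import Pipeline

spark = sparknlp.start()

documentAssembler = DocumentAssembler() \
    .setInputCol("text") \
    .setOutputCol("documents")

# Default pretrained checkpoint; the exact model name is not part of this diff.
m2m100 = M2M100Transformer.pretrained() \
    .setInputCols(["documents"]) \
    .setOutputCol("generation") \
    .setSrcLang("en") \
    .setTgtLang("fr")  # illustrative language pair

pipeline = Pipeline(stages=[documentAssembler, m2m100])
data = spark.createDataFrame([["How are you today?"]]).toDF("text")
pipeline.fit(data).transform(data).select("generation.result").show(truncate=False)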