spark-nlp 4.2.6__py2.py3-none-any.whl → 6.2.1__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- com/johnsnowlabs/ml/__init__.py +0 -0
- com/johnsnowlabs/ml/ai/__init__.py +10 -0
- spark_nlp-6.2.1.dist-info/METADATA +362 -0
- spark_nlp-6.2.1.dist-info/RECORD +292 -0
- {spark_nlp-4.2.6.dist-info → spark_nlp-6.2.1.dist-info}/WHEEL +1 -1
- sparknlp/__init__.py +81 -28
- sparknlp/annotation.py +3 -2
- sparknlp/annotator/__init__.py +6 -0
- sparknlp/annotator/audio/__init__.py +2 -0
- sparknlp/annotator/audio/hubert_for_ctc.py +188 -0
- sparknlp/annotator/audio/wav2vec2_for_ctc.py +14 -14
- sparknlp/annotator/audio/whisper_for_ctc.py +251 -0
- sparknlp/{base → annotator}/chunk2_doc.py +4 -7
- sparknlp/annotator/chunker.py +1 -2
- sparknlp/annotator/classifier_dl/__init__.py +17 -0
- sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +161 -0
- sparknlp/annotator/classifier_dl/albert_for_question_answering.py +3 -15
- sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py +4 -18
- sparknlp/annotator/classifier_dl/albert_for_token_classification.py +3 -17
- sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +211 -0
- sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py +225 -0
- sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +161 -0
- sparknlp/annotator/classifier_dl/bert_for_question_answering.py +6 -20
- sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py +3 -17
- sparknlp/annotator/classifier_dl/bert_for_token_classification.py +3 -17
- sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py +212 -0
- sparknlp/annotator/classifier_dl/camembert_for_question_answering.py +168 -0
- sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py +5 -19
- sparknlp/annotator/classifier_dl/camembert_for_token_classification.py +5 -19
- sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +202 -0
- sparknlp/annotator/classifier_dl/classifier_dl.py +4 -4
- sparknlp/annotator/classifier_dl/deberta_for_question_answering.py +3 -17
- sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py +4 -19
- sparknlp/annotator/classifier_dl/deberta_for_token_classification.py +5 -21
- sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +193 -0
- sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py +3 -17
- sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py +4 -18
- sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py +3 -17
- sparknlp/annotator/classifier_dl/distil_bert_for_zero_shot_classification.py +211 -0
- sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +161 -0
- sparknlp/annotator/classifier_dl/longformer_for_question_answering.py +3 -17
- sparknlp/annotator/classifier_dl/longformer_for_sequence_classification.py +4 -18
- sparknlp/annotator/classifier_dl/longformer_for_token_classification.py +3 -17
- sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py +148 -0
- sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py +188 -0
- sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py +173 -0
- sparknlp/annotator/classifier_dl/multi_classifier_dl.py +3 -3
- sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +161 -0
- sparknlp/annotator/classifier_dl/roberta_for_question_answering.py +3 -17
- sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py +4 -18
- sparknlp/annotator/classifier_dl/roberta_for_token_classification.py +1 -1
- sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py +225 -0
- sparknlp/annotator/classifier_dl/sentiment_dl.py +4 -4
- sparknlp/annotator/classifier_dl/tapas_for_question_answering.py +2 -2
- sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +149 -0
- sparknlp/annotator/classifier_dl/xlm_roberta_for_question_answering.py +3 -17
- sparknlp/annotator/classifier_dl/xlm_roberta_for_sequence_classification.py +4 -18
- sparknlp/annotator/classifier_dl/xlm_roberta_for_token_classification.py +6 -20
- sparknlp/annotator/classifier_dl/xlm_roberta_for_zero_shot_classification.py +225 -0
- sparknlp/annotator/classifier_dl/xlnet_for_sequence_classification.py +4 -18
- sparknlp/annotator/classifier_dl/xlnet_for_token_classification.py +3 -17
- sparknlp/annotator/cleaners/__init__.py +15 -0
- sparknlp/annotator/cleaners/cleaner.py +202 -0
- sparknlp/annotator/cleaners/extractor.py +191 -0
- sparknlp/annotator/coref/spanbert_coref.py +4 -18
- sparknlp/annotator/cv/__init__.py +15 -0
- sparknlp/annotator/cv/blip_for_question_answering.py +172 -0
- sparknlp/annotator/cv/clip_for_zero_shot_classification.py +193 -0
- sparknlp/annotator/cv/convnext_for_image_classification.py +269 -0
- sparknlp/annotator/cv/florence2_transformer.py +180 -0
- sparknlp/annotator/cv/gemma3_for_multimodal.py +346 -0
- sparknlp/annotator/cv/internvl_for_multimodal.py +280 -0
- sparknlp/annotator/cv/janus_for_multimodal.py +351 -0
- sparknlp/annotator/cv/llava_for_multimodal.py +328 -0
- sparknlp/annotator/cv/mllama_for_multimodal.py +340 -0
- sparknlp/annotator/cv/paligemma_for_multimodal.py +308 -0
- sparknlp/annotator/cv/phi3_vision_for_multimodal.py +328 -0
- sparknlp/annotator/cv/qwen2vl_transformer.py +332 -0
- sparknlp/annotator/cv/smolvlm_transformer.py +426 -0
- sparknlp/annotator/cv/swin_for_image_classification.py +242 -0
- sparknlp/annotator/cv/vision_encoder_decoder_for_image_captioning.py +240 -0
- sparknlp/annotator/cv/vit_for_image_classification.py +36 -4
- sparknlp/annotator/dataframe_optimizer.py +216 -0
- sparknlp/annotator/date2_chunk.py +88 -0
- sparknlp/annotator/dependency/dependency_parser.py +2 -3
- sparknlp/annotator/dependency/typed_dependency_parser.py +3 -4
- sparknlp/annotator/document_character_text_splitter.py +228 -0
- sparknlp/annotator/document_normalizer.py +37 -1
- sparknlp/annotator/document_token_splitter.py +175 -0
- sparknlp/annotator/document_token_splitter_test.py +85 -0
- sparknlp/annotator/embeddings/__init__.py +11 -0
- sparknlp/annotator/embeddings/albert_embeddings.py +4 -18
- sparknlp/annotator/embeddings/auto_gguf_embeddings.py +539 -0
- sparknlp/annotator/embeddings/bert_embeddings.py +9 -22
- sparknlp/annotator/embeddings/bert_sentence_embeddings.py +12 -24
- sparknlp/annotator/embeddings/bge_embeddings.py +199 -0
- sparknlp/annotator/embeddings/camembert_embeddings.py +4 -20
- sparknlp/annotator/embeddings/chunk_embeddings.py +1 -2
- sparknlp/annotator/embeddings/deberta_embeddings.py +2 -16
- sparknlp/annotator/embeddings/distil_bert_embeddings.py +5 -19
- sparknlp/annotator/embeddings/doc2vec.py +7 -1
- sparknlp/annotator/embeddings/e5_embeddings.py +195 -0
- sparknlp/annotator/embeddings/e5v_embeddings.py +138 -0
- sparknlp/annotator/embeddings/elmo_embeddings.py +2 -2
- sparknlp/annotator/embeddings/instructor_embeddings.py +204 -0
- sparknlp/annotator/embeddings/longformer_embeddings.py +3 -17
- sparknlp/annotator/embeddings/minilm_embeddings.py +189 -0
- sparknlp/annotator/embeddings/mpnet_embeddings.py +192 -0
- sparknlp/annotator/embeddings/mxbai_embeddings.py +184 -0
- sparknlp/annotator/embeddings/nomic_embeddings.py +181 -0
- sparknlp/annotator/embeddings/roberta_embeddings.py +9 -21
- sparknlp/annotator/embeddings/roberta_sentence_embeddings.py +7 -21
- sparknlp/annotator/embeddings/sentence_embeddings.py +2 -3
- sparknlp/annotator/embeddings/snowflake_embeddings.py +202 -0
- sparknlp/annotator/embeddings/uae_embeddings.py +211 -0
- sparknlp/annotator/embeddings/universal_sentence_encoder.py +3 -3
- sparknlp/annotator/embeddings/word2vec.py +7 -1
- sparknlp/annotator/embeddings/word_embeddings.py +4 -5
- sparknlp/annotator/embeddings/xlm_roberta_embeddings.py +9 -21
- sparknlp/annotator/embeddings/xlm_roberta_sentence_embeddings.py +7 -21
- sparknlp/annotator/embeddings/xlnet_embeddings.py +4 -18
- sparknlp/annotator/er/entity_ruler.py +37 -23
- sparknlp/annotator/keyword_extraction/yake_keyword_extraction.py +2 -3
- sparknlp/annotator/ld_dl/language_detector_dl.py +2 -2
- sparknlp/annotator/lemmatizer.py +3 -4
- sparknlp/annotator/matcher/date_matcher.py +35 -3
- sparknlp/annotator/matcher/multi_date_matcher.py +1 -2
- sparknlp/annotator/matcher/regex_matcher.py +3 -3
- sparknlp/annotator/matcher/text_matcher.py +2 -3
- sparknlp/annotator/n_gram_generator.py +1 -2
- sparknlp/annotator/ner/__init__.py +3 -1
- sparknlp/annotator/ner/ner_converter.py +18 -0
- sparknlp/annotator/ner/ner_crf.py +4 -5
- sparknlp/annotator/ner/ner_dl.py +10 -5
- sparknlp/annotator/ner/ner_dl_graph_checker.py +293 -0
- sparknlp/annotator/ner/ner_overwriter.py +2 -2
- sparknlp/annotator/ner/zero_shot_ner_model.py +173 -0
- sparknlp/annotator/normalizer.py +2 -2
- sparknlp/annotator/openai/__init__.py +16 -0
- sparknlp/annotator/openai/openai_completion.py +349 -0
- sparknlp/annotator/openai/openai_embeddings.py +106 -0
- sparknlp/annotator/pos/perceptron.py +6 -7
- sparknlp/annotator/sentence/sentence_detector.py +2 -2
- sparknlp/annotator/sentence/sentence_detector_dl.py +3 -3
- sparknlp/annotator/sentiment/sentiment_detector.py +4 -5
- sparknlp/annotator/sentiment/vivekn_sentiment.py +4 -5
- sparknlp/annotator/seq2seq/__init__.py +17 -0
- sparknlp/annotator/seq2seq/auto_gguf_model.py +304 -0
- sparknlp/annotator/seq2seq/auto_gguf_reranker.py +334 -0
- sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +336 -0
- sparknlp/annotator/seq2seq/bart_transformer.py +420 -0
- sparknlp/annotator/seq2seq/cohere_transformer.py +357 -0
- sparknlp/annotator/seq2seq/cpm_transformer.py +321 -0
- sparknlp/annotator/seq2seq/gpt2_transformer.py +1 -1
- sparknlp/annotator/seq2seq/llama2_transformer.py +343 -0
- sparknlp/annotator/seq2seq/llama3_transformer.py +381 -0
- sparknlp/annotator/seq2seq/m2m100_transformer.py +392 -0
- sparknlp/annotator/seq2seq/marian_transformer.py +124 -3
- sparknlp/annotator/seq2seq/mistral_transformer.py +348 -0
- sparknlp/annotator/seq2seq/nllb_transformer.py +420 -0
- sparknlp/annotator/seq2seq/olmo_transformer.py +326 -0
- sparknlp/annotator/seq2seq/phi2_transformer.py +326 -0
- sparknlp/annotator/seq2seq/phi3_transformer.py +330 -0
- sparknlp/annotator/seq2seq/phi4_transformer.py +387 -0
- sparknlp/annotator/seq2seq/qwen_transformer.py +340 -0
- sparknlp/annotator/seq2seq/starcoder_transformer.py +335 -0
- sparknlp/annotator/seq2seq/t5_transformer.py +54 -4
- sparknlp/annotator/similarity/__init__.py +0 -0
- sparknlp/annotator/similarity/document_similarity_ranker.py +379 -0
- sparknlp/annotator/spell_check/context_spell_checker.py +116 -17
- sparknlp/annotator/spell_check/norvig_sweeting.py +3 -6
- sparknlp/annotator/spell_check/symmetric_delete.py +1 -1
- sparknlp/annotator/stemmer.py +2 -3
- sparknlp/annotator/stop_words_cleaner.py +3 -4
- sparknlp/annotator/tf_ner_dl_graph_builder.py +1 -1
- sparknlp/annotator/token/__init__.py +0 -1
- sparknlp/annotator/token/recursive_tokenizer.py +2 -3
- sparknlp/annotator/token/tokenizer.py +2 -3
- sparknlp/annotator/ws/word_segmenter.py +35 -10
- sparknlp/base/__init__.py +2 -3
- sparknlp/base/doc2_chunk.py +0 -3
- sparknlp/base/document_assembler.py +5 -5
- sparknlp/base/embeddings_finisher.py +14 -2
- sparknlp/base/finisher.py +15 -4
- sparknlp/base/gguf_ranking_finisher.py +234 -0
- sparknlp/base/image_assembler.py +69 -0
- sparknlp/base/light_pipeline.py +53 -21
- sparknlp/base/multi_document_assembler.py +9 -13
- sparknlp/base/prompt_assembler.py +207 -0
- sparknlp/base/token_assembler.py +1 -2
- sparknlp/common/__init__.py +2 -0
- sparknlp/common/annotator_type.py +1 -0
- sparknlp/common/completion_post_processing.py +37 -0
- sparknlp/common/match_strategy.py +33 -0
- sparknlp/common/properties.py +914 -9
- sparknlp/internal/__init__.py +841 -116
- sparknlp/internal/annotator_java_ml.py +1 -1
- sparknlp/internal/annotator_transformer.py +3 -0
- sparknlp/logging/comet.py +2 -2
- sparknlp/partition/__init__.py +16 -0
- sparknlp/partition/partition.py +244 -0
- sparknlp/partition/partition_properties.py +902 -0
- sparknlp/partition/partition_transformer.py +200 -0
- sparknlp/pretrained/pretrained_pipeline.py +1 -1
- sparknlp/pretrained/resource_downloader.py +126 -2
- sparknlp/reader/__init__.py +15 -0
- sparknlp/reader/enums.py +19 -0
- sparknlp/reader/pdf_to_text.py +190 -0
- sparknlp/reader/reader2doc.py +124 -0
- sparknlp/reader/reader2image.py +136 -0
- sparknlp/reader/reader2table.py +44 -0
- sparknlp/reader/reader_assembler.py +159 -0
- sparknlp/reader/sparknlp_reader.py +461 -0
- sparknlp/training/__init__.py +1 -0
- sparknlp/training/conll.py +8 -2
- sparknlp/training/spacy_to_annotation.py +57 -0
- sparknlp/util.py +26 -0
- spark_nlp-4.2.6.dist-info/METADATA +0 -1256
- spark_nlp-4.2.6.dist-info/RECORD +0 -196
- {spark_nlp-4.2.6.dist-info → spark_nlp-6.2.1.dist-info}/top_level.txt +0 -0
- /sparknlp/annotator/{token/token2_chunk.py → token2_chunk.py} +0 -0
sparknlp/annotator/ner/ner_dl_graph_checker.py
ADDED

@@ -0,0 +1,293 @@
+# Copyright 2017-2025 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains classes for NerDL."""
+
+from pyspark.ml.util import JavaMLReadable
+
+import sparknlp.internal as _internal
+from sparknlp.common import *
+
+
+class NerDLGraphChecker(
+    JavaEstimator,
+    JavaMLWritable,
+    _internal.ParamsGettersSetters,
+):
+    """Checks whether a suitable NerDL graph is available for the given training dataset, before any
+    computations/training is done. This annotator is useful for custom training cases, where
+    specialized graphs are needed.
+
+    This annotator will fill graph hyperparameters as metadata in the label column, which will be
+    available for NerDLApproach, saving computations.
+
+    Important: This annotator should be positioned before any embedding or NerDLApproach
+    annotators in the pipeline and will process the whole dataset to extract the required graph parameters.
+
+    This annotator requires a dataset with at least two columns: one with tokens and one with the
+    labels. In addition, it requires the used embedding annotator in the pipeline to extract the
+    suitable embedding dimension.
+
+    For extended examples of usage, see the `Examples
+    <https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/training/english/dl-ner/ner_dl_graph_checker.ipynb>`__
+    and the `NerDLGraphCheckerTestSpec
+    <https://github.com/JohnSnowLabs/spark-nlp/blob/master/src/test/scala/com/johnsnowlabs/nlp/annotators/ner/dl/NerDLGraphCheckerTestSpec.scala>`__.
+
+    ==================================== ======================
+    Input Annotation types               Output Annotation type
+    ==================================== ======================
+    ``DOCUMENT, TOKEN``                  ``NONE``
+    ==================================== ======================
+
+    Parameters
+    ----------
+    inputCols
+        Column names of input annotations
+    labelColumn
+        Column name for data labels
+    embeddingsDim
+        Dimensionality of embeddings
+
+    Examples
+    --------
+    >>> import sparknlp
+    >>> from sparknlp.base import *
+    >>> from sparknlp.annotator import *
+    >>> from pyspark.ml import Pipeline
+
+    This CoNLL dataset already includes a sentence, token and label
+    column with their respective annotator types. If a custom dataset is used,
+    these need to be defined, for example with:
+
+    >>> conll = CoNLL()
+    >>> trainingData = conll.readDataset(spark, "src/test/resources/conll2003/eng.train")
+    >>> embeddings = BertEmbeddings \\
+    ...     .pretrained() \\
+    ...     .setInputCols(["sentence", "token"]) \\
+    ...     .setOutputCol("embeddings")
+
+    This annotator requires the data for NerDLApproach graphs: text, tokens, labels and the embedding model.
+
+    >>> nerDLGraphChecker = NerDLGraphChecker() \\
+    ...     .setInputCols(["sentence", "token"]) \\
+    ...     .setLabelColumn("label") \\
+    ...     .setEmbeddingsModel(embeddings)
+    >>> nerTagger = NerDLApproach() \\
+    ...     .setInputCols(["sentence", "token", "embeddings"]) \\
+    ...     .setLabelColumn("label") \\
+    ...     .setOutputCol("ner") \\
+    ...     .setMaxEpochs(1) \\
+    ...     .setRandomSeed(0) \\
+    ...     .setVerbose(0)
+    >>> pipeline = Pipeline().setStages([nerDLGraphChecker, embeddings, nerTagger])
+
+    If we now fit the pipeline and a suitable graph is missing, an exception is raised.
+
+    >>> pipelineModel = pipeline.fit(trainingData)
+    """
+
+    inputCols = Param(
+        Params._dummy(),
+        "inputCols",
+        "Input columns",
+        typeConverter=TypeConverters.toListString,
+    )
+
+    def setInputCols(self, *value):
+        """Sets column names of input annotations.
+
+        Parameters
+        ----------
+        *value : List[str]
+            Input columns for the annotator
+        """
+        if type(value[0]) == str or type(value[0]) == list:
+            # self.inputColsValidation(value)
+            if len(value) == 1 and type(value[0]) == list:
+                return self._set(inputCols=value[0])
+            else:
+                return self._set(inputCols=list(value))
+        else:
+            raise TypeError(
+                "InputCols datatype not supported. It must be either str or list"
+            )
+
+    labelColumn = Param(
+        Params._dummy(),
+        "labelColumn",
+        "Column with label per each token",
+        typeConverter=TypeConverters.toString,
+    )
+
+    def setLabelColumn(self, value):
+        """Sets name of column for data labels.
+
+        Parameters
+        ----------
+        value : str
+            Column for data labels
+        """
+        return self._set(labelColumn=value)
+
+    embeddingsDim = Param(
+        Params._dummy(),
+        "embeddingsDim",
+        "Dimensionality of embeddings",
+        typeConverter=TypeConverters.toInt,
+    )
+
+    def setEmbeddingsDim(self, value: int):
+        """Sets dimensionality of embeddings.
+
+        Parameters
+        ----------
+        value : int
+            Dimensionality of embeddings
+        """
+        return self._set(embeddingsDim=value)
+
+    def setEmbeddingsModel(self, model: HasEmbeddingsProperties):
+        """
+        Get embeddingsDim from a given embeddings model, if possible.
+        Falls back to setEmbeddingsDim if the dimension cannot be obtained automatically.
+        """
+        # Try Python API first
+        if hasattr(model, "getDimension"):
+            dim = model.getDimension()
+            return self.setEmbeddingsDim(int(dim))
+        # Try JVM side if available
+        if hasattr(model, "_java_obj") and hasattr(model._java_obj, "getDimension"):
+            dim = int(model._java_obj.getDimension())
+            return self.setEmbeddingsDim(dim)
+        raise ValueError(
+            "Could not infer embeddings dimension from provided model. "
+            "Use setEmbeddingsDim(dim) explicitly."
+        )
+
+    inputAnnotatorTypes = [
+        AnnotatorType.DOCUMENT,
+        AnnotatorType.TOKEN,
+    ]
+
+    graphFolder = Param(
+        Params._dummy(),
+        "graphFolder",
+        "Folder path that contain external graph files",
+        TypeConverters.toString,
+    )
+
+    def setGraphFolder(self, p):
+        """Sets folder path that contain external graph files.
+
+        Parameters
+        ----------
+        p : str
+            Folder path that contain external graph files
+        """
+        return self._set(graphFolder=p)
+
+    @keyword_only
+    def __init__(self):
+        _internal.ParamsGettersSetters.__init__(self)
+        classname = "com.johnsnowlabs.nlp.annotators.ner.dl.NerDLGraphChecker"
+        self.__class__._java_class_name = classname
+        self._java_obj = self._new_java_obj(classname, self.uid)
+        # self._setDefault()
+
+    def _create_model(self, java_model):
+        return NerDLGraphCheckerModel(java_model=java_model)
+
+
+class NerDLGraphCheckerModel(
+    JavaModel,
+    JavaMLWritable,
+    JavaMLReadable,
+    _internal.ParamsGettersSetters,
+):
+    """Resulting model from `NerDLGraphChecker`, that updates dataframe metadata (label column)
+    with NerDLGraph parameters. It does not perform any actual data transformations, as the
+    checks/computations are done during the `fit` phase.
+
+    This annotator should never be used directly.
+    """
+
+    inputAnnotatorTypes = [
+        AnnotatorType.DOCUMENT,
+        AnnotatorType.TOKEN,
+    ]
+
+    @keyword_only
+    def __init__(
+        self,
+        classname="com.johnsnowlabs.nlp.annotators.ner.dl.NerDLGraphCheckerModel",
+        java_model=None,
+    ):
+        # Custom init, different from AnnotatorModel
+        # We don't have an output annotation column, so we inherit directly from JavaModel
+        if java_model is not None:
+            super(NerDLGraphCheckerModel, self).__init__(java_model=java_model)
+            self._java_obj = java_model
+            self._transfer_params_from_java()
+        elif classname:
+            super(NerDLGraphCheckerModel, self).__init__()
+            self.__class__._java_class_name = classname
+            self._java_obj = self._new_java_obj(classname)
+
+    # Metadata keys for graph parameters
+    graphParamsMetadataKey = "NerDLGraphCheckerParams"
+    embeddingsDimKey = "embeddingsDim"
+    labelsKey = "labels"
+    charsKey = "chars"
+    dsLenKey = "dsLen"
+
+    labelColumn = Param(
+        Params._dummy(),
+        "labelColumn",
+        "Column with label per each token",
+        typeConverter=TypeConverters.toString,
+    )
+
+    embeddingsDim = Param(
+        Params._dummy(),
+        "embeddingsDim",
+        "Dimensionality of embeddings",
+        typeConverter=TypeConverters.toInt,
+    )
+
+    labels = Param(
+        Params._dummy(),
+        "labels",
+        "Labels in the dataset",
+        typeConverter=TypeConverters.toListString,
+    )
+
+    chars = Param(
+        Params._dummy(),
+        "chars",
+        "Set of characters in the dataset",
+        typeConverter=TypeConverters.toListString,
+    )
+
+    graphFolder = Param(
+        Params._dummy(),
+        "graphFolder",
+        "Folder path that contain external graph files",
+        typeConverter=TypeConverters.toString,
+    )
+
+    dsLen = Param(
+        Params._dummy(),
+        "dsLen",
+        "Length of the training dataset.",
+        typeConverter=TypeConverters.toInt,
+    )
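When `setEmbeddingsModel` cannot read a dimension from the given model, it raises the ValueError shown above and the dimension has to be supplied by hand. A minimal sketch of that fallback (768 is an illustrative value, e.g. for a BERT base model):

    # Fallback when the dimension cannot be inferred from the embeddings model;
    # 768 is an illustrative value, not taken from the diff.
    graph_checker = NerDLGraphChecker() \
        .setInputCols(["sentence", "token"]) \
        .setLabelColumn("label") \
        .setEmbeddingsDim(768)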
sparknlp/annotator/ner/ner_overwriter.py
CHANGED

@@ -21,8 +21,8 @@ class NerOverwriter(AnnotatorModel):
 
     The input for this Annotator have to be entities that are already extracted,
     Annotator type ``NAMED_ENTITY``. The strings specified with
-    :meth:`.setStopWords` will have new entities assigned to, specified
-    :meth:`.setNewResult`.
+    :meth:`.NerOverwriter.setStopWords` will have new entities assigned to, specified
+    with :meth:`.NerOverwriter.setNewResult`.
 
     ====================== ======================
     Input Annotation types Output Annotation type
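The renamed references point at the same two setters; a minimal usage sketch of that API (the input column, stop word, and replacement label are illustrative):

    # Reassign the NER tag of selected result strings; values are illustrative.
    ner_overwriter = NerOverwriter() \
        .setInputCols(["ner"]) \
        .setOutputCol("ner_overwritten") \
        .setStopWords(["Spark"]) \
        .setNewResult("B-VOID")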
sparknlp/annotator/ner/zero_shot_ner_model.py
ADDED

@@ -0,0 +1,173 @@
+# Copyright 2017-2023 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from sparknlp.common import *
+from sparknlp.annotator.classifier_dl import RoBertaForQuestionAnswering
+
+
+class ZeroShotNerModel(RoBertaForQuestionAnswering, HasEngine):
+    """ZeroShotNerModel implements zero-shot named entity recognition by utilizing RoBERTa
+    transformer models fine-tuned on a question answering task.
+
+    Its input is a list of document annotations and it automatically generates questions which are
+    used to recognize entities. The definitions of entities are given by a dictionary structure,
+    specifying a set of questions for each entity. The model is based on
+    RoBertaForQuestionAnswering.
+
+    For more extended examples see the
+    `Examples <https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/annotation/text/english/named-entity-recognition/ZeroShot_NER.ipynb>`__.
+
+    Pretrained models can be loaded with ``pretrained`` of the companion object:
+
+    .. code-block:: python
+
+        zeroShotNer = ZeroShotNerModel.pretrained() \\
+            .setInputCols(["document"]) \\
+            .setOutputCol("zero_shot_ner")
+
+    ====================== ======================
+    Input Annotation types Output Annotation type
+    ====================== ======================
+    ``DOCUMENT, TOKEN``    ``NAMED_ENTITY``
+    ====================== ======================
+
+    Parameters
+    ----------
+    entityDefinitions
+        A dictionary with definitions of named entities. The keys of the dictionary are the entity labels and the
+        values are lists of questions. For example:
+        {
+            "CITY": ["Which city?", "Which town?"],
+            "NAME": ["What is her name?", "What is his name?"]}
+
+    predictionThreshold
+        Minimal confidence score to encode an entity (Default: 0.01)
+    ignoreEntities
+        A list of entity labels which are discarded from the output.
+
+    References
+    ----------
+    `RoBERTa: A Robustly Optimized BERT Pretraining Approach <https://arxiv.org/abs/1907.11692>`__ : for details about the RoBERTa transformer
+    :class:`.RoBertaForQuestionAnswering` : for the Spark NLP implementation of RoBERTa question answering
+
+    Examples
+    --------
+    >>> document_assembler = DocumentAssembler() \\
+    ...     .setInputCol("text") \\
+    ...     .setOutputCol("document")
+    >>> sentence_detector = SentenceDetector() \\
+    ...     .setInputCols(["document"]) \\
+    ...     .setOutputCol("sentence")
+    >>> tokenizer = Tokenizer() \\
+    ...     .setInputCols(["sentence"]) \\
+    ...     .setOutputCol("token")
+    >>> zero_shot_ner = ZeroShotNerModel() \\
+    ...     .pretrained() \\
+    ...     .setEntityDefinitions(
+    ...         {
+    ...             "NAME": ["What is his name?", "What is my name?", "What is her name?"],
+    ...             "CITY": ["Which city?", "Which is the city?"]
+    ...         }) \\
+    ...     .setInputCols(["sentence", "token"]) \\
+    ...     .setOutputCol("zero_shot_ner")
+    >>> data = spark.createDataFrame(
+    ...     [["My name is Clara, I live in New York and Hellen lives in Paris."]]
+    ... ).toDF("text")
+    >>> Pipeline() \\
+    ...     .setStages([document_assembler, sentence_detector, tokenizer, zero_shot_ner]) \\
+    ...     .fit(data) \\
+    ...     .transform(data) \\
+    ...     .selectExpr("document", "explode(zero_shot_ner) AS entity") \\
+    ...     .select(
+    ...         "document.result",
+    ...         "entity.result",
+    ...         "entity.metadata.word",
+    ...         "entity.metadata.confidence",
+    ...         "entity.metadata.question") \\
+    ...     .show(truncate=False)
+    +-----------------------------------------------------------------+------+------+----------+------------------+
+    |result                                                           |result|word  |confidence|question          |
+    +-----------------------------------------------------------------+------+------+----------+------------------+
+    |[My name is Clara, I live in New York and Hellen lives in Paris.]|B-CITY|Paris |0.5328949 |Which is the city?|
+    |[My name is Clara, I live in New York and Hellen lives in Paris.]|B-NAME|Clara |0.9360068 |What is my name?  |
+    |[My name is Clara, I live in New York and Hellen lives in Paris.]|B-CITY|New   |0.83294415|Which city?       |
+    |[My name is Clara, I live in New York and Hellen lives in Paris.]|I-CITY|York  |0.83294415|Which city?       |
+    |[My name is Clara, I live in New York and Hellen lives in Paris.]|B-NAME|Hellen|0.45366877|What is her name? |
+    +-----------------------------------------------------------------+------+------+----------+------------------+
+    """
+    inputAnnotatorTypes = [AnnotatorType.DOCUMENT, AnnotatorType.TOKEN]
+    outputAnnotatorType = AnnotatorType.NAMED_ENTITY
+
+    name = "ZeroShotNerModel"
+
+    predictionThreshold = Param(Params._dummy(),
+                                "predictionThreshold",
+                                "Minimal confidence score to encode an entity (default is 0.01)",
+                                TypeConverters.toFloat)
+
+    ignoreEntities = Param(Params._dummy(),
+                           "ignoreEntities",
+                           "List of entities to ignore",
+                           TypeConverters.toListString)
+
+    def setPredictionThreshold(self, threshold):
+        """Sets the minimal confidence score to encode an entity.
+
+        Parameters
+        ----------
+        threshold : float
+            Minimal confidence score to encode an entity (default is 0.01)
+        """
+        return self._set(predictionThreshold=threshold)
+
+    def setEntityDefinitions(self, definitions):
+        """Sets the entity definitions.
+
+        Parameters
+        ----------
+        definitions : dict[str, list[str]]
+            Dictionary mapping entity labels to lists of questions
+        """
+        self._call_java("setEntityDefinitions", definitions)
+
+        return self
+
+    def getClasses(self):
+        """
+        Returns the list of entities which are recognized.
+        """
+        return self._call_java("getEntities")
+
+    @keyword_only
+    def __init__(self, classname="com.johnsnowlabs.nlp.annotators.ner.dl.ZeroShotNerModel", java_model=None):
+        super(ZeroShotNerModel, self).__init__(
+            classname=classname,
+            java_model=java_model
+        )
+        self._setDefault(
+            predictionThreshold=0.01,
+            ignoreEntities=[]
+        )
+
+    @staticmethod
+    def pretrained(name="zero_shot_ner_roberta", lang="en", remote_loc=None):
+        from sparknlp.pretrained import ResourceDownloader
+        return ResourceDownloader.downloadModel(ZeroShotNerModel, name, lang, remote_loc,
+                                                j_dwn='PythonResourceDownloader')
+
+    @staticmethod
+    def load(path):
+        from sparknlp.internal import _RobertaQAToZeroShotNerLoader
+        jModel = _RobertaQAToZeroShotNerLoader(path)._java_obj
+        return ZeroShotNerModel(java_model=jModel)
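Besides `pretrained`, the `load` helper above wraps a locally saved RoBERTa question answering model; a sketch under that assumption (the path and entity definitions are illustrative):

    # Convert a saved RoBertaForQuestionAnswering model into a zero-shot NER model;
    # "/models/roberta_qa_en" is an illustrative path, not taken from the diff.
    zero_shot_ner = ZeroShotNerModel.load("/models/roberta_qa_en") \
        .setEntityDefinitions({"CITY": ["Which city?", "Which town?"]}) \
        .setInputCols(["sentence", "token"]) \
        .setOutputCol("zero_shot_ner")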
sparknlp/annotator/normalizer.py
CHANGED

@@ -20,8 +20,8 @@ class Normalizer(AnnotatorApproach):
     all dirty characters from text following a regex pattern and transforms
     words based on a provided dictionary
 
-    For extended examples of usage, see the `
-    <https://github.com/JohnSnowLabs/spark-nlp
+    For extended examples of usage, see the `Examples
+    <https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/annotation/text/english/document-normalizer/document_normalizer_notebook.ipynb>`__.
 
     ====================== ======================
     Input Annotation types Output Annotation type
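A minimal usage sketch of the Normalizer this docstring belongs to (the cleanup pattern and column names are illustrative):

    # Keep only letters and lowercase the tokens; the pattern is illustrative.
    normalizer = Normalizer() \
        .setInputCols(["token"]) \
        .setOutputCol("normalized") \
        .setCleanupPatterns(["[^A-Za-z]"]) \
        .setLowercase(True)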
sparknlp/annotator/openai/__init__.py
ADDED

@@ -0,0 +1,16 @@
+# Copyright 2017-2023 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Module of annotators for openai integration."""
+from sparknlp.annotator.openai.openai_completion import *
+from sparknlp.annotator.openai.openai_embeddings import *
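With these wildcard imports in place, the two annotators become importable from the package itself; a sketch assuming the submodules export classes named after their files:

    # Both names are re-exported at package level by the wildcard imports above.
    from sparknlp.annotator.openai import OpenAICompletion, OpenAIEmbeddings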