spark-nlp 2.6.3rc1__py2.py3-none-any.whl → 6.2.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (329)
  1. com/johnsnowlabs/ml/__init__.py +0 -0
  2. com/johnsnowlabs/ml/ai/__init__.py +10 -0
  3. com/johnsnowlabs/nlp/__init__.py +4 -2
  4. spark_nlp-6.2.1.dist-info/METADATA +362 -0
  5. spark_nlp-6.2.1.dist-info/RECORD +292 -0
  6. {spark_nlp-2.6.3rc1.dist-info → spark_nlp-6.2.1.dist-info}/WHEEL +1 -1
  7. sparknlp/__init__.py +281 -27
  8. sparknlp/annotation.py +137 -6
  9. sparknlp/annotation_audio.py +61 -0
  10. sparknlp/annotation_image.py +82 -0
  11. sparknlp/annotator/__init__.py +93 -0
  12. sparknlp/annotator/audio/__init__.py +16 -0
  13. sparknlp/annotator/audio/hubert_for_ctc.py +188 -0
  14. sparknlp/annotator/audio/wav2vec2_for_ctc.py +161 -0
  15. sparknlp/annotator/audio/whisper_for_ctc.py +251 -0
  16. sparknlp/annotator/chunk2_doc.py +85 -0
  17. sparknlp/annotator/chunker.py +137 -0
  18. sparknlp/annotator/classifier_dl/__init__.py +61 -0
  19. sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +161 -0
  20. sparknlp/annotator/classifier_dl/albert_for_question_answering.py +172 -0
  21. sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py +201 -0
  22. sparknlp/annotator/classifier_dl/albert_for_token_classification.py +179 -0
  23. sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +211 -0
  24. sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py +225 -0
  25. sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +161 -0
  26. sparknlp/annotator/classifier_dl/bert_for_question_answering.py +168 -0
  27. sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py +202 -0
  28. sparknlp/annotator/classifier_dl/bert_for_token_classification.py +177 -0
  29. sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py +212 -0
  30. sparknlp/annotator/classifier_dl/camembert_for_question_answering.py +168 -0
  31. sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py +205 -0
  32. sparknlp/annotator/classifier_dl/camembert_for_token_classification.py +173 -0
  33. sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +202 -0
  34. sparknlp/annotator/classifier_dl/classifier_dl.py +320 -0
  35. sparknlp/annotator/classifier_dl/deberta_for_question_answering.py +168 -0
  36. sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py +198 -0
  37. sparknlp/annotator/classifier_dl/deberta_for_token_classification.py +175 -0
  38. sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +193 -0
  39. sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py +168 -0
  40. sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py +201 -0
  41. sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py +175 -0
  42. sparknlp/annotator/classifier_dl/distil_bert_for_zero_shot_classification.py +211 -0
  43. sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +161 -0
  44. sparknlp/annotator/classifier_dl/longformer_for_question_answering.py +168 -0
  45. sparknlp/annotator/classifier_dl/longformer_for_sequence_classification.py +201 -0
  46. sparknlp/annotator/classifier_dl/longformer_for_token_classification.py +176 -0
  47. sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py +148 -0
  48. sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py +188 -0
  49. sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py +173 -0
  50. sparknlp/annotator/classifier_dl/multi_classifier_dl.py +395 -0
  51. sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +161 -0
  52. sparknlp/annotator/classifier_dl/roberta_for_question_answering.py +168 -0
  53. sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py +201 -0
  54. sparknlp/annotator/classifier_dl/roberta_for_token_classification.py +189 -0
  55. sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py +225 -0
  56. sparknlp/annotator/classifier_dl/sentiment_dl.py +378 -0
  57. sparknlp/annotator/classifier_dl/tapas_for_question_answering.py +170 -0
  58. sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +149 -0
  59. sparknlp/annotator/classifier_dl/xlm_roberta_for_question_answering.py +168 -0
  60. sparknlp/annotator/classifier_dl/xlm_roberta_for_sequence_classification.py +201 -0
  61. sparknlp/annotator/classifier_dl/xlm_roberta_for_token_classification.py +173 -0
  62. sparknlp/annotator/classifier_dl/xlm_roberta_for_zero_shot_classification.py +225 -0
  63. sparknlp/annotator/classifier_dl/xlnet_for_sequence_classification.py +201 -0
  64. sparknlp/annotator/classifier_dl/xlnet_for_token_classification.py +176 -0
  65. sparknlp/annotator/cleaners/__init__.py +15 -0
  66. sparknlp/annotator/cleaners/cleaner.py +202 -0
  67. sparknlp/annotator/cleaners/extractor.py +191 -0
  68. sparknlp/annotator/coref/__init__.py +1 -0
  69. sparknlp/annotator/coref/spanbert_coref.py +221 -0
  70. sparknlp/annotator/cv/__init__.py +29 -0
  71. sparknlp/annotator/cv/blip_for_question_answering.py +172 -0
  72. sparknlp/annotator/cv/clip_for_zero_shot_classification.py +193 -0
  73. sparknlp/annotator/cv/convnext_for_image_classification.py +269 -0
  74. sparknlp/annotator/cv/florence2_transformer.py +180 -0
  75. sparknlp/annotator/cv/gemma3_for_multimodal.py +346 -0
  76. sparknlp/annotator/cv/internvl_for_multimodal.py +280 -0
  77. sparknlp/annotator/cv/janus_for_multimodal.py +351 -0
  78. sparknlp/annotator/cv/llava_for_multimodal.py +328 -0
  79. sparknlp/annotator/cv/mllama_for_multimodal.py +340 -0
  80. sparknlp/annotator/cv/paligemma_for_multimodal.py +308 -0
  81. sparknlp/annotator/cv/phi3_vision_for_multimodal.py +328 -0
  82. sparknlp/annotator/cv/qwen2vl_transformer.py +332 -0
  83. sparknlp/annotator/cv/smolvlm_transformer.py +426 -0
  84. sparknlp/annotator/cv/swin_for_image_classification.py +242 -0
  85. sparknlp/annotator/cv/vision_encoder_decoder_for_image_captioning.py +240 -0
  86. sparknlp/annotator/cv/vit_for_image_classification.py +217 -0
  87. sparknlp/annotator/dataframe_optimizer.py +216 -0
  88. sparknlp/annotator/date2_chunk.py +88 -0
  89. sparknlp/annotator/dependency/__init__.py +17 -0
  90. sparknlp/annotator/dependency/dependency_parser.py +294 -0
  91. sparknlp/annotator/dependency/typed_dependency_parser.py +318 -0
  92. sparknlp/annotator/document_character_text_splitter.py +228 -0
  93. sparknlp/annotator/document_normalizer.py +235 -0
  94. sparknlp/annotator/document_token_splitter.py +175 -0
  95. sparknlp/annotator/document_token_splitter_test.py +85 -0
  96. sparknlp/annotator/embeddings/__init__.py +45 -0
  97. sparknlp/annotator/embeddings/albert_embeddings.py +230 -0
  98. sparknlp/annotator/embeddings/auto_gguf_embeddings.py +539 -0
  99. sparknlp/annotator/embeddings/bert_embeddings.py +208 -0
  100. sparknlp/annotator/embeddings/bert_sentence_embeddings.py +224 -0
  101. sparknlp/annotator/embeddings/bge_embeddings.py +199 -0
  102. sparknlp/annotator/embeddings/camembert_embeddings.py +210 -0
  103. sparknlp/annotator/embeddings/chunk_embeddings.py +149 -0
  104. sparknlp/annotator/embeddings/deberta_embeddings.py +208 -0
  105. sparknlp/annotator/embeddings/distil_bert_embeddings.py +221 -0
  106. sparknlp/annotator/embeddings/doc2vec.py +352 -0
  107. sparknlp/annotator/embeddings/e5_embeddings.py +195 -0
  108. sparknlp/annotator/embeddings/e5v_embeddings.py +138 -0
  109. sparknlp/annotator/embeddings/elmo_embeddings.py +251 -0
  110. sparknlp/annotator/embeddings/instructor_embeddings.py +204 -0
  111. sparknlp/annotator/embeddings/longformer_embeddings.py +211 -0
  112. sparknlp/annotator/embeddings/minilm_embeddings.py +189 -0
  113. sparknlp/annotator/embeddings/mpnet_embeddings.py +192 -0
  114. sparknlp/annotator/embeddings/mxbai_embeddings.py +184 -0
  115. sparknlp/annotator/embeddings/nomic_embeddings.py +181 -0
  116. sparknlp/annotator/embeddings/roberta_embeddings.py +225 -0
  117. sparknlp/annotator/embeddings/roberta_sentence_embeddings.py +191 -0
  118. sparknlp/annotator/embeddings/sentence_embeddings.py +134 -0
  119. sparknlp/annotator/embeddings/snowflake_embeddings.py +202 -0
  120. sparknlp/annotator/embeddings/uae_embeddings.py +211 -0
  121. sparknlp/annotator/embeddings/universal_sentence_encoder.py +211 -0
  122. sparknlp/annotator/embeddings/word2vec.py +353 -0
  123. sparknlp/annotator/embeddings/word_embeddings.py +385 -0
  124. sparknlp/annotator/embeddings/xlm_roberta_embeddings.py +225 -0
  125. sparknlp/annotator/embeddings/xlm_roberta_sentence_embeddings.py +194 -0
  126. sparknlp/annotator/embeddings/xlnet_embeddings.py +227 -0
  127. sparknlp/annotator/er/__init__.py +16 -0
  128. sparknlp/annotator/er/entity_ruler.py +267 -0
  129. sparknlp/annotator/graph_extraction.py +368 -0
  130. sparknlp/annotator/keyword_extraction/__init__.py +16 -0
  131. sparknlp/annotator/keyword_extraction/yake_keyword_extraction.py +270 -0
  132. sparknlp/annotator/ld_dl/__init__.py +16 -0
  133. sparknlp/annotator/ld_dl/language_detector_dl.py +199 -0
  134. sparknlp/annotator/lemmatizer.py +250 -0
  135. sparknlp/annotator/matcher/__init__.py +20 -0
  136. sparknlp/annotator/matcher/big_text_matcher.py +272 -0
  137. sparknlp/annotator/matcher/date_matcher.py +303 -0
  138. sparknlp/annotator/matcher/multi_date_matcher.py +109 -0
  139. sparknlp/annotator/matcher/regex_matcher.py +221 -0
  140. sparknlp/annotator/matcher/text_matcher.py +290 -0
  141. sparknlp/annotator/n_gram_generator.py +141 -0
  142. sparknlp/annotator/ner/__init__.py +21 -0
  143. sparknlp/annotator/ner/ner_approach.py +94 -0
  144. sparknlp/annotator/ner/ner_converter.py +148 -0
  145. sparknlp/annotator/ner/ner_crf.py +397 -0
  146. sparknlp/annotator/ner/ner_dl.py +591 -0
  147. sparknlp/annotator/ner/ner_dl_graph_checker.py +293 -0
  148. sparknlp/annotator/ner/ner_overwriter.py +166 -0
  149. sparknlp/annotator/ner/zero_shot_ner_model.py +173 -0
  150. sparknlp/annotator/normalizer.py +230 -0
  151. sparknlp/annotator/openai/__init__.py +16 -0
  152. sparknlp/annotator/openai/openai_completion.py +349 -0
  153. sparknlp/annotator/openai/openai_embeddings.py +106 -0
  154. sparknlp/annotator/param/__init__.py +17 -0
  155. sparknlp/annotator/param/classifier_encoder.py +98 -0
  156. sparknlp/annotator/param/evaluation_dl_params.py +130 -0
  157. sparknlp/annotator/pos/__init__.py +16 -0
  158. sparknlp/annotator/pos/perceptron.py +263 -0
  159. sparknlp/annotator/sentence/__init__.py +17 -0
  160. sparknlp/annotator/sentence/sentence_detector.py +290 -0
  161. sparknlp/annotator/sentence/sentence_detector_dl.py +467 -0
  162. sparknlp/annotator/sentiment/__init__.py +17 -0
  163. sparknlp/annotator/sentiment/sentiment_detector.py +208 -0
  164. sparknlp/annotator/sentiment/vivekn_sentiment.py +242 -0
  165. sparknlp/annotator/seq2seq/__init__.py +35 -0
  166. sparknlp/annotator/seq2seq/auto_gguf_model.py +304 -0
  167. sparknlp/annotator/seq2seq/auto_gguf_reranker.py +334 -0
  168. sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +336 -0
  169. sparknlp/annotator/seq2seq/bart_transformer.py +420 -0
  170. sparknlp/annotator/seq2seq/cohere_transformer.py +357 -0
  171. sparknlp/annotator/seq2seq/cpm_transformer.py +321 -0
  172. sparknlp/annotator/seq2seq/gpt2_transformer.py +363 -0
  173. sparknlp/annotator/seq2seq/llama2_transformer.py +343 -0
  174. sparknlp/annotator/seq2seq/llama3_transformer.py +381 -0
  175. sparknlp/annotator/seq2seq/m2m100_transformer.py +392 -0
  176. sparknlp/annotator/seq2seq/marian_transformer.py +374 -0
  177. sparknlp/annotator/seq2seq/mistral_transformer.py +348 -0
  178. sparknlp/annotator/seq2seq/nllb_transformer.py +420 -0
  179. sparknlp/annotator/seq2seq/olmo_transformer.py +326 -0
  180. sparknlp/annotator/seq2seq/phi2_transformer.py +326 -0
  181. sparknlp/annotator/seq2seq/phi3_transformer.py +330 -0
  182. sparknlp/annotator/seq2seq/phi4_transformer.py +387 -0
  183. sparknlp/annotator/seq2seq/qwen_transformer.py +340 -0
  184. sparknlp/annotator/seq2seq/starcoder_transformer.py +335 -0
  185. sparknlp/annotator/seq2seq/t5_transformer.py +425 -0
  186. sparknlp/annotator/similarity/__init__.py +0 -0
  187. sparknlp/annotator/similarity/document_similarity_ranker.py +379 -0
  188. sparknlp/annotator/spell_check/__init__.py +18 -0
  189. sparknlp/annotator/spell_check/context_spell_checker.py +911 -0
  190. sparknlp/annotator/spell_check/norvig_sweeting.py +358 -0
  191. sparknlp/annotator/spell_check/symmetric_delete.py +299 -0
  192. sparknlp/annotator/stemmer.py +79 -0
  193. sparknlp/annotator/stop_words_cleaner.py +190 -0
  194. sparknlp/annotator/tf_ner_dl_graph_builder.py +179 -0
  195. sparknlp/annotator/token/__init__.py +19 -0
  196. sparknlp/annotator/token/chunk_tokenizer.py +118 -0
  197. sparknlp/annotator/token/recursive_tokenizer.py +205 -0
  198. sparknlp/annotator/token/regex_tokenizer.py +208 -0
  199. sparknlp/annotator/token/tokenizer.py +561 -0
  200. sparknlp/annotator/token2_chunk.py +76 -0
  201. sparknlp/annotator/ws/__init__.py +16 -0
  202. sparknlp/annotator/ws/word_segmenter.py +429 -0
  203. sparknlp/base/__init__.py +30 -0
  204. sparknlp/base/audio_assembler.py +95 -0
  205. sparknlp/base/doc2_chunk.py +169 -0
  206. sparknlp/base/document_assembler.py +164 -0
  207. sparknlp/base/embeddings_finisher.py +201 -0
  208. sparknlp/base/finisher.py +217 -0
  209. sparknlp/base/gguf_ranking_finisher.py +234 -0
  210. sparknlp/base/graph_finisher.py +125 -0
  211. sparknlp/base/has_recursive_fit.py +24 -0
  212. sparknlp/base/has_recursive_transform.py +22 -0
  213. sparknlp/base/image_assembler.py +172 -0
  214. sparknlp/base/light_pipeline.py +429 -0
  215. sparknlp/base/multi_document_assembler.py +164 -0
  216. sparknlp/base/prompt_assembler.py +207 -0
  217. sparknlp/base/recursive_pipeline.py +107 -0
  218. sparknlp/base/table_assembler.py +145 -0
  219. sparknlp/base/token_assembler.py +124 -0
  220. sparknlp/common/__init__.py +26 -0
  221. sparknlp/common/annotator_approach.py +41 -0
  222. sparknlp/common/annotator_model.py +47 -0
  223. sparknlp/common/annotator_properties.py +114 -0
  224. sparknlp/common/annotator_type.py +38 -0
  225. sparknlp/common/completion_post_processing.py +37 -0
  226. sparknlp/common/coverage_result.py +22 -0
  227. sparknlp/common/match_strategy.py +33 -0
  228. sparknlp/common/properties.py +1298 -0
  229. sparknlp/common/read_as.py +33 -0
  230. sparknlp/common/recursive_annotator_approach.py +35 -0
  231. sparknlp/common/storage.py +149 -0
  232. sparknlp/common/utils.py +39 -0
  233. sparknlp/functions.py +315 -5
  234. sparknlp/internal/__init__.py +1199 -0
  235. sparknlp/internal/annotator_java_ml.py +32 -0
  236. sparknlp/internal/annotator_transformer.py +37 -0
  237. sparknlp/internal/extended_java_wrapper.py +63 -0
  238. sparknlp/internal/params_getters_setters.py +71 -0
  239. sparknlp/internal/recursive.py +70 -0
  240. sparknlp/logging/__init__.py +15 -0
  241. sparknlp/logging/comet.py +467 -0
  242. sparknlp/partition/__init__.py +16 -0
  243. sparknlp/partition/partition.py +244 -0
  244. sparknlp/partition/partition_properties.py +902 -0
  245. sparknlp/partition/partition_transformer.py +200 -0
  246. sparknlp/pretrained/__init__.py +17 -0
  247. sparknlp/pretrained/pretrained_pipeline.py +158 -0
  248. sparknlp/pretrained/resource_downloader.py +216 -0
  249. sparknlp/pretrained/utils.py +35 -0
  250. sparknlp/reader/__init__.py +15 -0
  251. sparknlp/reader/enums.py +19 -0
  252. sparknlp/reader/pdf_to_text.py +190 -0
  253. sparknlp/reader/reader2doc.py +124 -0
  254. sparknlp/reader/reader2image.py +136 -0
  255. sparknlp/reader/reader2table.py +44 -0
  256. sparknlp/reader/reader_assembler.py +159 -0
  257. sparknlp/reader/sparknlp_reader.py +461 -0
  258. sparknlp/training/__init__.py +20 -0
  259. sparknlp/training/_tf_graph_builders/__init__.py +0 -0
  260. sparknlp/training/_tf_graph_builders/graph_builders.py +299 -0
  261. sparknlp/training/_tf_graph_builders/ner_dl/__init__.py +0 -0
  262. sparknlp/training/_tf_graph_builders/ner_dl/create_graph.py +41 -0
  263. sparknlp/training/_tf_graph_builders/ner_dl/dataset_encoder.py +78 -0
  264. sparknlp/training/_tf_graph_builders/ner_dl/ner_model.py +521 -0
  265. sparknlp/training/_tf_graph_builders/ner_dl/ner_model_saver.py +62 -0
  266. sparknlp/training/_tf_graph_builders/ner_dl/sentence_grouper.py +28 -0
  267. sparknlp/training/_tf_graph_builders/tf2contrib/__init__.py +36 -0
  268. sparknlp/training/_tf_graph_builders/tf2contrib/core_rnn_cell.py +385 -0
  269. sparknlp/training/_tf_graph_builders/tf2contrib/fused_rnn_cell.py +183 -0
  270. sparknlp/training/_tf_graph_builders/tf2contrib/gru_ops.py +235 -0
  271. sparknlp/training/_tf_graph_builders/tf2contrib/lstm_ops.py +665 -0
  272. sparknlp/training/_tf_graph_builders/tf2contrib/rnn.py +245 -0
  273. sparknlp/training/_tf_graph_builders/tf2contrib/rnn_cell.py +4006 -0
  274. sparknlp/training/_tf_graph_builders_1x/__init__.py +0 -0
  275. sparknlp/training/_tf_graph_builders_1x/graph_builders.py +277 -0
  276. sparknlp/training/_tf_graph_builders_1x/ner_dl/__init__.py +0 -0
  277. sparknlp/training/_tf_graph_builders_1x/ner_dl/create_graph.py +34 -0
  278. sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py +78 -0
  279. sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py +532 -0
  280. sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py +62 -0
  281. sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py +28 -0
  282. sparknlp/training/conll.py +150 -0
  283. sparknlp/training/conllu.py +103 -0
  284. sparknlp/training/pos.py +103 -0
  285. sparknlp/training/pub_tator.py +76 -0
  286. sparknlp/training/spacy_to_annotation.py +57 -0
  287. sparknlp/training/tfgraphs.py +5 -0
  288. sparknlp/upload_to_hub.py +149 -0
  289. sparknlp/util.py +51 -5
  290. com/__init__.pyc +0 -0
  291. com/__pycache__/__init__.cpython-36.pyc +0 -0
  292. com/johnsnowlabs/__init__.pyc +0 -0
  293. com/johnsnowlabs/__pycache__/__init__.cpython-36.pyc +0 -0
  294. com/johnsnowlabs/nlp/__init__.pyc +0 -0
  295. com/johnsnowlabs/nlp/__pycache__/__init__.cpython-36.pyc +0 -0
  296. spark_nlp-2.6.3rc1.dist-info/METADATA +0 -36
  297. spark_nlp-2.6.3rc1.dist-info/RECORD +0 -48
  298. sparknlp/__init__.pyc +0 -0
  299. sparknlp/__pycache__/__init__.cpython-36.pyc +0 -0
  300. sparknlp/__pycache__/annotation.cpython-36.pyc +0 -0
  301. sparknlp/__pycache__/annotator.cpython-36.pyc +0 -0
  302. sparknlp/__pycache__/base.cpython-36.pyc +0 -0
  303. sparknlp/__pycache__/common.cpython-36.pyc +0 -0
  304. sparknlp/__pycache__/embeddings.cpython-36.pyc +0 -0
  305. sparknlp/__pycache__/functions.cpython-36.pyc +0 -0
  306. sparknlp/__pycache__/internal.cpython-36.pyc +0 -0
  307. sparknlp/__pycache__/pretrained.cpython-36.pyc +0 -0
  308. sparknlp/__pycache__/storage.cpython-36.pyc +0 -0
  309. sparknlp/__pycache__/training.cpython-36.pyc +0 -0
  310. sparknlp/__pycache__/util.cpython-36.pyc +0 -0
  311. sparknlp/annotation.pyc +0 -0
  312. sparknlp/annotator.py +0 -3006
  313. sparknlp/annotator.pyc +0 -0
  314. sparknlp/base.py +0 -347
  315. sparknlp/base.pyc +0 -0
  316. sparknlp/common.py +0 -193
  317. sparknlp/common.pyc +0 -0
  318. sparknlp/embeddings.py +0 -40
  319. sparknlp/embeddings.pyc +0 -0
  320. sparknlp/internal.py +0 -288
  321. sparknlp/internal.pyc +0 -0
  322. sparknlp/pretrained.py +0 -123
  323. sparknlp/pretrained.pyc +0 -0
  324. sparknlp/storage.py +0 -32
  325. sparknlp/storage.pyc +0 -0
  326. sparknlp/training.py +0 -62
  327. sparknlp/training.pyc +0 -0
  328. sparknlp/util.pyc +0 -0
  329. {spark_nlp-2.6.3rc1.dist-info → spark_nlp-6.2.1.dist-info}/top_level.txt +0 -0
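The jump from 2.6.3rc1 to 6.2.1 replaces the old monolithic modules (`sparknlp/annotator.py`, `sparknlp/base.py`, and friends, removed at the bottom of the list) with the per-annotator package layout added above. As a quick sanity check after upgrading, a minimal sketch (assuming the new wheel is installed, e.g. via `pip install spark-nlp==6.2.1`, and a compatible PySpark is available):

    import sparknlp

    # sparknlp.start() creates a SparkSession with the matching
    # Spark NLP jar on the classpath.
    spark = sparknlp.start()
    print(sparknlp.version())  # expected to print 6.2.1

Two of the added files are shown in full below.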
sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py
@@ -0,0 +1,225 @@
+ # Copyright 2017-2023 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains classes for RoBertaForZeroShotClassification."""
+
+ from sparknlp.common import *
+
+
+ class RoBertaForZeroShotClassification(AnnotatorModel,
+                                        HasCaseSensitiveProperties,
+                                        HasBatchedAnnotate,
+                                        HasClassifierActivationProperties,
+                                        HasCandidateLabelsProperties,
+                                        HasEngine):
+     """RoBertaForZeroShotClassification using a `ModelForSequenceClassification` trained on NLI (natural language
+     inference) tasks. Equivalent of `RoBertaForSequenceClassification` models, but these models don't require a
+     hardcoded number of potential classes; they can be chosen at runtime. This usually makes them slower, but much
+     more flexible.
+
+     Note that the model will loop through all provided labels, so the more labels you have, the
+     longer this process will take.
+
+     Any combination of sequences and labels can be passed, and each combination will be posed as a premise/hypothesis
+     pair and passed to the pretrained model.
+
+     Pretrained models can be loaded with :meth:`.pretrained` of the companion
+     object:
+
+     >>> sequenceClassifier = RoBertaForZeroShotClassification.pretrained() \\
+     ...     .setInputCols(["token", "document"]) \\
+     ...     .setOutputCol("label")
+
+     The default model is ``"roberta_base_zero_shot_classifier_nli"``, if no name is
+     provided.
+
+     For available pretrained models please see the `Models Hub
+     <https://sparknlp.org/models?task=Text+Classification>`__.
+
+     To see which models are compatible and how to import them see
+     `Import Transformers into Spark NLP 🚀
+     <https://github.com/JohnSnowLabs/spark-nlp/discussions/5669>`_.
+
+     ====================== ======================
+     Input Annotation types Output Annotation type
+     ====================== ======================
+     ``DOCUMENT, TOKEN``    ``CATEGORY``
+     ====================== ======================
+
+     Parameters
+     ----------
+     batchSize
+         Batch size. Larger values allow faster processing but require more
+         memory, by default 8
+     caseSensitive
+         Whether to ignore case in tokens for embeddings matching, by default
+         True
+     configProtoBytes
+         ConfigProto from tensorflow, serialized into byte array.
+     maxSentenceLength
+         Max sentence length to process, by default 128
+     coalesceSentences
+         Instead of 1 class per sentence (if inputCols is ``sentence``) output 1
+         class per document by averaging probabilities in all sentences, by
+         default False
+     activation
+         Whether to calculate logits via Softmax or Sigmoid, by default
+         ``"softmax"``.
+
+     Examples
+     --------
+     >>> import sparknlp
+     >>> from sparknlp.base import *
+     >>> from sparknlp.annotator import *
+     >>> from pyspark.ml import Pipeline
+     >>> documentAssembler = DocumentAssembler() \\
+     ...     .setInputCol("text") \\
+     ...     .setOutputCol("document")
+     >>> tokenizer = Tokenizer() \\
+     ...     .setInputCols(["document"]) \\
+     ...     .setOutputCol("token")
+     >>> sequenceClassifier = RoBertaForZeroShotClassification.pretrained() \\
+     ...     .setInputCols(["token", "document"]) \\
+     ...     .setOutputCol("label") \\
+     ...     .setCaseSensitive(True)
+     >>> pipeline = Pipeline().setStages([
+     ...     documentAssembler,
+     ...     tokenizer,
+     ...     sequenceClassifier
+     ... ])
+     >>> data = spark.createDataFrame([["I loved this movie when I was a child."], ["It was pretty boring."]]).toDF("text")
+     >>> result = pipeline.fit(data).transform(data)
+     >>> result.select("label.result").show(truncate=False)
+     +------+
+     |result|
+     +------+
+     |[pos] |
+     |[neg] |
+     +------+
+     """
+     name = "RoBertaForZeroShotClassification"
+
+     inputAnnotatorTypes = [AnnotatorType.DOCUMENT, AnnotatorType.TOKEN]
+
+     outputAnnotatorType = AnnotatorType.CATEGORY
+
+     maxSentenceLength = Param(Params._dummy(),
+                               "maxSentenceLength",
+                               "Max sentence length to process",
+                               typeConverter=TypeConverters.toInt)
+
+     configProtoBytes = Param(Params._dummy(),
+                              "configProtoBytes",
+                              "ConfigProto from tensorflow, serialized into byte array. Get with config_proto.SerializeToString()",
+                              TypeConverters.toListInt)
+
+     coalesceSentences = Param(Params._dummy(), "coalesceSentences",
+                               "Instead of 1 class per sentence (if inputCols is 'sentence') output 1 class per document by averaging probabilities in all sentences.",
+                               TypeConverters.toBoolean)
+
+     def getClasses(self):
+         """
+         Returns labels used to train this model
+         """
+         return self._call_java("getClasses")
+
+     def setConfigProtoBytes(self, b):
+         """Sets configProto from tensorflow, serialized into byte array.
+
+         Parameters
+         ----------
+         b : List[int]
+             ConfigProto from tensorflow, serialized into byte array
+         """
+         return self._set(configProtoBytes=b)
+
+     def setMaxSentenceLength(self, value):
+         """Sets max sentence length to process, by default 128.
+
+         Parameters
+         ----------
+         value : int
+             Max sentence length to process
+         """
+         return self._set(maxSentenceLength=value)
+
+     def setCoalesceSentences(self, value):
+         """Instead of 1 class per sentence (if inputCols is ``sentence``) output 1 class per document by averaging
+         the probabilities of all its sentences. Due to the max sequence length limit in almost all transformer
+         models such as RoBERTa (512 tokens), this parameter helps to feed all the sentences into the model and
+         average the probabilities for the entire document instead of per sentence. (Default: False)
+
+         Parameters
+         ----------
+         value : bool
+             If the output of all sentences will be averaged to one output
+         """
+         return self._set(coalesceSentences=value)
+
+     @keyword_only
+     def __init__(self, classname="com.johnsnowlabs.nlp.annotators.classifier.dl.RoBertaForZeroShotClassification",
+                  java_model=None):
+         super(RoBertaForZeroShotClassification, self).__init__(
+             classname=classname,
+             java_model=java_model
+         )
+         self._setDefault(
+             batchSize=8,
+             maxSentenceLength=128,
+             caseSensitive=True,
+             coalesceSentences=False,
+             activation="softmax"
+         )
+
+     @staticmethod
+     def loadSavedModel(folder, spark_session):
+         """Loads a locally saved model.
+
+         Parameters
+         ----------
+         folder : str
+             Folder of the saved model
+         spark_session : pyspark.sql.SparkSession
+             The current SparkSession
+
+         Returns
+         -------
+         RoBertaForZeroShotClassification
+             The restored model
+         """
+         from sparknlp.internal import _RoBertaForZeroShotClassification
+         jModel = _RoBertaForZeroShotClassification(folder, spark_session._jsparkSession)._java_obj
+         return RoBertaForZeroShotClassification(java_model=jModel)
+
+     @staticmethod
+     def pretrained(name="roberta_base_zero_shot_classifier_nli", lang="en", remote_loc=None):
+         """Downloads and loads a pretrained model.
+
+         Parameters
+         ----------
+         name : str, optional
+             Name of the pretrained model, by default
+             "roberta_base_zero_shot_classifier_nli"
+         lang : str, optional
+             Language of the pretrained model, by default "en"
+         remote_loc : str, optional
+             Optional remote address of the resource, by default None. Will use
+             Spark NLP's repositories otherwise.
+
+         Returns
+         -------
+         RoBertaForZeroShotClassification
+             The restored model
+         """
+         from sparknlp.pretrained import ResourceDownloader
+         return ResourceDownloader.downloadModel(RoBertaForZeroShotClassification, name, lang, remote_loc)
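The docstring example above reuses the model's trained labels; the point of the zero-shot variant is that labels can instead be supplied at runtime through the `HasCandidateLabelsProperties` mixin the class extends. A minimal sketch of that (the candidate labels are illustrative, and `spark` is assumed to be an active Spark NLP session):

    from sparknlp.base import DocumentAssembler
    from sparknlp.annotator import Tokenizer, RoBertaForZeroShotClassification
    from pyspark.ml import Pipeline

    documentAssembler = DocumentAssembler() \
        .setInputCol("text") \
        .setOutputCol("document")
    tokenizer = Tokenizer() \
        .setInputCols(["document"]) \
        .setOutputCol("token")
    # Candidate labels are chosen at inference time -- no retraining needed.
    zeroShotClassifier = RoBertaForZeroShotClassification.pretrained() \
        .setInputCols(["token", "document"]) \
        .setOutputCol("label") \
        .setCandidateLabels(["urgent", "mobile", "travel", "movie", "music"])

    pipeline = Pipeline().setStages([documentAssembler, tokenizer, zeroShotClassifier])
    data = spark.createDataFrame([["I have a phone and I love it!"]]).toDF("text")
    pipeline.fit(data).transform(data).select("label.result").show(truncate=False)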
sparknlp/annotator/classifier_dl/sentiment_dl.py
@@ -0,0 +1,378 @@
+ # Copyright 2017-2022 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains classes for SentimentDL."""
+
+ from sparknlp.annotator.param import EvaluationDLParams, ClassifierEncoder
+ from sparknlp.common import *
+
+
+ class SentimentDLApproach(AnnotatorApproach, EvaluationDLParams, ClassifierEncoder):
+     """Trains a SentimentDL, an annotator for multi-class sentiment analysis.
+
+     In natural language processing, sentiment analysis is the task of
+     classifying the affective state or subjective view of a text. A common
+     example is whether a product review or tweet can be interpreted positively
+     or negatively.
+
+     For the instantiated/pretrained models, see :class:`.SentimentDLModel`.
+
+     Setting a test dataset to monitor model metrics can be done with
+     ``.setTestDataset``. The method expects a path to a parquet file containing a
+     dataframe that has the same required columns as the training dataframe. The
+     pre-processing steps for the training dataframe should also be applied to the test
+     dataframe. The following example will show how to create the test dataset:
+
+     >>> documentAssembler = DocumentAssembler() \\
+     ...     .setInputCol("text") \\
+     ...     .setOutputCol("document")
+     >>> embeddings = UniversalSentenceEncoder.pretrained() \\
+     ...     .setInputCols(["document"]) \\
+     ...     .setOutputCol("sentence_embeddings")
+     >>> preProcessingPipeline = Pipeline().setStages([documentAssembler, embeddings])
+     >>> (train, test) = data.randomSplit([0.8, 0.2])
+     >>> preProcessingPipeline \\
+     ...     .fit(test) \\
+     ...     .transform(test) \\
+     ...     .write \\
+     ...     .mode("overwrite") \\
+     ...     .parquet("test_data")
+     >>> classifier = SentimentDLApproach() \\
+     ...     .setInputCols(["sentence_embeddings"]) \\
+     ...     .setOutputCol("sentiment") \\
+     ...     .setLabelColumn("label") \\
+     ...     .setTestDataset("test_data")
+
+     For extended examples of usage, see the `Examples <https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/training/english/classification/SentimentDL_train_multiclass_sentiment_classifier.ipynb>`__.
+
+     ======================= ======================
+     Input Annotation types  Output Annotation type
+     ======================= ======================
+     ``SENTENCE_EMBEDDINGS`` ``CATEGORY``
+     ======================= ======================
+
+     Parameters
+     ----------
+     batchSize
+         Batch size, by default 64
+     configProtoBytes
+         ConfigProto from tensorflow, serialized into byte array.
+     dropout
+         Dropout coefficient, by default 0.5
+     enableOutputLogs
+         Whether to use stdout in addition to Spark logs, by default False
+     evaluationLogExtended
+         Whether the validation logs should be extended: displays time and
+         evaluation of each label, by default False
+     labelColumn
+         Column with label per each document
+     lr
+         Learning Rate, by default 0.005
+     maxEpochs
+         Maximum number of epochs to train, by default 30
+     outputLogsPath
+         Folder path to save training logs
+     randomSeed
+         Random seed
+     testDataset
+         Path to a test dataset. If set, it is used to calculate statistics on
+         it during training.
+     threshold
+         The minimum threshold for the final result, otherwise it will be
+         neutral, by default 0.6
+     thresholdLabel
+         In case the score is less than threshold, what the label should be, by
+         default "neutral"
+     validationSplit
+         Choose the proportion of training dataset to be validated against the
+         model on each Epoch. The value should be between 0.0 and 1.0 and by
+         default it is 0.0 and off.
+     verbose
+         Level of verbosity during training
+
+     Notes
+     -----
+     - This annotator accepts a label column of a single item in either type of
+       String, Int, Float, or Double. So positive sentiment can be expressed as
+       either ``"positive"`` or ``0``, negative sentiment as ``"negative"`` or
+       ``1``.
+     - UniversalSentenceEncoder, BertSentenceEmbeddings, or SentenceEmbeddings
+       can be used for the ``inputCol``.
+
+     Examples
+     --------
+     In this example, ``sentiment.csv`` is in the form::
+
+         text,label
+         This movie is the best movie I have watched ever! In my opinion this movie can win an award.,0
+         This was a terrible movie! The acting was bad really bad!,1
+
+     The model can then be trained with
+
+     >>> import sparknlp
+     >>> from sparknlp.base import *
+     >>> from sparknlp.annotator import *
+     >>> from pyspark.ml import Pipeline
+     >>> smallCorpus = spark.read.option("header", "True").csv("src/test/resources/classifier/sentiment.csv")
+     >>> documentAssembler = DocumentAssembler() \\
+     ...     .setInputCol("text") \\
+     ...     .setOutputCol("document")
+     >>> useEmbeddings = UniversalSentenceEncoder.pretrained() \\
+     ...     .setInputCols(["document"]) \\
+     ...     .setOutputCol("sentence_embeddings")
+     >>> docClassifier = SentimentDLApproach() \\
+     ...     .setInputCols(["sentence_embeddings"]) \\
+     ...     .setOutputCol("sentiment") \\
+     ...     .setLabelColumn("label") \\
+     ...     .setBatchSize(32) \\
+     ...     .setMaxEpochs(1) \\
+     ...     .setLr(5e-3) \\
+     ...     .setDropout(0.5)
+     >>> pipeline = Pipeline().setStages([
+     ...     documentAssembler,
+     ...     useEmbeddings,
+     ...     docClassifier
+     ... ])
+     >>> pipelineModel = pipeline.fit(smallCorpus)
+     """
+
+     inputAnnotatorTypes = [AnnotatorType.SENTENCE_EMBEDDINGS]
+
+     outputAnnotatorType = AnnotatorType.CATEGORY
+
+     dropout = Param(Params._dummy(), "dropout", "Dropout coefficient", TypeConverters.toFloat)
+
+     threshold = Param(Params._dummy(), "threshold",
+                       "The minimum threshold for the final result, otherwise it will be neutral",
+                       TypeConverters.toFloat)
+
+     thresholdLabel = Param(Params._dummy(), "thresholdLabel",
+                            "In case the score is less than threshold, what should be the label. Default is neutral.",
+                            TypeConverters.toString)
+
+     def setDropout(self, v):
+         """Sets dropout coefficient, by default 0.5.
+
+         Parameters
+         ----------
+         v : float
+             Dropout coefficient
+         """
+         self._set(dropout=v)
+         return self
+
+     def setThreshold(self, v):
+         """Sets the minimum threshold for the final result, otherwise it will
+         be neutral, by default 0.6.
+
+         Parameters
+         ----------
+         v : float
+             Minimum threshold for the final result
+         """
+         self._set(threshold=v)
+         return self
+
+     def setThresholdLabel(self, p):
+         """Sets what the label should be if the score is less than threshold,
+         by default "neutral".
+
+         Parameters
+         ----------
+         p : str
+             The label, if the score is less than threshold
+         """
+         return self._set(thresholdLabel=p)
+
+     def _create_model(self, java_model):
+         return SentimentDLModel(java_model=java_model)
+
+     @keyword_only
+     def __init__(self):
+         super(SentimentDLApproach, self).__init__(
+             classname="com.johnsnowlabs.nlp.annotators.classifier.dl.SentimentDLApproach")
+         self._setDefault(
+             maxEpochs=30,
+             lr=float(0.005),
+             batchSize=64,
+             dropout=float(0.5),
+             enableOutputLogs=False,
+             evaluationLogExtended=False,
+             threshold=0.6,
+             thresholdLabel="neutral"
+         )
+
+
+ class SentimentDLModel(AnnotatorModel, HasStorageRef, HasEngine):
+     """SentimentDL, an annotator for multi-class sentiment analysis.
+
+     In natural language processing, sentiment analysis is the task of
+     classifying the affective state or subjective view of a text. A common
+     example is whether a product review or tweet can be interpreted positively
+     or negatively.
+
+     This is the instantiated model of the :class:`.SentimentDLApproach`. For
+     training your own model, please see the documentation of that class.
+
+     Pretrained models can be loaded with :meth:`.pretrained` of the companion
+     object:
+
+     >>> sentiment = SentimentDLModel.pretrained() \\
+     ...     .setInputCols(["sentence_embeddings"]) \\
+     ...     .setOutputCol("sentiment")
+
+     The default model is ``"sentimentdl_use_imdb"``, if no name is provided. It
+     is an English sentiment analysis model trained on the IMDB dataset. For
+     available pretrained models please see the `Models Hub
+     <https://sparknlp.org/models?task=Sentiment+Analysis>`__.
+
+     For extended examples of usage, see the `Examples
+     <https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/training/english/classification/SentimentDL_train_multiclass_sentiment_classifier.ipynb>`__.
+
+     ======================= ======================
+     Input Annotation types  Output Annotation type
+     ======================= ======================
+     ``SENTENCE_EMBEDDINGS`` ``CATEGORY``
+     ======================= ======================
+
+     Parameters
+     ----------
+     configProtoBytes
+         ConfigProto from tensorflow, serialized into byte array.
+     threshold
+         The minimum threshold for the final result, otherwise it will be
+         neutral, by default 0.6
+     thresholdLabel
+         In case the score is less than threshold, what the label should be, by
+         default "neutral"
+     classes
+         Tags used to train this SentimentDLModel
+
+     Examples
+     --------
+     >>> import sparknlp
+     >>> from sparknlp.base import *
+     >>> from sparknlp.annotator import *
+     >>> from pyspark.ml import Pipeline
+     >>> documentAssembler = DocumentAssembler() \\
+     ...     .setInputCol("text") \\
+     ...     .setOutputCol("document")
+     >>> useEmbeddings = UniversalSentenceEncoder.pretrained() \\
+     ...     .setInputCols(["document"]) \\
+     ...     .setOutputCol("sentence_embeddings")
+     >>> sentiment = SentimentDLModel.pretrained("sentimentdl_use_twitter") \\
+     ...     .setInputCols(["sentence_embeddings"]) \\
+     ...     .setThreshold(0.7) \\
+     ...     .setOutputCol("sentiment")
+     >>> pipeline = Pipeline().setStages([
+     ...     documentAssembler,
+     ...     useEmbeddings,
+     ...     sentiment
+     ... ])
+     >>> data = spark.createDataFrame([
+     ...     ["Wow, the new video is awesome!"],
+     ...     ["bruh what a damn waste of time"]
+     ... ]).toDF("text")
+     >>> result = pipeline.fit(data).transform(data)
+     >>> result.select("text", "sentiment.result").show(truncate=False)
+     +------------------------------+----------+
+     |text                          |result    |
+     +------------------------------+----------+
+     |Wow, the new video is awesome!|[positive]|
+     |bruh what a damn waste of time|[negative]|
+     +------------------------------+----------+
+     """
+     name = "SentimentDLModel"
+
+     inputAnnotatorTypes = [AnnotatorType.SENTENCE_EMBEDDINGS]
+
+     outputAnnotatorType = AnnotatorType.CATEGORY
+
+     def __init__(self, classname="com.johnsnowlabs.nlp.annotators.classifier.dl.SentimentDLModel", java_model=None):
+         super(SentimentDLModel, self).__init__(
+             classname=classname,
+             java_model=java_model
+         )
+         self._setDefault(
+             threshold=0.6,
+             thresholdLabel="neutral"
+         )
+
+     configProtoBytes = Param(Params._dummy(), "configProtoBytes",
+                              "ConfigProto from tensorflow, serialized into byte array. Get with config_proto.SerializeToString()",
+                              TypeConverters.toListInt)
+
+     threshold = Param(Params._dummy(), "threshold",
+                       "The minimum threshold for the final result, otherwise it will be neutral",
+                       TypeConverters.toFloat)
+
+     thresholdLabel = Param(Params._dummy(), "thresholdLabel",
+                            "In case the score is less than threshold, what should be the label. Default is neutral.",
+                            TypeConverters.toString)
+
+     classes = Param(Params._dummy(), "classes",
+                     "Tags used to train this SentimentDLModel",
+                     TypeConverters.toListString)
+
+     def setConfigProtoBytes(self, b):
+         """Sets configProto from tensorflow, serialized into byte array.
+
+         Parameters
+         ----------
+         b : List[int]
+             ConfigProto from tensorflow, serialized into byte array
+         """
+         return self._set(configProtoBytes=b)
+
+     def setThreshold(self, v):
+         """Sets the minimum threshold for the final result, otherwise it will
+         be neutral, by default 0.6.
+
+         Parameters
+         ----------
+         v : float
+             Minimum threshold for the final result
+         """
+         self._set(threshold=v)
+         return self
+
+     def setThresholdLabel(self, p):
+         """Sets what the label should be if the score is less than threshold,
+         by default "neutral".
+
+         Parameters
+         ----------
+         p : str
+             The label, if the score is less than threshold
+         """
+         return self._set(thresholdLabel=p)
+
+     @staticmethod
+     def pretrained(name="sentimentdl_use_imdb", lang="en", remote_loc=None):
+         """Downloads and loads a pretrained model.
+
+         Parameters
+         ----------
+         name : str, optional
+             Name of the pretrained model, by default "sentimentdl_use_imdb"
+         lang : str, optional
+             Language of the pretrained model, by default "en"
+         remote_loc : str, optional
+             Optional remote address of the resource, by default None. Will use
+             Spark NLP's repositories otherwise.
+
+         Returns
+         -------
+         SentimentDLModel
+             The restored model
+         """
+         from sparknlp.pretrained import ResourceDownloader
+         return ResourceDownloader.downloadModel(SentimentDLModel, name, lang, remote_loc)
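As a complement to the docstring examples, the threshold fallback works as follows: when the best class probability is below `threshold`, the prediction is replaced by `thresholdLabel`. A short sketch (the parameter values are illustrative; the pretrained name is taken from the docstring above):

    from sparknlp.annotator import SentimentDLModel

    # With a strict threshold, low-confidence predictions fall back to
    # thresholdLabel instead of a positive/negative class.
    sentiment = SentimentDLModel.pretrained("sentimentdl_use_twitter") \
        .setInputCols(["sentence_embeddings"]) \
        .setOutputCol("sentiment") \
        .setThreshold(0.9) \
        .setThresholdLabel("neutral")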