spark-nlp 2.6.3rc1__py2.py3-none-any.whl → 6.2.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (329)
  1. com/johnsnowlabs/ml/__init__.py +0 -0
  2. com/johnsnowlabs/ml/ai/__init__.py +10 -0
  3. com/johnsnowlabs/nlp/__init__.py +4 -2
  4. spark_nlp-6.2.1.dist-info/METADATA +362 -0
  5. spark_nlp-6.2.1.dist-info/RECORD +292 -0
  6. {spark_nlp-2.6.3rc1.dist-info → spark_nlp-6.2.1.dist-info}/WHEEL +1 -1
  7. sparknlp/__init__.py +281 -27
  8. sparknlp/annotation.py +137 -6
  9. sparknlp/annotation_audio.py +61 -0
  10. sparknlp/annotation_image.py +82 -0
  11. sparknlp/annotator/__init__.py +93 -0
  12. sparknlp/annotator/audio/__init__.py +16 -0
  13. sparknlp/annotator/audio/hubert_for_ctc.py +188 -0
  14. sparknlp/annotator/audio/wav2vec2_for_ctc.py +161 -0
  15. sparknlp/annotator/audio/whisper_for_ctc.py +251 -0
  16. sparknlp/annotator/chunk2_doc.py +85 -0
  17. sparknlp/annotator/chunker.py +137 -0
  18. sparknlp/annotator/classifier_dl/__init__.py +61 -0
  19. sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +161 -0
  20. sparknlp/annotator/classifier_dl/albert_for_question_answering.py +172 -0
  21. sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py +201 -0
  22. sparknlp/annotator/classifier_dl/albert_for_token_classification.py +179 -0
  23. sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +211 -0
  24. sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py +225 -0
  25. sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +161 -0
  26. sparknlp/annotator/classifier_dl/bert_for_question_answering.py +168 -0
  27. sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py +202 -0
  28. sparknlp/annotator/classifier_dl/bert_for_token_classification.py +177 -0
  29. sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py +212 -0
  30. sparknlp/annotator/classifier_dl/camembert_for_question_answering.py +168 -0
  31. sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py +205 -0
  32. sparknlp/annotator/classifier_dl/camembert_for_token_classification.py +173 -0
  33. sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +202 -0
  34. sparknlp/annotator/classifier_dl/classifier_dl.py +320 -0
  35. sparknlp/annotator/classifier_dl/deberta_for_question_answering.py +168 -0
  36. sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py +198 -0
  37. sparknlp/annotator/classifier_dl/deberta_for_token_classification.py +175 -0
  38. sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +193 -0
  39. sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py +168 -0
  40. sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py +201 -0
  41. sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py +175 -0
  42. sparknlp/annotator/classifier_dl/distil_bert_for_zero_shot_classification.py +211 -0
  43. sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +161 -0
  44. sparknlp/annotator/classifier_dl/longformer_for_question_answering.py +168 -0
  45. sparknlp/annotator/classifier_dl/longformer_for_sequence_classification.py +201 -0
  46. sparknlp/annotator/classifier_dl/longformer_for_token_classification.py +176 -0
  47. sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py +148 -0
  48. sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py +188 -0
  49. sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py +173 -0
  50. sparknlp/annotator/classifier_dl/multi_classifier_dl.py +395 -0
  51. sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +161 -0
  52. sparknlp/annotator/classifier_dl/roberta_for_question_answering.py +168 -0
  53. sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py +201 -0
  54. sparknlp/annotator/classifier_dl/roberta_for_token_classification.py +189 -0
  55. sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py +225 -0
  56. sparknlp/annotator/classifier_dl/sentiment_dl.py +378 -0
  57. sparknlp/annotator/classifier_dl/tapas_for_question_answering.py +170 -0
  58. sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +149 -0
  59. sparknlp/annotator/classifier_dl/xlm_roberta_for_question_answering.py +168 -0
  60. sparknlp/annotator/classifier_dl/xlm_roberta_for_sequence_classification.py +201 -0
  61. sparknlp/annotator/classifier_dl/xlm_roberta_for_token_classification.py +173 -0
  62. sparknlp/annotator/classifier_dl/xlm_roberta_for_zero_shot_classification.py +225 -0
  63. sparknlp/annotator/classifier_dl/xlnet_for_sequence_classification.py +201 -0
  64. sparknlp/annotator/classifier_dl/xlnet_for_token_classification.py +176 -0
  65. sparknlp/annotator/cleaners/__init__.py +15 -0
  66. sparknlp/annotator/cleaners/cleaner.py +202 -0
  67. sparknlp/annotator/cleaners/extractor.py +191 -0
  68. sparknlp/annotator/coref/__init__.py +1 -0
  69. sparknlp/annotator/coref/spanbert_coref.py +221 -0
  70. sparknlp/annotator/cv/__init__.py +29 -0
  71. sparknlp/annotator/cv/blip_for_question_answering.py +172 -0
  72. sparknlp/annotator/cv/clip_for_zero_shot_classification.py +193 -0
  73. sparknlp/annotator/cv/convnext_for_image_classification.py +269 -0
  74. sparknlp/annotator/cv/florence2_transformer.py +180 -0
  75. sparknlp/annotator/cv/gemma3_for_multimodal.py +346 -0
  76. sparknlp/annotator/cv/internvl_for_multimodal.py +280 -0
  77. sparknlp/annotator/cv/janus_for_multimodal.py +351 -0
  78. sparknlp/annotator/cv/llava_for_multimodal.py +328 -0
  79. sparknlp/annotator/cv/mllama_for_multimodal.py +340 -0
  80. sparknlp/annotator/cv/paligemma_for_multimodal.py +308 -0
  81. sparknlp/annotator/cv/phi3_vision_for_multimodal.py +328 -0
  82. sparknlp/annotator/cv/qwen2vl_transformer.py +332 -0
  83. sparknlp/annotator/cv/smolvlm_transformer.py +426 -0
  84. sparknlp/annotator/cv/swin_for_image_classification.py +242 -0
  85. sparknlp/annotator/cv/vision_encoder_decoder_for_image_captioning.py +240 -0
  86. sparknlp/annotator/cv/vit_for_image_classification.py +217 -0
  87. sparknlp/annotator/dataframe_optimizer.py +216 -0
  88. sparknlp/annotator/date2_chunk.py +88 -0
  89. sparknlp/annotator/dependency/__init__.py +17 -0
  90. sparknlp/annotator/dependency/dependency_parser.py +294 -0
  91. sparknlp/annotator/dependency/typed_dependency_parser.py +318 -0
  92. sparknlp/annotator/document_character_text_splitter.py +228 -0
  93. sparknlp/annotator/document_normalizer.py +235 -0
  94. sparknlp/annotator/document_token_splitter.py +175 -0
  95. sparknlp/annotator/document_token_splitter_test.py +85 -0
  96. sparknlp/annotator/embeddings/__init__.py +45 -0
  97. sparknlp/annotator/embeddings/albert_embeddings.py +230 -0
  98. sparknlp/annotator/embeddings/auto_gguf_embeddings.py +539 -0
  99. sparknlp/annotator/embeddings/bert_embeddings.py +208 -0
  100. sparknlp/annotator/embeddings/bert_sentence_embeddings.py +224 -0
  101. sparknlp/annotator/embeddings/bge_embeddings.py +199 -0
  102. sparknlp/annotator/embeddings/camembert_embeddings.py +210 -0
  103. sparknlp/annotator/embeddings/chunk_embeddings.py +149 -0
  104. sparknlp/annotator/embeddings/deberta_embeddings.py +208 -0
  105. sparknlp/annotator/embeddings/distil_bert_embeddings.py +221 -0
  106. sparknlp/annotator/embeddings/doc2vec.py +352 -0
  107. sparknlp/annotator/embeddings/e5_embeddings.py +195 -0
  108. sparknlp/annotator/embeddings/e5v_embeddings.py +138 -0
  109. sparknlp/annotator/embeddings/elmo_embeddings.py +251 -0
  110. sparknlp/annotator/embeddings/instructor_embeddings.py +204 -0
  111. sparknlp/annotator/embeddings/longformer_embeddings.py +211 -0
  112. sparknlp/annotator/embeddings/minilm_embeddings.py +189 -0
  113. sparknlp/annotator/embeddings/mpnet_embeddings.py +192 -0
  114. sparknlp/annotator/embeddings/mxbai_embeddings.py +184 -0
  115. sparknlp/annotator/embeddings/nomic_embeddings.py +181 -0
  116. sparknlp/annotator/embeddings/roberta_embeddings.py +225 -0
  117. sparknlp/annotator/embeddings/roberta_sentence_embeddings.py +191 -0
  118. sparknlp/annotator/embeddings/sentence_embeddings.py +134 -0
  119. sparknlp/annotator/embeddings/snowflake_embeddings.py +202 -0
  120. sparknlp/annotator/embeddings/uae_embeddings.py +211 -0
  121. sparknlp/annotator/embeddings/universal_sentence_encoder.py +211 -0
  122. sparknlp/annotator/embeddings/word2vec.py +353 -0
  123. sparknlp/annotator/embeddings/word_embeddings.py +385 -0
  124. sparknlp/annotator/embeddings/xlm_roberta_embeddings.py +225 -0
  125. sparknlp/annotator/embeddings/xlm_roberta_sentence_embeddings.py +194 -0
  126. sparknlp/annotator/embeddings/xlnet_embeddings.py +227 -0
  127. sparknlp/annotator/er/__init__.py +16 -0
  128. sparknlp/annotator/er/entity_ruler.py +267 -0
  129. sparknlp/annotator/graph_extraction.py +368 -0
  130. sparknlp/annotator/keyword_extraction/__init__.py +16 -0
  131. sparknlp/annotator/keyword_extraction/yake_keyword_extraction.py +270 -0
  132. sparknlp/annotator/ld_dl/__init__.py +16 -0
  133. sparknlp/annotator/ld_dl/language_detector_dl.py +199 -0
  134. sparknlp/annotator/lemmatizer.py +250 -0
  135. sparknlp/annotator/matcher/__init__.py +20 -0
  136. sparknlp/annotator/matcher/big_text_matcher.py +272 -0
  137. sparknlp/annotator/matcher/date_matcher.py +303 -0
  138. sparknlp/annotator/matcher/multi_date_matcher.py +109 -0
  139. sparknlp/annotator/matcher/regex_matcher.py +221 -0
  140. sparknlp/annotator/matcher/text_matcher.py +290 -0
  141. sparknlp/annotator/n_gram_generator.py +141 -0
  142. sparknlp/annotator/ner/__init__.py +21 -0
  143. sparknlp/annotator/ner/ner_approach.py +94 -0
  144. sparknlp/annotator/ner/ner_converter.py +148 -0
  145. sparknlp/annotator/ner/ner_crf.py +397 -0
  146. sparknlp/annotator/ner/ner_dl.py +591 -0
  147. sparknlp/annotator/ner/ner_dl_graph_checker.py +293 -0
  148. sparknlp/annotator/ner/ner_overwriter.py +166 -0
  149. sparknlp/annotator/ner/zero_shot_ner_model.py +173 -0
  150. sparknlp/annotator/normalizer.py +230 -0
  151. sparknlp/annotator/openai/__init__.py +16 -0
  152. sparknlp/annotator/openai/openai_completion.py +349 -0
  153. sparknlp/annotator/openai/openai_embeddings.py +106 -0
  154. sparknlp/annotator/param/__init__.py +17 -0
  155. sparknlp/annotator/param/classifier_encoder.py +98 -0
  156. sparknlp/annotator/param/evaluation_dl_params.py +130 -0
  157. sparknlp/annotator/pos/__init__.py +16 -0
  158. sparknlp/annotator/pos/perceptron.py +263 -0
  159. sparknlp/annotator/sentence/__init__.py +17 -0
  160. sparknlp/annotator/sentence/sentence_detector.py +290 -0
  161. sparknlp/annotator/sentence/sentence_detector_dl.py +467 -0
  162. sparknlp/annotator/sentiment/__init__.py +17 -0
  163. sparknlp/annotator/sentiment/sentiment_detector.py +208 -0
  164. sparknlp/annotator/sentiment/vivekn_sentiment.py +242 -0
  165. sparknlp/annotator/seq2seq/__init__.py +35 -0
  166. sparknlp/annotator/seq2seq/auto_gguf_model.py +304 -0
  167. sparknlp/annotator/seq2seq/auto_gguf_reranker.py +334 -0
  168. sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +336 -0
  169. sparknlp/annotator/seq2seq/bart_transformer.py +420 -0
  170. sparknlp/annotator/seq2seq/cohere_transformer.py +357 -0
  171. sparknlp/annotator/seq2seq/cpm_transformer.py +321 -0
  172. sparknlp/annotator/seq2seq/gpt2_transformer.py +363 -0
  173. sparknlp/annotator/seq2seq/llama2_transformer.py +343 -0
  174. sparknlp/annotator/seq2seq/llama3_transformer.py +381 -0
  175. sparknlp/annotator/seq2seq/m2m100_transformer.py +392 -0
  176. sparknlp/annotator/seq2seq/marian_transformer.py +374 -0
  177. sparknlp/annotator/seq2seq/mistral_transformer.py +348 -0
  178. sparknlp/annotator/seq2seq/nllb_transformer.py +420 -0
  179. sparknlp/annotator/seq2seq/olmo_transformer.py +326 -0
  180. sparknlp/annotator/seq2seq/phi2_transformer.py +326 -0
  181. sparknlp/annotator/seq2seq/phi3_transformer.py +330 -0
  182. sparknlp/annotator/seq2seq/phi4_transformer.py +387 -0
  183. sparknlp/annotator/seq2seq/qwen_transformer.py +340 -0
  184. sparknlp/annotator/seq2seq/starcoder_transformer.py +335 -0
  185. sparknlp/annotator/seq2seq/t5_transformer.py +425 -0
  186. sparknlp/annotator/similarity/__init__.py +0 -0
  187. sparknlp/annotator/similarity/document_similarity_ranker.py +379 -0
  188. sparknlp/annotator/spell_check/__init__.py +18 -0
  189. sparknlp/annotator/spell_check/context_spell_checker.py +911 -0
  190. sparknlp/annotator/spell_check/norvig_sweeting.py +358 -0
  191. sparknlp/annotator/spell_check/symmetric_delete.py +299 -0
  192. sparknlp/annotator/stemmer.py +79 -0
  193. sparknlp/annotator/stop_words_cleaner.py +190 -0
  194. sparknlp/annotator/tf_ner_dl_graph_builder.py +179 -0
  195. sparknlp/annotator/token/__init__.py +19 -0
  196. sparknlp/annotator/token/chunk_tokenizer.py +118 -0
  197. sparknlp/annotator/token/recursive_tokenizer.py +205 -0
  198. sparknlp/annotator/token/regex_tokenizer.py +208 -0
  199. sparknlp/annotator/token/tokenizer.py +561 -0
  200. sparknlp/annotator/token2_chunk.py +76 -0
  201. sparknlp/annotator/ws/__init__.py +16 -0
  202. sparknlp/annotator/ws/word_segmenter.py +429 -0
  203. sparknlp/base/__init__.py +30 -0
  204. sparknlp/base/audio_assembler.py +95 -0
  205. sparknlp/base/doc2_chunk.py +169 -0
  206. sparknlp/base/document_assembler.py +164 -0
  207. sparknlp/base/embeddings_finisher.py +201 -0
  208. sparknlp/base/finisher.py +217 -0
  209. sparknlp/base/gguf_ranking_finisher.py +234 -0
  210. sparknlp/base/graph_finisher.py +125 -0
  211. sparknlp/base/has_recursive_fit.py +24 -0
  212. sparknlp/base/has_recursive_transform.py +22 -0
  213. sparknlp/base/image_assembler.py +172 -0
  214. sparknlp/base/light_pipeline.py +429 -0
  215. sparknlp/base/multi_document_assembler.py +164 -0
  216. sparknlp/base/prompt_assembler.py +207 -0
  217. sparknlp/base/recursive_pipeline.py +107 -0
  218. sparknlp/base/table_assembler.py +145 -0
  219. sparknlp/base/token_assembler.py +124 -0
  220. sparknlp/common/__init__.py +26 -0
  221. sparknlp/common/annotator_approach.py +41 -0
  222. sparknlp/common/annotator_model.py +47 -0
  223. sparknlp/common/annotator_properties.py +114 -0
  224. sparknlp/common/annotator_type.py +38 -0
  225. sparknlp/common/completion_post_processing.py +37 -0
  226. sparknlp/common/coverage_result.py +22 -0
  227. sparknlp/common/match_strategy.py +33 -0
  228. sparknlp/common/properties.py +1298 -0
  229. sparknlp/common/read_as.py +33 -0
  230. sparknlp/common/recursive_annotator_approach.py +35 -0
  231. sparknlp/common/storage.py +149 -0
  232. sparknlp/common/utils.py +39 -0
  233. sparknlp/functions.py +315 -5
  234. sparknlp/internal/__init__.py +1199 -0
  235. sparknlp/internal/annotator_java_ml.py +32 -0
  236. sparknlp/internal/annotator_transformer.py +37 -0
  237. sparknlp/internal/extended_java_wrapper.py +63 -0
  238. sparknlp/internal/params_getters_setters.py +71 -0
  239. sparknlp/internal/recursive.py +70 -0
  240. sparknlp/logging/__init__.py +15 -0
  241. sparknlp/logging/comet.py +467 -0
  242. sparknlp/partition/__init__.py +16 -0
  243. sparknlp/partition/partition.py +244 -0
  244. sparknlp/partition/partition_properties.py +902 -0
  245. sparknlp/partition/partition_transformer.py +200 -0
  246. sparknlp/pretrained/__init__.py +17 -0
  247. sparknlp/pretrained/pretrained_pipeline.py +158 -0
  248. sparknlp/pretrained/resource_downloader.py +216 -0
  249. sparknlp/pretrained/utils.py +35 -0
  250. sparknlp/reader/__init__.py +15 -0
  251. sparknlp/reader/enums.py +19 -0
  252. sparknlp/reader/pdf_to_text.py +190 -0
  253. sparknlp/reader/reader2doc.py +124 -0
  254. sparknlp/reader/reader2image.py +136 -0
  255. sparknlp/reader/reader2table.py +44 -0
  256. sparknlp/reader/reader_assembler.py +159 -0
  257. sparknlp/reader/sparknlp_reader.py +461 -0
  258. sparknlp/training/__init__.py +20 -0
  259. sparknlp/training/_tf_graph_builders/__init__.py +0 -0
  260. sparknlp/training/_tf_graph_builders/graph_builders.py +299 -0
  261. sparknlp/training/_tf_graph_builders/ner_dl/__init__.py +0 -0
  262. sparknlp/training/_tf_graph_builders/ner_dl/create_graph.py +41 -0
  263. sparknlp/training/_tf_graph_builders/ner_dl/dataset_encoder.py +78 -0
  264. sparknlp/training/_tf_graph_builders/ner_dl/ner_model.py +521 -0
  265. sparknlp/training/_tf_graph_builders/ner_dl/ner_model_saver.py +62 -0
  266. sparknlp/training/_tf_graph_builders/ner_dl/sentence_grouper.py +28 -0
  267. sparknlp/training/_tf_graph_builders/tf2contrib/__init__.py +36 -0
  268. sparknlp/training/_tf_graph_builders/tf2contrib/core_rnn_cell.py +385 -0
  269. sparknlp/training/_tf_graph_builders/tf2contrib/fused_rnn_cell.py +183 -0
  270. sparknlp/training/_tf_graph_builders/tf2contrib/gru_ops.py +235 -0
  271. sparknlp/training/_tf_graph_builders/tf2contrib/lstm_ops.py +665 -0
  272. sparknlp/training/_tf_graph_builders/tf2contrib/rnn.py +245 -0
  273. sparknlp/training/_tf_graph_builders/tf2contrib/rnn_cell.py +4006 -0
  274. sparknlp/training/_tf_graph_builders_1x/__init__.py +0 -0
  275. sparknlp/training/_tf_graph_builders_1x/graph_builders.py +277 -0
  276. sparknlp/training/_tf_graph_builders_1x/ner_dl/__init__.py +0 -0
  277. sparknlp/training/_tf_graph_builders_1x/ner_dl/create_graph.py +34 -0
  278. sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py +78 -0
  279. sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py +532 -0
  280. sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py +62 -0
  281. sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py +28 -0
  282. sparknlp/training/conll.py +150 -0
  283. sparknlp/training/conllu.py +103 -0
  284. sparknlp/training/pos.py +103 -0
  285. sparknlp/training/pub_tator.py +76 -0
  286. sparknlp/training/spacy_to_annotation.py +57 -0
  287. sparknlp/training/tfgraphs.py +5 -0
  288. sparknlp/upload_to_hub.py +149 -0
  289. sparknlp/util.py +51 -5
  290. com/__init__.pyc +0 -0
  291. com/__pycache__/__init__.cpython-36.pyc +0 -0
  292. com/johnsnowlabs/__init__.pyc +0 -0
  293. com/johnsnowlabs/__pycache__/__init__.cpython-36.pyc +0 -0
  294. com/johnsnowlabs/nlp/__init__.pyc +0 -0
  295. com/johnsnowlabs/nlp/__pycache__/__init__.cpython-36.pyc +0 -0
  296. spark_nlp-2.6.3rc1.dist-info/METADATA +0 -36
  297. spark_nlp-2.6.3rc1.dist-info/RECORD +0 -48
  298. sparknlp/__init__.pyc +0 -0
  299. sparknlp/__pycache__/__init__.cpython-36.pyc +0 -0
  300. sparknlp/__pycache__/annotation.cpython-36.pyc +0 -0
  301. sparknlp/__pycache__/annotator.cpython-36.pyc +0 -0
  302. sparknlp/__pycache__/base.cpython-36.pyc +0 -0
  303. sparknlp/__pycache__/common.cpython-36.pyc +0 -0
  304. sparknlp/__pycache__/embeddings.cpython-36.pyc +0 -0
  305. sparknlp/__pycache__/functions.cpython-36.pyc +0 -0
  306. sparknlp/__pycache__/internal.cpython-36.pyc +0 -0
  307. sparknlp/__pycache__/pretrained.cpython-36.pyc +0 -0
  308. sparknlp/__pycache__/storage.cpython-36.pyc +0 -0
  309. sparknlp/__pycache__/training.cpython-36.pyc +0 -0
  310. sparknlp/__pycache__/util.cpython-36.pyc +0 -0
  311. sparknlp/annotation.pyc +0 -0
  312. sparknlp/annotator.py +0 -3006
  313. sparknlp/annotator.pyc +0 -0
  314. sparknlp/base.py +0 -347
  315. sparknlp/base.pyc +0 -0
  316. sparknlp/common.py +0 -193
  317. sparknlp/common.pyc +0 -0
  318. sparknlp/embeddings.py +0 -40
  319. sparknlp/embeddings.pyc +0 -0
  320. sparknlp/internal.py +0 -288
  321. sparknlp/internal.pyc +0 -0
  322. sparknlp/pretrained.py +0 -123
  323. sparknlp/pretrained.pyc +0 -0
  324. sparknlp/storage.py +0 -32
  325. sparknlp/storage.pyc +0 -0
  326. sparknlp/training.py +0 -62
  327. sparknlp/training.pyc +0 -0
  328. sparknlp/util.pyc +0 -0
  329. {spark_nlp-2.6.3rc1.dist-info → spark_nlp-6.2.1.dist-info}/top_level.txt +0 -0
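The most visible change in this release is structural: the flat modules sparknlp/annotator.py, sparknlp/base.py, sparknlp/common.py, and sparknlp/embeddings.py (removed above) are replaced by per-topic subpackages. As a minimal sketch of the import impact, assuming the new package __init__.py files re-export their submodules' classes (the wildcard imports in the bundled docstrings suggest they do, but the file list alone does not guarantee it):

# 6.2.1: both import styles should resolve if the subpackage
# __init__.py files re-export their classes (assumption).
from sparknlp.annotator import MarianTransformer                              # flat style, as in 2.x
from sparknlp.annotator.seq2seq.marian_transformer import MarianTransformer  # new submodule path

The single file-level diff reproduced below corresponds to entry 176, sparknlp/annotator/seq2seq/marian_transformer.py (+374 -0).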
sparknlp/annotator/seq2seq/marian_transformer.py
@@ -0,0 +1,374 @@
+# Copyright 2017-2022 John Snow Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains classes for the MarianTransformer."""
+
+from sparknlp.common import *
+
+
+class MarianTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
+    """MarianTransformer: Fast Neural Machine Translation
+
+    Marian is an efficient, free Neural Machine Translation framework written in
+    pure C++ with minimal dependencies. It is mainly being developed by the
+    Microsoft Translator team. Many academic (most notably the University of
+    Edinburgh and in the past the Adam Mickiewicz University in Poznań) and
+    commercial contributors help with its development. MarianTransformer uses
+    the models trained by MarianNMT.
+
+    It is currently the engine behind the Microsoft Translator Neural Machine
+    Translation services and is being deployed by many companies, organizations
+    and research projects.
+
+    Note that this model only supports inputs up to 512 tokens. If you are
+    working with longer inputs, consider splitting them first. For example, you
+    can use the SentenceDetectorDL annotator to split longer texts into
+    sentences.
+
+    Pretrained models can be loaded with :meth:`.pretrained` of the companion
+    object:
+
+    >>> marian = MarianTransformer.pretrained() \\
+    ...     .setInputCols(["sentence"]) \\
+    ...     .setOutputCol("translation")
+
+    The default model is ``"opus_mt_en_fr"`` and the default language is
+    ``"xx"`` (meaning multilingual), if no values are provided.
+
+    For available pretrained models please see the `Models Hub <https://sparknlp.org/models?task=Translation>`__.
+
+    For extended examples of usage, see the `Examples <https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/annotation/text/multilingual/Translation_Marian.ipynb>`__.
+
+    ====================== ======================
+    Input Annotation types Output Annotation type
+    ====================== ======================
+    ``DOCUMENT``           ``DOCUMENT``
+    ====================== ======================
+
+    Parameters
+    ----------
+    batchSize
+        Size of every batch, by default 1
+    configProtoBytes
+        ConfigProto from tensorflow, serialized into byte array.
+    langId
+        Transformer's task, e.g. "summarize>", by default ""
+    maxInputLength
+        Controls the maximum length for encoder inputs (source language texts),
+        by default 40
+    maxOutputLength
+        Controls the maximum length for decoder outputs (target language texts),
+        by default 40
+
+    Notes
+    -----
+    This is a very computationally expensive module, especially on larger
+    sequences. The use of an accelerator such as GPU is recommended.
+
+    References
+    ----------
+    `MarianNMT at GitHub <https://marian-nmt.github.io/>`__
+
+    `Marian: Fast Neural Machine Translation in C++ <https://www.aclweb.org/anthology/P18-4020/>`__
+
+    **Paper Abstract:**
+
+    *We present Marian, an efficient and self-contained Neural Machine
+    Translation framework with an integrated automatic differentiation
+    engine based on dynamic computation graphs. Marian is written entirely in
+    C++. We describe the design of the encoder-decoder framework and
+    demonstrate that a research-friendly toolkit can achieve high training
+    and translation speed.*
+
+    Examples
+    --------
+    >>> import sparknlp
+    >>> from sparknlp.base import *
+    >>> from sparknlp.annotator import *
+    >>> from pyspark.ml import Pipeline
+    >>> documentAssembler = DocumentAssembler() \\
+    ...     .setInputCol("text") \\
+    ...     .setOutputCol("document")
+    >>> sentence = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "xx") \\
+    ...     .setInputCols("document") \\
+    ...     .setOutputCol("sentence")
+    >>> marian = MarianTransformer.pretrained() \\
+    ...     .setInputCols("sentence") \\
+    ...     .setOutputCol("translation") \\
+    ...     .setMaxInputLength(30)
+    >>> pipeline = Pipeline() \\
+    ...     .setStages([
+    ...         documentAssembler,
+    ...         sentence,
+    ...         marian
+    ...     ])
+    >>> data = spark.createDataFrame([["What is the capital of France? We should know this in french."]]).toDF("text")
+    >>> result = pipeline.fit(data).transform(data)
+    >>> result.selectExpr("explode(translation.result) as result").show(truncate=False)
+    +-------------------------------------+
+    |result                               |
+    +-------------------------------------+
+    |Quelle est la capitale de la France ?|
+    |On devrait le savoir en français.    |
+    +-------------------------------------+
+    """
+
+    name = "MarianTransformer"
+
+    inputAnnotatorTypes = [AnnotatorType.DOCUMENT]
+
+    outputAnnotatorType = AnnotatorType.DOCUMENT
+
+    configProtoBytes = Param(Params._dummy(),
+                             "configProtoBytes",
+                             "ConfigProto from tensorflow, serialized into byte array. Get with config_proto.SerializeToString()",
+                             TypeConverters.toListInt)
+
+    langId = Param(Params._dummy(), "langId", "Transformer's task, e.g. summarize>",
+                   typeConverter=TypeConverters.toString)
+
+    maxInputLength = Param(Params._dummy(), "maxInputLength",
+                           "Controls the maximum length for encoder inputs (source language texts)",
+                           typeConverter=TypeConverters.toInt)
+
+    maxOutputLength = Param(Params._dummy(), "maxOutputLength",
+                            "Controls the maximum length for decoder outputs (target language texts)",
+                            typeConverter=TypeConverters.toInt)
+
+    doSample = Param(Params._dummy(), "doSample", "Whether or not to use sampling; use greedy decoding otherwise",
+                     typeConverter=TypeConverters.toBoolean)
+
+    temperature = Param(Params._dummy(), "temperature", "The value used to modulate the next token probabilities",
+                        typeConverter=TypeConverters.toFloat)
+
+    topK = Param(Params._dummy(), "topK",
+                 "The number of highest probability vocabulary tokens to keep for top-k-filtering",
+                 typeConverter=TypeConverters.toInt)
+
+    topP = Param(Params._dummy(), "topP",
+                 "If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or higher are kept for generation",
+                 typeConverter=TypeConverters.toFloat)
+
+    repetitionPenalty = Param(Params._dummy(), "repetitionPenalty",
+                              "The parameter for repetition penalty. 1.0 means no penalty. See `this paper <https://arxiv.org/pdf/1909.05858.pdf>`__ for more details",
+                              typeConverter=TypeConverters.toFloat)
+
+    noRepeatNgramSize = Param(Params._dummy(), "noRepeatNgramSize",
+                              "If set to int > 0, all ngrams of that size can only occur once",
+                              typeConverter=TypeConverters.toInt)
+
+
+    ignoreTokenIds = Param(Params._dummy(), "ignoreTokenIds",
+                           "A list of token ids which are ignored in the decoder's output",
+                           typeConverter=TypeConverters.toListInt)
+
+    def setIgnoreTokenIds(self, value):
+        """Sets a list of token ids which are ignored in the decoder's output.
+
+        Parameters
+        ----------
+        value : List[int]
+            The token ids to be filtered out
+        """
+        return self._set(ignoreTokenIds=value)
+
+    def setConfigProtoBytes(self, b):
+        """Sets configProto from tensorflow, serialized into byte array.
+
+        Parameters
+        ----------
+        b : List[int]
+            ConfigProto from tensorflow, serialized into byte array
+        """
+        return self._set(configProtoBytes=b)
+
+    def setLangId(self, value):
+        """Sets the transformer's task, e.g. "summarize>", by default "".
+
+        Parameters
+        ----------
+        value : str
+            Transformer's task, e.g. "summarize>"
+        """
+        return self._set(langId=value)
+
+    def setMaxInputLength(self, value):
+        """Sets the maximum length for encoder inputs (source language texts),
+        by default 40. The value may be at most 512, as the Marian Transformer
+        does not support inputs longer than 512 tokens.
+
+        Parameters
+        ----------
+        value : int
+            The maximum length for encoder inputs (source language texts)
+        """
+        if value > 512:
+            raise ValueError("MarianTransformer model does not support sequences longer than 512.")
+        return self._set(maxInputLength=value)
+
+    def setMaxOutputLength(self, value):
+        """Sets the maximum length for decoder outputs (target language texts),
+        by default 40.
+
+        Parameters
+        ----------
+        value : int
+            The maximum length for decoder outputs (target language texts)
+        """
+        return self._set(maxOutputLength=value)
+
+
+    def setDoSample(self, value):
+        """Sets whether or not to use sampling; use greedy decoding otherwise.
+
+        Parameters
+        ----------
+        value : bool
+            Whether or not to use sampling; use greedy decoding otherwise
+        """
+        return self._set(doSample=value)
+
+    def setTemperature(self, value):
+        """Sets the value used to modulate the next token probabilities.
+
+        Parameters
+        ----------
+        value : float
+            The value used to modulate the next token probabilities
+        """
+        return self._set(temperature=value)
+
+    def setTopK(self, value):
+        """Sets the number of highest probability vocabulary tokens to keep for
+        top-k-filtering.
+
+        Parameters
+        ----------
+        value : int
+            Number of highest probability vocabulary tokens to keep
+        """
+        return self._set(topK=value)
+
+    def setTopP(self, value):
+        """Sets the top cumulative probability for vocabulary tokens.
+
+        If set to float < 1, only the most probable tokens with probabilities
+        that add up to ``topP`` or higher are kept for generation.
+
+        Parameters
+        ----------
+        value : float
+            Cumulative probability for vocabulary tokens
+        """
+        return self._set(topP=value)
+
+    def setRepetitionPenalty(self, value):
+        """Sets the parameter for repetition penalty. 1.0 means no penalty.
+
+        Parameters
+        ----------
+        value : float
+            The repetition penalty
+
+        References
+        ----------
+        See `Ctrl: A Conditional Transformer Language Model For Controllable
+        Generation <https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
+        """
+        return self._set(repetitionPenalty=value)
+
+    def setNoRepeatNgramSize(self, value):
+        """Sets the size of n-grams that can only occur once.
+
+        If set to int > 0, all ngrams of that size can only occur once.
+
+        Parameters
+        ----------
+        value : int
+            The n-gram size that can only occur once
+        """
+        return self._set(noRepeatNgramSize=value)
+
+    def setRandomSeed(self, seed):
+        """Sets random seed.
+
+        Parameters
+        ----------
+        seed : int
+            Random seed
+        """
+        self._call_java("setRandomSeed", seed)
+
+        return self
+
+    @keyword_only
+    def __init__(self, classname="com.johnsnowlabs.nlp.annotators.seq2seq.MarianTransformer", java_model=None):
+        super(MarianTransformer, self).__init__(
+            classname=classname,
+            java_model=java_model
+        )
+        self._setDefault(
+            batchSize=1,
+            maxInputLength=40,
+            maxOutputLength=40,
+            langId="",
+            doSample=False,
+            temperature=1.0,
+            topK=50,
+            topP=1.0,
+            repetitionPenalty=1.0,
+            noRepeatNgramSize=0,
+            ignoreTokenIds=[]
+        )
+
+    @staticmethod
+    def loadSavedModel(folder, spark_session):
+        """Loads a locally saved model.
+
+        Parameters
+        ----------
+        folder : str
+            Folder of the saved model
+        spark_session : pyspark.sql.SparkSession
+            The current SparkSession
+
+        Returns
+        -------
+        MarianTransformer
+            The restored model
+        """
+        from sparknlp.internal import _MarianLoader
+        jModel = _MarianLoader(folder, spark_session._jsparkSession)._java_obj
+        return MarianTransformer(java_model=jModel)
+
+    @staticmethod
+    def pretrained(name="opus_mt_en_fr", lang="xx", remote_loc=None):
+        """Downloads and loads a pretrained model.
+
+        Parameters
+        ----------
+        name : str, optional
+            Name of the pretrained model, by default "opus_mt_en_fr"
+        lang : str, optional
+            Language of the pretrained model, by default "xx"
+        remote_loc : str, optional
+            Optional remote address of the resource, by default None. Will use
+            Spark NLP's repositories otherwise.
+
+        Returns
+        -------
+        MarianTransformer
+            The restored model
+        """
+        from sparknlp.pretrained import ResourceDownloader
+        return ResourceDownloader.downloadModel(MarianTransformer, name, lang, remote_loc)
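
For orientation, here is a minimal usage sketch of the generation-control parameters (doSample, topK, temperature) defined in the file above. It assumes a Spark session started via sparknlp.start() and uses the documented default model "opus_mt_en_fr"; it is illustrative only and not part of the packaged diff.

import sparknlp
from sparknlp.base import DocumentAssembler
from sparknlp.annotator import MarianTransformer
from pyspark.ml import Pipeline

spark = sparknlp.start()

documentAssembler = DocumentAssembler() \
    .setInputCol("text") \
    .setOutputCol("document")

# Greedy decoding is the default (doSample=False); the sampling knobs
# only take effect once sampling is enabled. setMaxInputLength rejects
# values above the model's hard 512-token limit.
marian = MarianTransformer.pretrained("opus_mt_en_fr", "xx") \
    .setInputCols(["document"]) \
    .setOutputCol("translation") \
    .setMaxInputLength(64) \
    .setDoSample(True) \
    .setTopK(50) \
    .setTemperature(0.7)

pipeline = Pipeline().setStages([documentAssembler, marian])
data = spark.createDataFrame([["What time is it?"]]).toDF("text")
pipeline.fit(data).transform(data).select("translation.result").show(truncate=False)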