spark-nlp 2.6.3rc1__py2.py3-none-any.whl → 6.2.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (329)
  1. com/johnsnowlabs/ml/__init__.py +0 -0
  2. com/johnsnowlabs/ml/ai/__init__.py +10 -0
  3. com/johnsnowlabs/nlp/__init__.py +4 -2
  4. spark_nlp-6.2.1.dist-info/METADATA +362 -0
  5. spark_nlp-6.2.1.dist-info/RECORD +292 -0
  6. {spark_nlp-2.6.3rc1.dist-info → spark_nlp-6.2.1.dist-info}/WHEEL +1 -1
  7. sparknlp/__init__.py +281 -27
  8. sparknlp/annotation.py +137 -6
  9. sparknlp/annotation_audio.py +61 -0
  10. sparknlp/annotation_image.py +82 -0
  11. sparknlp/annotator/__init__.py +93 -0
  12. sparknlp/annotator/audio/__init__.py +16 -0
  13. sparknlp/annotator/audio/hubert_for_ctc.py +188 -0
  14. sparknlp/annotator/audio/wav2vec2_for_ctc.py +161 -0
  15. sparknlp/annotator/audio/whisper_for_ctc.py +251 -0
  16. sparknlp/annotator/chunk2_doc.py +85 -0
  17. sparknlp/annotator/chunker.py +137 -0
  18. sparknlp/annotator/classifier_dl/__init__.py +61 -0
  19. sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +161 -0
  20. sparknlp/annotator/classifier_dl/albert_for_question_answering.py +172 -0
  21. sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py +201 -0
  22. sparknlp/annotator/classifier_dl/albert_for_token_classification.py +179 -0
  23. sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +211 -0
  24. sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py +225 -0
  25. sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +161 -0
  26. sparknlp/annotator/classifier_dl/bert_for_question_answering.py +168 -0
  27. sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py +202 -0
  28. sparknlp/annotator/classifier_dl/bert_for_token_classification.py +177 -0
  29. sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py +212 -0
  30. sparknlp/annotator/classifier_dl/camembert_for_question_answering.py +168 -0
  31. sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py +205 -0
  32. sparknlp/annotator/classifier_dl/camembert_for_token_classification.py +173 -0
  33. sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +202 -0
  34. sparknlp/annotator/classifier_dl/classifier_dl.py +320 -0
  35. sparknlp/annotator/classifier_dl/deberta_for_question_answering.py +168 -0
  36. sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py +198 -0
  37. sparknlp/annotator/classifier_dl/deberta_for_token_classification.py +175 -0
  38. sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +193 -0
  39. sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py +168 -0
  40. sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py +201 -0
  41. sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py +175 -0
  42. sparknlp/annotator/classifier_dl/distil_bert_for_zero_shot_classification.py +211 -0
  43. sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +161 -0
  44. sparknlp/annotator/classifier_dl/longformer_for_question_answering.py +168 -0
  45. sparknlp/annotator/classifier_dl/longformer_for_sequence_classification.py +201 -0
  46. sparknlp/annotator/classifier_dl/longformer_for_token_classification.py +176 -0
  47. sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py +148 -0
  48. sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py +188 -0
  49. sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py +173 -0
  50. sparknlp/annotator/classifier_dl/multi_classifier_dl.py +395 -0
  51. sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +161 -0
  52. sparknlp/annotator/classifier_dl/roberta_for_question_answering.py +168 -0
  53. sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py +201 -0
  54. sparknlp/annotator/classifier_dl/roberta_for_token_classification.py +189 -0
  55. sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py +225 -0
  56. sparknlp/annotator/classifier_dl/sentiment_dl.py +378 -0
  57. sparknlp/annotator/classifier_dl/tapas_for_question_answering.py +170 -0
  58. sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +149 -0
  59. sparknlp/annotator/classifier_dl/xlm_roberta_for_question_answering.py +168 -0
  60. sparknlp/annotator/classifier_dl/xlm_roberta_for_sequence_classification.py +201 -0
  61. sparknlp/annotator/classifier_dl/xlm_roberta_for_token_classification.py +173 -0
  62. sparknlp/annotator/classifier_dl/xlm_roberta_for_zero_shot_classification.py +225 -0
  63. sparknlp/annotator/classifier_dl/xlnet_for_sequence_classification.py +201 -0
  64. sparknlp/annotator/classifier_dl/xlnet_for_token_classification.py +176 -0
  65. sparknlp/annotator/cleaners/__init__.py +15 -0
  66. sparknlp/annotator/cleaners/cleaner.py +202 -0
  67. sparknlp/annotator/cleaners/extractor.py +191 -0
  68. sparknlp/annotator/coref/__init__.py +1 -0
  69. sparknlp/annotator/coref/spanbert_coref.py +221 -0
  70. sparknlp/annotator/cv/__init__.py +29 -0
  71. sparknlp/annotator/cv/blip_for_question_answering.py +172 -0
  72. sparknlp/annotator/cv/clip_for_zero_shot_classification.py +193 -0
  73. sparknlp/annotator/cv/convnext_for_image_classification.py +269 -0
  74. sparknlp/annotator/cv/florence2_transformer.py +180 -0
  75. sparknlp/annotator/cv/gemma3_for_multimodal.py +346 -0
  76. sparknlp/annotator/cv/internvl_for_multimodal.py +280 -0
  77. sparknlp/annotator/cv/janus_for_multimodal.py +351 -0
  78. sparknlp/annotator/cv/llava_for_multimodal.py +328 -0
  79. sparknlp/annotator/cv/mllama_for_multimodal.py +340 -0
  80. sparknlp/annotator/cv/paligemma_for_multimodal.py +308 -0
  81. sparknlp/annotator/cv/phi3_vision_for_multimodal.py +328 -0
  82. sparknlp/annotator/cv/qwen2vl_transformer.py +332 -0
  83. sparknlp/annotator/cv/smolvlm_transformer.py +426 -0
  84. sparknlp/annotator/cv/swin_for_image_classification.py +242 -0
  85. sparknlp/annotator/cv/vision_encoder_decoder_for_image_captioning.py +240 -0
  86. sparknlp/annotator/cv/vit_for_image_classification.py +217 -0
  87. sparknlp/annotator/dataframe_optimizer.py +216 -0
  88. sparknlp/annotator/date2_chunk.py +88 -0
  89. sparknlp/annotator/dependency/__init__.py +17 -0
  90. sparknlp/annotator/dependency/dependency_parser.py +294 -0
  91. sparknlp/annotator/dependency/typed_dependency_parser.py +318 -0
  92. sparknlp/annotator/document_character_text_splitter.py +228 -0
  93. sparknlp/annotator/document_normalizer.py +235 -0
  94. sparknlp/annotator/document_token_splitter.py +175 -0
  95. sparknlp/annotator/document_token_splitter_test.py +85 -0
  96. sparknlp/annotator/embeddings/__init__.py +45 -0
  97. sparknlp/annotator/embeddings/albert_embeddings.py +230 -0
  98. sparknlp/annotator/embeddings/auto_gguf_embeddings.py +539 -0
  99. sparknlp/annotator/embeddings/bert_embeddings.py +208 -0
  100. sparknlp/annotator/embeddings/bert_sentence_embeddings.py +224 -0
  101. sparknlp/annotator/embeddings/bge_embeddings.py +199 -0
  102. sparknlp/annotator/embeddings/camembert_embeddings.py +210 -0
  103. sparknlp/annotator/embeddings/chunk_embeddings.py +149 -0
  104. sparknlp/annotator/embeddings/deberta_embeddings.py +208 -0
  105. sparknlp/annotator/embeddings/distil_bert_embeddings.py +221 -0
  106. sparknlp/annotator/embeddings/doc2vec.py +352 -0
  107. sparknlp/annotator/embeddings/e5_embeddings.py +195 -0
  108. sparknlp/annotator/embeddings/e5v_embeddings.py +138 -0
  109. sparknlp/annotator/embeddings/elmo_embeddings.py +251 -0
  110. sparknlp/annotator/embeddings/instructor_embeddings.py +204 -0
  111. sparknlp/annotator/embeddings/longformer_embeddings.py +211 -0
  112. sparknlp/annotator/embeddings/minilm_embeddings.py +189 -0
  113. sparknlp/annotator/embeddings/mpnet_embeddings.py +192 -0
  114. sparknlp/annotator/embeddings/mxbai_embeddings.py +184 -0
  115. sparknlp/annotator/embeddings/nomic_embeddings.py +181 -0
  116. sparknlp/annotator/embeddings/roberta_embeddings.py +225 -0
  117. sparknlp/annotator/embeddings/roberta_sentence_embeddings.py +191 -0
  118. sparknlp/annotator/embeddings/sentence_embeddings.py +134 -0
  119. sparknlp/annotator/embeddings/snowflake_embeddings.py +202 -0
  120. sparknlp/annotator/embeddings/uae_embeddings.py +211 -0
  121. sparknlp/annotator/embeddings/universal_sentence_encoder.py +211 -0
  122. sparknlp/annotator/embeddings/word2vec.py +353 -0
  123. sparknlp/annotator/embeddings/word_embeddings.py +385 -0
  124. sparknlp/annotator/embeddings/xlm_roberta_embeddings.py +225 -0
  125. sparknlp/annotator/embeddings/xlm_roberta_sentence_embeddings.py +194 -0
  126. sparknlp/annotator/embeddings/xlnet_embeddings.py +227 -0
  127. sparknlp/annotator/er/__init__.py +16 -0
  128. sparknlp/annotator/er/entity_ruler.py +267 -0
  129. sparknlp/annotator/graph_extraction.py +368 -0
  130. sparknlp/annotator/keyword_extraction/__init__.py +16 -0
  131. sparknlp/annotator/keyword_extraction/yake_keyword_extraction.py +270 -0
  132. sparknlp/annotator/ld_dl/__init__.py +16 -0
  133. sparknlp/annotator/ld_dl/language_detector_dl.py +199 -0
  134. sparknlp/annotator/lemmatizer.py +250 -0
  135. sparknlp/annotator/matcher/__init__.py +20 -0
  136. sparknlp/annotator/matcher/big_text_matcher.py +272 -0
  137. sparknlp/annotator/matcher/date_matcher.py +303 -0
  138. sparknlp/annotator/matcher/multi_date_matcher.py +109 -0
  139. sparknlp/annotator/matcher/regex_matcher.py +221 -0
  140. sparknlp/annotator/matcher/text_matcher.py +290 -0
  141. sparknlp/annotator/n_gram_generator.py +141 -0
  142. sparknlp/annotator/ner/__init__.py +21 -0
  143. sparknlp/annotator/ner/ner_approach.py +94 -0
  144. sparknlp/annotator/ner/ner_converter.py +148 -0
  145. sparknlp/annotator/ner/ner_crf.py +397 -0
  146. sparknlp/annotator/ner/ner_dl.py +591 -0
  147. sparknlp/annotator/ner/ner_dl_graph_checker.py +293 -0
  148. sparknlp/annotator/ner/ner_overwriter.py +166 -0
  149. sparknlp/annotator/ner/zero_shot_ner_model.py +173 -0
  150. sparknlp/annotator/normalizer.py +230 -0
  151. sparknlp/annotator/openai/__init__.py +16 -0
  152. sparknlp/annotator/openai/openai_completion.py +349 -0
  153. sparknlp/annotator/openai/openai_embeddings.py +106 -0
  154. sparknlp/annotator/param/__init__.py +17 -0
  155. sparknlp/annotator/param/classifier_encoder.py +98 -0
  156. sparknlp/annotator/param/evaluation_dl_params.py +130 -0
  157. sparknlp/annotator/pos/__init__.py +16 -0
  158. sparknlp/annotator/pos/perceptron.py +263 -0
  159. sparknlp/annotator/sentence/__init__.py +17 -0
  160. sparknlp/annotator/sentence/sentence_detector.py +290 -0
  161. sparknlp/annotator/sentence/sentence_detector_dl.py +467 -0
  162. sparknlp/annotator/sentiment/__init__.py +17 -0
  163. sparknlp/annotator/sentiment/sentiment_detector.py +208 -0
  164. sparknlp/annotator/sentiment/vivekn_sentiment.py +242 -0
  165. sparknlp/annotator/seq2seq/__init__.py +35 -0
  166. sparknlp/annotator/seq2seq/auto_gguf_model.py +304 -0
  167. sparknlp/annotator/seq2seq/auto_gguf_reranker.py +334 -0
  168. sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +336 -0
  169. sparknlp/annotator/seq2seq/bart_transformer.py +420 -0
  170. sparknlp/annotator/seq2seq/cohere_transformer.py +357 -0
  171. sparknlp/annotator/seq2seq/cpm_transformer.py +321 -0
  172. sparknlp/annotator/seq2seq/gpt2_transformer.py +363 -0
  173. sparknlp/annotator/seq2seq/llama2_transformer.py +343 -0
  174. sparknlp/annotator/seq2seq/llama3_transformer.py +381 -0
  175. sparknlp/annotator/seq2seq/m2m100_transformer.py +392 -0
  176. sparknlp/annotator/seq2seq/marian_transformer.py +374 -0
  177. sparknlp/annotator/seq2seq/mistral_transformer.py +348 -0
  178. sparknlp/annotator/seq2seq/nllb_transformer.py +420 -0
  179. sparknlp/annotator/seq2seq/olmo_transformer.py +326 -0
  180. sparknlp/annotator/seq2seq/phi2_transformer.py +326 -0
  181. sparknlp/annotator/seq2seq/phi3_transformer.py +330 -0
  182. sparknlp/annotator/seq2seq/phi4_transformer.py +387 -0
  183. sparknlp/annotator/seq2seq/qwen_transformer.py +340 -0
  184. sparknlp/annotator/seq2seq/starcoder_transformer.py +335 -0
  185. sparknlp/annotator/seq2seq/t5_transformer.py +425 -0
  186. sparknlp/annotator/similarity/__init__.py +0 -0
  187. sparknlp/annotator/similarity/document_similarity_ranker.py +379 -0
  188. sparknlp/annotator/spell_check/__init__.py +18 -0
  189. sparknlp/annotator/spell_check/context_spell_checker.py +911 -0
  190. sparknlp/annotator/spell_check/norvig_sweeting.py +358 -0
  191. sparknlp/annotator/spell_check/symmetric_delete.py +299 -0
  192. sparknlp/annotator/stemmer.py +79 -0
  193. sparknlp/annotator/stop_words_cleaner.py +190 -0
  194. sparknlp/annotator/tf_ner_dl_graph_builder.py +179 -0
  195. sparknlp/annotator/token/__init__.py +19 -0
  196. sparknlp/annotator/token/chunk_tokenizer.py +118 -0
  197. sparknlp/annotator/token/recursive_tokenizer.py +205 -0
  198. sparknlp/annotator/token/regex_tokenizer.py +208 -0
  199. sparknlp/annotator/token/tokenizer.py +561 -0
  200. sparknlp/annotator/token2_chunk.py +76 -0
  201. sparknlp/annotator/ws/__init__.py +16 -0
  202. sparknlp/annotator/ws/word_segmenter.py +429 -0
  203. sparknlp/base/__init__.py +30 -0
  204. sparknlp/base/audio_assembler.py +95 -0
  205. sparknlp/base/doc2_chunk.py +169 -0
  206. sparknlp/base/document_assembler.py +164 -0
  207. sparknlp/base/embeddings_finisher.py +201 -0
  208. sparknlp/base/finisher.py +217 -0
  209. sparknlp/base/gguf_ranking_finisher.py +234 -0
  210. sparknlp/base/graph_finisher.py +125 -0
  211. sparknlp/base/has_recursive_fit.py +24 -0
  212. sparknlp/base/has_recursive_transform.py +22 -0
  213. sparknlp/base/image_assembler.py +172 -0
  214. sparknlp/base/light_pipeline.py +429 -0
  215. sparknlp/base/multi_document_assembler.py +164 -0
  216. sparknlp/base/prompt_assembler.py +207 -0
  217. sparknlp/base/recursive_pipeline.py +107 -0
  218. sparknlp/base/table_assembler.py +145 -0
  219. sparknlp/base/token_assembler.py +124 -0
  220. sparknlp/common/__init__.py +26 -0
  221. sparknlp/common/annotator_approach.py +41 -0
  222. sparknlp/common/annotator_model.py +47 -0
  223. sparknlp/common/annotator_properties.py +114 -0
  224. sparknlp/common/annotator_type.py +38 -0
  225. sparknlp/common/completion_post_processing.py +37 -0
  226. sparknlp/common/coverage_result.py +22 -0
  227. sparknlp/common/match_strategy.py +33 -0
  228. sparknlp/common/properties.py +1298 -0
  229. sparknlp/common/read_as.py +33 -0
  230. sparknlp/common/recursive_annotator_approach.py +35 -0
  231. sparknlp/common/storage.py +149 -0
  232. sparknlp/common/utils.py +39 -0
  233. sparknlp/functions.py +315 -5
  234. sparknlp/internal/__init__.py +1199 -0
  235. sparknlp/internal/annotator_java_ml.py +32 -0
  236. sparknlp/internal/annotator_transformer.py +37 -0
  237. sparknlp/internal/extended_java_wrapper.py +63 -0
  238. sparknlp/internal/params_getters_setters.py +71 -0
  239. sparknlp/internal/recursive.py +70 -0
  240. sparknlp/logging/__init__.py +15 -0
  241. sparknlp/logging/comet.py +467 -0
  242. sparknlp/partition/__init__.py +16 -0
  243. sparknlp/partition/partition.py +244 -0
  244. sparknlp/partition/partition_properties.py +902 -0
  245. sparknlp/partition/partition_transformer.py +200 -0
  246. sparknlp/pretrained/__init__.py +17 -0
  247. sparknlp/pretrained/pretrained_pipeline.py +158 -0
  248. sparknlp/pretrained/resource_downloader.py +216 -0
  249. sparknlp/pretrained/utils.py +35 -0
  250. sparknlp/reader/__init__.py +15 -0
  251. sparknlp/reader/enums.py +19 -0
  252. sparknlp/reader/pdf_to_text.py +190 -0
  253. sparknlp/reader/reader2doc.py +124 -0
  254. sparknlp/reader/reader2image.py +136 -0
  255. sparknlp/reader/reader2table.py +44 -0
  256. sparknlp/reader/reader_assembler.py +159 -0
  257. sparknlp/reader/sparknlp_reader.py +461 -0
  258. sparknlp/training/__init__.py +20 -0
  259. sparknlp/training/_tf_graph_builders/__init__.py +0 -0
  260. sparknlp/training/_tf_graph_builders/graph_builders.py +299 -0
  261. sparknlp/training/_tf_graph_builders/ner_dl/__init__.py +0 -0
  262. sparknlp/training/_tf_graph_builders/ner_dl/create_graph.py +41 -0
  263. sparknlp/training/_tf_graph_builders/ner_dl/dataset_encoder.py +78 -0
  264. sparknlp/training/_tf_graph_builders/ner_dl/ner_model.py +521 -0
  265. sparknlp/training/_tf_graph_builders/ner_dl/ner_model_saver.py +62 -0
  266. sparknlp/training/_tf_graph_builders/ner_dl/sentence_grouper.py +28 -0
  267. sparknlp/training/_tf_graph_builders/tf2contrib/__init__.py +36 -0
  268. sparknlp/training/_tf_graph_builders/tf2contrib/core_rnn_cell.py +385 -0
  269. sparknlp/training/_tf_graph_builders/tf2contrib/fused_rnn_cell.py +183 -0
  270. sparknlp/training/_tf_graph_builders/tf2contrib/gru_ops.py +235 -0
  271. sparknlp/training/_tf_graph_builders/tf2contrib/lstm_ops.py +665 -0
  272. sparknlp/training/_tf_graph_builders/tf2contrib/rnn.py +245 -0
  273. sparknlp/training/_tf_graph_builders/tf2contrib/rnn_cell.py +4006 -0
  274. sparknlp/training/_tf_graph_builders_1x/__init__.py +0 -0
  275. sparknlp/training/_tf_graph_builders_1x/graph_builders.py +277 -0
  276. sparknlp/training/_tf_graph_builders_1x/ner_dl/__init__.py +0 -0
  277. sparknlp/training/_tf_graph_builders_1x/ner_dl/create_graph.py +34 -0
  278. sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py +78 -0
  279. sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py +532 -0
  280. sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py +62 -0
  281. sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py +28 -0
  282. sparknlp/training/conll.py +150 -0
  283. sparknlp/training/conllu.py +103 -0
  284. sparknlp/training/pos.py +103 -0
  285. sparknlp/training/pub_tator.py +76 -0
  286. sparknlp/training/spacy_to_annotation.py +57 -0
  287. sparknlp/training/tfgraphs.py +5 -0
  288. sparknlp/upload_to_hub.py +149 -0
  289. sparknlp/util.py +51 -5
  290. com/__init__.pyc +0 -0
  291. com/__pycache__/__init__.cpython-36.pyc +0 -0
  292. com/johnsnowlabs/__init__.pyc +0 -0
  293. com/johnsnowlabs/__pycache__/__init__.cpython-36.pyc +0 -0
  294. com/johnsnowlabs/nlp/__init__.pyc +0 -0
  295. com/johnsnowlabs/nlp/__pycache__/__init__.cpython-36.pyc +0 -0
  296. spark_nlp-2.6.3rc1.dist-info/METADATA +0 -36
  297. spark_nlp-2.6.3rc1.dist-info/RECORD +0 -48
  298. sparknlp/__init__.pyc +0 -0
  299. sparknlp/__pycache__/__init__.cpython-36.pyc +0 -0
  300. sparknlp/__pycache__/annotation.cpython-36.pyc +0 -0
  301. sparknlp/__pycache__/annotator.cpython-36.pyc +0 -0
  302. sparknlp/__pycache__/base.cpython-36.pyc +0 -0
  303. sparknlp/__pycache__/common.cpython-36.pyc +0 -0
  304. sparknlp/__pycache__/embeddings.cpython-36.pyc +0 -0
  305. sparknlp/__pycache__/functions.cpython-36.pyc +0 -0
  306. sparknlp/__pycache__/internal.cpython-36.pyc +0 -0
  307. sparknlp/__pycache__/pretrained.cpython-36.pyc +0 -0
  308. sparknlp/__pycache__/storage.cpython-36.pyc +0 -0
  309. sparknlp/__pycache__/training.cpython-36.pyc +0 -0
  310. sparknlp/__pycache__/util.cpython-36.pyc +0 -0
  311. sparknlp/annotation.pyc +0 -0
  312. sparknlp/annotator.py +0 -3006
  313. sparknlp/annotator.pyc +0 -0
  314. sparknlp/base.py +0 -347
  315. sparknlp/base.pyc +0 -0
  316. sparknlp/common.py +0 -193
  317. sparknlp/common.pyc +0 -0
  318. sparknlp/embeddings.py +0 -40
  319. sparknlp/embeddings.pyc +0 -0
  320. sparknlp/internal.py +0 -288
  321. sparknlp/internal.pyc +0 -0
  322. sparknlp/pretrained.py +0 -123
  323. sparknlp/pretrained.pyc +0 -0
  324. sparknlp/storage.py +0 -32
  325. sparknlp/storage.pyc +0 -0
  326. sparknlp/training.py +0 -62
  327. sparknlp/training.pyc +0 -0
  328. sparknlp/util.pyc +0 -0
  329. {spark_nlp-2.6.3rc1.dist-info → spark_nlp-6.2.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,270 @@
+ # Copyright 2017-2022 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains classes for YakeKeywordExtraction."""
+
+
+ from sparknlp.common import *
+
+
+ class YakeKeywordExtraction(AnnotatorModel):
+ """Yake is an Unsupervised, Corpus-Independent, Domain and
22
+ Language-Independent and Single-Document keyword extraction algorithm.
23
+
24
+ Extracting keywords from texts has become a challenge for individuals and
25
+ organizations as the information grows in complexity and size. The need to
26
+ automate this task so that text can be processed in a timely and adequate
27
+ manner has led to the emergence of automatic keyword extraction tools. Yake
28
+ is a novel feature-based system for multi-lingual keyword extraction, which
29
+ supports texts of different sizes, domain or languages. Unlike other
30
+ approaches, Yake does not rely on dictionaries nor thesauri, neither is
31
+ trained against any corpora. Instead, it follows an unsupervised approach
32
+ which builds upon features extracted from the text, making it thus
33
+ applicable to documents written in different languages without the need for
34
+ further knowledge. This can be beneficial for a large number of tasks and a
35
+ plethora of situations where access to training corpora is either limited or
36
+ restricted. The algorithm makes use of the position of a sentence and token.
37
+ Therefore, to use the annotator, the text should be first sent through a
38
+ Sentence Boundary Detector and then a tokenizer.
39
+
40
+ See the parameters section for tweakable parameters to get the best result
41
+ from the annotator.
42
+
43
+ Note that each keyword will be given a keyword score greater than 0 (The
44
+ lower the score better the keyword). Therefore to filter the keywords, an
45
+ upper bound for the score can be set with :meth:`.setThreshold`.
46
+
47
+ For extended examples of usage, see the `Examples
48
+ <https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/annotation/text/english/keyword-extraction/Keyword_Extraction_YAKE.ipynb>`__.
49
+
+     ====================== ======================
+     Input Annotation types Output Annotation type
+     ====================== ======================
+     ``TOKEN``              ``CHUNK``
+     ====================== ======================
+
+     Parameters
+     ----------
+     minNGrams
+         Minimum N-grams a keyword should have, by default 2
+     maxNGrams
+         Maximum N-grams a keyword should have, by default 3
+     threshold
+         Keyword score threshold, by default -1
+     windowSize
+         Window size for co-occurrence, by default 3
+     nKeywords
+         Number of keywords to extract, by default 30
+     stopWords
+         The words to be filtered out, by default English stop words from Spark
+         ML
+
+     References
+     ----------
+     `Campos, R., Mangaravite, V., Pasquali, A., Jorge, A., Nunes, C. and
+     Jatowt, A. (2020). YAKE! Keyword Extraction from Single Documents using
+     Multiple Local Features. Information Sciences, Vol. 509, pp. 257-289
+     <https://www.sciencedirect.com/science/article/pii/S0020025519308588>`__
+
+     **Paper abstract:**
+
+     *As the amount of generated information grows, reading and summarizing texts
+     of large collections turns into a challenging task. Many documents do not
+     come with descriptive terms, thus requiring humans to generate keywords
+     on-the-fly. The need to automate this kind of task demands the development
+     of keyword extraction systems with the ability to automatically identify
+     keywords within the text. One approach is to resort to machine-learning
+     algorithms. These, however, depend on large annotated text corpora, which
+     are not always available. An alternative solution is to consider an
+     unsupervised approach. In this article, we describe YAKE!, a light-weight
+     unsupervised automatic keyword extraction method which rests on statistical
+     text features extracted from single documents to select the most relevant
+     keywords of a text. Our system does not need to be trained on a particular
+     set of documents, nor does it depend on dictionaries, external corpora, text
+     size, language, or domain. To demonstrate the merits and significance of
+     YAKE!, we compare it against ten state-of-the-art unsupervised approaches
+     and one supervised method. Experimental results carried out on top of twenty
+     datasets show that YAKE! significantly outperforms other unsupervised
+     methods on texts of different sizes, languages, and domains.*
+
+     Examples
+     --------
+     >>> import sparknlp
+     >>> from sparknlp.base import *
+     >>> from sparknlp.annotator import *
+     >>> from pyspark.ml import Pipeline
+     >>> documentAssembler = DocumentAssembler() \\
+     ...     .setInputCol("text") \\
+     ...     .setOutputCol("document")
+     >>> sentenceDetector = SentenceDetector() \\
+     ...     .setInputCols(["document"]) \\
+     ...     .setOutputCol("sentence")
+     >>> token = Tokenizer() \\
+     ...     .setInputCols(["sentence"]) \\
+     ...     .setOutputCol("token") \\
+     ...     .setContextChars(["(", "]", "?", "!", ".", ","])
+     >>> keywords = YakeKeywordExtraction() \\
+     ...     .setInputCols(["token"]) \\
+     ...     .setOutputCol("keywords") \\
+     ...     .setThreshold(0.6) \\
+     ...     .setMinNGrams(2) \\
+     ...     .setNKeywords(10)
+     >>> pipeline = Pipeline().setStages([
+     ...     documentAssembler,
+     ...     sentenceDetector,
+     ...     token,
+     ...     keywords
+     ... ])
+     >>> data = spark.createDataFrame([[
+     ...     "Sources tell us that Google is acquiring Kaggle, a platform that hosts data science and machine learning competitions. Details about the transaction remain somewhat vague, but given that Google is hosting its Cloud Next conference in San Francisco this week, the official announcement could come as early as tomorrow. Reached by phone, Kaggle co-founder CEO Anthony Goldbloom declined to deny that the acquisition is happening. Google itself declined 'to comment on rumors'. Kaggle, which has about half a million data scientists on its platform, was founded by Goldbloom and Ben Hamner in 2010. The service got an early start and even though it has a few competitors like DrivenData, TopCoder and HackerRank, it has managed to stay well ahead of them by focusing on its specific niche. The service is basically the de facto home for running data science and machine learning competitions. With Kaggle, Google is buying one of the largest and most active communities for data scientists - and with that, it will get increased mindshare in this community, too (though it already has plenty of that thanks to Tensorflow and other projects). Kaggle has a bit of a history with Google, too, but that's pretty recent. Earlier this month, Google and Kaggle teamed up to host a $100,000 machine learning competition around classifying YouTube videos. That competition had some deep integrations with the Google Cloud Platform, too. Our understanding is that Google will keep the service running - likely under its current name. While the acquisition is probably more about Kaggle's community than technology, Kaggle did build some interesting tools for hosting its competition and 'kernels', too. On Kaggle, kernels are basically the source code for analyzing data sets and developers can share this code on the platform (the company previously called them 'scripts'). Like similar competition-centric sites, Kaggle also runs a job board, too. It's unclear what Google will do with that part of the service. According to Crunchbase, Kaggle raised $12.5 million (though PitchBook says it's $12.75) since its launch in 2010. Investors in Kaggle include Index Ventures, SV Angel, Max Levchin, NaRavikant, Google chie economist Hal Varian, Khosla Ventures and Yuri Milner"
+     ... ]]).toDF("text")
+     >>> result = pipeline.fit(data).transform(data)
+
+     Combine the result and score (contained in keywords.metadata)
+
+     >>> scores = result \\
+     ...     .selectExpr("explode(arrays_zip(keywords.result, keywords.metadata)) as resultTuples") \\
+     ...     .selectExpr("resultTuples['0'] as keyword", "resultTuples['1'].score as score")
+
+     Order ascending, as lower scores mean higher importance
+
+     >>> scores.orderBy("score").show(5, truncate = False)
+     +---------------------+-------------------+
+     |keyword              |score              |
+     +---------------------+-------------------+
+     |google cloud         |0.32051516486864573|
+     |google cloud platform|0.37786450577630676|
+     |ceo anthony goldbloom|0.39922830978423146|
+     |san francisco        |0.40224744669493756|
+     |anthony goldbloom    |0.41584827825302534|
+     +---------------------+-------------------+
+     """
+ name = "YakeKeywordExtraction"
154
+
155
+ inputAnnotatorTypes = [AnnotatorType.TOKEN]
156
+
157
+ outputAnnotatorType = AnnotatorType.CHUNK
158
+
159
+ @keyword_only
160
+ def __init__(self):
161
+ super(YakeKeywordExtraction, self).__init__(
162
+ classname="com.johnsnowlabs.nlp.annotators.keyword.yake.YakeKeywordExtraction")
163
+ self._setDefault(
164
+ minNGrams=2,
165
+ maxNGrams=3,
166
+ nKeywords=30,
167
+ windowSize=3,
168
+ threshold=-1,
169
+ stopWords=YakeKeywordExtraction.loadDefaultStopWords("english")
170
+ )
171
+
172
+ minNGrams = Param(Params._dummy(), "minNGrams", "Minimum N-grams a keyword should have",
173
+ typeConverter=TypeConverters.toInt)
174
+ maxNGrams = Param(Params._dummy(), "maxNGrams", "Maximum N-grams a keyword should have",
175
+ typeConverter=TypeConverters.toInt)
176
+ threshold = Param(Params._dummy(), "threshold", "Keyword Score threshold", typeConverter=TypeConverters.toFloat)
177
+ windowSize = Param(Params._dummy(), "windowSize", "Window size for Co-Occurrence",
178
+ typeConverter=TypeConverters.toInt)
179
+ nKeywords = Param(Params._dummy(), "nKeywords", "Number of Keywords to extract", typeConverter=TypeConverters.toInt)
180
+ stopWords = Param(Params._dummy(), "stopWords",
181
+ "the words to be filtered out. by default it's english stop words from Spark ML",
182
+ typeConverter=TypeConverters.toListString)
183
+
184
+     def setWindowSize(self, value):
+         """Sets window size for Co-Occurrence, by default 3.
+
+         Parameters
+         ----------
+         value : int
+             Window size for Co-Occurrence
+         """
+         return self._set(windowSize=value)
+
+     def setMinNGrams(self, value):
+         """Sets minimum N-grams a keyword should have, by default 2.
+
+         Parameters
+         ----------
+         value : int
+             Minimum N-grams a keyword should have
+         """
+         return self._set(minNGrams=value)
+
+     def setMaxNGrams(self, value):
+         """Sets maximum N-grams a keyword should have, by default 3.
+
+         Parameters
+         ----------
+         value : int
+             Maximum N-grams a keyword should have
+         """
+         return self._set(maxNGrams=value)
+
+     def setThreshold(self, value):
+         """Sets keyword score threshold, by default -1.
+
+         Parameters
+         ----------
+         value : float
+             Keyword score threshold, by default -1
+         """
+         return self._set(threshold=value)
+
+     def setNKeywords(self, value):
+         """Sets number of keywords to extract, by default 30.
+
+         Parameters
+         ----------
+         value : int
+             Number of keywords to extract
+         """
+         return self._set(nKeywords=value)
+
+     def setStopWords(self, value):
+         """Sets the words to be filtered out, by default English stop words from
+         Spark ML.
+
+         Parameters
+         ----------
+         value : List[str]
+             The words to be filtered out
+         """
+         return self._set(stopWords=value)
+
+     def getStopWords(self):
+         """Gets the words to be filtered out, by default English stop words from
+         Spark ML.
+
+         Returns
+         -------
+         List[str]
+             The words to be filtered out
+         """
+         return self.getOrDefault(self.stopWords)
+
+     @staticmethod
+     def loadDefaultStopWords(language="english"):
+         """Loads the default stop words for the given language.
+
+         Supported languages: danish, dutch, english, finnish, french, german,
+         hungarian, italian, norwegian, portuguese, russian, spanish, swedish,
+         turkish
+
+         Parameters
+         ----------
+         language : str, optional
+             Language stopwords to load, by default "english"
+         """
+         from pyspark.ml.wrapper import _jvm
+         stopWordsObj = _jvm().org.apache.spark.ml.feature.StopWordsRemover
+         return list(stopWordsObj.loadDefaultStopWords(language))
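A keyword's score is stored as a string in each annotation's metadata, so results can be filtered by it after the pipeline runs. A minimal sketch, assuming `result` is the DataFrame produced by the pipeline in the docstring example above; the 0.6 cut-off is illustrative (lower YAKE scores mean better keywords):

from pyspark.sql import functions as F

# Flatten keyword annotations into (keyword, score) rows; the score sits in the metadata map.
keywords = (
    result
    .select(F.explode("keywords").alias("kw"))
    .select(
        F.col("kw.result").alias("keyword"),
        F.col("kw.metadata")["score"].cast("double").alias("score"),
    )
)

# Lower scores mean better keywords, so keep only those below the chosen bound.
keywords.filter(F.col("score") < 0.6).orderBy("score").show(truncate=False)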
@@ -0,0 +1,16 @@
+ # Copyright 2017-2022 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Module of annotators for language detection."""
+ from sparknlp.annotator.ld_dl.language_detector_dl import *
@@ -0,0 +1,199 @@
+ # Copyright 2017-2022 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains classes for LanguageDetectorDL."""
+
+ from sparknlp.common import *
+
+
+ class LanguageDetectorDL(AnnotatorModel, HasStorageRef, HasEngine):
+ """Language Identification and Detection by using CNN and RNN architectures
21
+ in TensorFlow.
22
+
23
+ ``LanguageDetectorDL`` is an annotator that detects the language of
24
+ documents or sentences depending on the inputCols. The models are trained on
25
+ large datasets such as Wikipedia and Tatoeba. Depending on the language
26
+ (how similar the characters are), the LanguageDetectorDL works best with
27
+ text longer than 140 characters. The output is a language code in
28
+ `Wiki Code style <https://en.wikipedia.org/wiki/List_of_Wikipedias>`__.
29
+
30
+ Pretrained models can be loaded with :meth:`.pretrained` of the companion
31
+ object:
32
+
33
+ >>> languageDetector = LanguageDetectorDL.pretrained() \\
34
+ ... .setInputCols(["sentence"]) \\
35
+ ... .setOutputCol("language")
36
+
37
+ The default model is ``"ld_wiki_tatoeba_cnn_21"``, default language is
38
+ ``"xx"`` (meaning multi-lingual), if no values are provided.
39
+
40
+ For available pretrained models please see the `Models Hub <https://sparknlp.org/models?task=Language+Detection>`__.
41
+
42
+ For extended examples of usage, see the `Examples <https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/annotation/text/english/language-detection/Language_Detection_and_Indentification.ipynb>`__.
43
+
44
+ ====================== ======================
45
+ Input Annotation types Output Annotation type
46
+ ====================== ======================
47
+ ``DOCUMENT`` ``LANGUAGE``
48
+ ====================== ======================
49
+
50
+     Parameters
+     ----------
+     configProtoBytes
+         ConfigProto from tensorflow, serialized into byte array.
+     threshold
+         The minimum threshold for the final result, otherwise it will be either
+         neutral or the value set in thresholdLabel, by default 0.5
+     thresholdLabel
+         In case the score is less than threshold, what should be the label, by
+         default Unknown
+     coalesceSentences
+         If set to true, the output of all sentences will be averaged to one
+         output instead of one output per sentence, by default True.
+     languages
+         The languages used to train the model
+
+     Examples
+     --------
+     >>> import sparknlp
+     >>> from sparknlp.base import *
+     >>> from sparknlp.annotator import *
+     >>> from pyspark.ml import Pipeline
+     >>> documentAssembler = DocumentAssembler() \\
+     ...     .setInputCol("text") \\
+     ...     .setOutputCol("document")
+     >>> languageDetector = LanguageDetectorDL.pretrained() \\
+     ...     .setInputCols("document") \\
+     ...     .setOutputCol("language")
+     >>> pipeline = Pipeline() \\
+     ...     .setStages([
+     ...       documentAssembler,
+     ...       languageDetector
+     ...     ])
+     >>> data = spark.createDataFrame([
+     ...     ["Spark NLP is an open-source text processing library for advanced natural language processing for the Python, Java and Scala programming languages."],
+     ...     ["Spark NLP est une bibliothèque de traitement de texte open source pour le traitement avancé du langage naturel pour les langages de programmation Python, Java et Scala."],
+     ...     ["Spark NLP ist eine Open-Source-Textverarbeitungsbibliothek für fortgeschrittene natürliche Sprachverarbeitung für die Programmiersprachen Python, Java und Scala."]
+     ... ]).toDF("text")
+     >>> result = pipeline.fit(data).transform(data)
+     >>> result.select("language.result").show(truncate=False)
+     +------+
+     |result|
+     +------+
+     |[en]  |
+     |[fr]  |
+     |[de]  |
+     +------+
+     """
+ name = "LanguageDetectorDL"
99
+
100
+ inputAnnotatorTypes = [AnnotatorType.DOCUMENT]
101
+
102
+ outputAnnotatorType = AnnotatorType.LANGUAGE
103
+
104
+ def __init__(self, classname="com.johnsnowlabs.nlp.annotators.ld.dl.LanguageDetectorDL", java_model=None):
105
+ super(LanguageDetectorDL, self).__init__(
106
+ classname=classname,
107
+ java_model=java_model
108
+ )
109
+ self._setDefault(
110
+ threshold=0.5,
111
+ thresholdLabel="Unknown",
112
+ coalesceSentences=True
113
+ )
114
+
115
+ configProtoBytes = Param(Params._dummy(), "configProtoBytes",
116
+ "ConfigProto from tensorflow, serialized into byte array. Get with config_proto.SerializeToString()",
117
+ TypeConverters.toListInt)
118
+
119
+     threshold = Param(Params._dummy(), "threshold",
+                       "The minimum threshold for the final result, otherwise it will be either neutral or the value set in thresholdLabel.",
+                       TypeConverters.toFloat)
+
+     thresholdLabel = Param(Params._dummy(), "thresholdLabel",
+                            "In case the score is less than threshold, what should be the label. Default is Unknown.",
+                            TypeConverters.toString)
+
+     coalesceSentences = Param(Params._dummy(), "coalesceSentences",
+                               "If set to true, the output of all sentences will be averaged to one output instead of one output per sentence. Default is true.",
+                               TypeConverters.toBoolean)
+
+     languages = Param(Params._dummy(), "languages",
+                       "The languages used to train the model",
+                       TypeConverters.toListString)
+
+     def setConfigProtoBytes(self, b):
+         """Sets configProto from tensorflow, serialized into byte array.
+
+         Parameters
+         ----------
+         b : List[int]
+             ConfigProto from tensorflow, serialized into byte array
+         """
+         return self._set(configProtoBytes=b)
+
+     def setThreshold(self, v):
+         """Sets the minimum threshold for the final result, otherwise it will be
+         either neutral or the value set in thresholdLabel, by default 0.5.
+
+         Parameters
+         ----------
+         v : float
+             Minimum threshold for the final result
+         """
+         self._set(threshold=v)
+         return self
+
+     def setThresholdLabel(self, p):
+         """Sets what should be the label in case the score is less than
+         threshold, by default Unknown.
+
+         Parameters
+         ----------
+         p : str
+             The replacement label.
+         """
+         return self._set(thresholdLabel=p)
+
+     def setCoalesceSentences(self, value):
+         """Sets if the output of all sentences will be averaged to one output
+         instead of one output per sentence, by default True.
+
+         Parameters
+         ----------
+         value : bool
+             If the output of all sentences will be averaged to one output
+         """
+         return self._set(coalesceSentences=value)
+
+     @staticmethod
+     def pretrained(name="ld_wiki_tatoeba_cnn_21", lang="xx", remote_loc=None):
+         """Downloads and loads a pretrained model.
+
+         Parameters
+         ----------
+         name : str, optional
+             Name of the pretrained model, by default "ld_wiki_tatoeba_cnn_21"
+         lang : str, optional
+             Language of the pretrained model, by default "xx"
+         remote_loc : str, optional
+             Optional remote address of the resource, by default None. Will use
+             Spark NLP's repositories otherwise.
+
+         Returns
+         -------
+         LanguageDetectorDL
+             The restored model
+         """
+         from sparknlp.pretrained import ResourceDownloader
+         return ResourceDownloader.downloadModel(LanguageDetectorDL, name, lang, remote_loc)
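The threshold and thresholdLabel parameters above control what happens when no language reaches a confident score. A minimal sketch, assuming a Spark session with Spark NLP started and a `data` DataFrame with a `text` column as in the docstring example; the 0.8 threshold and the "unk" label are illustrative values, not the defaults:

from pyspark.ml import Pipeline
from sparknlp.base import DocumentAssembler
from sparknlp.annotator import LanguageDetectorDL

documentAssembler = DocumentAssembler() \
    .setInputCol("text") \
    .setOutputCol("document")

# Replace low-confidence predictions with a custom label instead of a best guess.
languageDetector = LanguageDetectorDL.pretrained() \
    .setInputCols(["document"]) \
    .setOutputCol("language") \
    .setThreshold(0.8) \
    .setThresholdLabel("unk") \
    .setCoalesceSentences(True)

pipeline = Pipeline().setStages([documentAssembler, languageDetector])
result = pipeline.fit(data).transform(data)
result.selectExpr("text", "language.result[0] as language").show(truncate=False)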