spark-nlp 2.6.3rc1__py2.py3-none-any.whl → 6.2.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (329)
  1. com/johnsnowlabs/ml/__init__.py +0 -0
  2. com/johnsnowlabs/ml/ai/__init__.py +10 -0
  3. com/johnsnowlabs/nlp/__init__.py +4 -2
  4. spark_nlp-6.2.1.dist-info/METADATA +362 -0
  5. spark_nlp-6.2.1.dist-info/RECORD +292 -0
  6. {spark_nlp-2.6.3rc1.dist-info → spark_nlp-6.2.1.dist-info}/WHEEL +1 -1
  7. sparknlp/__init__.py +281 -27
  8. sparknlp/annotation.py +137 -6
  9. sparknlp/annotation_audio.py +61 -0
  10. sparknlp/annotation_image.py +82 -0
  11. sparknlp/annotator/__init__.py +93 -0
  12. sparknlp/annotator/audio/__init__.py +16 -0
  13. sparknlp/annotator/audio/hubert_for_ctc.py +188 -0
  14. sparknlp/annotator/audio/wav2vec2_for_ctc.py +161 -0
  15. sparknlp/annotator/audio/whisper_for_ctc.py +251 -0
  16. sparknlp/annotator/chunk2_doc.py +85 -0
  17. sparknlp/annotator/chunker.py +137 -0
  18. sparknlp/annotator/classifier_dl/__init__.py +61 -0
  19. sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +161 -0
  20. sparknlp/annotator/classifier_dl/albert_for_question_answering.py +172 -0
  21. sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py +201 -0
  22. sparknlp/annotator/classifier_dl/albert_for_token_classification.py +179 -0
  23. sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +211 -0
  24. sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py +225 -0
  25. sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +161 -0
  26. sparknlp/annotator/classifier_dl/bert_for_question_answering.py +168 -0
  27. sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py +202 -0
  28. sparknlp/annotator/classifier_dl/bert_for_token_classification.py +177 -0
  29. sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py +212 -0
  30. sparknlp/annotator/classifier_dl/camembert_for_question_answering.py +168 -0
  31. sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py +205 -0
  32. sparknlp/annotator/classifier_dl/camembert_for_token_classification.py +173 -0
  33. sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +202 -0
  34. sparknlp/annotator/classifier_dl/classifier_dl.py +320 -0
  35. sparknlp/annotator/classifier_dl/deberta_for_question_answering.py +168 -0
  36. sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py +198 -0
  37. sparknlp/annotator/classifier_dl/deberta_for_token_classification.py +175 -0
  38. sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +193 -0
  39. sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py +168 -0
  40. sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py +201 -0
  41. sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py +175 -0
  42. sparknlp/annotator/classifier_dl/distil_bert_for_zero_shot_classification.py +211 -0
  43. sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +161 -0
  44. sparknlp/annotator/classifier_dl/longformer_for_question_answering.py +168 -0
  45. sparknlp/annotator/classifier_dl/longformer_for_sequence_classification.py +201 -0
  46. sparknlp/annotator/classifier_dl/longformer_for_token_classification.py +176 -0
  47. sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py +148 -0
  48. sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py +188 -0
  49. sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py +173 -0
  50. sparknlp/annotator/classifier_dl/multi_classifier_dl.py +395 -0
  51. sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +161 -0
  52. sparknlp/annotator/classifier_dl/roberta_for_question_answering.py +168 -0
  53. sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py +201 -0
  54. sparknlp/annotator/classifier_dl/roberta_for_token_classification.py +189 -0
  55. sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py +225 -0
  56. sparknlp/annotator/classifier_dl/sentiment_dl.py +378 -0
  57. sparknlp/annotator/classifier_dl/tapas_for_question_answering.py +170 -0
  58. sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +149 -0
  59. sparknlp/annotator/classifier_dl/xlm_roberta_for_question_answering.py +168 -0
  60. sparknlp/annotator/classifier_dl/xlm_roberta_for_sequence_classification.py +201 -0
  61. sparknlp/annotator/classifier_dl/xlm_roberta_for_token_classification.py +173 -0
  62. sparknlp/annotator/classifier_dl/xlm_roberta_for_zero_shot_classification.py +225 -0
  63. sparknlp/annotator/classifier_dl/xlnet_for_sequence_classification.py +201 -0
  64. sparknlp/annotator/classifier_dl/xlnet_for_token_classification.py +176 -0
  65. sparknlp/annotator/cleaners/__init__.py +15 -0
  66. sparknlp/annotator/cleaners/cleaner.py +202 -0
  67. sparknlp/annotator/cleaners/extractor.py +191 -0
  68. sparknlp/annotator/coref/__init__.py +1 -0
  69. sparknlp/annotator/coref/spanbert_coref.py +221 -0
  70. sparknlp/annotator/cv/__init__.py +29 -0
  71. sparknlp/annotator/cv/blip_for_question_answering.py +172 -0
  72. sparknlp/annotator/cv/clip_for_zero_shot_classification.py +193 -0
  73. sparknlp/annotator/cv/convnext_for_image_classification.py +269 -0
  74. sparknlp/annotator/cv/florence2_transformer.py +180 -0
  75. sparknlp/annotator/cv/gemma3_for_multimodal.py +346 -0
  76. sparknlp/annotator/cv/internvl_for_multimodal.py +280 -0
  77. sparknlp/annotator/cv/janus_for_multimodal.py +351 -0
  78. sparknlp/annotator/cv/llava_for_multimodal.py +328 -0
  79. sparknlp/annotator/cv/mllama_for_multimodal.py +340 -0
  80. sparknlp/annotator/cv/paligemma_for_multimodal.py +308 -0
  81. sparknlp/annotator/cv/phi3_vision_for_multimodal.py +328 -0
  82. sparknlp/annotator/cv/qwen2vl_transformer.py +332 -0
  83. sparknlp/annotator/cv/smolvlm_transformer.py +426 -0
  84. sparknlp/annotator/cv/swin_for_image_classification.py +242 -0
  85. sparknlp/annotator/cv/vision_encoder_decoder_for_image_captioning.py +240 -0
  86. sparknlp/annotator/cv/vit_for_image_classification.py +217 -0
  87. sparknlp/annotator/dataframe_optimizer.py +216 -0
  88. sparknlp/annotator/date2_chunk.py +88 -0
  89. sparknlp/annotator/dependency/__init__.py +17 -0
  90. sparknlp/annotator/dependency/dependency_parser.py +294 -0
  91. sparknlp/annotator/dependency/typed_dependency_parser.py +318 -0
  92. sparknlp/annotator/document_character_text_splitter.py +228 -0
  93. sparknlp/annotator/document_normalizer.py +235 -0
  94. sparknlp/annotator/document_token_splitter.py +175 -0
  95. sparknlp/annotator/document_token_splitter_test.py +85 -0
  96. sparknlp/annotator/embeddings/__init__.py +45 -0
  97. sparknlp/annotator/embeddings/albert_embeddings.py +230 -0
  98. sparknlp/annotator/embeddings/auto_gguf_embeddings.py +539 -0
  99. sparknlp/annotator/embeddings/bert_embeddings.py +208 -0
  100. sparknlp/annotator/embeddings/bert_sentence_embeddings.py +224 -0
  101. sparknlp/annotator/embeddings/bge_embeddings.py +199 -0
  102. sparknlp/annotator/embeddings/camembert_embeddings.py +210 -0
  103. sparknlp/annotator/embeddings/chunk_embeddings.py +149 -0
  104. sparknlp/annotator/embeddings/deberta_embeddings.py +208 -0
  105. sparknlp/annotator/embeddings/distil_bert_embeddings.py +221 -0
  106. sparknlp/annotator/embeddings/doc2vec.py +352 -0
  107. sparknlp/annotator/embeddings/e5_embeddings.py +195 -0
  108. sparknlp/annotator/embeddings/e5v_embeddings.py +138 -0
  109. sparknlp/annotator/embeddings/elmo_embeddings.py +251 -0
  110. sparknlp/annotator/embeddings/instructor_embeddings.py +204 -0
  111. sparknlp/annotator/embeddings/longformer_embeddings.py +211 -0
  112. sparknlp/annotator/embeddings/minilm_embeddings.py +189 -0
  113. sparknlp/annotator/embeddings/mpnet_embeddings.py +192 -0
  114. sparknlp/annotator/embeddings/mxbai_embeddings.py +184 -0
  115. sparknlp/annotator/embeddings/nomic_embeddings.py +181 -0
  116. sparknlp/annotator/embeddings/roberta_embeddings.py +225 -0
  117. sparknlp/annotator/embeddings/roberta_sentence_embeddings.py +191 -0
  118. sparknlp/annotator/embeddings/sentence_embeddings.py +134 -0
  119. sparknlp/annotator/embeddings/snowflake_embeddings.py +202 -0
  120. sparknlp/annotator/embeddings/uae_embeddings.py +211 -0
  121. sparknlp/annotator/embeddings/universal_sentence_encoder.py +211 -0
  122. sparknlp/annotator/embeddings/word2vec.py +353 -0
  123. sparknlp/annotator/embeddings/word_embeddings.py +385 -0
  124. sparknlp/annotator/embeddings/xlm_roberta_embeddings.py +225 -0
  125. sparknlp/annotator/embeddings/xlm_roberta_sentence_embeddings.py +194 -0
  126. sparknlp/annotator/embeddings/xlnet_embeddings.py +227 -0
  127. sparknlp/annotator/er/__init__.py +16 -0
  128. sparknlp/annotator/er/entity_ruler.py +267 -0
  129. sparknlp/annotator/graph_extraction.py +368 -0
  130. sparknlp/annotator/keyword_extraction/__init__.py +16 -0
  131. sparknlp/annotator/keyword_extraction/yake_keyword_extraction.py +270 -0
  132. sparknlp/annotator/ld_dl/__init__.py +16 -0
  133. sparknlp/annotator/ld_dl/language_detector_dl.py +199 -0
  134. sparknlp/annotator/lemmatizer.py +250 -0
  135. sparknlp/annotator/matcher/__init__.py +20 -0
  136. sparknlp/annotator/matcher/big_text_matcher.py +272 -0
  137. sparknlp/annotator/matcher/date_matcher.py +303 -0
  138. sparknlp/annotator/matcher/multi_date_matcher.py +109 -0
  139. sparknlp/annotator/matcher/regex_matcher.py +221 -0
  140. sparknlp/annotator/matcher/text_matcher.py +290 -0
  141. sparknlp/annotator/n_gram_generator.py +141 -0
  142. sparknlp/annotator/ner/__init__.py +21 -0
  143. sparknlp/annotator/ner/ner_approach.py +94 -0
  144. sparknlp/annotator/ner/ner_converter.py +148 -0
  145. sparknlp/annotator/ner/ner_crf.py +397 -0
  146. sparknlp/annotator/ner/ner_dl.py +591 -0
  147. sparknlp/annotator/ner/ner_dl_graph_checker.py +293 -0
  148. sparknlp/annotator/ner/ner_overwriter.py +166 -0
  149. sparknlp/annotator/ner/zero_shot_ner_model.py +173 -0
  150. sparknlp/annotator/normalizer.py +230 -0
  151. sparknlp/annotator/openai/__init__.py +16 -0
  152. sparknlp/annotator/openai/openai_completion.py +349 -0
  153. sparknlp/annotator/openai/openai_embeddings.py +106 -0
  154. sparknlp/annotator/param/__init__.py +17 -0
  155. sparknlp/annotator/param/classifier_encoder.py +98 -0
  156. sparknlp/annotator/param/evaluation_dl_params.py +130 -0
  157. sparknlp/annotator/pos/__init__.py +16 -0
  158. sparknlp/annotator/pos/perceptron.py +263 -0
  159. sparknlp/annotator/sentence/__init__.py +17 -0
  160. sparknlp/annotator/sentence/sentence_detector.py +290 -0
  161. sparknlp/annotator/sentence/sentence_detector_dl.py +467 -0
  162. sparknlp/annotator/sentiment/__init__.py +17 -0
  163. sparknlp/annotator/sentiment/sentiment_detector.py +208 -0
  164. sparknlp/annotator/sentiment/vivekn_sentiment.py +242 -0
  165. sparknlp/annotator/seq2seq/__init__.py +35 -0
  166. sparknlp/annotator/seq2seq/auto_gguf_model.py +304 -0
  167. sparknlp/annotator/seq2seq/auto_gguf_reranker.py +334 -0
  168. sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +336 -0
  169. sparknlp/annotator/seq2seq/bart_transformer.py +420 -0
  170. sparknlp/annotator/seq2seq/cohere_transformer.py +357 -0
  171. sparknlp/annotator/seq2seq/cpm_transformer.py +321 -0
  172. sparknlp/annotator/seq2seq/gpt2_transformer.py +363 -0
  173. sparknlp/annotator/seq2seq/llama2_transformer.py +343 -0
  174. sparknlp/annotator/seq2seq/llama3_transformer.py +381 -0
  175. sparknlp/annotator/seq2seq/m2m100_transformer.py +392 -0
  176. sparknlp/annotator/seq2seq/marian_transformer.py +374 -0
  177. sparknlp/annotator/seq2seq/mistral_transformer.py +348 -0
  178. sparknlp/annotator/seq2seq/nllb_transformer.py +420 -0
  179. sparknlp/annotator/seq2seq/olmo_transformer.py +326 -0
  180. sparknlp/annotator/seq2seq/phi2_transformer.py +326 -0
  181. sparknlp/annotator/seq2seq/phi3_transformer.py +330 -0
  182. sparknlp/annotator/seq2seq/phi4_transformer.py +387 -0
  183. sparknlp/annotator/seq2seq/qwen_transformer.py +340 -0
  184. sparknlp/annotator/seq2seq/starcoder_transformer.py +335 -0
  185. sparknlp/annotator/seq2seq/t5_transformer.py +425 -0
  186. sparknlp/annotator/similarity/__init__.py +0 -0
  187. sparknlp/annotator/similarity/document_similarity_ranker.py +379 -0
  188. sparknlp/annotator/spell_check/__init__.py +18 -0
  189. sparknlp/annotator/spell_check/context_spell_checker.py +911 -0
  190. sparknlp/annotator/spell_check/norvig_sweeting.py +358 -0
  191. sparknlp/annotator/spell_check/symmetric_delete.py +299 -0
  192. sparknlp/annotator/stemmer.py +79 -0
  193. sparknlp/annotator/stop_words_cleaner.py +190 -0
  194. sparknlp/annotator/tf_ner_dl_graph_builder.py +179 -0
  195. sparknlp/annotator/token/__init__.py +19 -0
  196. sparknlp/annotator/token/chunk_tokenizer.py +118 -0
  197. sparknlp/annotator/token/recursive_tokenizer.py +205 -0
  198. sparknlp/annotator/token/regex_tokenizer.py +208 -0
  199. sparknlp/annotator/token/tokenizer.py +561 -0
  200. sparknlp/annotator/token2_chunk.py +76 -0
  201. sparknlp/annotator/ws/__init__.py +16 -0
  202. sparknlp/annotator/ws/word_segmenter.py +429 -0
  203. sparknlp/base/__init__.py +30 -0
  204. sparknlp/base/audio_assembler.py +95 -0
  205. sparknlp/base/doc2_chunk.py +169 -0
  206. sparknlp/base/document_assembler.py +164 -0
  207. sparknlp/base/embeddings_finisher.py +201 -0
  208. sparknlp/base/finisher.py +217 -0
  209. sparknlp/base/gguf_ranking_finisher.py +234 -0
  210. sparknlp/base/graph_finisher.py +125 -0
  211. sparknlp/base/has_recursive_fit.py +24 -0
  212. sparknlp/base/has_recursive_transform.py +22 -0
  213. sparknlp/base/image_assembler.py +172 -0
  214. sparknlp/base/light_pipeline.py +429 -0
  215. sparknlp/base/multi_document_assembler.py +164 -0
  216. sparknlp/base/prompt_assembler.py +207 -0
  217. sparknlp/base/recursive_pipeline.py +107 -0
  218. sparknlp/base/table_assembler.py +145 -0
  219. sparknlp/base/token_assembler.py +124 -0
  220. sparknlp/common/__init__.py +26 -0
  221. sparknlp/common/annotator_approach.py +41 -0
  222. sparknlp/common/annotator_model.py +47 -0
  223. sparknlp/common/annotator_properties.py +114 -0
  224. sparknlp/common/annotator_type.py +38 -0
  225. sparknlp/common/completion_post_processing.py +37 -0
  226. sparknlp/common/coverage_result.py +22 -0
  227. sparknlp/common/match_strategy.py +33 -0
  228. sparknlp/common/properties.py +1298 -0
  229. sparknlp/common/read_as.py +33 -0
  230. sparknlp/common/recursive_annotator_approach.py +35 -0
  231. sparknlp/common/storage.py +149 -0
  232. sparknlp/common/utils.py +39 -0
  233. sparknlp/functions.py +315 -5
  234. sparknlp/internal/__init__.py +1199 -0
  235. sparknlp/internal/annotator_java_ml.py +32 -0
  236. sparknlp/internal/annotator_transformer.py +37 -0
  237. sparknlp/internal/extended_java_wrapper.py +63 -0
  238. sparknlp/internal/params_getters_setters.py +71 -0
  239. sparknlp/internal/recursive.py +70 -0
  240. sparknlp/logging/__init__.py +15 -0
  241. sparknlp/logging/comet.py +467 -0
  242. sparknlp/partition/__init__.py +16 -0
  243. sparknlp/partition/partition.py +244 -0
  244. sparknlp/partition/partition_properties.py +902 -0
  245. sparknlp/partition/partition_transformer.py +200 -0
  246. sparknlp/pretrained/__init__.py +17 -0
  247. sparknlp/pretrained/pretrained_pipeline.py +158 -0
  248. sparknlp/pretrained/resource_downloader.py +216 -0
  249. sparknlp/pretrained/utils.py +35 -0
  250. sparknlp/reader/__init__.py +15 -0
  251. sparknlp/reader/enums.py +19 -0
  252. sparknlp/reader/pdf_to_text.py +190 -0
  253. sparknlp/reader/reader2doc.py +124 -0
  254. sparknlp/reader/reader2image.py +136 -0
  255. sparknlp/reader/reader2table.py +44 -0
  256. sparknlp/reader/reader_assembler.py +159 -0
  257. sparknlp/reader/sparknlp_reader.py +461 -0
  258. sparknlp/training/__init__.py +20 -0
  259. sparknlp/training/_tf_graph_builders/__init__.py +0 -0
  260. sparknlp/training/_tf_graph_builders/graph_builders.py +299 -0
  261. sparknlp/training/_tf_graph_builders/ner_dl/__init__.py +0 -0
  262. sparknlp/training/_tf_graph_builders/ner_dl/create_graph.py +41 -0
  263. sparknlp/training/_tf_graph_builders/ner_dl/dataset_encoder.py +78 -0
  264. sparknlp/training/_tf_graph_builders/ner_dl/ner_model.py +521 -0
  265. sparknlp/training/_tf_graph_builders/ner_dl/ner_model_saver.py +62 -0
  266. sparknlp/training/_tf_graph_builders/ner_dl/sentence_grouper.py +28 -0
  267. sparknlp/training/_tf_graph_builders/tf2contrib/__init__.py +36 -0
  268. sparknlp/training/_tf_graph_builders/tf2contrib/core_rnn_cell.py +385 -0
  269. sparknlp/training/_tf_graph_builders/tf2contrib/fused_rnn_cell.py +183 -0
  270. sparknlp/training/_tf_graph_builders/tf2contrib/gru_ops.py +235 -0
  271. sparknlp/training/_tf_graph_builders/tf2contrib/lstm_ops.py +665 -0
  272. sparknlp/training/_tf_graph_builders/tf2contrib/rnn.py +245 -0
  273. sparknlp/training/_tf_graph_builders/tf2contrib/rnn_cell.py +4006 -0
  274. sparknlp/training/_tf_graph_builders_1x/__init__.py +0 -0
  275. sparknlp/training/_tf_graph_builders_1x/graph_builders.py +277 -0
  276. sparknlp/training/_tf_graph_builders_1x/ner_dl/__init__.py +0 -0
  277. sparknlp/training/_tf_graph_builders_1x/ner_dl/create_graph.py +34 -0
  278. sparknlp/training/_tf_graph_builders_1x/ner_dl/dataset_encoder.py +78 -0
  279. sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model.py +532 -0
  280. sparknlp/training/_tf_graph_builders_1x/ner_dl/ner_model_saver.py +62 -0
  281. sparknlp/training/_tf_graph_builders_1x/ner_dl/sentence_grouper.py +28 -0
  282. sparknlp/training/conll.py +150 -0
  283. sparknlp/training/conllu.py +103 -0
  284. sparknlp/training/pos.py +103 -0
  285. sparknlp/training/pub_tator.py +76 -0
  286. sparknlp/training/spacy_to_annotation.py +57 -0
  287. sparknlp/training/tfgraphs.py +5 -0
  288. sparknlp/upload_to_hub.py +149 -0
  289. sparknlp/util.py +51 -5
  290. com/__init__.pyc +0 -0
  291. com/__pycache__/__init__.cpython-36.pyc +0 -0
  292. com/johnsnowlabs/__init__.pyc +0 -0
  293. com/johnsnowlabs/__pycache__/__init__.cpython-36.pyc +0 -0
  294. com/johnsnowlabs/nlp/__init__.pyc +0 -0
  295. com/johnsnowlabs/nlp/__pycache__/__init__.cpython-36.pyc +0 -0
  296. spark_nlp-2.6.3rc1.dist-info/METADATA +0 -36
  297. spark_nlp-2.6.3rc1.dist-info/RECORD +0 -48
  298. sparknlp/__init__.pyc +0 -0
  299. sparknlp/__pycache__/__init__.cpython-36.pyc +0 -0
  300. sparknlp/__pycache__/annotation.cpython-36.pyc +0 -0
  301. sparknlp/__pycache__/annotator.cpython-36.pyc +0 -0
  302. sparknlp/__pycache__/base.cpython-36.pyc +0 -0
  303. sparknlp/__pycache__/common.cpython-36.pyc +0 -0
  304. sparknlp/__pycache__/embeddings.cpython-36.pyc +0 -0
  305. sparknlp/__pycache__/functions.cpython-36.pyc +0 -0
  306. sparknlp/__pycache__/internal.cpython-36.pyc +0 -0
  307. sparknlp/__pycache__/pretrained.cpython-36.pyc +0 -0
  308. sparknlp/__pycache__/storage.cpython-36.pyc +0 -0
  309. sparknlp/__pycache__/training.cpython-36.pyc +0 -0
  310. sparknlp/__pycache__/util.cpython-36.pyc +0 -0
  311. sparknlp/annotation.pyc +0 -0
  312. sparknlp/annotator.py +0 -3006
  313. sparknlp/annotator.pyc +0 -0
  314. sparknlp/base.py +0 -347
  315. sparknlp/base.pyc +0 -0
  316. sparknlp/common.py +0 -193
  317. sparknlp/common.pyc +0 -0
  318. sparknlp/embeddings.py +0 -40
  319. sparknlp/embeddings.pyc +0 -0
  320. sparknlp/internal.py +0 -288
  321. sparknlp/internal.pyc +0 -0
  322. sparknlp/pretrained.py +0 -123
  323. sparknlp/pretrained.pyc +0 -0
  324. sparknlp/storage.py +0 -32
  325. sparknlp/storage.pyc +0 -0
  326. sparknlp/training.py +0 -62
  327. sparknlp/training.pyc +0 -0
  328. sparknlp/util.pyc +0 -0
  329. {spark_nlp-2.6.3rc1.dist-info → spark_nlp-6.2.1.dist-info}/top_level.txt +0 -0
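The largest structural change visible in the list above is the split of the monolithic 2.6.x modules (sparknlp/annotator.py at -3006 lines, plus base.py, common.py, internal.py, pretrained.py and training.py) into the sparknlp.annotator, sparknlp.base, sparknlp.common, sparknlp.internal, sparknlp.pretrained and sparknlp.training packages. A sketch of hypothetical user code written against 2.6.x, which should keep working unchanged assuming each new package re-exports its classes through __init__.py (as the base/__init__.py hunk below does):

    # Import paths are presumably unchanged: the new packages re-export
    # their classes via wildcard imports in each __init__.py.
    from sparknlp.base import *
    from sparknlp.annotator import *

    documentAssembler = DocumentAssembler() \
        .setInputCol("text") \
        .setOutputCol("document")
    tokenizer = Tokenizer() \
        .setInputCols(["document"]) \
        .setOutputCol("token")

Three of the new files are shown in full below.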
sparknlp/annotator/ws/word_segmenter.py
@@ -0,0 +1,429 @@
+ # Copyright 2017-2022 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains classes for the WordSegmenter."""
+
+ from sparknlp.common import *
+
+
+ class WordSegmenterApproach(AnnotatorApproach):
+     """Trains a WordSegmenter which tokenizes non-English or
+     non-whitespace-separated texts.
+
+     Many languages are not whitespace separated and their sentences are a
+     concatenation of many symbols, like Korean, Japanese or Chinese. Without
+     understanding the language, splitting the words into their corresponding
+     tokens is impossible. The WordSegmenter is trained to understand these
+     languages and split them into semantically correct parts.
+
+     This annotator is based on the paper Chinese Word Segmentation as
+     Character Tagging [1]. Word segmentation is treated as a tagging problem.
+     Each character is tagged as one of four different labels: LL (left
+     boundary), RR (right boundary), MM (middle) and LR (word by itself). The
+     label depends on the position of the character within the word. LL-tagged
+     characters combine with the character on the right. Likewise, RR-tagged
+     characters combine with characters on the left. MM-tagged characters are
+     treated as the middle of the word and combine with either side. LR-tagged
+     characters are words by themselves.
+
+     Example (from [1], Example 3(a) (raw), 3(b) (tagged), 3(c) (translation)):
+
+     - 上海 计划 到 本 世纪 末 实现 人均 国内 生产 总值 五千 美元
+     - 上/LL 海/RR 计/LL 划/RR 到/LR 本/LR 世/LL 纪/RR 末/LR 实/LL 现/RR 人/LL 均/RR
+       国/LL 内/RR 生/LL 产/RR 总/LL 值/RR 五/LL 千/RR 美/LL 元/RR
+     - Shanghai plans to reach the goal of 5,000 dollars in per capita GDP by
+       the end of the century.
+
+     For instantiated/pretrained models, see :class:`.WordSegmenterModel`.
+
+     To train your own model, a training dataset consisting of `Part-Of-Speech
+     tags <https://en.wikipedia.org/wiki/Part-of-speech_tagging>`__ is
+     required. The data has to be loaded into a DataFrame, where the column is
+     an Annotation of type ``POS``. This can be set with :meth:`.setPosColumn`.
+
+     **Tip**:
+     The helper class :class:`.POS` might be useful to read training data into
+     data frames.
+
+     For extended examples of usage, see the `Examples
+     <https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/annotation/text/chinese/word_segmentation>`__.
+
+     References
+     ----------
+     `[1] <https://aclanthology.org/O03-4002.pdf>`__ Xue, Nianwen. “Chinese Word
+     Segmentation as Character Tagging.” International Journal of Computational
+     Linguistics & Chinese Language Processing, Volume 8, Number 1, February
+     2003: Special Issue on Word Formation and Chinese Language Processing,
+     2003, pp. 29-48. ACLWeb, https://aclanthology.org/O03-4002.
+
+     ====================== ======================
+     Input Annotation types Output Annotation type
+     ====================== ======================
+     ``DOCUMENT``           ``TOKEN``
+     ====================== ======================
+
+     Parameters
+     ----------
+     posCol
+         Column of Array of POS tags that match tokens
+     nIterations
+         Number of iterations in training, converges to better accuracy, by
+         default 5
+     frequencyThreshold
+         Minimum number of times a tag must appear on a word for it to be
+         marked as frequent, by default 5
+     ambiguityThreshold
+         Percentage of the total number of words that must be covered for a
+         tag to be marked as frequent, by default 0.97
+     enableRegexTokenizer
+         Whether to use a RegexTokenizer before segmentation. Useful for
+         multilingual text, by default False
+     toLowercase
+         Whether to convert all characters to lowercase before tokenizing.
+         Used only when enableRegexTokenizer is true, by default False
+     pattern
+         Regex pattern used for tokenizing. Used only when
+         enableRegexTokenizer is true, by default ``\\s+``
+
+     Examples
+     --------
+     In this example, ``"chinese_train.utf8"`` is in the form of::
+
+         十|LL 四|RR 不|LL 是|RR 四|LL 十|RR
+
+     and is loaded with the :class:`.POS` class to create a DataFrame of
+     ``POS`` type Annotations.
+
+     >>> import sparknlp
+     >>> from sparknlp.base import *
+     >>> from sparknlp.annotator import *
+     >>> from pyspark.ml import Pipeline
+     >>> documentAssembler = DocumentAssembler() \\
+     ...     .setInputCol("text") \\
+     ...     .setOutputCol("document")
+     >>> wordSegmenter = WordSegmenterApproach() \\
+     ...     .setInputCols(["document"]) \\
+     ...     .setOutputCol("token") \\
+     ...     .setPosColumn("tags") \\
+     ...     .setNIterations(5)
+     >>> pipeline = Pipeline().setStages([
+     ...     documentAssembler,
+     ...     wordSegmenter
+     ... ])
+     >>> trainingDataSet = POS().readDataset(
+     ...     spark,
+     ...     "src/test/resources/word-segmenter/chinese_train.utf8"
+     ... )
+     >>> pipelineModel = pipeline.fit(trainingDataSet)
+     """
+     name = "WordSegmenterApproach"
+
+     inputAnnotatorTypes = [AnnotatorType.DOCUMENT]
+
+     outputAnnotatorType = AnnotatorType.TOKEN
+
+     posCol = Param(Params._dummy(),
+                    "posCol",
+                    "column of Array of POS tags that match tokens",
+                    typeConverter=TypeConverters.toString)
+
+     nIterations = Param(Params._dummy(),
+                         "nIterations",
+                         "Number of iterations in training, converges to better accuracy",
+                         typeConverter=TypeConverters.toInt)
+
+     frequencyThreshold = Param(Params._dummy(),
+                                "frequencyThreshold",
+                                "Minimum number of times a tag must appear on a word for it to be marked as frequent",
+                                typeConverter=TypeConverters.toInt)
+
+     ambiguityThreshold = Param(Params._dummy(),
+                                "ambiguityThreshold",
+                                "Percentage of the total number of words that must be covered for a tag to be marked as frequent",
+                                typeConverter=TypeConverters.toFloat)
+
+     enableRegexTokenizer = Param(Params._dummy(),
+                                  "enableRegexTokenizer",
+                                  "Whether to use RegexTokenizer before segmentation. Useful for multilingual text",
+                                  typeConverter=TypeConverters.toBoolean)
+
+     toLowercase = Param(Params._dummy(),
+                         "toLowercase",
+                         "Indicates whether to convert all characters to lowercase before tokenizing.",
+                         typeConverter=TypeConverters.toBoolean)
+
+     pattern = Param(Params._dummy(),
+                     "pattern",
+                     "regex pattern used for tokenizing. Defaults to \\s+",
+                     typeConverter=TypeConverters.toString)
+
+     @keyword_only
+     def __init__(self):
+         super(WordSegmenterApproach, self).__init__(
+             classname="com.johnsnowlabs.nlp.annotators.ws.WordSegmenterApproach")
+         self._setDefault(
+             nIterations=5, frequencyThreshold=5, ambiguityThreshold=0.97,
+             enableRegexTokenizer=False, toLowercase=False, pattern="\\s+"
+         )
+
+     def setPosColumn(self, value):
+         """Sets column name for array of POS tags that match tokens.
+
+         Parameters
+         ----------
+         value : str
+             Name of the column
+         """
+         return self._set(posCol=value)
+
+     def setNIterations(self, value):
+         """Sets number of iterations in training, converges to better
+         accuracy, by default 5.
+
+         Parameters
+         ----------
+         value : int
+             Number of iterations
+         """
+         return self._set(nIterations=value)
+
+     def setFrequencyThreshold(self, value):
+         """Sets the minimum number of times a tag must appear on a word for
+         it to be marked as frequent, by default 5.
+
+         Parameters
+         ----------
+         value : int
+             Frequency threshold to be marked as frequent
+         """
+         return self._set(frequencyThreshold=value)
+
+     def setAmbiguityThreshold(self, value):
+         """Sets the percentage of the total number of words that must be
+         covered for a tag to be marked as frequent, by default 0.97.
+
+         Parameters
+         ----------
+         value : float
+             Percentage of the total number of words covered to be marked
+             as frequent
+         """
+         return self._set(ambiguityThreshold=value)
+
+     def getNIterations(self):
+         """Gets number of iterations in training, converges to better
+         accuracy.
+
+         Returns
+         -------
+         int
+             Number of iterations
+         """
+         return self.getOrDefault(self.nIterations)
+
+     def getFrequencyThreshold(self):
+         """Gets the minimum number of times a tag must appear on a word for
+         it to be marked as frequent.
+
+         Returns
+         -------
+         int
+             Frequency threshold to be marked as frequent
+         """
+         return self.getOrDefault(self.frequencyThreshold)
+
+     def getAmbiguityThreshold(self):
+         """Gets the percentage of the total number of words that must be
+         covered for a tag to be marked as frequent.
+
+         Returns
+         -------
+         float
+             Percentage of the total number of words covered to be marked
+             as frequent
+         """
+         return self.getOrDefault(self.ambiguityThreshold)
+
+     def setEnableRegexTokenizer(self, value):
+         """Sets whether to use RegexTokenizer before segmentation.
+         Useful for multilingual text.
+
+         Parameters
+         ----------
+         value : bool
+             Whether to use RegexTokenizer before segmentation
+         """
+         return self._set(enableRegexTokenizer=value)
+
+     def setToLowercase(self, value):
+         """Sets whether to convert all characters to lowercase before
+         tokenizing, by default False.
+
+         Parameters
+         ----------
+         value : bool
+             Whether to convert all characters to lowercase before tokenizing
+         """
+         return self._set(toLowercase=value)
+
+     def setPattern(self, value):
+         """Sets the regex pattern used for tokenizing, by default ``\\s+``.
+
+         Parameters
+         ----------
+         value : str
+             Regex pattern used for tokenizing
+         """
+         return self._set(pattern=value)
+
+     def _create_model(self, java_model):
+         return WordSegmenterModel(java_model=java_model)
+
+
+ class WordSegmenterModel(AnnotatorModel):
+     """WordSegmenter which tokenizes non-English or non-whitespace-separated
+     texts.
+
+     Many languages are not whitespace separated and their sentences are a
+     concatenation of many symbols, like Korean, Japanese or Chinese. Without
+     understanding the language, splitting the words into their corresponding
+     tokens is impossible. The WordSegmenter is trained to understand these
+     languages and split them into semantically correct parts.
+
+     This is the instantiated model of the :class:`.WordSegmenterApproach`.
+     For training your own model, please see the documentation of that class.
+
+     Pretrained models can be loaded with :meth:`.pretrained` of the companion
+     object:
+
+     >>> wordSegmenter = WordSegmenterModel.pretrained() \\
+     ...     .setInputCols(["document"]) \\
+     ...     .setOutputCol("words_segmented")
+
+     The default model is ``"wordseg_pku"`` and the default language is
+     ``"zh"``, if no values are provided. For available pretrained models,
+     please see the `Models Hub
+     <https://sparknlp.org/models?task=Word+Segmentation>`__.
+
+     For extended examples of usage, see the `Examples
+     <https://github.com/JohnSnowLabs/spark-nlp/blob/master/jupyter/annotation/chinese/word_segmentation/words_segmenter_demo.ipynb>`__.
+
+     ====================== ======================
+     Input Annotation types Output Annotation type
+     ====================== ======================
+     ``DOCUMENT``           ``TOKEN``
+     ====================== ======================
+
+     Parameters
+     ----------
+     None
+
+     Examples
+     --------
+     >>> import sparknlp
+     >>> from sparknlp.base import *
+     >>> from sparknlp.annotator import *
+     >>> from pyspark.ml import Pipeline
+     >>> documentAssembler = DocumentAssembler() \\
+     ...     .setInputCol("text") \\
+     ...     .setOutputCol("document")
+     >>> wordSegmenter = WordSegmenterModel.pretrained() \\
+     ...     .setInputCols(["document"]) \\
+     ...     .setOutputCol("token")
+     >>> pipeline = Pipeline().setStages([
+     ...     documentAssembler,
+     ...     wordSegmenter
+     ... ])
+     >>> data = spark.createDataFrame([["然而,這樣的處理也衍生了一些問題。"]]).toDF("text")
+     >>> result = pipeline.fit(data).transform(data)
+     >>> result.select("token.result").show(truncate=False)
+     +--------------------------------------------------------+
+     |result                                                  |
+     +--------------------------------------------------------+
+     |[然而, ,, 這樣, 的, 處理, 也, 衍生, 了, 一些, 問題, 。 ]|
+     +--------------------------------------------------------+
+     """
+     name = "WordSegmenterModel"
+
+     inputAnnotatorTypes = [AnnotatorType.DOCUMENT]
+
+     outputAnnotatorType = AnnotatorType.TOKEN
+
+     enableRegexTokenizer = Param(Params._dummy(),
+                                  "enableRegexTokenizer",
+                                  "Whether to use RegexTokenizer before segmentation. Useful for multilingual text",
+                                  typeConverter=TypeConverters.toBoolean)
+
+     toLowercase = Param(Params._dummy(),
+                         "toLowercase",
+                         "Indicates whether to convert all characters to lowercase before tokenizing.",
+                         typeConverter=TypeConverters.toBoolean)
+
+     pattern = Param(Params._dummy(),
+                     "pattern",
+                     "regex pattern used for tokenizing. Defaults to \\s+",
+                     typeConverter=TypeConverters.toString)
+
+     def setEnableRegexTokenizer(self, value):
+         """Sets whether to use RegexTokenizer before segmentation.
+         Useful for multilingual text.
+
+         Parameters
+         ----------
+         value : bool
+             Whether to use RegexTokenizer before segmentation
+         """
+         return self._set(enableRegexTokenizer=value)
+
+     def setToLowercase(self, value):
+         """Sets whether to convert all characters to lowercase before
+         tokenizing, by default False.
+
+         Parameters
+         ----------
+         value : bool
+             Whether to convert all characters to lowercase before tokenizing
+         """
+         return self._set(toLowercase=value)
+
+     def setPattern(self, value):
+         """Sets the regex pattern used for tokenizing, by default ``\\s+``.
+
+         Parameters
+         ----------
+         value : str
+             Regex pattern used for tokenizing
+         """
+         return self._set(pattern=value)
+
+     def __init__(self, classname="com.johnsnowlabs.nlp.annotators.ws.WordSegmenterModel", java_model=None):
+         super(WordSegmenterModel, self).__init__(
+             classname=classname,
+             java_model=java_model
+         )
+
+     @staticmethod
+     def pretrained(name="wordseg_pku", lang="zh", remote_loc=None):
+         """Downloads and loads a pretrained model.
+
+         Parameters
+         ----------
+         name : str, optional
+             Name of the pretrained model, by default "wordseg_pku"
+         lang : str, optional
+             Language of the pretrained model, by default "zh"
+         remote_loc : str, optional
+             Optional remote address of the resource, by default None. Will
+             use Spark NLP's repositories otherwise.
+
+         Returns
+         -------
+         WordSegmenterModel
+             The restored model
+         """
+         from sparknlp.pretrained import ResourceDownloader
+         return ResourceDownloader.downloadModel(WordSegmenterModel, name, lang, remote_loc)
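Read together, WordSegmenterApproach and WordSegmenterModel form a train-then-segment workflow. The following is a minimal end-to-end sketch assembled from the docstring examples above; the training file path is hypothetical and must point to a file in the character-tagged format shown (e.g. 十|LL 四|RR):

    import sparknlp
    from sparknlp.base import DocumentAssembler
    from sparknlp.annotator import WordSegmenterApproach
    from sparknlp.training import POS
    from pyspark.ml import Pipeline

    spark = sparknlp.start()

    # Each line of the training file tags characters with LL/RR/MM/LR;
    # POS reads it into a DataFrame with "text" and "tags" columns.
    training_data = POS().readDataset(spark, "chinese_train.utf8")

    pipeline = Pipeline().setStages([
        DocumentAssembler().setInputCol("text").setOutputCol("document"),
        WordSegmenterApproach()
            .setInputCols(["document"])
            .setOutputCol("token")
            .setPosColumn("tags")
            .setNIterations(5)
    ])
    model = pipeline.fit(training_data)

    # Segment previously unseen, unsegmented text with the fitted model.
    test_df = spark.createDataFrame([["然而,這樣的處理也衍生了一些問題。"]]).toDF("text")
    model.transform(test_df).select("token.result").show(truncate=False)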
sparknlp/base/__init__.py
@@ -0,0 +1,30 @@
+ # Copyright 2017-2022 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Module of base Spark NLP annotators."""
+ from sparknlp.base.doc2_chunk import *
+ from sparknlp.base.document_assembler import *
+ from sparknlp.base.multi_document_assembler import *
+ from sparknlp.base.embeddings_finisher import *
+ from sparknlp.base.finisher import *
+ from sparknlp.base.gguf_ranking_finisher import *
+ from sparknlp.base.graph_finisher import *
+ from sparknlp.base.has_recursive_fit import *
+ from sparknlp.base.has_recursive_transform import *
+ from sparknlp.base.light_pipeline import *
+ from sparknlp.base.recursive_pipeline import *
+ from sparknlp.base.token_assembler import *
+ from sparknlp.base.image_assembler import *
+ from sparknlp.base.audio_assembler import *
+ from sparknlp.base.table_assembler import *
+ from sparknlp.base.prompt_assembler import *
sparknlp/base/audio_assembler.py
@@ -0,0 +1,95 @@
+ # Copyright 2017-2022 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains classes for the AudioAssembler."""
+
+ from pyspark import keyword_only
+ from pyspark.ml.param import TypeConverters, Params, Param
+
+ from sparknlp.common import AnnotatorType
+ from sparknlp.internal import AnnotatorTransformer
+
+
+ class AudioAssembler(AnnotatorTransformer):
+     """Prepares Floats or Doubles from processed audio file(s).
+     This component is needed to process audio.
+
+     ====================== ======================
+     Input Annotation types Output Annotation type
+     ====================== ======================
+     ``NONE``               ``AUDIO``
+     ====================== ======================
+
+     Parameters
+     ----------
+     inputCol
+         Input column name
+     outputCol
+         Output column name
+
+     Examples
+     --------
+     >>> import sparknlp
+     >>> from sparknlp.base import *
+     >>> from pyspark.sql.functions import col
+     >>> from pyspark.ml import Pipeline
+     >>> data = spark.read.option("inferSchema", value=True) \\
+     ...     .parquet("./tmp/librispeech_asr_dummy_clean_audio_array_parquet") \\
+     ...     .select(col("float_array").cast("array<float>").alias("audio_content"))
+     >>> audioAssembler = AudioAssembler().setInputCol("audio_content").setOutputCol("audio_assembler")
+     >>> result = audioAssembler.transform(data)
+     >>> result.select("audio_assembler").show()
+     >>> data.printSchema()
+     root
+      |-- audio_content: array (nullable = true)
+      |    |-- element: float (containsNull = true)
+     """
+
+     inputCol = Param(Params._dummy(), "inputCol", "input column name", typeConverter=TypeConverters.toString)
+     outputCol = Param(Params._dummy(), "outputCol", "output column name", typeConverter=TypeConverters.toString)
+     name = "AudioAssembler"
+
+     outputAnnotatorType = AnnotatorType.AUDIO
+
+     @keyword_only
+     def __init__(self):
+         super(AudioAssembler, self).__init__(classname="com.johnsnowlabs.nlp.AudioAssembler")
+         self._setDefault(outputCol="audio_assembler", inputCol="audio")
+
+     @keyword_only
+     def setParams(self):
+         kwargs = self._input_kwargs
+         return self._set(**kwargs)
+
+     def setInputCol(self, value):
+         """Sets input column name.
+
+         Parameters
+         ----------
+         value : str
+             Name of the input column that holds the audio as Array[Float]
+             or Array[Double]
+         """
+         return self._set(inputCol=value)
+
+     def setOutputCol(self, value):
+         """Sets output column name.
+
+         Parameters
+         ----------
+         value : str
+             Name of the output column
+         """
+         return self._set(outputCol=value)
+
+     def getOutputCol(self):
+         """Gets output column name of annotations."""
+         return self.getOrDefault(self.outputCol)
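The assembled AUDIO column is what the speech-to-text annotators added in this release (HubertForCTC, Wav2Vec2ForCTC, WhisperForCTC; see the file list above) consume. A minimal transcription sketch, assuming a parquet file of raw float samples (the path and column names are hypothetical) and the default pretrained Wav2Vec2ForCTC model:

    import sparknlp
    from sparknlp.base import AudioAssembler
    from sparknlp.annotator import Wav2Vec2ForCTC
    from pyspark.sql.functions import col
    from pyspark.ml import Pipeline

    spark = sparknlp.start()

    # One row per audio clip, each holding the raw samples as array<float>.
    data = spark.read.parquet("audio_clips.parquet") \
        .select(col("float_array").cast("array<float>").alias("audio_content"))

    audio_assembler = AudioAssembler() \
        .setInputCol("audio_content") \
        .setOutputCol("audio_assembler")

    # Downloads the default pretrained ASR model on first use.
    speech_to_text = Wav2Vec2ForCTC.pretrained() \
        .setInputCols(["audio_assembler"]) \
        .setOutputCol("text")

    pipeline = Pipeline().setStages([audio_assembler, speech_to_text])
    pipeline.fit(data).transform(data).select("text.result").show(truncate=False)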