spark-nlp 4.2.6__py2.py3-none-any.whl → 6.2.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (221)
  1. com/johnsnowlabs/ml/__init__.py +0 -0
  2. com/johnsnowlabs/ml/ai/__init__.py +10 -0
  3. spark_nlp-6.2.1.dist-info/METADATA +362 -0
  4. spark_nlp-6.2.1.dist-info/RECORD +292 -0
  5. {spark_nlp-4.2.6.dist-info → spark_nlp-6.2.1.dist-info}/WHEEL +1 -1
  6. sparknlp/__init__.py +81 -28
  7. sparknlp/annotation.py +3 -2
  8. sparknlp/annotator/__init__.py +6 -0
  9. sparknlp/annotator/audio/__init__.py +2 -0
  10. sparknlp/annotator/audio/hubert_for_ctc.py +188 -0
  11. sparknlp/annotator/audio/wav2vec2_for_ctc.py +14 -14
  12. sparknlp/annotator/audio/whisper_for_ctc.py +251 -0
  13. sparknlp/{base → annotator}/chunk2_doc.py +4 -7
  14. sparknlp/annotator/chunker.py +1 -2
  15. sparknlp/annotator/classifier_dl/__init__.py +17 -0
  16. sparknlp/annotator/classifier_dl/albert_for_multiple_choice.py +161 -0
  17. sparknlp/annotator/classifier_dl/albert_for_question_answering.py +3 -15
  18. sparknlp/annotator/classifier_dl/albert_for_sequence_classification.py +4 -18
  19. sparknlp/annotator/classifier_dl/albert_for_token_classification.py +3 -17
  20. sparknlp/annotator/classifier_dl/albert_for_zero_shot_classification.py +211 -0
  21. sparknlp/annotator/classifier_dl/bart_for_zero_shot_classification.py +225 -0
  22. sparknlp/annotator/classifier_dl/bert_for_multiple_choice.py +161 -0
  23. sparknlp/annotator/classifier_dl/bert_for_question_answering.py +6 -20
  24. sparknlp/annotator/classifier_dl/bert_for_sequence_classification.py +3 -17
  25. sparknlp/annotator/classifier_dl/bert_for_token_classification.py +3 -17
  26. sparknlp/annotator/classifier_dl/bert_for_zero_shot_classification.py +212 -0
  27. sparknlp/annotator/classifier_dl/camembert_for_question_answering.py +168 -0
  28. sparknlp/annotator/classifier_dl/camembert_for_sequence_classification.py +5 -19
  29. sparknlp/annotator/classifier_dl/camembert_for_token_classification.py +5 -19
  30. sparknlp/annotator/classifier_dl/camembert_for_zero_shot_classification.py +202 -0
  31. sparknlp/annotator/classifier_dl/classifier_dl.py +4 -4
  32. sparknlp/annotator/classifier_dl/deberta_for_question_answering.py +3 -17
  33. sparknlp/annotator/classifier_dl/deberta_for_sequence_classification.py +4 -19
  34. sparknlp/annotator/classifier_dl/deberta_for_token_classification.py +5 -21
  35. sparknlp/annotator/classifier_dl/deberta_for_zero_shot_classification.py +193 -0
  36. sparknlp/annotator/classifier_dl/distil_bert_for_question_answering.py +3 -17
  37. sparknlp/annotator/classifier_dl/distil_bert_for_sequence_classification.py +4 -18
  38. sparknlp/annotator/classifier_dl/distil_bert_for_token_classification.py +3 -17
  39. sparknlp/annotator/classifier_dl/distil_bert_for_zero_shot_classification.py +211 -0
  40. sparknlp/annotator/classifier_dl/distilbert_for_multiple_choice.py +161 -0
  41. sparknlp/annotator/classifier_dl/longformer_for_question_answering.py +3 -17
  42. sparknlp/annotator/classifier_dl/longformer_for_sequence_classification.py +4 -18
  43. sparknlp/annotator/classifier_dl/longformer_for_token_classification.py +3 -17
  44. sparknlp/annotator/classifier_dl/mpnet_for_question_answering.py +148 -0
  45. sparknlp/annotator/classifier_dl/mpnet_for_sequence_classification.py +188 -0
  46. sparknlp/annotator/classifier_dl/mpnet_for_token_classification.py +173 -0
  47. sparknlp/annotator/classifier_dl/multi_classifier_dl.py +3 -3
  48. sparknlp/annotator/classifier_dl/roberta_for_multiple_choice.py +161 -0
  49. sparknlp/annotator/classifier_dl/roberta_for_question_answering.py +3 -17
  50. sparknlp/annotator/classifier_dl/roberta_for_sequence_classification.py +4 -18
  51. sparknlp/annotator/classifier_dl/roberta_for_token_classification.py +1 -1
  52. sparknlp/annotator/classifier_dl/roberta_for_zero_shot_classification.py +225 -0
  53. sparknlp/annotator/classifier_dl/sentiment_dl.py +4 -4
  54. sparknlp/annotator/classifier_dl/tapas_for_question_answering.py +2 -2
  55. sparknlp/annotator/classifier_dl/xlm_roberta_for_multiple_choice.py +149 -0
  56. sparknlp/annotator/classifier_dl/xlm_roberta_for_question_answering.py +3 -17
  57. sparknlp/annotator/classifier_dl/xlm_roberta_for_sequence_classification.py +4 -18
  58. sparknlp/annotator/classifier_dl/xlm_roberta_for_token_classification.py +6 -20
  59. sparknlp/annotator/classifier_dl/xlm_roberta_for_zero_shot_classification.py +225 -0
  60. sparknlp/annotator/classifier_dl/xlnet_for_sequence_classification.py +4 -18
  61. sparknlp/annotator/classifier_dl/xlnet_for_token_classification.py +3 -17
  62. sparknlp/annotator/cleaners/__init__.py +15 -0
  63. sparknlp/annotator/cleaners/cleaner.py +202 -0
  64. sparknlp/annotator/cleaners/extractor.py +191 -0
  65. sparknlp/annotator/coref/spanbert_coref.py +4 -18
  66. sparknlp/annotator/cv/__init__.py +15 -0
  67. sparknlp/annotator/cv/blip_for_question_answering.py +172 -0
  68. sparknlp/annotator/cv/clip_for_zero_shot_classification.py +193 -0
  69. sparknlp/annotator/cv/convnext_for_image_classification.py +269 -0
  70. sparknlp/annotator/cv/florence2_transformer.py +180 -0
  71. sparknlp/annotator/cv/gemma3_for_multimodal.py +346 -0
  72. sparknlp/annotator/cv/internvl_for_multimodal.py +280 -0
  73. sparknlp/annotator/cv/janus_for_multimodal.py +351 -0
  74. sparknlp/annotator/cv/llava_for_multimodal.py +328 -0
  75. sparknlp/annotator/cv/mllama_for_multimodal.py +340 -0
  76. sparknlp/annotator/cv/paligemma_for_multimodal.py +308 -0
  77. sparknlp/annotator/cv/phi3_vision_for_multimodal.py +328 -0
  78. sparknlp/annotator/cv/qwen2vl_transformer.py +332 -0
  79. sparknlp/annotator/cv/smolvlm_transformer.py +426 -0
  80. sparknlp/annotator/cv/swin_for_image_classification.py +242 -0
  81. sparknlp/annotator/cv/vision_encoder_decoder_for_image_captioning.py +240 -0
  82. sparknlp/annotator/cv/vit_for_image_classification.py +36 -4
  83. sparknlp/annotator/dataframe_optimizer.py +216 -0
  84. sparknlp/annotator/date2_chunk.py +88 -0
  85. sparknlp/annotator/dependency/dependency_parser.py +2 -3
  86. sparknlp/annotator/dependency/typed_dependency_parser.py +3 -4
  87. sparknlp/annotator/document_character_text_splitter.py +228 -0
  88. sparknlp/annotator/document_normalizer.py +37 -1
  89. sparknlp/annotator/document_token_splitter.py +175 -0
  90. sparknlp/annotator/document_token_splitter_test.py +85 -0
  91. sparknlp/annotator/embeddings/__init__.py +11 -0
  92. sparknlp/annotator/embeddings/albert_embeddings.py +4 -18
  93. sparknlp/annotator/embeddings/auto_gguf_embeddings.py +539 -0
  94. sparknlp/annotator/embeddings/bert_embeddings.py +9 -22
  95. sparknlp/annotator/embeddings/bert_sentence_embeddings.py +12 -24
  96. sparknlp/annotator/embeddings/bge_embeddings.py +199 -0
  97. sparknlp/annotator/embeddings/camembert_embeddings.py +4 -20
  98. sparknlp/annotator/embeddings/chunk_embeddings.py +1 -2
  99. sparknlp/annotator/embeddings/deberta_embeddings.py +2 -16
  100. sparknlp/annotator/embeddings/distil_bert_embeddings.py +5 -19
  101. sparknlp/annotator/embeddings/doc2vec.py +7 -1
  102. sparknlp/annotator/embeddings/e5_embeddings.py +195 -0
  103. sparknlp/annotator/embeddings/e5v_embeddings.py +138 -0
  104. sparknlp/annotator/embeddings/elmo_embeddings.py +2 -2
  105. sparknlp/annotator/embeddings/instructor_embeddings.py +204 -0
  106. sparknlp/annotator/embeddings/longformer_embeddings.py +3 -17
  107. sparknlp/annotator/embeddings/minilm_embeddings.py +189 -0
  108. sparknlp/annotator/embeddings/mpnet_embeddings.py +192 -0
  109. sparknlp/annotator/embeddings/mxbai_embeddings.py +184 -0
  110. sparknlp/annotator/embeddings/nomic_embeddings.py +181 -0
  111. sparknlp/annotator/embeddings/roberta_embeddings.py +9 -21
  112. sparknlp/annotator/embeddings/roberta_sentence_embeddings.py +7 -21
  113. sparknlp/annotator/embeddings/sentence_embeddings.py +2 -3
  114. sparknlp/annotator/embeddings/snowflake_embeddings.py +202 -0
  115. sparknlp/annotator/embeddings/uae_embeddings.py +211 -0
  116. sparknlp/annotator/embeddings/universal_sentence_encoder.py +3 -3
  117. sparknlp/annotator/embeddings/word2vec.py +7 -1
  118. sparknlp/annotator/embeddings/word_embeddings.py +4 -5
  119. sparknlp/annotator/embeddings/xlm_roberta_embeddings.py +9 -21
  120. sparknlp/annotator/embeddings/xlm_roberta_sentence_embeddings.py +7 -21
  121. sparknlp/annotator/embeddings/xlnet_embeddings.py +4 -18
  122. sparknlp/annotator/er/entity_ruler.py +37 -23
  123. sparknlp/annotator/keyword_extraction/yake_keyword_extraction.py +2 -3
  124. sparknlp/annotator/ld_dl/language_detector_dl.py +2 -2
  125. sparknlp/annotator/lemmatizer.py +3 -4
  126. sparknlp/annotator/matcher/date_matcher.py +35 -3
  127. sparknlp/annotator/matcher/multi_date_matcher.py +1 -2
  128. sparknlp/annotator/matcher/regex_matcher.py +3 -3
  129. sparknlp/annotator/matcher/text_matcher.py +2 -3
  130. sparknlp/annotator/n_gram_generator.py +1 -2
  131. sparknlp/annotator/ner/__init__.py +3 -1
  132. sparknlp/annotator/ner/ner_converter.py +18 -0
  133. sparknlp/annotator/ner/ner_crf.py +4 -5
  134. sparknlp/annotator/ner/ner_dl.py +10 -5
  135. sparknlp/annotator/ner/ner_dl_graph_checker.py +293 -0
  136. sparknlp/annotator/ner/ner_overwriter.py +2 -2
  137. sparknlp/annotator/ner/zero_shot_ner_model.py +173 -0
  138. sparknlp/annotator/normalizer.py +2 -2
  139. sparknlp/annotator/openai/__init__.py +16 -0
  140. sparknlp/annotator/openai/openai_completion.py +349 -0
  141. sparknlp/annotator/openai/openai_embeddings.py +106 -0
  142. sparknlp/annotator/pos/perceptron.py +6 -7
  143. sparknlp/annotator/sentence/sentence_detector.py +2 -2
  144. sparknlp/annotator/sentence/sentence_detector_dl.py +3 -3
  145. sparknlp/annotator/sentiment/sentiment_detector.py +4 -5
  146. sparknlp/annotator/sentiment/vivekn_sentiment.py +4 -5
  147. sparknlp/annotator/seq2seq/__init__.py +17 -0
  148. sparknlp/annotator/seq2seq/auto_gguf_model.py +304 -0
  149. sparknlp/annotator/seq2seq/auto_gguf_reranker.py +334 -0
  150. sparknlp/annotator/seq2seq/auto_gguf_vision_model.py +336 -0
  151. sparknlp/annotator/seq2seq/bart_transformer.py +420 -0
  152. sparknlp/annotator/seq2seq/cohere_transformer.py +357 -0
  153. sparknlp/annotator/seq2seq/cpm_transformer.py +321 -0
  154. sparknlp/annotator/seq2seq/gpt2_transformer.py +1 -1
  155. sparknlp/annotator/seq2seq/llama2_transformer.py +343 -0
  156. sparknlp/annotator/seq2seq/llama3_transformer.py +381 -0
  157. sparknlp/annotator/seq2seq/m2m100_transformer.py +392 -0
  158. sparknlp/annotator/seq2seq/marian_transformer.py +124 -3
  159. sparknlp/annotator/seq2seq/mistral_transformer.py +348 -0
  160. sparknlp/annotator/seq2seq/nllb_transformer.py +420 -0
  161. sparknlp/annotator/seq2seq/olmo_transformer.py +326 -0
  162. sparknlp/annotator/seq2seq/phi2_transformer.py +326 -0
  163. sparknlp/annotator/seq2seq/phi3_transformer.py +330 -0
  164. sparknlp/annotator/seq2seq/phi4_transformer.py +387 -0
  165. sparknlp/annotator/seq2seq/qwen_transformer.py +340 -0
  166. sparknlp/annotator/seq2seq/starcoder_transformer.py +335 -0
  167. sparknlp/annotator/seq2seq/t5_transformer.py +54 -4
  168. sparknlp/annotator/similarity/__init__.py +0 -0
  169. sparknlp/annotator/similarity/document_similarity_ranker.py +379 -0
  170. sparknlp/annotator/spell_check/context_spell_checker.py +116 -17
  171. sparknlp/annotator/spell_check/norvig_sweeting.py +3 -6
  172. sparknlp/annotator/spell_check/symmetric_delete.py +1 -1
  173. sparknlp/annotator/stemmer.py +2 -3
  174. sparknlp/annotator/stop_words_cleaner.py +3 -4
  175. sparknlp/annotator/tf_ner_dl_graph_builder.py +1 -1
  176. sparknlp/annotator/token/__init__.py +0 -1
  177. sparknlp/annotator/token/recursive_tokenizer.py +2 -3
  178. sparknlp/annotator/token/tokenizer.py +2 -3
  179. sparknlp/annotator/ws/word_segmenter.py +35 -10
  180. sparknlp/base/__init__.py +2 -3
  181. sparknlp/base/doc2_chunk.py +0 -3
  182. sparknlp/base/document_assembler.py +5 -5
  183. sparknlp/base/embeddings_finisher.py +14 -2
  184. sparknlp/base/finisher.py +15 -4
  185. sparknlp/base/gguf_ranking_finisher.py +234 -0
  186. sparknlp/base/image_assembler.py +69 -0
  187. sparknlp/base/light_pipeline.py +53 -21
  188. sparknlp/base/multi_document_assembler.py +9 -13
  189. sparknlp/base/prompt_assembler.py +207 -0
  190. sparknlp/base/token_assembler.py +1 -2
  191. sparknlp/common/__init__.py +2 -0
  192. sparknlp/common/annotator_type.py +1 -0
  193. sparknlp/common/completion_post_processing.py +37 -0
  194. sparknlp/common/match_strategy.py +33 -0
  195. sparknlp/common/properties.py +914 -9
  196. sparknlp/internal/__init__.py +841 -116
  197. sparknlp/internal/annotator_java_ml.py +1 -1
  198. sparknlp/internal/annotator_transformer.py +3 -0
  199. sparknlp/logging/comet.py +2 -2
  200. sparknlp/partition/__init__.py +16 -0
  201. sparknlp/partition/partition.py +244 -0
  202. sparknlp/partition/partition_properties.py +902 -0
  203. sparknlp/partition/partition_transformer.py +200 -0
  204. sparknlp/pretrained/pretrained_pipeline.py +1 -1
  205. sparknlp/pretrained/resource_downloader.py +126 -2
  206. sparknlp/reader/__init__.py +15 -0
  207. sparknlp/reader/enums.py +19 -0
  208. sparknlp/reader/pdf_to_text.py +190 -0
  209. sparknlp/reader/reader2doc.py +124 -0
  210. sparknlp/reader/reader2image.py +136 -0
  211. sparknlp/reader/reader2table.py +44 -0
  212. sparknlp/reader/reader_assembler.py +159 -0
  213. sparknlp/reader/sparknlp_reader.py +461 -0
  214. sparknlp/training/__init__.py +1 -0
  215. sparknlp/training/conll.py +8 -2
  216. sparknlp/training/spacy_to_annotation.py +57 -0
  217. sparknlp/util.py +26 -0
  218. spark_nlp-4.2.6.dist-info/METADATA +0 -1256
  219. spark_nlp-4.2.6.dist-info/RECORD +0 -196
  220. {spark_nlp-4.2.6.dist-info → spark_nlp-6.2.1.dist-info}/top_level.txt +0 -0
  221. /sparknlp/annotator/{token/token2_chunk.py → token2_chunk.py} +0 -0
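
The two largest additions, reproduced in full below, are the new multimodal vision annotators MLLamaForMultimodal and PaliGemmaForMultiModal. Before reading the diffs, a quick smoke test for the upgraded wheel might look as follows (a minimal sketch only; it assumes a local PySpark environment with the 6.2.1 wheel installed):

    import sparknlp

    # start() creates (or returns) a SparkSession preconfigured for Spark NLP.
    spark = sparknlp.start()

    # Should report 6.2.1 after upgrading from 4.2.6.
    print(sparknlp.version())
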
sparknlp/annotator/cv/mllama_for_multimodal.py
@@ -0,0 +1,340 @@
+ # Copyright 2017-2024 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from sparknlp.common import *
+
+
+ class MLLamaForMultimodal(AnnotatorModel,
+                           HasBatchedAnnotateImage,
+                           HasImageFeatureProperties,
+                           HasEngine,
+                           HasCandidateLabelsProperties,
+                           HasRescaleFactor):
+     """MLLamaForMultimodal can load LLAMA 3.2 Vision models for visual question answering.
+     The model consists of a vision encoder, a text encoder, and a text decoder. The vision
+     encoder encodes the input image, the text encoder processes the input question alongside
+     the image encoding, and the text decoder generates the answer to the question.
+
+     The Llama 3.2-Vision collection comprises pretrained and instruction-tuned multimodal large
+     language models (LLMs) available in 11B and 90B sizes. These models are optimized for visual
+     recognition, image reasoning, captioning, and answering general questions about images, and
+     outperform many open-source and proprietary multimodal models on standard industry benchmarks.
+
+     Pretrained models can be loaded with :meth:`.pretrained` of the companion object:
+
+     >>> visualQAClassifier = MLLamaForMultimodal.pretrained() \\
+     ...     .setInputCols(["image_assembler"]) \\
+     ...     .setOutputCol("answer")
+
+     The default model is ``"llama_3_2_11b_vision_instruct_int4"``, if no name is provided.
+
+     For available pretrained models, refer to the `Models Hub
+     <https://sparknlp.org/models?task=Question+Answering>`__.
+
+     Models from the HuggingFace 🤗 Transformers library are also compatible with Spark NLP 🚀.
+     To check compatibility and learn how to import them, see `Import Transformers into Spark NLP 🚀
+     <https://github.com/JohnSnowLabs/spark-nlp/discussions/5669>`_. For extended examples, refer to
+     the `MLLamaForMultimodal Test Suite
+     <https://github.com/JohnSnowLabs/spark-nlp/blob/master/src/test/scala/com/johnsnowlabs/nlp/annotators/cv/MLLamaForMultimodalTest.scala>`_.
+
+     ====================== ======================
+     Input Annotation types Output Annotation type
+     ====================== ======================
+     ``IMAGE``              ``DOCUMENT``
+     ====================== ======================
+
+     Parameters
+     ----------
+     batchSize : int, optional
+         Batch size. Larger values allow faster processing but require more memory,
+         by default 1.
+     configProtoBytes : bytes, optional
+         ConfigProto from TensorFlow, serialized into a byte array.
+     maxSentenceLength : int, optional
+         Maximum sentence length to process, by default 50.
+
+     Examples
+     --------
+     >>> import sparknlp
+     >>> from sparknlp.base import *
+     >>> from sparknlp.annotator import *
+     >>> from pyspark.ml import Pipeline
+     >>> from pyspark.sql.functions import lit
+     >>> image_df = SparkSessionForTest.spark.read.format("image").load(path=images_path)
+     >>> test_df = image_df.withColumn(
+     ...     "text",
+     ...     lit("<|begin_of_text|><|begin_of_text|><|start_header_id|>user<|end_header_id|>\\n\\n<|image|>What is unusual on this image?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\\n\\n")
+     ... )
+     >>> imageAssembler = ImageAssembler() \\
+     ...     .setInputCol("image") \\
+     ...     .setOutputCol("image_assembler")
+     >>> visualQAClassifier = MLLamaForMultimodal.pretrained() \\
+     ...     .setInputCols("image_assembler") \\
+     ...     .setOutputCol("answer")
+     >>> pipeline = Pipeline().setStages([
+     ...     imageAssembler,
+     ...     visualQAClassifier
+     ... ])
+     >>> result = pipeline.fit(test_df).transform(test_df)
+     >>> result.select("image_assembler.origin", "answer.result").show(truncate=False)
+     +--------------------------------------+---------------------------------------------------------------------------------------+
+     |origin                                |result                                                                                 |
+     +--------------------------------------+---------------------------------------------------------------------------------------+
+     |[file:///content/images/cat_image.jpg]|[The unusual aspect of this picture is the presence of two cats lying on a pink couch] |
+     +--------------------------------------+---------------------------------------------------------------------------------------+
+     """
+
+     name = "MLLamaForMultimodal"
+
+     inputAnnotatorTypes = [AnnotatorType.IMAGE]
+
+     outputAnnotatorType = AnnotatorType.DOCUMENT
+
+     configProtoBytes = Param(Params._dummy(),
+                              "configProtoBytes",
+                              "ConfigProto from tensorflow, serialized into byte array. Get with "
+                              "config_proto.SerializeToString()",
+                              TypeConverters.toListInt)
+
+     minOutputLength = Param(Params._dummy(), "minOutputLength", "Minimum length of the sequence to be generated",
+                             typeConverter=TypeConverters.toInt)
+
+     maxOutputLength = Param(Params._dummy(), "maxOutputLength", "Maximum length of output text",
+                             typeConverter=TypeConverters.toInt)
+
+     doSample = Param(Params._dummy(), "doSample", "Whether or not to use sampling; use greedy decoding otherwise",
+                      typeConverter=TypeConverters.toBoolean)
+
+     temperature = Param(Params._dummy(), "temperature", "The value used to modulate the next token probabilities",
+                         typeConverter=TypeConverters.toFloat)
+
+     topK = Param(Params._dummy(), "topK",
+                  "The number of highest probability vocabulary tokens to keep for top-k-filtering",
+                  typeConverter=TypeConverters.toInt)
+
+     topP = Param(Params._dummy(), "topP",
+                  "If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or higher are kept for generation",
+                  typeConverter=TypeConverters.toFloat)
+
+     repetitionPenalty = Param(Params._dummy(), "repetitionPenalty",
+                               "The parameter for repetition penalty. 1.0 means no penalty. See `this paper <https://arxiv.org/pdf/1909.05858.pdf>`__ for more details",
+                               typeConverter=TypeConverters.toFloat)
+
+     noRepeatNgramSize = Param(Params._dummy(), "noRepeatNgramSize",
+                               "If set to int > 0, all ngrams of that size can only occur once",
+                               typeConverter=TypeConverters.toInt)
+
+     ignoreTokenIds = Param(Params._dummy(), "ignoreTokenIds",
+                            "A list of token ids which are ignored in the decoder's output",
+                            typeConverter=TypeConverters.toListInt)
+
+     beamSize = Param(Params._dummy(), "beamSize",
+                      "The number of beams for beam search.",
+                      typeConverter=TypeConverters.toInt)
+
+     def setMaxSentenceSize(self, value):
+         """Sets the maximum sentence length that the annotator will process, by
+         default 50.
+
+         Parameters
+         ----------
+         value : int
+             Maximum sentence length that the annotator will process
+         """
+         return self._set(maxSentenceLength=value)
+
+     def setIgnoreTokenIds(self, value):
+         """Sets a list of token ids which are ignored in the decoder's output.
+
+         Parameters
+         ----------
+         value : List[int]
+             The tokens to be filtered out
+         """
+         return self._set(ignoreTokenIds=value)
+
+     def setConfigProtoBytes(self, b):
+         """Sets configProto from tensorflow, serialized into byte array.
+
+         Parameters
+         ----------
+         b : List[int]
+             ConfigProto from tensorflow, serialized into byte array
+         """
+         return self._set(configProtoBytes=b)
+
+     def setMinOutputLength(self, value):
+         """Sets minimum length of the sequence to be generated.
+
+         Parameters
+         ----------
+         value : int
+             Minimum length of the sequence to be generated
+         """
+         return self._set(minOutputLength=value)
+
+     def setMaxOutputLength(self, value):
+         """Sets maximum length of output text.
+
+         Parameters
+         ----------
+         value : int
+             Maximum length of output text
+         """
+         return self._set(maxOutputLength=value)
+
+     def setDoSample(self, value):
+         """Sets whether or not to use sampling; use greedy decoding otherwise.
+
+         Parameters
+         ----------
+         value : bool
+             Whether or not to use sampling; use greedy decoding otherwise
+         """
+         return self._set(doSample=value)
+
+     def setTemperature(self, value):
+         """Sets the value used to modulate the next token probabilities.
+
+         Parameters
+         ----------
+         value : float
+             The value used to modulate the next token probabilities
+         """
+         return self._set(temperature=value)
+
+     def setTopK(self, value):
+         """Sets the number of highest probability vocabulary tokens to keep for
+         top-k-filtering.
+
+         Parameters
+         ----------
+         value : int
+             Number of highest probability vocabulary tokens to keep
+         """
+         return self._set(topK=value)
+
+     def setTopP(self, value):
+         """Sets the top cumulative probability for vocabulary tokens.
+
+         If set to float < 1, only the most probable tokens with probabilities
+         that add up to ``topP`` or higher are kept for generation.
+
+         Parameters
+         ----------
+         value : float
+             Cumulative probability for vocabulary tokens
+         """
+         return self._set(topP=value)
+
+     def setRepetitionPenalty(self, value):
+         """Sets the parameter for repetition penalty. 1.0 means no penalty.
+
+         Parameters
+         ----------
+         value : float
+             The repetition penalty
+
+         References
+         ----------
+         See `Ctrl: A Conditional Transformer Language Model For Controllable
+         Generation <https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
+         """
+         return self._set(repetitionPenalty=value)
+
+     def setNoRepeatNgramSize(self, value):
+         """Sets size of n-grams that can only occur once.
+
+         If set to int > 0, all ngrams of that size can only occur once.
+
+         Parameters
+         ----------
+         value : int
+             N-gram size that can only occur once
+         """
+         return self._set(noRepeatNgramSize=value)
+
+     def setBeamSize(self, value):
+         """Sets the number of beams for beam search, by default 1.
+
+         Parameters
+         ----------
+         value : int
+             Number of beams for beam search
+         """
+         return self._set(beamSize=value)
+
+     @keyword_only
+     def __init__(self, classname="com.johnsnowlabs.nlp.annotators.cv.MLLamaForMultimodal",
+                  java_model=None):
+         super(MLLamaForMultimodal, self).__init__(
+             classname=classname,
+             java_model=java_model
+         )
+         self._setDefault(
+             batchSize=1,
+             minOutputLength=0,
+             maxOutputLength=50,
+             doSample=False,
+             temperature=1,
+             topK=50,
+             topP=1,
+             repetitionPenalty=1.0,
+             noRepeatNgramSize=0,
+             ignoreTokenIds=[],
+             beamSize=1,
+         )
+
+     @staticmethod
+     def loadSavedModel(folder, spark_session, use_openvino=False):
+         """Loads a locally saved model.
+
+         Parameters
+         ----------
+         folder : str
+             Folder of the saved model
+         spark_session : pyspark.sql.SparkSession
+             The current SparkSession
+         use_openvino : bool, optional
+             Whether to use the OpenVINO engine, by default False
+
+         Returns
+         -------
+         MLLamaForMultimodal
+             The restored model
+         """
+         from sparknlp.internal import _MLLamaForMultimodalLoader
+         jModel = _MLLamaForMultimodalLoader(folder, spark_session._jsparkSession, use_openvino)._java_obj
+         return MLLamaForMultimodal(java_model=jModel)
+
+     @staticmethod
+     def pretrained(name="llama_3_2_11b_vision_instruct_int4", lang="en", remote_loc=None):
+         """Downloads and loads a pretrained model.
+
+         Parameters
+         ----------
+         name : str, optional
+             Name of the pretrained model, by default
+             "llama_3_2_11b_vision_instruct_int4"
+         lang : str, optional
+             Language of the pretrained model, by default "en"
+         remote_loc : str, optional
+             Optional remote address of the resource, by default None. Will use
+             Spark NLP's repositories otherwise.
+
+         Returns
+         -------
+         MLLamaForMultimodal
+             The restored model
+         """
+         from sparknlp.pretrained import ResourceDownloader
+         return ResourceDownloader.downloadModel(MLLamaForMultimodal, name, lang, remote_loc)
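
The loadSavedModel entry point above is how locally exported models get wrapped into the annotator. A hedged sketch of that flow (the folder paths are hypothetical placeholders; the calls mirror the signatures shown in this diff plus the standard Spark ML writer API):

    import sparknlp
    from sparknlp.annotator import MLLamaForMultimodal

    spark = sparknlp.start()

    # "/models/mllama_exported" stands in for a model folder exported from
    # HuggingFace, as described in the import discussion linked above.
    model = MLLamaForMultimodal.loadSavedModel("/models/mllama_exported", spark) \
        .setInputCols(["image_assembler"]) \
        .setOutputCol("answer")

    # Persist the wrapped annotator for later reuse (standard Spark ML writer).
    model.write().overwrite().save("/models/mllama_spark_nlp")
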
sparknlp/annotator/cv/paligemma_for_multimodal.py
@@ -0,0 +1,308 @@
+ # Copyright 2017-2024 John Snow Labs
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from sparknlp.common import *
+
+
+ class PaliGemmaForMultiModal(AnnotatorModel,
+                              HasBatchedAnnotateImage,
+                              HasImageFeatureProperties,
+                              HasEngine,
+                              HasCandidateLabelsProperties,
+                              HasRescaleFactor):
+     """PaliGemmaForMultiModal can load PaliGemma models for visual question answering.
+     The model consists of a vision encoder, a text encoder, a text decoder, and a model merger.
+     The vision encoder encodes the input image, the text encoder encodes the input text, the
+     model merger merges the image and text embeddings, and the text decoder outputs the answer.
+
+     Pretrained models can be loaded with :meth:`.pretrained` of the companion
+     object:
+
+     >>> visualQAClassifier = PaliGemmaForMultiModal.pretrained() \\
+     ...     .setInputCols(["image_assembler"]) \\
+     ...     .setOutputCol("answer")
+
+     The default model is ``"paligemma_3b_pt_224_int4"``, if no name is provided.
+
+     For available pretrained models, please see the `Models Hub
+     <https://sparknlp.org/models?task=Question+Answering>`__.
+
+     ====================== ======================
+     Input Annotation types Output Annotation type
+     ====================== ======================
+     ``IMAGE``              ``DOCUMENT``
+     ====================== ======================
+
+     Parameters
+     ----------
+     batchSize : int, optional
+         Batch size. Larger values allow faster processing but require more
+         memory, by default 2.
+     maxSentenceLength : int, optional
+         Maximum sentence length to process, by default 50.
+
+     Examples
+     --------
+     >>> import sparknlp
+     >>> from sparknlp.base import *
+     >>> from sparknlp.annotator import *
+     >>> from pyspark.ml import Pipeline
+     >>> from pyspark.sql.functions import lit
+     >>> image_df = SparkSessionForTest.spark.read.format("image").load(path=images_path)
+     >>> test_df = image_df.withColumn("text", lit("USER: \\n <image> \\nDescribe this image. \\nASSISTANT:\\n"))
+     >>> imageAssembler = ImageAssembler() \\
+     ...     .setInputCol("image") \\
+     ...     .setOutputCol("image_assembler")
+     >>> visualQAClassifier = PaliGemmaForMultiModal.pretrained() \\
+     ...     .setInputCols("image_assembler") \\
+     ...     .setOutputCol("answer")
+     >>> pipeline = Pipeline().setStages([
+     ...     imageAssembler,
+     ...     visualQAClassifier
+     ... ])
+     >>> result = pipeline.fit(test_df).transform(test_df)
+     >>> result.select("image_assembler.origin", "answer.result").show(truncate=False)
+     +--------------------------------------+--------------------------------------+
+     |origin                                |result                                |
+     +--------------------------------------+--------------------------------------+
+     |[file:///content/images/bluetick.jpg] |[A dog is standing on a grassy field.]|
+     +--------------------------------------+--------------------------------------+
+     """
+
+     name = "PaliGemmaForMultiModal"
+
+     inputAnnotatorTypes = [AnnotatorType.IMAGE]
+
+     outputAnnotatorType = AnnotatorType.DOCUMENT
+
+     minOutputLength = Param(Params._dummy(), "minOutputLength", "Minimum length of the sequence to be generated",
+                             typeConverter=TypeConverters.toInt)
+
+     maxOutputLength = Param(Params._dummy(), "maxOutputLength", "Maximum length of output text",
+                             typeConverter=TypeConverters.toInt)
+
+     doSample = Param(Params._dummy(), "doSample", "Whether or not to use sampling; use greedy decoding otherwise",
+                      typeConverter=TypeConverters.toBoolean)
+
+     temperature = Param(Params._dummy(), "temperature", "The value used to modulate the next token probabilities",
+                         typeConverter=TypeConverters.toFloat)
+
+     topK = Param(Params._dummy(), "topK",
+                  "The number of highest probability vocabulary tokens to keep for top-k-filtering",
+                  typeConverter=TypeConverters.toInt)
+
+     topP = Param(Params._dummy(), "topP",
+                  "If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or higher are kept for generation",
+                  typeConverter=TypeConverters.toFloat)
+
+     repetitionPenalty = Param(Params._dummy(), "repetitionPenalty",
+                               "The parameter for repetition penalty. 1.0 means no penalty. See `this paper <https://arxiv.org/pdf/1909.05858.pdf>`__ for more details",
+                               typeConverter=TypeConverters.toFloat)
+
+     noRepeatNgramSize = Param(Params._dummy(), "noRepeatNgramSize",
+                               "If set to int > 0, all ngrams of that size can only occur once",
+                               typeConverter=TypeConverters.toInt)
+
+     ignoreTokenIds = Param(Params._dummy(), "ignoreTokenIds",
+                            "A list of token ids which are ignored in the decoder's output",
+                            typeConverter=TypeConverters.toListInt)
+
+     beamSize = Param(Params._dummy(), "beamSize",
+                      "The number of beams for beam search.",
+                      typeConverter=TypeConverters.toInt)
+
+     def setMaxSentenceSize(self, value):
+         """Sets the maximum sentence length that the annotator will process, by
+         default 50.
+
+         Parameters
+         ----------
+         value : int
+             Maximum sentence length that the annotator will process
+         """
+         return self._set(maxSentenceLength=value)
+
+     def setIgnoreTokenIds(self, value):
+         """Sets a list of token ids which are ignored in the decoder's output.
+
+         Parameters
+         ----------
+         value : List[int]
+             The tokens to be filtered out
+         """
+         return self._set(ignoreTokenIds=value)
+
+     def setMinOutputLength(self, value):
+         """Sets minimum length of the sequence to be generated.
+
+         Parameters
+         ----------
+         value : int
+             Minimum length of the sequence to be generated
+         """
+         return self._set(minOutputLength=value)
+
+     def setMaxOutputLength(self, value):
+         """Sets maximum length of output text.
+
+         Parameters
+         ----------
+         value : int
+             Maximum length of output text
+         """
+         return self._set(maxOutputLength=value)
+
+     def setDoSample(self, value):
+         """Sets whether or not to use sampling; use greedy decoding otherwise.
+
+         Parameters
+         ----------
+         value : bool
+             Whether or not to use sampling; use greedy decoding otherwise
+         """
+         return self._set(doSample=value)
+
+     def setTemperature(self, value):
+         """Sets the value used to modulate the next token probabilities.
+
+         Parameters
+         ----------
+         value : float
+             The value used to modulate the next token probabilities
+         """
+         return self._set(temperature=value)
+
+     def setTopK(self, value):
+         """Sets the number of highest probability vocabulary tokens to keep for
+         top-k-filtering.
+
+         Parameters
+         ----------
+         value : int
+             Number of highest probability vocabulary tokens to keep
+         """
+         return self._set(topK=value)
+
+     def setTopP(self, value):
+         """Sets the top cumulative probability for vocabulary tokens.
+
+         If set to float < 1, only the most probable tokens with probabilities
+         that add up to ``topP`` or higher are kept for generation.
+
+         Parameters
+         ----------
+         value : float
+             Cumulative probability for vocabulary tokens
+         """
+         return self._set(topP=value)
+
+     def setRepetitionPenalty(self, value):
+         """Sets the parameter for repetition penalty. 1.0 means no penalty.
+
+         Parameters
+         ----------
+         value : float
+             The repetition penalty
+
+         References
+         ----------
+         See `Ctrl: A Conditional Transformer Language Model For Controllable
+         Generation <https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
+         """
+         return self._set(repetitionPenalty=value)
+
+     def setNoRepeatNgramSize(self, value):
+         """Sets size of n-grams that can only occur once.
+
+         If set to int > 0, all ngrams of that size can only occur once.
+
+         Parameters
+         ----------
+         value : int
+             N-gram size that can only occur once
+         """
+         return self._set(noRepeatNgramSize=value)
+
+     def setBeamSize(self, value):
+         """Sets the number of beams for beam search, by default 1.
+
+         Parameters
+         ----------
+         value : int
+             Number of beams for beam search
+         """
+         return self._set(beamSize=value)
+
+     @keyword_only
+     def __init__(self, classname="com.johnsnowlabs.nlp.annotators.cv.PaliGemmaForMultiModal",
+                  java_model=None):
+         super(PaliGemmaForMultiModal, self).__init__(
+             classname=classname,
+             java_model=java_model
+         )
+         self._setDefault(
+             batchSize=2,
+             minOutputLength=0,
+             maxOutputLength=200,
+             doSample=False,
+             temperature=1,
+             topK=50,
+             topP=1,
+             repetitionPenalty=1.0,
+             noRepeatNgramSize=0,
+             ignoreTokenIds=[],
+             beamSize=1,
+         )
+
+     @staticmethod
+     def loadSavedModel(folder, spark_session, use_openvino=False):
+         """Loads a locally saved model.
+
+         Parameters
+         ----------
+         folder : str
+             Folder of the saved model
+         spark_session : pyspark.sql.SparkSession
+             The current SparkSession
+         use_openvino : bool, optional
+             Whether to use the OpenVINO engine, by default False
+
+         Returns
+         -------
+         PaliGemmaForMultiModal
+             The restored model
+         """
+         from sparknlp.internal import _PaliGemmaForMultiModalLoader
+         jModel = _PaliGemmaForMultiModalLoader(folder, spark_session._jsparkSession, use_openvino)._java_obj
+         return PaliGemmaForMultiModal(java_model=jModel)
+
+     @staticmethod
+     def pretrained(name="paligemma_3b_pt_224_int4", lang="en", remote_loc=None):
+         """Downloads and loads a pretrained model.
+
+         Parameters
+         ----------
+         name : str, optional
+             Name of the pretrained model, by default
+             "paligemma_3b_pt_224_int4"
+         lang : str, optional
+             Language of the pretrained model, by default "en"
+         remote_loc : str, optional
+             Optional remote address of the resource, by default None. Will use
+             Spark NLP's repositories otherwise.
+
+         Returns
+         -------
+         PaliGemmaForMultiModal
+             The restored model
+         """
+         from sparknlp.pretrained import ResourceDownloader
+         return ResourceDownloader.downloadModel(PaliGemmaForMultiModal, name, lang, remote_loc)
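
Putting the pieces together, a hedged end-to-end sketch of the new PaliGemma annotator (the image directory is a placeholder, and pretrained() downloads the default paligemma_3b_pt_224_int4 model, which requires network access):

    import sparknlp
    from sparknlp.base import ImageAssembler
    from sparknlp.annotator import PaliGemmaForMultiModal
    from pyspark.ml import Pipeline
    from pyspark.sql.functions import lit

    spark = sparknlp.start()

    # "/data/images" is a placeholder directory of image files.
    image_df = spark.read.format("image").load("/data/images")
    test_df = image_df.withColumn(
        "text", lit("USER: \n <image> \nDescribe this image. \nASSISTANT:\n"))

    image_assembler = ImageAssembler() \
        .setInputCol("image") \
        .setOutputCol("image_assembler")

    # Generation behavior is controlled through the Param setters defined above.
    visual_qa = PaliGemmaForMultiModal.pretrained() \
        .setInputCols(["image_assembler"]) \
        .setOutputCol("answer") \
        .setMaxOutputLength(100) \
        .setDoSample(False)

    pipeline = Pipeline(stages=[image_assembler, visual_qa])
    result = pipeline.fit(test_df).transform(test_df)
    result.select("image_assembler.origin", "answer.result").show(truncate=False)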