spark-nlp 6.0.1rc1__py2.py3-none-any.whl → 6.0.3__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of spark-nlp might be problematic; see the details below.

Files changed (39)
  1. {spark_nlp-6.0.1rc1.dist-info → spark_nlp-6.0.3.dist-info}/METADATA +13 -6
  2. {spark_nlp-6.0.1rc1.dist-info → spark_nlp-6.0.3.dist-info}/RECORD +39 -32
  3. {spark_nlp-6.0.1rc1.dist-info → spark_nlp-6.0.3.dist-info}/WHEEL +1 -1
  4. sparknlp/__init__.py +4 -2
  5. sparknlp/annotator/cv/__init__.py +2 -0
  6. sparknlp/annotator/cv/florence2_transformer.py +180 -0
  7. sparknlp/annotator/cv/gemma3_for_multimodal.py +5 -10
  8. sparknlp/annotator/cv/internvl_for_multimodal.py +280 -0
  9. sparknlp/annotator/cv/janus_for_multimodal.py +8 -13
  10. sparknlp/annotator/cv/llava_for_multimodal.py +1 -1
  11. sparknlp/annotator/cv/paligemma_for_multimodal.py +7 -7
  12. sparknlp/annotator/cv/phi3_vision_for_multimodal.py +1 -1
  13. sparknlp/annotator/cv/qwen2vl_transformer.py +1 -1
  14. sparknlp/annotator/cv/smolvlm_transformer.py +7 -13
  15. sparknlp/annotator/date2_chunk.py +1 -1
  16. sparknlp/annotator/document_character_text_splitter.py +8 -8
  17. sparknlp/annotator/document_token_splitter.py +7 -7
  18. sparknlp/annotator/embeddings/__init__.py +1 -0
  19. sparknlp/annotator/embeddings/bge_embeddings.py +21 -19
  20. sparknlp/annotator/embeddings/e5v_embeddings.py +138 -0
  21. sparknlp/annotator/embeddings/snowflake_embeddings.py +15 -15
  22. sparknlp/annotator/openai/openai_completion.py +3 -4
  23. sparknlp/annotator/seq2seq/m2m100_transformer.py +1 -1
  24. sparknlp/annotator/seq2seq/mistral_transformer.py +2 -3
  25. sparknlp/annotator/seq2seq/nllb_transformer.py +1 -1
  26. sparknlp/annotator/seq2seq/qwen_transformer.py +26 -25
  27. sparknlp/annotator/spell_check/context_spell_checker.py +1 -1
  28. sparknlp/base/prompt_assembler.py +1 -1
  29. sparknlp/common/properties.py +7 -7
  30. sparknlp/internal/__init__.py +27 -0
  31. sparknlp/partition/__init__.py +16 -0
  32. sparknlp/partition/partition.py +244 -0
  33. sparknlp/partition/partition_properties.py +319 -0
  34. sparknlp/partition/partition_transformer.py +200 -0
  35. sparknlp/reader/pdf_to_text.py +50 -4
  36. sparknlp/reader/sparknlp_reader.py +101 -52
  37. sparknlp/training/spacy_to_annotation.py +7 -7
  38. sparknlp/util.py +26 -0
  39. {spark_nlp-6.0.1rc1.dist-info → spark_nlp-6.0.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,138 @@
1
+ # Copyright 2017-2024 John Snow Labs
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from sparknlp.common import *
16
+
17
+ class E5VEmbeddings(AnnotatorModel,
18
+ HasBatchedAnnotateImage,
19
+ HasImageFeatureProperties,
20
+ HasEngine,
21
+ HasRescaleFactor):
22
+ """Universal multimodal embeddings using the E5-V model (see https://huggingface.co/royokong/e5-v).
23
+
24
+ E5-V bridges the modality gap between different input types (text, image) and demonstrates strong performance in multimodal embeddings, even without fine-tuning. It also supports a single-modality training approach, where the model is trained exclusively on text pairs, often yielding better performance than multimodal training.
25
+
26
+ Pretrained models can be loaded with :meth:`.pretrained` of the companion object:
27
+
28
+ >>> e5vEmbeddings = E5VEmbeddings.pretrained() \
29
+ ... .setInputCols(["image_assembler"]) \
30
+ ... .setOutputCol("e5v")
31
+
32
+ The default model is ``"e5v_int4"``, if no name is provided.
33
+
34
+ For available pretrained models please see the `Models Hub <https://sparknlp.org/models?task=Question+Answering>`__.
35
+
36
+ ====================== ======================
37
+ Input Annotation types Output Annotation type
38
+ ====================== ======================
39
+ ``IMAGE`` ``SENTENCE_EMBEDDINGS``
40
+ ====================== ======================
41
+
42
+ Examples
43
+ --------
44
+ Image + Text Embedding:
45
+ >>> import sparknlp
46
+ >>> from sparknlp.base import *
47
+ >>> from sparknlp.annotator import *
48
+ >>> from pyspark.ml import Pipeline
49
+ >>> image_df = spark.read.format("image").option("dropInvalid", value = True).load(imageFolder)
50
+ >>> imagePrompt = "<|start_header_id|>user<|end_header_id|>\n\n<image>\\nSummary above image in one word: <|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n \n"
51
+ >>> test_df = image_df.withColumn("text", lit(imagePrompt))
52
+ >>> imageAssembler = ImageAssembler() \
53
+ ... .setInputCol("image") \
54
+ ... .setOutputCol("image_assembler")
55
+ >>> e5vEmbeddings = E5VEmbeddings.pretrained() \
56
+ ... .setInputCols(["image_assembler"]) \
57
+ ... .setOutputCol("e5v")
58
+ >>> pipeline = Pipeline().setStages([
59
+ ... imageAssembler,
60
+ ... e5vEmbeddings
61
+ ... ])
62
+ >>> result = pipeline.fit(test_df).transform(test_df)
63
+ >>> result.select("e5v.embeddings").show(truncate = False)
64
+
65
+ Text-Only Embedding:
66
+ >>> from sparknlp.util import EmbeddingsDataFrameUtils
67
+ >>> textPrompt = "<|start_header_id|>user<|end_header_id|>\n\n<sent>\\nSummary above sentence in one word: <|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n \n"
68
+ >>> textDesc = "A cat sitting in a box."
69
+ >>> nullImageDF = spark.createDataFrame(spark.sparkContext.parallelize([EmbeddingsDataFrameUtils.emptyImageRow]), EmbeddingsDataFrameUtils.imageSchema)
70
+ >>> textDF = nullImageDF.withColumn("text", lit(textPrompt.replace("<sent>", textDesc)))
71
+ >>> e5vEmbeddings = E5VEmbeddings.pretrained() \
72
+ ... .setInputCols(["image"]) \
73
+ ... .setOutputCol("e5v")
74
+ >>> result = e5vEmbeddings.transform(textDF)
75
+ >>> result.select("e5v.embeddings").show(truncate = False)
76
+ """
77
+
78
+ name = "E5VEmbeddings"
79
+
80
+ inputAnnotatorTypes = [AnnotatorType.IMAGE]
81
+ outputAnnotatorType = AnnotatorType.SENTENCE_EMBEDDINGS
82
+
83
+ @keyword_only
84
+ def __init__(self, classname="com.johnsnowlabs.nlp.embeddings.E5VEmbeddings", java_model=None):
85
+ """Initializes the E5VEmbeddings annotator.
86
+
87
+ Parameters
88
+ ----------
89
+ classname : str, optional
90
+ The Java class name of the annotator, by default "com.johnsnowlabs.nlp.embeddings.E5VEmbeddings"
91
+ java_model : Optional[java.lang.Object], optional
92
+ A pre-initialized Java model, by default None
93
+ """
94
+ super(E5VEmbeddings, self).__init__(classname=classname, java_model=java_model)
95
+ self._setDefault()
96
+
97
+ @staticmethod
98
+ def loadSavedModel(folder, spark_session, use_openvino=False):
99
+ """Loads a locally saved model.
100
+
101
+ Parameters
102
+ ----------
103
+ folder : str
104
+ Folder of the saved model
105
+ spark_session : pyspark.sql.SparkSession
106
+ The current SparkSession
107
+ use_openvino : bool, optional
108
+ Whether to use OpenVINO engine, by default False
109
+
110
+ Returns
111
+ -------
112
+ E5VEmbeddings
113
+ The restored model
114
+ """
115
+ from sparknlp.internal import _E5VEmbeddingsLoader
116
+ jModel = _E5VEmbeddingsLoader(folder, spark_session._jsparkSession, use_openvino)._java_obj
117
+ return E5VEmbeddings(java_model=jModel)
118
+
119
+ @staticmethod
120
+ def pretrained(name="e5v_int4", lang="en", remote_loc=None):
121
+ """Downloads and loads a pretrained model.
122
+
123
+ Parameters
124
+ ----------
125
+ name : str, optional
126
+ Name of the pretrained model, by default "e5v_int4"
127
+ lang : str, optional
128
+ Language of the pretrained model, by default "en"
129
+ remote_loc : str, optional
130
+ Optional remote address of the resource, by default None. Will use Spark NLPs repositories otherwise.
131
+
132
+ Returns
133
+ -------
134
+ E5VEmbeddings
135
+ The restored model
136
+ """
137
+ from sparknlp.pretrained import ResourceDownloader
138
+ return ResourceDownloader.downloadModel(E5VEmbeddings, name, lang, remote_loc)
@@ -47,21 +47,7 @@ class SnowFlakeEmbeddings(AnnotatorModel,
47
47
  ``DOCUMENT`` ``SENTENCE_EMBEDDINGS``
48
48
  ====================== ======================
49
49
 
50
- Parameters
51
- ----------
52
- batchSize
53
- Size of every batch , by default 8
54
- dimension
55
- Number of embedding dimensions, by default 768
56
- caseSensitive
57
- Whether to ignore case in tokens for embeddings matching, by default False
58
- maxSentenceLength
59
- Max sentence length to process, by default 512
60
- configProtoBytes
61
- ConfigProto from tensorflow, serialized into byte array.
62
-
63
- References
64
- ----------
50
+ **References**
65
51
 
66
52
  `Arctic-Embed: Scalable, Efficient, and Accurate Text Embedding Models <https://arxiv.org/abs/2405.05374>`__
67
53
  `Snowflake Arctic-Embed Models <https://github.com/Snowflake-Labs/arctic-embed>`__
@@ -78,6 +64,20 @@ class SnowFlakeEmbeddings(AnnotatorModel,
78
64
  data curation is crucial to retrieval accuracy. A detailed technical report will be available
79
65
  shortly. *
80
66
 
67
+ Parameters
68
+ ----------
69
+ batchSize
70
+ Size of every batch , by default 8
71
+ dimension
72
+ Number of embedding dimensions, by default 768
73
+ caseSensitive
74
+ Whether to ignore case in tokens for embeddings matching, by default False
75
+ maxSentenceLength
76
+ Max sentence length to process, by default 512
77
+ configProtoBytes
78
+ ConfigProto from tensorflow, serialized into byte array.
79
+
80
+
81
81
  Examples
82
82
  --------
83
83
  >>> import sparknlp
@@ -63,7 +63,6 @@ class OpenAICompletion(AnnotatorModel):
63
63
  >>> from sparknlp.annotator import *
64
64
  >>> from sparknlp.common import *
65
65
  >>> from pyspark.ml import Pipeline
66
-
67
66
  >>> documentAssembler = DocumentAssembler() \\
68
67
  ... .setInputCol("text") \\
69
68
  ... .setOutputCol("document")
@@ -83,9 +82,9 @@ class OpenAICompletion(AnnotatorModel):
83
82
  +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
84
83
  |completion |
85
84
  +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
86
- |[{document, 0, 258, \n\nI had the pleasure of dining at La Fiorita recently, and it was a truly delightful experience! The menu boasted a wonderful selection of classic Italian dishes, all exquisitely prepared and presented. The service staff was friendly and attentive and really, {}, []}]|
87
- |[{document, 0, 227, \n\nI recently visited Barbecue Joe's for dinner and it was amazing! The menu had so many items to choose from including pulled pork, smoked turkey, brisket, pork ribs, and sandwiches. I opted for the pulled pork sandwich and let, {}, []}] |
88
- |[{document, 0, 172, \n\n{ \n "review": { \n "overallRating": 4, \n "reviewBody": "I enjoyed my meal at this restaurant. The food was flavourful, well-prepared and beautifully presented., {}, []}] |
85
+ |[{document, 0, 258, \\n\\nI had the pleasure of dining at La Fiorita recently, and it was a truly delightful experience! The menu boasted a wonderful selection of classic Italian dishes, all exquisitely prepared and presented. The service staff was friendly and attentive and really, {}, []}]|
86
+ |[{document, 0, 227, \\n\\nI recently visited Barbecue Joe's for dinner and it was amazing! The menu had so many items to choose from including pulled pork, smoked turkey, brisket, pork ribs, and sandwiches. I opted for the pulled pork sandwich and let, {}, []}] |
87
+ |[{document, 0, 172, \\n\\n{ \\n "review": { \\n "overallRating": 4, \\n "reviewBody": "I enjoyed my meal at this restaurant. The food was flavourful, well-prepared and beautifully presented., {}, []}] |
89
88
  +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
90
89
  """
91
90
 
@@ -77,7 +77,7 @@ class M2M100Transformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
77
77
  Target Language (Default: `fr`)
78
78
 
79
79
  Languages Covered
80
- -----
80
+ -----------------
81
81
  Afrikaans (af), Amharic (am), Arabic (ar), Asturian (ast), Azerbaijani (az), Bashkir (ba),
82
82
  Belarusian (be), Bulgarian (bg), Bengali (bn), Breton (br), Bosnian (bs), Catalan; Valencian
83
83
  (ca), Cebuano (ceb), Czech (cs), Welsh (cy), Danish (da), German (de), Greeek (el), English
@@ -91,8 +91,7 @@ class MistralTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
91
91
 
92
92
  References
93
93
  ----------
94
- - `Mistral 7B
95
- <https://mistral.ai/news/announcing-mistral_7b/>`__
94
+ - `Mistral 7B <https://mistral.ai/news/announcing-mistral_7b/>`__
96
95
  - https://github.com/mistralai/mistral-src
97
96
 
98
97
  **Paper Abstract:**
@@ -126,7 +125,7 @@ class MistralTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
126
125
  +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
127
126
  |result |
128
127
  +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
129
- |[Leonardo Da Vinci invented the microscope?\n Question: Leonardo Da Vinci invented the microscope?\n Answer: No, Leonardo Da Vinci did not invent the microscope. The first microscope was invented |
128
+ |[Leonardo Da Vinci invented the microscope?\\n Question: Leonardo Da Vinci invented the microscope?\\n Answer: No, Leonardo Da Vinci did not invent the microscope. The first microscope was invented |
130
129
  | in the late 16th century, long after Leonardo'] |
131
130
  -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
132
131
  """
@@ -77,7 +77,7 @@ class NLLBTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
77
77
  Target Language (Default: `fr`)
78
78
 
79
79
  Languages Covered
80
- -----
80
+ -----------------
81
81
  Acehnese (Arabic script) (ace_Arab), Acehnese (Latin script) (ace_Latn), Mesopotamian Arabic
82
82
  (acm_Arab), Ta’izzi-Adeni Arabic (acq_Arab), Tunisian Arabic (aeb_Arab), Afrikaans (afr_Latn),
83
83
  South Levantine Arabic (ajp_Arab), Akan (aka_Latn), Amharic (amh_Ethi), North Levantine Arabic
@@ -52,6 +52,32 @@ class QwenTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
52
52
  ``DOCUMENT`` ``DOCUMENT``
53
53
  ====================== ======================
54
54
 
55
+ **References**
56
+
57
+ - `Qwen Technical Report
58
+ <https://arxiv.org/pdf/2309.16609.pdf>`__
59
+ - https://qwenlm.github.io/blog/qwen1.5/
60
+ - https://github.com/QwenLM/Qwen1.5
61
+
62
+ **Paper Abstract:**
63
+
64
+ *Large language models (LLMs) have revolutionized the field of artificial intelligence,
65
+ enabling natural language processing tasks that were previously thought to be exclusive to
66
+ humans. In this work, we introduce Qwen, the first installment of our large language model
67
+ series. Qwen is a comprehensive language model series that encompasses distinct models with
68
+ varying parameter counts. It includes Qwen, the base pretrained language models, and
69
+ Qwen-Chat, the chat models finetuned with human alignment techniques. The base language models
70
+ consistently demonstrate superior performance across a multitude of downstream tasks, and the
71
+ chat models, particularly those trained using Reinforcement Learning from Human Feedback
72
+ (RLHF), are highly competitive. The chat models possess advanced tool-use and planning
73
+ capabilities for creating agent applications, showcasing impressive performance even when
74
+ compared to bigger models on complex tasks like utilizing a code interpreter. Furthermore, we
75
+ have developed coding-specialized models, Code-Qwen and Code-Qwen-Chat, as well as
76
+ mathematics-focused models, Math-Qwen-Chat, which are built upon base language models. These
77
+ models demonstrate significantly improved performance in comparison with open-source models,
78
+ and slightly fall behind the proprietary models.*
79
+
80
+
55
81
  Parameters
56
82
  ----------
57
83
  configProtoBytes
@@ -87,31 +113,6 @@ class QwenTransformer(AnnotatorModel, HasBatchedAnnotate, HasEngine):
87
113
  This is a very computationally expensive module especially on larger
88
114
  sequence. The use of an accelerator such as GPU is recommended.
89
115
 
90
- References
91
- ----------
92
- - `Qwen Technical Report
93
- <https://arxiv.org/pdf/2309.16609.pdf>`__
94
- - https://qwenlm.github.io/blog/qwen1.5/
95
- - https://github.com/QwenLM/Qwen1.5
96
-
97
- **Paper Abstract:**
98
-
99
- *Large language models (LLMs) have revolutionized the field of artificial intelligence,
100
- enabling natural language processing tasks that were previously thought to be exclusive to
101
- humans. In this work, we introduce Qwen, the first installment of our large language model
102
- series. Qwen is a comprehensive language model series that encompasses distinct models with
103
- varying parameter counts. It includes Qwen, the base pretrained language models, and
104
- Qwen-Chat, the chat models finetuned with human alignment techniques. The base language models
105
- consistently demonstrate superior performance across a multitude of downstream tasks, and the
106
- chat models, particularly those trained using Reinforcement Learning from Human Feedback
107
- (RLHF), are highly competitive. The chat models possess advanced tool-use and planning
108
- capabilities for creating agent applications, showcasing impressive performance even when
109
- compared to bigger models on complex tasks like utilizing a code interpreter. Furthermore, we
110
- have developed coding-specialized models, Code-Qwen and Code-Qwen-Chat, as well as
111
- mathematics-focused models, Math-Qwen-Chat, which are built upon base language models. These
112
- models demonstrate significantly improved performance in comparison with open-source models,
113
- and slightly fall behind the proprietary models.*
114
-
115
116
  Examples
116
117
  --------
117
118
  >>> import sparknlp
@@ -565,7 +565,7 @@ class ContextSpellCheckerModel(AnnotatorModel, HasEngine):
565
565
 
566
566
 
567
567
  References
568
- -------------
568
+ ----------
569
569
  For an in-depth explanation of the module see the article `Applying Context
570
570
  Aware Spell Checking in Spark NLP
571
571
  <https://medium.com/spark-nlp/applying-context-aware-spell-checking-in-spark-nlp-3c29c46963bc>`__.
@@ -122,7 +122,7 @@ class PromptAssembler(AnnotatorTransformer):
122
122
  +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
123
123
  |result |
124
124
  +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
125
- |[<|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nHello there, how can I help you?<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nI need help with organizing my room.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n]|
125
+ |[<|start_header_id|>system<|end_header_id|>\\n\\nYou are a helpful assistant.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\\n\\nHello there, how can I help you?<|eot_id|><|start_header_id|>user<|end_header_id|>\\n\\nI need help with organizing my room.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\\n\\n]|
126
126
  +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
127
127
  """
128
128
 
@@ -38,7 +38,7 @@ class HasBatchedAnnotate:
38
38
  int
39
39
  Current batch size
40
40
  """
41
- return self.getOrDefault("batchSize")
41
+ return self.getOrDefault(self.batchSize)
42
42
 
43
43
 
44
44
  class HasCaseSensitiveProperties:
@@ -245,7 +245,7 @@ class HasBatchedAnnotateImage:
245
245
  int
246
246
  Current batch size
247
247
  """
248
- return self.getOrDefault("batchSize")
248
+ return self.getOrDefault(self.batchSize)
249
249
 
250
250
 
251
251
  class HasImageFeatureProperties:
@@ -402,7 +402,7 @@ class HasBatchedAnnotateAudio:
402
402
  int
403
403
  Current batch size
404
404
  """
405
- return self.getOrDefault("batchSize")
405
+ return self.getOrDefault(self.batchSize)
406
406
 
407
407
 
408
408
  class HasAudioFeatureProperties:
@@ -1099,7 +1099,7 @@ class HasLlamaCppProperties:
1099
1099
  return self._set(flashAttention=flashAttention)
1100
1100
 
1101
1101
  def setInputPrefixBos(self, inputPrefixBos: bool):
1102
- """Whether to add prefix BOS to user inputs, preceding the `--in-prefix` bool"""
1102
+ """Whether to add prefix BOS to user inputs, preceding the `--in-prefix` string"""
1103
1103
  return self._set(inputPrefixBos=inputPrefixBos)
1104
1104
 
1105
1105
  def setUseMmap(self, useMmap: bool):
@@ -1114,7 +1114,7 @@ class HasLlamaCppProperties:
1114
1114
  """Whether to disable KV offload"""
1115
1115
  return self._set(noKvOffload=noKvOffload)
1116
1116
 
1117
- def setSystemPrompt(self, systemPrompt: bool):
1117
+ def setSystemPrompt(self, systemPrompt: str):
1118
1118
  """Set a system prompt to use"""
1119
1119
  return self._set(systemPrompt=systemPrompt)
1120
1120
 
@@ -1219,7 +1219,7 @@ class HasLlamaCppProperties:
1219
1219
  """Set the amount of tokens the samplers should return at least (0 = disabled)"""
1220
1220
  return self._set(minKeep=minKeep)
1221
1221
 
1222
- def setGrammar(self, grammar: bool):
1222
+ def setGrammar(self, grammar: str):
1223
1223
  """Set BNF-like grammar to constrain generations"""
1224
1224
  return self._set(grammar=grammar)
1225
1225
 
@@ -1261,7 +1261,7 @@ class HasLlamaCppProperties:
1261
1261
  return self._call_java("setTokenBias", tokenBias)
1262
1262
 
1263
1263
  def setLoraAdapters(self, loraAdapters: Dict[str, float]):
1264
- """Set token id bias"""
1264
+ """Set LoRA adapters with their scaling factors"""
1265
1265
  return self._call_java("setLoraAdapters", loraAdapters)
1266
1266
 
1267
1267
  def getMetadata(self):
@@ -281,6 +281,16 @@ class _Gemma3ForMultiModalLoader(ExtendedJavaWrapper):
281
281
  use_openvino
282
282
  )
283
283
 
284
+ class _InternVLForMultiModalLoader(ExtendedJavaWrapper):
285
+ def __init__(self, path, jspark, use_openvino=False):
286
+ super(_InternVLForMultiModalLoader, self).__init__(
287
+ "com.johnsnowlabs.nlp.annotators.cv.InternVLForMultiModal.loadSavedModel",
288
+ path,
289
+ jspark,
290
+ use_openvino
291
+ )
292
+
293
+
284
294
  class _JanusForMultiModalLoader(ExtendedJavaWrapper):
285
295
  def __init__(self, path, jspark, use_openvino=False):
286
296
  super(_JanusForMultiModalLoader, self).__init__(
@@ -1146,3 +1156,20 @@ class _SmolVLMTransformerLoader(ExtendedJavaWrapper):
1146
1156
  jspark,
1147
1157
  use_openvino
1148
1158
  )
1159
+
1160
+ class _Florence2TransformerLoader(ExtendedJavaWrapper):
1161
+ def __init__(self, path, jspark, use_openvino=False):
1162
+ super(_Florence2TransformerLoader, self).__init__(
1163
+ "com.johnsnowlabs.nlp.annotators.cv.Florence2Transformer.loadSavedModel",
1164
+ path,
1165
+ jspark,
1166
+ use_openvino,
1167
+ )
1168
+ class _E5VEmbeddingsLoader(ExtendedJavaWrapper):
1169
+ def __init__(self, path, jspark, use_openvino=False):
1170
+ super(_E5VEmbeddingsLoader, self).__init__(
1171
+ "com.johnsnowlabs.nlp.embeddings.E5VEmbeddings.loadSavedModel",
1172
+ path,
1173
+ jspark,
1174
+ use_openvino
1175
+ )
@@ -0,0 +1,16 @@
1
+ # Copyright 2017-2025 John Snow Labs
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Module to read various types of documents into chunks"""
15
+ from sparknlp.partition.partition import *
16
+ from sparknlp.partition.partition_transformer import *