pyconverters_openai_vision-0.5.28-py3-none-any.whl → pyconverters_openai_vision-0.5.32-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyconverters_openai_vision/__init__.py +1 -1
- pyconverters_openai_vision/openai_vision.py +12 -12
- {pyconverters_openai_vision-0.5.28.dist-info → pyconverters_openai_vision-0.5.32.dist-info}/METADATA +1 -1
- pyconverters_openai_vision-0.5.32.dist-info/RECORD +7 -0
- {pyconverters_openai_vision-0.5.28.dist-info → pyconverters_openai_vision-0.5.32.dist-info}/entry_points.txt +2 -2
- pyconverters_openai_vision-0.5.28.dist-info/RECORD +0 -7
- {pyconverters_openai_vision-0.5.28.dist-info → pyconverters_openai_vision-0.5.32.dist-info}/WHEEL +0 -0
pyconverters_openai_vision/__init__.py

@@ -1,2 +1,2 @@
 """OpenAIVision converter"""
-__version__ = "0.5.28"
+__version__ = "0.5.32"
pyconverters_openai_vision/openai_vision.py

@@ -287,22 +287,27 @@ class OpenAIVisionProcessorBaseParameters(ProcessorParameters):
     model: str = Field(
         None, extra="internal"
     )
-    prompt: str = Field(
-        """If the attached file is an image: describe the image.""",
-        description="""Contains the prompt as a string""",
-        extra="multiline",
-    )
     max_tokens: int = Field(
         16384,
         description="""The maximum number of tokens to generate in the completion.
     The token count of your prompt plus max_tokens cannot exceed the model's context length.
     Most models have a context length of 2048 tokens (except for the newest models, which support 4096).""",
     )
+    replace_refs_altTexts_by_descriptions: bool = Field(
+        True,
+        description="""Replace references to images in text by their textual description.""",
+        extra="advanced"
+    )
     system_prompt: str = Field(
-
+        "Generate a textual description of the image",
         description="""Contains the system prompt""",
         extra="multiline,advanced",
     )
+    prompt: str = Field(
+        None,
+        description="""Contains the prompt""",
+        extra="multiline",
+    )
     temperature: float = Field(
         0.1,
         description="""What sampling temperature to use, between 0 and 2.
@@ -344,11 +349,6 @@ class OpenAIVisionProcessorBaseParameters(ProcessorParameters):
     Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.""",
         extra="advanced",
     )
-    replace_refs_altTexts_by_descriptions: bool = Field(
-        True,
-        description="""Replace references to images in text by their textual description.""",
-        extra="advanced"
-    )
 
 
 class OpenAIVisionProcessorBase(ProcessorBase):
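Taken together, the two hunks above move replace_refs_altTexts_by_descriptions from after the penalty parameters to just after max_tokens, set system_prompt's default to "Generate a textual description of the image", and move prompt after system_prompt while changing its default to None. A minimal sketch of the resulting field layout, assuming pydantic's Field; BaseModel stands in for the package's ProcessorParameters base class, and the extra=... UI hints are omitted:

from pydantic import BaseModel, Field

class OpenAIVisionProcessorBaseParameters(BaseModel):
    # new in 0.5.32: image references in the text are replaced by their
    # generated descriptions by default
    replace_refs_altTexts_by_descriptions: bool = Field(
        True,
        description="Replace references to images in text by their textual description.",
    )
    # the system prompt now carries a concrete default value
    system_prompt: str = Field(
        "Generate a textual description of the image",
        description="Contains the system prompt",
    )
    # the user prompt now follows system_prompt and has no built-in default
    prompt: str = Field(
        None,
        description="Contains the prompt",
    )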
@@ -421,7 +421,7 @@ class OpenAIVisionProcessorBase(ProcessorBase):
         with add_logging_context(docid=document.identifier):
             if document.altTexts:
                 altTexts = document.altTexts
-                alts = {altText.name: altText.text for altText in
+                alts = {altText.name: altText.text for altText in altTexts}
                 anames = list(alts.keys())
                 for aname in anames:
                     atext = alts[aname]
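For reference, the line changed at 424 builds a name → text mapping from the document's alt texts, which the surrounding loop then walks. A small self-contained illustration, using a hypothetical AltText dataclass in place of the host document model's altText objects:

from dataclasses import dataclass

@dataclass
class AltText:  # hypothetical stand-in for the document's altText objects
    name: str
    text: str

altTexts = [
    AltText("figure-1.png", "A bar chart of monthly revenue"),
    AltText("figure-2.png", "A photo of the device front panel"),
]

# the completed comprehension from the diff: map each alt text name to its text
alts = {altText.name: altText.text for altText in altTexts}
anames = list(alts.keys())   # ["figure-1.png", "figure-2.png"]
atext = alts[anames[0]]      # "A bar chart of monthly revenue"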
pyconverters_openai_vision-0.5.32.dist-info/RECORD

@@ -0,0 +1,7 @@
+pyconverters_openai_vision/__init__.py,sha256=u1qzNNU3oXNQEdCLbHC2tSiBYPkFG6jkkixct0t9Zwo,52
+pyconverters_openai_vision/openai_utils.py,sha256=XI4WYZ-EAVG0Vxd5yUDuZNDgEzqHJeriScxTUusi1oo,7740
+pyconverters_openai_vision/openai_vision.py,sha256=OGr1veVTxgWxxClMNXeNCiuVrBhGA5ZpjbsoThRMXn8,23231
+pyconverters_openai_vision-0.5.32.dist-info/entry_points.txt,sha256=NR0re-yebKKyhApky1I6nDQzjJQfEyfOkJlJju0Ngzo,404
+pyconverters_openai_vision-0.5.32.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+pyconverters_openai_vision-0.5.32.dist-info/METADATA,sha256=spGRRfpd6RtKo3OJWmsXRv90ENoJpraJ9UtYS8NbYqg,2662
+pyconverters_openai_vision-0.5.32.dist-info/RECORD,,
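Each RECORD line has the form path,sha256=<digest>,<size>, where the digest is the urlsafe base64 encoding of the file's SHA-256 hash with the trailing "=" padding stripped, as specified for wheel RECORD files. A small sketch (a hypothetical helper, not part of the package) showing how such an entry can be recomputed to verify an installed file:

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Build a RECORD-style line 'path,sha256=<digest>,<size>' for one file."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode("ascii")
    return f"{path},sha256={digest},{len(data)}"

# e.g. record_entry("pyconverters_openai_vision/__init__.py") should reproduce
# the first line of the 0.5.32 RECORD above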
{pyconverters_openai_vision-0.5.28.dist-info → pyconverters_openai_vision-0.5.32.dist-info}/entry_points.txt

@@ -3,6 +3,6 @@ deepinfra_openai_vision=pyconverters_openai_vision.openai_vision:DeepInfraOpenAI
 openai_vision=pyconverters_openai_vision.openai_vision:OpenAIVisionConverter
 
 [pyprocessors.plugins]
-
-
+deepinfra_openai_vision_proc=pyconverters_openai_vision.openai_vision:DeepInfraOpenAIVisionProcessor
+openai_vision_proc=pyconverters_openai_vision.openai_vision:OpenAIVisionProcessor
 
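The two added lines register the processor variants under the pyprocessors.plugins entry-point group, next to the converter entries visible in the hunk context. As an illustration (not code shipped in the package), plugins advertised this way can be discovered and loaded with importlib.metadata on Python 3.10+:

from importlib.metadata import entry_points

# enumerate every plugin registered under the pyprocessors.plugins group
for ep in entry_points(group="pyprocessors.plugins"):
    print(ep.name, "->", ep.value)
    processor_cls = ep.load()  # e.g. OpenAIVisionProcessor for "openai_vision_proc"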
pyconverters_openai_vision-0.5.28.dist-info/RECORD

@@ -1,7 +0,0 @@
-pyconverters_openai_vision/__init__.py,sha256=qUMBGku3fY4uPdr6GKvWr74wJKeF1MyUzmtMZkmz1QU,52
-pyconverters_openai_vision/openai_utils.py,sha256=XI4WYZ-EAVG0Vxd5yUDuZNDgEzqHJeriScxTUusi1oo,7740
-pyconverters_openai_vision/openai_vision.py,sha256=uMCJpM8pA7_GvXsgC42rihI4TyF_SJgvjC7hYzgcPHM,23266
-pyconverters_openai_vision-0.5.28.dist-info/entry_points.txt,sha256=KLlvDTMJjHy0fk6mvTXFNpn0pC8UKsTJLd9wre9SOHw,394
-pyconverters_openai_vision-0.5.28.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
-pyconverters_openai_vision-0.5.28.dist-info/METADATA,sha256=rRxo9iqs3ZUR4ZjqwhDkVG5bxmV-evTazPrlX-qI6Vo,2662
-pyconverters_openai_vision-0.5.28.dist-info/RECORD,,
{pyconverters_openai_vision-0.5.28.dist-info → pyconverters_openai_vision-0.5.32.dist-info}/WHEEL
RENAMED
File without changes