@modular-prompt/driver 0.9.0 → 0.9.1
package/package.json CHANGED
@@ -271,7 +271,7 @@ def generate_text_vlm(prompt, images, options):
 
     for response in vlm_stream_generate(
         model, processor, prompt,
-        image=images,
+        image=images if images else None,
         max_tokens=max_tokens,
         temperature=temperature,
     ):
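
The first change guards the image argument: an empty list is collapsed to None before it reaches vlm_stream_generate. The likely intent (an assumption here, since vlm_stream_generate's contract is not part of this diff) is that the generator only takes its text-only path when image is None, whereas [] would still be routed through image preprocessing. The pattern in isolation, using a hypothetical normalize_images helper:

def normalize_images(images):
    # Collapse falsy values ([] or None) to None so callers can rely on
    # a single `is None` check instead of distinguishing [] from None.
    return images if images else None

assert normalize_images([]) is None
assert normalize_images(None) is None
assert normalize_images(["photo.png"]) == ["photo.png"]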
@@ -358,7 +358,7 @@ def main():
     tools = req.get('tools')
     images = req.get('images', [])
 
-    if model_kind == "vlm"
+    if model_kind == "vlm":
         max_image_size = req.get('maxImageSize', 768)
         handle_chat_vlm(messages, images, options, max_image_size)
     else:
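
The second change is a pure syntax fix: 0.9.0 shipped if model_kind == "vlm" without the trailing colon. Because Python parses an entire module before executing any of it, this was not a VLM-only bug; every invocation of the script would fail with a SyntaxError at parse time, regardless of model_kind. A quick self-contained check:

import ast

broken = 'if model_kind == "vlm"\n    pass\n'
fixed = 'if model_kind == "vlm":\n    pass\n'

try:
    ast.parse(broken)   # the 0.9.0 form
except SyntaxError as e:
    print("0.9.0 fails to parse:", e.msg)

ast.parse(fixed)        # the 0.9.1 form parses cleanly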