cat-llm 0.0.37__tar.gz → 0.0.38__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {cat_llm-0.0.37 → cat_llm-0.0.38}/PKG-INFO +3 -3
- {cat_llm-0.0.37 → cat_llm-0.0.38}/README.md +2 -2
- {cat_llm-0.0.37 → cat_llm-0.0.38}/src/catllm/CERAD_functions.py +10 -4
- {cat_llm-0.0.37 → cat_llm-0.0.38}/src/catllm/__about__.py +1 -1
- {cat_llm-0.0.37 → cat_llm-0.0.38}/LICENSE +0 -0
- {cat_llm-0.0.37 → cat_llm-0.0.38}/pyproject.toml +0 -0
- {cat_llm-0.0.37 → cat_llm-0.0.38}/src/catllm/__init__.py +0 -0
- {cat_llm-0.0.37 → cat_llm-0.0.38}/src/catllm/image_functions.py +0 -0
- {cat_llm-0.0.37 → cat_llm-0.0.38}/src/catllm/images/circle.png +0 -0
- {cat_llm-0.0.37 → cat_llm-0.0.38}/src/catllm/images/cube.png +0 -0
- {cat_llm-0.0.37 → cat_llm-0.0.38}/src/catllm/images/diamond.png +0 -0
- {cat_llm-0.0.37 → cat_llm-0.0.38}/src/catllm/images/overlapping_pentagons.png +0 -0
- {cat_llm-0.0.37 → cat_llm-0.0.38}/src/catllm/images/rectangles.png +0 -0
- {cat_llm-0.0.37 → cat_llm-0.0.38}/src/catllm/text_functions.py +0 -0
{cat_llm-0.0.37 → cat_llm-0.0.38}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cat-llm
-Version: 0.0.37
+Version: 0.0.38
 Summary: A tool for categorizing text data and images using LLMs and vision models
 Project-URL: Documentation, https://github.com/chrissoria/cat-llm#readme
 Project-URL: Issues, https://github.com/chrissoria/cat-llm/issues
@@ -368,10 +368,10 @@ Processes each image individually, evaluating the drawn shapes based on CERAD cr
 - `user_model` (str, default="gpt-4o"): Specific model to use
 - `creativity` (float, default=0): Temperature/randomness setting (0.0-1.0)
 - `reference_in_image` (bool, default=False): Whether a reference shape is present in the image for comparison
-- `provide_reference` (bool, default=False): Whether to provide a reference example image
+- `provide_reference` (bool, default=False): Whether to provide a reference example image (built in reference image)
 - `safety` (bool, default=False): Enable safety checks and save results at each API call step
 - `filename` (str, default="categorized_data.csv"): Filename for CSV output
-- `model_source` (str, default="OpenAI"): Model provider ("OpenAI", "Anthropic", "
+- `model_source` (str, default="OpenAI"): Model provider ("OpenAI", "Anthropic", "Mistral")
 
 **Returns:**
 - `pandas.DataFrame`: DataFrame with image paths, CERAD scores, and analysis details
{cat_llm-0.0.37 → cat_llm-0.0.38}/README.md

@@ -342,10 +342,10 @@ Processes each image individually, evaluating the drawn shapes based on CERAD cr
 - `user_model` (str, default="gpt-4o"): Specific model to use
 - `creativity` (float, default=0): Temperature/randomness setting (0.0-1.0)
 - `reference_in_image` (bool, default=False): Whether a reference shape is present in the image for comparison
-- `provide_reference` (bool, default=False): Whether to provide a reference example image
+- `provide_reference` (bool, default=False): Whether to provide a reference example image (built in reference image)
 - `safety` (bool, default=False): Enable safety checks and save results at each API call step
 - `filename` (str, default="categorized_data.csv"): Filename for CSV output
-- `model_source` (str, default="OpenAI"): Model provider ("OpenAI", "Anthropic", "
+- `model_source` (str, default="OpenAI"): Model provider ("OpenAI", "Anthropic", "Mistral")
 
 **Returns:**
 - `pandas.DataFrame`: DataFrame with image paths, CERAD scores, and analysis details
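The README hunks above document the parameters of `cerad_drawn_score`, the function changed in this release. A call that uses the updated `provide_reference` and `model_source` options might look like the sketch below; the import path, the `image_input` argument, and the `api_key` argument are assumptions not confirmed by this diff, while the keyword parameters mirror the documented list.

```python
# Hypothetical usage sketch based on the documented parameter list.
# `image_input` and `api_key` are assumed names, not taken from this diff.
import catllm

scores = catllm.cerad_drawn_score(
    image_input="drawings/",        # assumed: folder or list of drawing images
    api_key="sk-...",               # assumed: provider credential
    user_model="gpt-4o",            # specific model to use
    creativity=0,                   # temperature/randomness (0.0-1.0)
    reference_in_image=False,       # no reference shape drawn in the image itself
    provide_reference=True,         # attach the built-in reference image (updated in 0.0.38)
    safety=True,                    # save results after each API call
    filename="cerad_scores.csv",    # CSV output filename
    model_source="OpenAI",          # "OpenAI", "Anthropic", or "Mistral"
)
print(scores.head())                # returns a pandas.DataFrame with image paths and CERAD scores
```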
{cat_llm-0.0.37 → cat_llm-0.0.38}/src/catllm/CERAD_functions.py

@@ -229,12 +229,18 @@ def cerad_drawn_score(
                     f"Example:\n"
                     f"{example_JSON}"
                 ),
-            },
-            {
-                "type": "image_url",
-                "image_url": f"data:image/jpeg;base64,{encoded}"
             }
         ]
+        if provide_reference:
+            prompt.append({
+                "type": "image_url",
+                "image_url": f"data:image/{ext};base64,{encoded_ref}"
+            })
+
+        prompt.append({
+            "type": "image_url",
+            "image_url": f"data:image/{ext};base64,{encoded_image}"
+        })
 
         if model_source == "OpenAI":
             from openai import OpenAI
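Taken on its own, this change moves the image attachment out of the literal prompt list and into conditional `prompt.append(...)` calls, so the built-in reference image can be attached ahead of the participant's drawing when `provide_reference` is set. The sketch below illustrates that pattern in isolation; the `encode_image` helper and the file paths are assumptions added to make the snippet self-contained, and only the append structure mirrors the hunk above.

```python
# Illustrative sketch of the prompt-building pattern introduced in 0.0.38.
# encode_image and the example paths are assumptions; only the
# prompt.append(...) structure mirrors the diff.
import base64
from pathlib import Path

def encode_image(path: str) -> tuple[str, str]:
    """Return (file extension, base64-encoded contents) for an image file."""
    p = Path(path)
    ext = p.suffix.lstrip(".").lower() or "jpeg"
    return ext, base64.b64encode(p.read_bytes()).decode("utf-8")

provide_reference = True
ext, encoded_image = encode_image("participant_circle.png")

# The text portion of the prompt is assembled first (abbreviated here).
prompt = [{"type": "text", "text": "Score the drawn shape against CERAD criteria."}]

if provide_reference:
    # The packaged reference image is appended before the participant's drawing.
    _, encoded_ref = encode_image("images/circle.png")
    prompt.append({
        "type": "image_url",
        "image_url": f"data:image/{ext};base64,{encoded_ref}"
    })

prompt.append({
    "type": "image_url",
    "image_url": f"data:image/{ext};base64,{encoded_image}"
})
```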