cat-llm 0.0.42__tar.gz → 0.0.43__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {cat_llm-0.0.42 → cat_llm-0.0.43}/PKG-INFO +1 -1
- {cat_llm-0.0.42 → cat_llm-0.0.43}/src/catllm/CERAD_functions.py +16 -7
- {cat_llm-0.0.42 → cat_llm-0.0.43}/src/catllm/__about__.py +1 -1
- {cat_llm-0.0.42 → cat_llm-0.0.43}/src/catllm/image_functions.py +74 -36
- {cat_llm-0.0.42 → cat_llm-0.0.43}/.gitignore +0 -0
- {cat_llm-0.0.42 → cat_llm-0.0.43}/LICENSE +0 -0
- {cat_llm-0.0.42 → cat_llm-0.0.43}/README.md +0 -0
- {cat_llm-0.0.42 → cat_llm-0.0.43}/pyproject.toml +0 -0
- {cat_llm-0.0.42 → cat_llm-0.0.43}/src/catllm/__init__.py +0 -0
- {cat_llm-0.0.42 → cat_llm-0.0.43}/src/catllm/images/circle.png +0 -0
- {cat_llm-0.0.42 → cat_llm-0.0.43}/src/catllm/images/cube.png +0 -0
- {cat_llm-0.0.42 → cat_llm-0.0.43}/src/catllm/images/diamond.png +0 -0
- {cat_llm-0.0.42 → cat_llm-0.0.43}/src/catllm/images/overlapping_pentagons.png +0 -0
- {cat_llm-0.0.42 → cat_llm-0.0.43}/src/catllm/images/rectangles.png +0 -0
- {cat_llm-0.0.42 → cat_llm-0.0.43}/src/catllm/text_functions.py +0 -0
{cat_llm-0.0.42 → cat_llm-0.0.43}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cat-llm
-Version: 0.0.42
+Version: 0.0.43
 Summary: A tool for categorizing text data and images using LLMs and vision models
 Project-URL: Documentation, https://github.com/chrissoria/cat-llm#readme
 Project-URL: Issues, https://github.com/chrissoria/cat-llm/issues
{cat_llm-0.0.42 → cat_llm-0.0.43}/src/catllm/CERAD_functions.py

@@ -21,6 +21,7 @@ Areas for improvement:
 10. Test variety: expanding or adding functions to handle score more tests relevant for cogntive assesment, such as the MMSE.
 11. Error handling: improving error handling to better manage unexpected inputs or model failures.
 """
+
 def cerad_drawn_score(
     shape,
     image_input,

@@ -265,8 +266,11 @@ def cerad_drawn_score(
                 reply = response_obj.choices[0].message.content
                 link1.append(reply)
             except Exception as e:
-
-
+                if "model" in str(e).lower():
+                    raise ValueError(f"Invalid OpenAI model '{user_model}': {e}")
+                else:
+                    print("An error occurred: {e}")
+                    link1.append("Error processing input: {e}")

         elif model_source == "Anthropic" and valid_image:
             import anthropic

@@ -281,8 +285,11 @@ def cerad_drawn_score(
                 reply = message.content[0].text # Anthropic returns content as list
                 link1.append(reply)
             except Exception as e:
-
-
+                if "model" in str(e).lower():
+                    raise ValueError(f"Invalid OpenAI model '{user_model}': {e}")
+                else:
+                    print("An error occurred: {e}")
+                    link1.append("Error processing input: {e}")

         elif model_source == "Mistral" and valid_image:
             from mistralai import Mistral

@@ -299,9 +306,11 @@ def cerad_drawn_score(
                 reply = response.choices[0].message.content
                 link1.append(reply)
             except Exception as e:
-
-
-
+                if "model" in str(e).lower():
+                    raise ValueError(f"Invalid OpenAI model '{user_model}': {e}")
+                else:
+                    print("An error occurred: {e}")
+                    link1.append("Error processing input: {e}")
         #if no valid image path is provided
         elif valid_image == False:
             reply = "invalid image path"
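The recurring change in this release is the body of the exception handler around each provider call, added to every branch of CERAD_functions.py above and of image_functions.py below: errors whose message mentions "model" are re-raised as a ValueError so a bad model name fails loudly, while any other error is printed and recorded as a placeholder so the scoring loop can continue. A minimal, self-contained sketch of that pattern follows; `fake_provider_call`, `score_one`, the model names, and the `results` list are illustrative stand-ins rather than catllm code, and the f-string prefixes are added here (the released lines print the literal "{e}").

```python
# Sketch of the 0.0.43 error-handling pattern around a provider call.
# fake_provider_call stands in for the OpenAI/Anthropic/Mistral request; it is not catllm code.

def fake_provider_call(model: str) -> str:
    if model not in {"gpt-4o", "gpt-4o-mini"}:  # illustrative model names
        raise RuntimeError(f"The model `{model}` does not exist")
    return '{"score": 1}'

def score_one(user_model: str, results: list) -> None:
    try:
        reply = fake_provider_call(user_model)
        results.append(reply)
    except Exception as e:
        # Errors that mention "model" are treated as a bad model name and re-raised.
        if "model" in str(e).lower():
            raise ValueError(f"Invalid model '{user_model}': {e}")
        else:
            # Anything else is logged and recorded so a batch loop keeps going.
            print(f"An error occurred: {e}")
            results.append(f"Error processing input: {e}")

results = []
score_one("gpt-4o", results)           # appends the reply
# score_one("not-a-model", results)    # would raise ValueError
print(results)
```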
{cat_llm-0.0.42 → cat_llm-0.0.43}/src/catllm/image_functions.py

@@ -148,8 +148,11 @@ def image_multi_class(
                 reply = response_obj.choices[0].message.content
                 link1.append(reply)
             except Exception as e:
-
-
+                if "model" in str(e).lower():
+                    raise ValueError(f"Invalid OpenAI model '{user_model}': {e}")
+                else:
+                    print("An error occurred: {e}")
+                    link1.append("Error processing input: {e}")

         elif model_source == "Anthropic":
             import anthropic

@@ -165,8 +168,11 @@ def image_multi_class(
                 reply = message.content[0].text
                 link1.append(reply)
             except Exception as e:
-
-
+                if "model" in str(e).lower():
+                    raise ValueError(f"Invalid OpenAI model '{user_model}': {e}")
+                else:
+                    print("An error occurred: {e}")
+                    link1.append("Error processing input: {e}")

         elif model_source == "Mistral":
             from mistralai import Mistral

@@ -182,8 +188,11 @@ def image_multi_class(
                 reply = response.choices[0].message.content
                 link1.append(reply)
             except Exception as e:
-
-
+                if "model" in str(e).lower():
+                    raise ValueError(f"Invalid OpenAI model '{user_model}': {e}")
+                else:
+                    print("An error occurred: {e}")
+                    link1.append("Error processing input: {e}")
         #if no valid image path is provided
         elif valid_image == False:
             reply = "invalid image path"

@@ -436,8 +445,11 @@ def image_score_drawing(
                 reply = response_obj.choices[0].message.content
                 link1.append(reply)
             except Exception as e:
-
-
+                if "model" in str(e).lower():
+                    raise ValueError(f"Invalid OpenAI model '{user_model}': {e}")
+                else:
+                    print("An error occurred: {e}")
+                    link1.append("Error processing input: {e}")

         elif model_source == "Anthropic":
             import anthropic

@@ -452,8 +464,11 @@ def image_score_drawing(
                 reply = message.content[0].text # Anthropic returns content as list
                 link1.append(reply)
             except Exception as e:
-
-
+                if "model" in str(e).lower():
+                    raise ValueError(f"Invalid OpenAI model '{user_model}': {e}")
+                else:
+                    print("An error occurred: {e}")
+                    link1.append("Error processing input: {e}")

         elif model_source == "Mistral":
             from mistralai import Mistral

@@ -469,8 +484,11 @@ def image_score_drawing(
                 reply = response.choices[0].message.content
                 link1.append(reply)
             except Exception as e:
-
-
+                if "model" in str(e).lower():
+                    raise ValueError(f"Invalid OpenAI model '{user_model}': {e}")
+                else:
+                    print("An error occurred: {e}")
+                    link1.append("Error processing input: {e}")
         #if no valid image path is provided
         elif valid_image == False:
             reply = "invalid image path"
@@ -567,10 +585,6 @@ def image_features(
     import base64
     from pathlib import Path

-    if save_directory is not None and not os.path.isdir(save_directory):
-        # Directory doesn't exist - raise an exception to halt execution
-        raise FileNotFoundError(f"Directory {save_directory} doesn't exist")
-
     image_extensions = [
         '*.png', '*.jpg', '*.jpeg',
         '*.gif', '*.webp', '*.svg', '*.svgz', '*.avif', '*.apng',

@@ -595,26 +609,35 @@ def image_features(
     cat_num = len(features_to_extract)
     category_dict = {str(i+1): "0" for i in range(cat_num)}
     example_JSON = json.dumps(category_dict, indent=4)
-
-    # ensure number of categories is what user wants
-    print("\nThe image features to be extracted are:")
-    for i, cat in enumerate(features_to_extract, 1):
-        print(f"{i}. {cat}")

     link1 = []
     extracted_jsons = []

-    for i, img_path in enumerate(
-
+    for i, img_path in enumerate(tqdm(image_files, desc="Scoring images"), start=0):
+        # Check validity first
         if img_path is None or not os.path.exists(img_path):
             link1.append("Skipped NaN input or invalid path")
             extracted_jsons.append("""{"no_valid_image": 1}""")
             continue # Skip the rest of the loop iteration
-
-
-
-
-
+
+        # Only open the file if path is valid
+        if os.path.isdir(img_path):
+            encoded = "Not a Valid Image, contains file path"
+        else:
+            try:
+                with open(img_path, "rb") as f:
+                    encoded = base64.b64encode(f.read()).decode("utf-8")
+            except Exception as e:
+                encoded = f"Error: {str(e)}"
+        # Handle extension safely
+        if encoded.startswith("Error:") or encoded == "Not a Valid Image, contains file path":
+            encoded_image = encoded
+            valid_image = False
+
+        else:
+            ext = Path(img_path).suffix.lstrip(".").lower()
+            encoded_image = f"data:image/{ext};base64,{encoded}"
+            valid_image = True

         if model_source == "OpenAI" or model_source == "Mistral":
             prompt = [

@@ -692,8 +715,11 @@ def image_features(
                 reply = response_obj.choices[0].message.content
                 link1.append(reply)
             except Exception as e:
-
-
+                if "model" in str(e).lower():
+                    raise ValueError(f"Invalid OpenAI model '{user_model}': {e}")
+                else:
+                    print("An error occurred: {e}")
+                    link1.append("Error processing input: {e}")

         elif model_source == "Perplexity":
             from openai import OpenAI

@@ -707,8 +733,12 @@ def image_features(
                 reply = response_obj.choices[0].message.content
                 link1.append(reply)
             except Exception as e:
-
-
+                if "model" in str(e).lower():
+                    raise ValueError(f"Invalid OpenAI model '{user_model}': {e}")
+                else:
+                    print("An error occurred: {e}")
+                    link1.append("Error processing input: {e}")
+
         elif model_source == "Anthropic":
             import anthropic
             client = anthropic.Anthropic(api_key=api_key)

@@ -722,8 +752,12 @@ def image_features(
                 reply = message.content[0].text # Anthropic returns content as list
                 link1.append(reply)
             except Exception as e:
-
-
+                if "model" in str(e).lower():
+                    raise ValueError(f"Invalid OpenAI model '{user_model}': {e}")
+                else:
+                    print("An error occurred: {e}")
+                    link1.append("Error processing input: {e}")
+
         elif model_source == "Mistral":
             from mistralai import Mistral
             client = Mistral(api_key=api_key)

@@ -738,8 +772,12 @@ def image_features(
                 reply = response.choices[0].message.content
                 link1.append(reply)
             except Exception as e:
-
-
+                if "model" in str(e).lower():
+                    raise ValueError(f"Invalid OpenAI model '{user_model}': {e}")
+                else:
+                    print("An error occurred: {e}")
+                    link1.append("Error processing input: {e}")
+
         elif valid_image == False:
             print("Skipped NaN input or invalid path")
             reply = None
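The image_features hunks above drop the upfront save_directory check and the per-feature echo, wrap the image loop in a tqdm progress bar, and add a guarded encoding step before any API call: directories and unreadable files are flagged instead of raising, and readable files are base64-encoded into a data URI whose image subtype is taken from the file extension. A self-contained sketch of that encoding step (the function name `encode_image_for_api` and the example path are illustrative, not catllm code):

```python
import base64
import os
from pathlib import Path

def encode_image_for_api(img_path: str) -> tuple[str, bool]:
    """Mirror of the 0.0.43 encoding step: returns (payload, valid_image)."""
    if os.path.isdir(img_path):
        encoded = "Not a Valid Image, contains file path"
    else:
        try:
            with open(img_path, "rb") as f:
                encoded = base64.b64encode(f.read()).decode("utf-8")
        except Exception as e:
            encoded = f"Error: {str(e)}"

    # Bad inputs are passed through as sentinel strings and flagged invalid.
    if encoded.startswith("Error:") or encoded == "Not a Valid Image, contains file path":
        return encoded, False

    # Build a data URI; the MIME subtype comes straight from the file extension.
    ext = Path(img_path).suffix.lstrip(".").lower()
    return f"data:image/{ext};base64,{encoded}", True

payload, valid_image = encode_image_for_api("example.png")  # hypothetical path
print(valid_image, payload[:40])
```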