cat-llm 0.0.20__tar.gz → 0.0.21__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {cat_llm-0.0.20 → cat_llm-0.0.21}/PKG-INFO +1 -1
- {cat_llm-0.0.20 → cat_llm-0.0.21}/src/catllm/__about__.py +1 -1
- {cat_llm-0.0.20 → cat_llm-0.0.21}/src/catllm/__init__.py +1 -1
- {cat_llm-0.0.20 → cat_llm-0.0.21}/src/catllm/cat_llm.py +331 -3
- {cat_llm-0.0.20 → cat_llm-0.0.21}/LICENSE +0 -0
- {cat_llm-0.0.20 → cat_llm-0.0.21}/README.md +0 -0
- {cat_llm-0.0.20 → cat_llm-0.0.21}/pyproject.toml +0 -0
- {cat_llm-0.0.20 → cat_llm-0.0.21}/src/catllm/CERAD_functions.py +0 -0
{cat_llm-0.0.20 → cat_llm-0.0.21}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cat-llm
-Version: 0.0.20
+Version: 0.0.21
 Summary: A tool for categorizing text data and images using LLMs and vision models
 Project-URL: Documentation, https://github.com/chrissoria/cat-llm#readme
 Project-URL: Issues, https://github.com/chrissoria/cat-llm/issues
{cat_llm-0.0.20 → cat_llm-0.0.21}/src/catllm/cat_llm.py
@@ -1063,13 +1063,341 @@ def extract_image_features(
         'json': pd.Series(extracted_jsons).reset_index(drop=True)
     })
     categorized_data = pd.concat([categorized_data, normalized_data], axis=1)
-
-    if columns != "numbered": #if user wants text columns
-        categorized_data.columns = list(categorized_data.columns[:3]) + categories[:len(categorized_data.columns) - 3]
 
     if to_csv:
         if save_directory is None:
             save_directory = os.getcwd()
         categorized_data.to_csv(os.path.join(save_directory, filename), index=False)
 
+    return categorized_data
+
+# image multi-class (binary) function
+def cerad_score(
+    shape,
+    image_input,
+    api_key,
+    user_model="gpt-4o-2024-11-20",
+    creativity=0,
+    safety=False,
+    filename="categorized_data.csv",
+    model_source="OpenAI"
+):
+    import os
+    import json
+    import pandas as pd
+    import regex
+    from tqdm import tqdm
+    import glob
+    import base64
+    from pathlib import Path
+
+    shape = shape.lower()
+
+    if shape == "circle":
+        categories = ["It has a drawing of a circle",
+                      "The drawing does not resemble a circle",
+                      "The drawing resembles a circle",
+                      "The circle is closed",
+                      "The circle is almost closed",
+                      "The circle is circular",
+                      "The circle is almost circular",
+                      "None of the above descriptions apply"]
+    elif shape == "diamond":
+        categories = ["It has a drawing of a diamond",
+                      "It has a drawing of a square",
+                      "A drawn shape DOES NOT resemble a diamond",
+                      "A drawn shape resembles a diamond",
+                      "The drawn shape has 4 sides",
+                      "The drawn shape sides are about equal",
+                      "If a diamond is drawn it's more elaborate than a simple diamond (such as overlapping diamonds or a diamond with an extras lines inside)",
+                      "None of the above descriptions apply"]
+    elif shape == "rectangles" or shape == "overlapping rectangles":
+        categories = ["It has a drawing of overlapping rectangles",
+                      "A drawn shape DOES NOT resemble a overlapping rectangles",
+                      "A drawn shape resembles a overlapping rectangles",
+                      "Rectangle 1 has 4 sides",
+                      "Rectangle 2 has 4 sides",
+                      "The rectangles are overlapping",
+                      "The rectangles overlap contains a longer vertical rectangle with top and bottom portruding",
+                      "None of the above descriptions apply"]
+    elif shape == "cube":
+        categories = ["The image contains a drawing that clearly represents a cube (3D box shape)",
+                      "The image does NOT contain any drawing that resembles a cube or 3D box",
+                      "The image contains a WELL-DRAWN recognizable cube with proper 3D perspective",
+                      "If a cube is present: the front face appears as a square or diamond shape",
+                      "If a cube is present: internal/hidden edges are visible (showing 3D depth, not just an outline)",
+                      "If a cube is present: the front and back faces appear parallel to each other",
+                      "The image contains only a 2D square (flat shape, no 3D appearance)",
+                      "None of the above descriptions apply"]
+    else:
+        raise ValueError("Invalid shape! Choose from 'circle', 'diamond', 'rectangles', or 'cube'.")
+
+    image_extensions = [
+        '*.png', '*.jpg', '*.jpeg',
+        '*.gif', '*.webp', '*.svg', '*.svgz', '*.avif', '*.apng',
+        '*.tif', '*.tiff', '*.bmp',
+        '*.heif', '*.heic', '*.ico',
+        '*.psd'
+    ]
+
+    if not isinstance(image_input, list):
+        # If image_input is a filepath (string)
+        image_files = []
+        for ext in image_extensions:
+            image_files.extend(glob.glob(os.path.join(image_input, ext)))
+
+        print(f"Found {len(image_files)} images.")
+    else:
+        # If image_files is already a list
+        image_files = image_input
+        print(f"Provided a list of {len(image_input)} images.")
+
+    categories_str = "\n".join(f"{i + 1}. {cat}" for i, cat in enumerate(categories))
+    cat_num = len(categories)
+    category_dict = {str(i+1): "0" for i in range(cat_num)}
+    example_JSON = json.dumps(category_dict, indent=4)
+
+    link1 = []
+    extracted_jsons = []
+
+    for i, img_path in enumerate(tqdm(image_files, desc="Categorising images"), start=0):
+        # Check validity first
+        if img_path is None or not os.path.exists(img_path):
+            link1.append("Skipped NaN input or invalid path")
+            extracted_jsons.append("""{"no_valid_image": 1}""")
+            continue  # Skip the rest of the loop iteration
+
+        # Only open the file if path is valid
+        with open(img_path, "rb") as f:
+            encoded = base64.b64encode(f.read()).decode("utf-8")
+
+        # Handle extension safely
+        ext = Path(img_path).suffix.lstrip(".").lower()
+        encoded_image = f"data:image/{ext};base64,{encoded}"
+
+        prompt = [
+            {
+                "type": "text",
+                "text": (
+                    f"You are an image-tagging assistant trained in the CERAD Constructional Praxis test.\n"
+                    f"Task ► Examine the attached image and decide, **for each category below**, "
+                    f"whether it is PRESENT (1) or NOT PRESENT (0).\n\n"
+                    f"Image is expected to show within it a drawing of a {shape}.\n\n"
+                    f"Categories:\n{categories_str}\n\n"
+                    f"Output format ► Respond with **only** a JSON object whose keys are the "
+                    f"quoted category numbers ('1', '2', …) and whose values are 1 or 0. "
+                    f"No additional keys, comments, or text.\n\n"
+                    f"Example:\n"
+                    f"{example_JSON}"
+                ),
+            },
+            {
+                "type": "image_url",
+                "image_url": {"url": encoded_image, "detail": "high"},
+            },
+        ]
+        if model_source == "OpenAI":
+            from openai import OpenAI
+            client = OpenAI(api_key=api_key)
+            try:
+                response_obj = client.chat.completions.create(
+                    model=user_model,
+                    messages=[{'role': 'user', 'content': prompt}],
+                    temperature=creativity
+                )
+                reply = response_obj.choices[0].message.content
+                link1.append(reply)
+            except Exception as e:
+                print(f"An error occurred: {e}")
+                link1.append(f"Error processing input: {e}")
+
+        elif model_source == "Perplexity":
+            from openai import OpenAI
+            client = OpenAI(api_key=api_key, base_url="https://api.perplexity.ai")
+            try:
+                response_obj = client.chat.completions.create(
+                    model=user_model,
+                    messages=[{'role': 'user', 'content': prompt}],
+                    temperature=creativity
+                )
+                reply = response_obj.choices[0].message.content
+                link1.append(reply)
+            except Exception as e:
+                print(f"An error occurred: {e}")
+                link1.append(f"Error processing input: {e}")
+        elif model_source == "Anthropic":
+            import anthropic
+            client = anthropic.Anthropic(api_key=api_key)
+            try:
+                message = client.messages.create(
+                    model=user_model,
+                    max_tokens=1024,
+                    temperature=creativity,
+                    messages=[{"role": "user", "content": prompt}]
+                )
+                reply = message.content[0].text  # Anthropic returns content as list
+                link1.append(reply)
+            except Exception as e:
+                print(f"An error occurred: {e}")
+                link1.append(f"Error processing input: {e}")
+        elif model_source == "Mistral":
+            from mistralai import Mistral
+            client = Mistral(api_key=api_key)
+            try:
+                response = client.chat.complete(
+                    model=user_model,
+                    messages=[
+                        {'role': 'user', 'content': prompt}
+                    ],
+                    temperature=creativity
+                )
+                reply = response.choices[0].message.content
+                link1.append(reply)
+            except Exception as e:
+                print(f"An error occurred: {e}")
+                link1.append(f"Error processing input: {e}")
+        else:
+            raise ValueError("Unknown source! Choose from OpenAI, Anthropic, Perplexity, or Mistral")
+        # in situation that no JSON is found
+        if reply is not None:
+            extracted_json = regex.findall(r'\{(?:[^{}]|(?R))*\}', reply, regex.DOTALL)
+            if extracted_json:
+                cleaned_json = extracted_json[0].replace('[', '').replace(']', '').replace('\n', '').replace(" ", '').replace(" ", '')
+                extracted_jsons.append(cleaned_json)
+                #print(cleaned_json)
+            else:
+                error_message = """{"1":"e"}"""
+                extracted_jsons.append(error_message)
+                print(error_message)
+        else:
+            error_message = """{"1":"e"}"""
+            extracted_jsons.append(error_message)
+            #print(error_message)
+
+        # --- Safety Save ---
+        if safety:
+            #print(f"Saving CSV to: {save_directory}")
+            # Save progress so far
+            temp_df = pd.DataFrame({
+                'image_input': image_files[:i+1],
+                'link1': link1,
+                'json': extracted_jsons
+            })
+            # Normalize processed jsons so far
+            normalized_data_list = []
+            for json_str in extracted_jsons:
+                try:
+                    parsed_obj = json.loads(json_str)
+                    normalized_data_list.append(pd.json_normalize(parsed_obj))
+                except json.JSONDecodeError:
+                    normalized_data_list.append(pd.DataFrame({"1": ["e"]}))
+            normalized_data = pd.concat(normalized_data_list, ignore_index=True)
+            temp_df = pd.concat([temp_df, normalized_data], axis=1)
+            # Save to CSV
+            if filename is None:
+                filepath = os.path.join(os.getcwd(), 'catllm_data.csv')
+            else:
+                filepath = filename
+            temp_df.to_csv(filepath, index=False)
+
+    # --- Final DataFrame ---
+    normalized_data_list = []
+    for json_str in extracted_jsons:
+        try:
+            parsed_obj = json.loads(json_str)
+            normalized_data_list.append(pd.json_normalize(parsed_obj))
+        except json.JSONDecodeError:
+            normalized_data_list.append(pd.DataFrame({"1": ["e"]}))
+    normalized_data = pd.concat(normalized_data_list, ignore_index=True)
+
+    categorized_data = pd.DataFrame({
+        'image_input': image_files,
+        'link1': pd.Series(link1).reset_index(drop=True),
+        'json': pd.Series(extracted_jsons).reset_index(drop=True)
+    })
+    categorized_data = pd.concat([categorized_data, normalized_data], axis=1)
+    columns_to_convert = ["1", "2", "3", "4", "5", "6", "7"]
+    categorized_data[columns_to_convert] = categorized_data[columns_to_convert].apply(pd.to_numeric, errors='coerce').fillna(0).astype(int)
+
+    if shape == "circle":
+
+        categorized_data = categorized_data.rename(columns={
+            "1": "drawing_present",
+            "2": "not_similar",
+            "3": "similar",
+            "4": "cir_closed",
+            "5": "cir_almost_closed",
+            "6": "cir_round",
+            "7": "cir_almost_round",
+            "8": "none"
+        })
+
+        categorized_data['score'] = categorized_data['cir_almost_closed'] + categorized_data['cir_closed'] + categorized_data['cir_round'] + categorized_data['cir_almost_round']
+        categorized_data.loc[categorized_data['none'] == 1, 'score'] = 0
+        categorized_data.loc[(categorized_data['drawing_present'] == 0) & (categorized_data['score'] == 0), 'score'] = 0
+
+    elif shape == "diamond":
+
+        categorized_data = categorized_data.rename(columns={
+            "1": "drawing_present",
+            "2": "diamond_square",
+            "3": "not_similar",
+            "4": "similar",
+            "5": "diamond_4_sides",
+            "6": "diamond_equal_sides",
+            "7": "complex_diamond",
+            "8": "none"
+        })
+
+        categorized_data['score'] = categorized_data['diamond_4_sides'] + categorized_data['diamond_equal_sides'] + categorized_data['similar']
+
+        categorized_data.loc[categorized_data['none'] == 1, 'score'] = 0
+        categorized_data.loc[(categorized_data['diamond_square'] == 1) & (categorized_data['score'] == 0), 'score'] = 2
+
+    elif shape == "rectangles" or shape == "overlapping rectangles":
+
+        categorized_data = categorized_data.rename(columns={
+            "1": "drawing_present",
+            "2": "not_similar",
+            "3": "similar",
+            "4": "r1_4_sides",
+            "5": "r2_4_sides",
+            "6": "rectangles_overlap",
+            "7": "rectangles_cross",
+            "8": "none"
+        })
+
+        categorized_data['score'] = 0
+        categorized_data.loc[(categorized_data['r1_4_sides'] == 1) & (categorized_data['r2_4_sides'] == 1), 'score'] = 1
+        categorized_data.loc[(categorized_data['rectangles_overlap'] == 1) & (categorized_data['rectangles_cross'] == 1), 'score'] += 1
+        categorized_data.loc[categorized_data['none'] == 1, 'score'] = 0
+
+    elif shape == "cube":
+
+        categorized_data = categorized_data.rename(columns={
+            "1": "drawing_present",
+            "2": "not_similar",
+            "3": "similar",
+            "4": "cube_front_face",
+            "5": "cube_internal_lines",
+            "6": "cube_opposite_sides",
+            "7": "square_only",
+            "8": "none"
+        })
+
+        categorized_data['score'] = categorized_data['cube_front_face'] + categorized_data['cube_internal_lines'] + categorized_data['cube_opposite_sides'] + categorized_data['similar']
+        categorized_data.loc[categorized_data['similar'] == 1, 'score'] = categorized_data['score'] + 1
+        categorized_data.loc[categorized_data['none'] == 1, 'score'] = 0
+        categorized_data.loc[(categorized_data['drawing_present'] == 0) & (categorized_data['score'] == 0), 'score'] = 0
+        categorized_data.loc[(categorized_data['not_similar'] == 1) & (categorized_data['score'] == 0), 'score'] = 0
+        categorized_data.loc[categorized_data['score'] > 4, 'score'] = 4
+
+    else:
+        raise ValueError("Invalid shape! Choose from 'circle', 'diamond', 'rectangles', or 'cube'.")
+
+    categorized_data.loc[categorized_data['no_valid_image'] == 1, 'score'] = None
+
+    if filename is not None:
+        categorized_data.to_csv(filename, index=False)
+
     return categorized_data
{cat_llm-0.0.20 → cat_llm-0.0.21}/LICENSE: file without changes
{cat_llm-0.0.20 → cat_llm-0.0.21}/README.md: file without changes
{cat_llm-0.0.20 → cat_llm-0.0.21}/pyproject.toml: file without changes
{cat_llm-0.0.20 → cat_llm-0.0.21}/src/catllm/CERAD_functions.py: file without changes
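For context, the 331 added lines in src/catllm/cat_llm.py introduce the cerad_score function. Below is a minimal usage sketch, not an official example from the package: the import path is inferred from the module location shown in this diff, and the API key, image directory, and output filename are hypothetical placeholders.

from catllm.cat_llm import cerad_score

# Hypothetical example: score a folder of CERAD circle drawings with an OpenAI vision model.
# "sk-..." and "drawings/" are placeholders, not values shipped with the package.
scores = cerad_score(
    shape="circle",              # also accepts "diamond", "rectangles", or "cube"
    image_input="drawings/",     # a directory is globbed for image files; a list of paths also works
    api_key="sk-...",
    model_source="OpenAI",       # or "Anthropic", "Perplexity", "Mistral"
    safety=True,                 # writes partial results to `filename` after each image
    filename="cerad_circle_scores.csv",
)
print(scores[["image_input", "score"]])

The returned DataFrame carries one row per image, the raw model reply (link1), the extracted JSON, the per-category indicator columns, and the derived CERAD-style score.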