cat-llm 0.0.26__py3-none-any.whl → 0.0.27__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package. It is provided for informational purposes only and reflects the changes between the versions as published to their respective public registries.
- {cat_llm-0.0.26.dist-info → cat_llm-0.0.27.dist-info}/METADATA +1 -1
- cat_llm-0.0.27.dist-info/RECORD +9 -0
- catllm/__about__.py +1 -1
- catllm/image_functions.py +51 -48
- cat_llm-0.0.26.dist-info/RECORD +0 -9
- {cat_llm-0.0.26.dist-info → cat_llm-0.0.27.dist-info}/WHEEL +0 -0
- {cat_llm-0.0.26.dist-info → cat_llm-0.0.27.dist-info}/licenses/LICENSE +0 -0
{cat_llm-0.0.26.dist-info → cat_llm-0.0.27.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cat-llm
-Version: 0.0.26
+Version: 0.0.27
 Summary: A tool for categorizing text data and images using LLMs and vision models
 Project-URL: Documentation, https://github.com/chrissoria/cat-llm#readme
 Project-URL: Issues, https://github.com/chrissoria/cat-llm/issues
cat_llm-0.0.27.dist-info/RECORD
ADDED

@@ -0,0 +1,9 @@
+catllm/CERAD_functions.py,sha256=mtHxshRWmWXMH9kkkCfbMHXgDe00EVabjFiN8s73LPI,16935
+catllm/__about__.py,sha256=H3dYrI6XpHXpRmgCCiw8u2dIaFZWRsw7RxfRy_aIlaQ,404
+catllm/__init__.py,sha256=kLk180aJna1s-wU6CLr4_hKkbjoeET-11jGmC1pdhQw,330
+catllm/cat_llm.py,sha256=Rwyz93caNf0h9tfurObY6qDjtG6EKaYXR0GrVW7h2kU,16920
+catllm/image_functions.py,sha256=rMa7Jb565Rp75pbRKYF8Sqk_Uwuv60olMU0Mbvbq55s,27440
+cat_llm-0.0.27.dist-info/METADATA,sha256=ocT3Y5NoA9csBGpSkg0G_PCPPhbm_EgWiNicpf9Xf-I,1679
+cat_llm-0.0.27.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+cat_llm-0.0.27.dist-info/licenses/LICENSE,sha256=wJLsvOr6lrFUDcoPXExa01HOKFWrS3JC9f0RudRw8uw,1075
+cat_llm-0.0.27.dist-info/RECORD,,
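Each RECORD line above follows the standard wheel form path,sha256=<digest>,<size>: the digest is the file's SHA-256 hash encoded as URL-safe base64 with the trailing padding stripped, and the last field is the file size in bytes (the RECORD entry itself is listed with both fields empty, as the spec requires). A minimal sketch of recomputing such an entry for verification; the file path passed in is illustrative, not an assertion about this wheel's layout on your machine:

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Build a wheel-style RECORD line: path,sha256=<urlsafe b64 digest, no padding>,<size>."""
    data = Path(path).read_bytes()
    digest = hashlib.sha256(data).digest()
    # RECORD uses URL-safe base64 with the trailing '=' padding removed
    b64 = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return f"{path},sha256={b64},{len(data)}"

# Example (illustrative path): compare the result against the line shipped in the wheel
print(record_entry("catllm/__about__.py"))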
catllm/__about__.py
CHANGED
catllm/image_functions.py
CHANGED
@@ -72,29 +72,56 @@ def extract_image_multi_class(
 
     # Handle extension safely
     ext = Path(img_path).suffix.lstrip(".").lower()
-    … (removed lines 75-97; their content is not rendered in the source diff)
+    if model_source == "OpenAI":
+        encoded_image = f"data:image/{ext};base64,{encoded}"
+        prompt = [
+            {
+                "type": "text",
+                "text": (
+                    f"You are an image-tagging assistant.\n"
+                    f"Task ► Examine the attached image and decide, **for each category below**, "
+                    f"whether it is PRESENT (1) or NOT PRESENT (0).\n\n"
+                    f"Image is expected to show: {image_description}\n\n"
+                    f"Categories:\n{categories_str}\n\n"
+                    f"Output format ► Respond with **only** a JSON object whose keys are the "
+                    f"quoted category numbers ('1', '2', …) and whose values are 1 or 0. "
+                    f"No additional keys, comments, or text.\n\n"
+                    f"Example (three categories):\n"
+                    f"{example_JSON}"
+                ),
+            },
+            {
+                "type": "image_url",
+                "image_url": {"url": encoded_image, "detail": "high"},
+            },
+        ]
+
+    if model_source == "Anthropic":
+        encoded_image = f"data:image/{ext};base64,{encoded}"
+        prompt = [
+            {"type": "text",
+             "text": (
+                f"You are an image-tagging assistant.\n"
+                f"Task ► Examine the attached image and decide, **for each category below**, "
+                f"whether it is PRESENT (1) or NOT PRESENT (0).\n\n"
+                f"Image is expected to show: {image_description}\n\n"
+                f"Categories:\n{categories_str}\n\n"
+                f"Output format ► Respond with **only** a JSON object whose keys are the "
+                f"quoted category numbers ('1', '2', …) and whose values are 1 or 0. "
+                f"No additional keys, comments, or text.\n\n"
+                f"Example (three categories):\n"
+                f"{example_JSON}"
+            ),
+            },
+            {
+                "type": "image",
+                "source": {
+                    "type": "base64",
+                    "media_type": "image/jpeg",
+                    "data": encoded
+                }
+            }
+        ]
     if model_source == "OpenAI":
         from openai import OpenAI
         client = OpenAI(api_key=api_key)
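For context on the OpenAI branch added above: the new code base64-encodes the image, wraps it in a data URL, and attaches it as an image_url content part next to the text instructions. A minimal, self-contained sketch of that pattern; the image path, prompt text, and the final API call mentioned in the closing comment are illustrative assumptions, not taken from the package:

import base64
from pathlib import Path

img_path = "example.jpg"  # illustrative path
ext = Path(img_path).suffix.lstrip(".").lower()

# Base64-encode the raw image bytes, as the diff's `encoded` variable implies
with open(img_path, "rb") as f:
    encoded = base64.b64encode(f.read()).decode("utf-8")

# Data-URL form used by the added OpenAI branch: "data:image/<ext>;base64,<payload>"
encoded_image = f"data:image/{ext};base64,{encoded}"

content = [
    {"type": "text", "text": "Is a clock face PRESENT (1) or NOT PRESENT (0)? Respond with only JSON."},
    {"type": "image_url", "image_url": {"url": encoded_image, "detail": "high"}},
]
# `content` would then be sent as the user message of an OpenAI chat completion request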
@@ -111,32 +138,8 @@ def extract_image_multi_class(
             link1.append(f"Error processing input: {e}")
 
     elif model_source == "Anthropic":
-        prompt = [
-            {"type": "text",
-             "text": (
-                f"You are an image-tagging assistant.\n"
-                f"Task ► Examine the attached image and decide, **for each category below**, "
-                f"whether it is PRESENT (1) or NOT PRESENT (0).\n\n"
-                f"Image is expected to show: {image_description}\n\n"
-                f"Categories:\n{categories_str}\n\n"
-                f"Output format ► Respond with **only** a JSON object whose keys are the "
-                f"quoted category numbers ('1', '2', …) and whose values are 1 or 0. "
-                f"No additional keys, comments, or text.\n\n"
-                f"Example (three categories):\n"
-                f"{example_JSON}"
-            ),
-            },
-            {
-                "type": "image",
-                "source": {
-                    "type": "base64",
-                    "media_type": "image/jpeg",
-                    "data": encoded_image
-                }
-            }
-        ]
-
         import anthropic
+        reply = None
         client = anthropic.Anthropic(api_key=api_key)
         try:
             message = client.messages.create(
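The second hunk removes the Anthropic prompt construction from the request branch (it is now built in the provider blocks added earlier) and initializes reply = None before the API call, so the variable is defined even when the request raises. A hedged sketch of how such content blocks are typically submitted with the anthropic SDK; the model id, max_tokens value, and placeholder prompt are assumptions, not taken from the package:

import os
import anthropic

# Assumed: `prompt` is the list of text + base64 image blocks built as in the diff above
prompt = [{"type": "text", "text": "Reply with only the JSON object described earlier."}]

client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
reply = None  # defined before the request so the except branch always has a value to record
try:
    message = client.messages.create(
        model="claude-3-5-sonnet-latest",  # illustrative model id
        max_tokens=1024,
        messages=[{"role": "user", "content": prompt}],
    )
    reply = message.content[0].text  # expected to be the JSON object requested by the prompt
except Exception as e:
    reply = f"Error processing input: {e}"
print(reply)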
cat_llm-0.0.26.dist-info/RECORD
DELETED

@@ -1,9 +0,0 @@
-catllm/CERAD_functions.py,sha256=mtHxshRWmWXMH9kkkCfbMHXgDe00EVabjFiN8s73LPI,16935
-catllm/__about__.py,sha256=4jqHFlCUy8Xa_lkK5V6tmc1z_Goja59oOIaD7pFXnRs,404
-catllm/__init__.py,sha256=kLk180aJna1s-wU6CLr4_hKkbjoeET-11jGmC1pdhQw,330
-catllm/cat_llm.py,sha256=Rwyz93caNf0h9tfurObY6qDjtG6EKaYXR0GrVW7h2kU,16920
-catllm/image_functions.py,sha256=nTppbeeuYrlDmZtUkEwNwRJQ6YbBxBCYoPRU1X4GZOY,27100
-cat_llm-0.0.26.dist-info/METADATA,sha256=uXjULRmbv-RQu9Q1aU3rXuYo1d4qK-163M2i-j5Sb1o,1679
-cat_llm-0.0.26.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-cat_llm-0.0.26.dist-info/licenses/LICENSE,sha256=wJLsvOr6lrFUDcoPXExa01HOKFWrS3JC9f0RudRw8uw,1075
-cat_llm-0.0.26.dist-info/RECORD,,
{cat_llm-0.0.26.dist-info → cat_llm-0.0.27.dist-info}/WHEEL
File without changes

{cat_llm-0.0.26.dist-info → cat_llm-0.0.27.dist-info}/licenses/LICENSE
File without changes