cat-llm 0.0.80__tar.gz → 0.0.81__tar.gz
This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- {cat_llm-0.0.80 → cat_llm-0.0.81}/PKG-INFO +1 -1
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/__about__.py +1 -1
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/text_functions.py +43 -17
- {cat_llm-0.0.80 → cat_llm-0.0.81}/.gitignore +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/LICENSE +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/README.md +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/pyproject.toml +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/CERAD_functions.py +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/__init__.py +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/build_web_research.py +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/calls/CoVe.py +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/calls/__init__.py +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/calls/all_calls.py +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/image_functions.py +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/images/circle.png +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/images/cube.png +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/images/diamond.png +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/images/overlapping_pentagons.png +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/images/rectangles.png +0 -0
- {cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/model_reference_list.py +0 -0
```diff
{cat_llm-0.0.80 → cat_llm-0.0.81}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cat-llm
-Version: 0.0.80
+Version: 0.0.81
 Summary: A tool for categorizing text data and images using LLMs and vision models
 Project-URL: Documentation, https://github.com/chrissoria/cat-llm#readme
 Project-URL: Issues, https://github.com/chrissoria/cat-llm/issues
```
```diff
{cat_llm-0.0.80 → cat_llm-0.0.81}/src/catllm/text_functions.py
@@ -8,7 +8,7 @@ from .calls.all_calls import (
     chain_of_verification_anthropic,
     chain_of_verification_mistral,
     get_openai_top_n,
-    get_anthropic_top_n
+    get_anthropic_top_n
 )
 
 
```
```diff
@@ -167,16 +167,29 @@ Number your categories from 1 through {cat_num} and be concise with the category
 
         if model_source == "openai":
             try:
-
-
-
-
-
-
-                    research_question=research_question,
-                    creativity=creativity
+                from openai import OpenAI
+
+                base_url = (
+                    "https://api.perplexity.ai" if model_source == "perplexity"
+                    else "https://router.huggingface.co/v1" if model_source == "huggingface"
+                    else None
                 )
 
+                client = OpenAI(api_key=api_key, base_url=base_url)
+
+                response_obj = client.chat.completions.create(
+                    model=user_model,
+                    messages=[
+                        {'role': 'system', 'content': f"""You are a helpful assistant that extracts categories from survey responses. \
+The specific task is to identify {specificity} categories of responses to a survey question. \
+The research question is: {research_question}""" if research_question else "You are a helpful assistant."},
+                        {'role': 'user', 'content': prompt}
+                    ],
+                    **({"temperature": creativity} if creativity is not None else {})
+                )
+
+                reply = response_obj.choices[0].message.content
+
                 responses.append(reply)
 
             except BadRequestError as e:
```
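The rewritten openai branch folds three OpenAI-compatible providers into one code path: the client is the same `OpenAI` class throughout, only `base_url` changes, and `temperature` is forwarded only when the caller actually sets `creativity`. Below is a minimal, self-contained sketch of that pattern, assuming the parameter names visible in the diff (`api_key`, `user_model`, `prompt`, `creativity`, `model_source`); the function name is hypothetical, not part of cat-llm's API.

```python
# A minimal sketch of the provider-routing pattern from the 0.0.81 diff,
# not the package's actual function. Parameter names mirror the diff;
# `categorize_openai_compatible` is a hypothetical name.
from openai import OpenAI

def categorize_openai_compatible(prompt, api_key, user_model,
                                 model_source="openai", creativity=None):
    # Perplexity and Hugging Face expose OpenAI-compatible endpoints,
    # so the same client serves all three; None means api.openai.com.
    base_url = (
        "https://api.perplexity.ai" if model_source == "perplexity"
        else "https://router.huggingface.co/v1" if model_source == "huggingface"
        else None
    )
    client = OpenAI(api_key=api_key, base_url=base_url)
    response = client.chat.completions.create(
        model=user_model,
        messages=[{"role": "user", "content": prompt}],
        # Pass temperature only when set, so each provider keeps its own default.
        **({"temperature": creativity} if creativity is not None else {}),
    )
    return response.choices[0].message.content
```

Called with `model_source="perplexity"` and a Perplexity model name, the same function hits Perplexity's endpoint; nothing else changes.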
```diff
@@ -191,16 +204,29 @@ Number your categories from 1 through {cat_num} and be concise with the category
 
         elif model_source == "anthropic":
 
-
-
-
-
-
-
-
-
+            import anthropic
+            client = anthropic.Anthropic(api_key=api_key)
+
+            # Build system prompt
+            if research_question:
+                system_content = (f"You are a helpful assistant that extracts categories from survey responses. "
+                                  f"The specific task is to identify {specificity} categories of responses to a survey question. "
+                                  f"The research question is: {research_question}")
+            else:
+                system_content = "You are a helpful assistant."
+
+            response_obj = client.messages.create(
+                model=user_model,
+                max_tokens=4096,
+                system=system_content,
+                messages=[
+                    {'role': 'user', 'content': prompt}
+                ],
+                **({"temperature": creativity} if creativity is not None else {})
             )
 
+            reply = response_obj.content[0].text
+
             responses.append(reply)
         else:
             raise ValueError(f"Unsupported model_source: {model_source}")
```
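The new anthropic branch makes the same request through Anthropic's own SDK, which differs from the OpenAI client in two details the diff accommodates: the system prompt is a top-level `system=` argument rather than a chat message, and `max_tokens` is mandatory on `messages.create`. A runnable sketch under the same assumptions (parameter names from the diff, hypothetical function name):

```python
# A minimal sketch of the Anthropic call added in 0.0.81, not the package's
# actual function. `prompt`, `api_key`, `user_model`, and `creativity` mirror
# the diff; `categorize_anthropic` is a hypothetical name.
import anthropic

def categorize_anthropic(prompt, api_key, user_model,
                         system_content="You are a helpful assistant.",
                         creativity=None):
    client = anthropic.Anthropic(api_key=api_key)
    response = client.messages.create(
        model=user_model,
        max_tokens=4096,         # required by the Messages API
        system=system_content,   # system prompt is a kwarg, not a message
        messages=[{"role": "user", "content": prompt}],
        **({"temperature": creativity} if creativity is not None else {}),
    )
    # The Messages API returns a list of content blocks; take the first text block.
    return response.content[0].text
```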