cat-llm 0.0.80__tar.gz → 0.0.81__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: cat-llm
- Version: 0.0.80
+ Version: 0.0.81
  Summary: A tool for categorizing text data and images using LLMs and vision models
  Project-URL: Documentation, https://github.com/chrissoria/cat-llm#readme
  Project-URL: Issues, https://github.com/chrissoria/cat-llm/issues
@@ -1,7 +1,7 @@
  # SPDX-FileCopyrightText: 2025-present Christopher Soria <chrissoria@berkeley.edu>
  #
  # SPDX-License-Identifier: MIT
- __version__ = "0.0.80"
+ __version__ = "0.0.81"
  __author__ = "Chris Soria"
  __email__ = "chrissoria@berkeley.edu"
  __title__ = "cat-llm"
@@ -8,7 +8,7 @@ from .calls.all_calls import (
      chain_of_verification_anthropic,
      chain_of_verification_mistral,
      get_openai_top_n,
-     get_anthropic_top_n,
+     get_anthropic_top_n
  )


@@ -167,16 +167,29 @@ Number your categories from 1 through {cat_num} and be concise with the category

      if model_source == "openai":
          try:
-             reply = get_openai_top_n(
-                 prompt=prompt,
-                 user_model=user_model,
-                 specificity=specificity,
-                 api_key=api_key,
-                 model_source=model_source,
-                 research_question=research_question,
-                 creativity=creativity
+             from openai import OpenAI
+
+             base_url = (
+                 "https://api.perplexity.ai" if model_source == "perplexity"
+                 else "https://router.huggingface.co/v1" if model_source == "huggingface"
+                 else None
              )

+             client = OpenAI(api_key=api_key, base_url=base_url)
+
+             response_obj = client.chat.completions.create(
+                 model=user_model,
+                 messages=[
+                     {'role': 'system', 'content': f"""You are a helpful assistant that extracts categories from survey responses. \
+                     The specific task is to identify {specificity} categories of responses to a survey question. \
+                     The research question is: {research_question}""" if research_question else "You are a helpful assistant."},
+                     {'role': 'user', 'content': prompt}
+                 ],
+                 **({"temperature": creativity} if creativity is not None else {})
+             )
+
+             reply = response_obj.choices[0].message.content
+
              responses.append(reply)

          except BadRequestError as e:
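
The new OpenAI branch replaces the get_openai_top_n helper with a direct client call and selects base_url by provider, so one OpenAI-compatible client can also reach Perplexity and Hugging Face router endpoints. A minimal standalone sketch of that pattern, assuming the openai v1 SDK (the function name and lookup table below are illustrative, not part of cat-llm):

    # Sketch of the base_url routing introduced in this hunk.
    # BASE_URLS and get_reply are hypothetical names, not cat-llm APIs.
    from openai import OpenAI

    BASE_URLS = {
        "openai": None,  # None keeps the SDK's default api.openai.com endpoint
        "perplexity": "https://api.perplexity.ai",
        "huggingface": "https://router.huggingface.co/v1",
    }

    def get_reply(prompt, model_source, user_model, api_key, creativity=None):
        client = OpenAI(api_key=api_key, base_url=BASE_URLS.get(model_source))
        response = client.chat.completions.create(
            model=user_model,
            messages=[{"role": "user", "content": prompt}],
            # Pass temperature only when the caller sets it, as the diff does
            **({"temperature": creativity} if creativity is not None else {}),
        )
        return response.choices[0].message.content

Note that the surrounding context in this hunk is still if model_source == "openai":, so the perplexity and huggingface arms of base_url only take effect if the dispatch condition is broadened elsewhere in the release.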
@@ -191,16 +204,29 @@ Number your categories from 1 through {cat_num} and be concise with the category

      elif model_source == "anthropic":

-         reply = get_anthropic_top_n(
-             prompt=prompt,
-             user_model=user_model,
-             specificity=specificity,
-             model_source=model_source,
-             api_key=api_key,
-             research_question=research_question,
-             creativity=creativity
+         import anthropic
+         client = anthropic.Anthropic(api_key=api_key)
+
+         # Build system prompt
+         if research_question:
+             system_content = (f"You are a helpful assistant that extracts categories from survey responses. "
+                               f"The specific task is to identify {specificity} categories of responses to a survey question. "
+                               f"The research question is: {research_question}")
+         else:
+             system_content = "You are a helpful assistant."
+
+         response_obj = client.messages.create(
+             model=user_model,
+             max_tokens=4096,
+             system=system_content,
+             messages=[
+                 {'role': 'user', 'content': prompt}
+             ],
+             **({"temperature": creativity} if creativity is not None else {})
          )

+         reply = response_obj.content[0].text
+
          responses.append(reply)
      else:
          raise ValueError(f"Unsupported model_source: {model_source}")
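
The Anthropic branch is inlined the same way; the Messages API differs from Chat Completions in that the system prompt is a top-level system= argument and max_tokens is mandatory. A minimal sketch under the same assumptions (the function name and default model below are placeholders, not from the package):

    # Sketch of the inlined Anthropic call, assuming the official anthropic SDK.
    import anthropic

    def get_anthropic_reply(prompt, api_key, system_content,
                            user_model="claude-3-5-haiku-latest",
                            creativity=None):
        client = anthropic.Anthropic(api_key=api_key)
        message = client.messages.create(
            model=user_model,
            max_tokens=4096,        # required: explicit output-token cap
            system=system_content,  # system prompt is top-level, not a message
            messages=[{"role": "user", "content": prompt}],
            **({"temperature": creativity} if creativity is not None else {}),
        )
        return message.content[0].text  # first content block carries the reply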
File without changes (×4)