cat-llm 0.0.67__py3-none-any.whl → 0.0.69__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: cat-llm
3
- Version: 0.0.67
3
+ Version: 0.0.69
4
4
  Summary: A tool for categorizing text data and images using LLMs and vision models
5
5
  Project-URL: Documentation, https://github.com/chrissoria/cat-llm#readme
6
6
  Project-URL: Issues, https://github.com/chrissoria/cat-llm/issues
@@ -29,7 +29,7 @@ Description-Content-Type: text/markdown
29
29
 
30
30
  ![catllm Logo](https://github.com/chrissoria/cat-llm/blob/main/images/logo.png?raw=True)
31
31
 
32
- # catllm
32
+ # cat-llm
33
33
 
34
34
  [![PyPI - Version](https://img.shields.io/pypi/v/cat-llm.svg)](https://pypi.org/project/cat-llm)
35
35
  [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/cat-llm.svg)](https://pypi.org/project/cat-llm)
@@ -1,15 +1,19 @@
1
1
  catllm/CERAD_functions.py,sha256=q4HbP5e2Yu8NnZZ-2eX4sImyj6u3i8xWcq0pYU81iis,22676
2
- catllm/__about__.py,sha256=hIVt-fQydvImRffcCyiHlTFeOR5F2aEchTWXAsWTPc4,430
2
+ catllm/__about__.py,sha256=qQkN04YWoxAJ5HglANO-XGwexy9aL_qFoZSv_CueaUs,430
3
3
  catllm/__init__.py,sha256=sf02zp7N0NW0mAQi7eQ4gliWR1EwoqvXkHN2HwwjcTE,372
4
4
  catllm/build_web_research.py,sha256=880dfE2bEQb-FrXP-42JoLLtyc9ox_sBULDr38xiTiQ,22655
5
5
  catllm/image_functions.py,sha256=8_FftRU285x1HT-AgNkaobefQVD-5q7ZY_t7JFdL3Sg,36177
6
- catllm/text_functions.py,sha256=_GMretLVBUs0ntL-wV6My0TRAvzvaPo_WU9ZqXlKxeM,19426
6
+ catllm/model_reference_list.py,sha256=37pWwMcgnf4biE3BVRluH5oz2P6ccdJJiCVNHodBH8k,2307
7
+ catllm/text_functions.py,sha256=Vd9tAPDCDEhoXVW6O-jXeftJiZQmsyyrKeEUneYeobw,32533
8
+ catllm/calls/CoVe.py,sha256=Y9OGJbaeJ3Odwira92cPXUlnm_ADFqvpOSFSNjFzMMU,10847
9
+ catllm/calls/__init__.py,sha256=fWuMwLeSGa6zXJYd4s8IyNblsD62G-1NMUsOKrNIkoI,725
10
+ catllm/calls/all_calls.py,sha256=E25KpZ_MakMDeCpNCOOM8kQvlfex6UMjnGN1wHkA4AI,14356
7
11
  catllm/images/circle.png,sha256=JWujAWAh08-TajAoEr_TAeFNLlfbryOLw6cgIBREBuQ,86202
8
12
  catllm/images/cube.png,sha256=nFec3e5bmRe4zrBCJ8QK-HcJLrG7u7dYdKhmdMfacfE,77275
9
13
  catllm/images/diamond.png,sha256=rJDZKtsnBGRO8FPA0iHuA8FvHFGi9PkI_DWSFdw6iv0,99568
10
14
  catllm/images/overlapping_pentagons.png,sha256=VO5plI6eoVRnjfqinn1nNzsCP2WQhuQy71V0EASouW4,71208
11
15
  catllm/images/rectangles.png,sha256=2XM16HO9EYWj2yHgN4bPXaCwPfl7iYQy0tQUGaJX9xg,40692
12
- cat_llm-0.0.67.dist-info/METADATA,sha256=UR95eJdArTmJe6A2g2hH0Q9mZ9PkEQsJ2kVSg3KfQe4,22423
13
- cat_llm-0.0.67.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
14
- cat_llm-0.0.67.dist-info/licenses/LICENSE,sha256=Vje2sS5WV4TnIwY5uQHrF4qnBAM3YOk1pGpdH0ot-2o,34969
15
- cat_llm-0.0.67.dist-info/RECORD,,
16
+ cat_llm-0.0.69.dist-info/METADATA,sha256=E2q6apmvq1sDDiisnfyyQZzxqjNnqjCSecpalb5MgWQ,22424
17
+ cat_llm-0.0.69.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
18
+ cat_llm-0.0.69.dist-info/licenses/LICENSE,sha256=Vje2sS5WV4TnIwY5uQHrF4qnBAM3YOk1pGpdH0ot-2o,34969
19
+ cat_llm-0.0.69.dist-info/RECORD,,
catllm/__about__.py CHANGED
@@ -1,7 +1,7 @@
1
1
  # SPDX-FileCopyrightText: 2025-present Christopher Soria <chrissoria@berkeley.edu>
2
2
  #
3
3
  # SPDX-License-Identifier: MIT
4
- __version__ = "0.0.67"
4
+ __version__ = "0.0.69"
5
5
  __author__ = "Chris Soria"
6
6
  __email__ = "chrissoria@berkeley.edu"
7
7
  __title__ = "cat-llm"
catllm/calls/CoVe.py ADDED
@@ -0,0 +1,304 @@
1
# openai chain of verification calls

def chain_of_verification_openai(
    initial_reply,
    step2_prompt,
    step3_prompt,
    step4_prompt,
    client,
    user_model,
    creativity,
    remove_numbering
):
    """
    Run the Chain of Verification (CoVe) procedure against the OpenAI API.

    Starting from ``initial_reply``, the function (1) asks the model to draft
    verification questions (``step2_prompt``), (2) answers each question in
    isolation (``step3_prompt``), and (3) asks the model for a corrected final
    reply given the Q/A evidence (``step4_prompt``).  Templates use the
    ``<<INITIAL_REPLY>>``, ``<<QUESTION>>`` and ``<<VERIFICATION_QA>>``
    placeholders.  ``creativity``, when not ``None``, is forwarded as the
    sampling temperature.  On any exception the original ``initial_reply``
    is returned unchanged (best-effort fallback).
    """
    try:
        # Shared helper: one chat completion, honoring the optional temperature.
        def _complete(text):
            extra = {"temperature": creativity} if creativity is not None else {}
            response = client.chat.completions.create(
                model=user_model,
                messages=[{'role': 'user', 'content': text}],
                **extra
            )
            return response.choices[0].message.content

        # STEP 2: draft verification questions from the initial reply.
        raw_questions = _complete(
            step2_prompt.replace('<<INITIAL_REPLY>>', initial_reply))

        # STEP 3: answer each non-blank question independently.
        verification_qa = []
        for raw_line in raw_questions.split('\n'):
            if not raw_line.strip():
                continue
            question = remove_numbering(raw_line)
            answer = _complete(step3_prompt.replace('<<QUESTION>>', question))
            verification_qa.append(f"Q: {question}\nA: {answer}")

        # STEP 4: produce the corrected final categorization.
        step4_filled = (step4_prompt
                        .replace('<<INITIAL_REPLY>>', initial_reply)
                        .replace('<<VERIFICATION_QA>>', "\n\n".join(verification_qa)))

        print(f"Final prompt:\n{step4_filled}\n")

        verified_reply = _complete(step4_filled)
        print("Chain of verification completed. Final response generated.\n")

        return verified_reply

    except Exception as e:
        print(f"ERROR in Chain of Verification: {str(e)}")
        print("Falling back to initial response.\n")
        return initial_reply
74
+
75
# anthropic chain of verification calls

def chain_of_verification_anthropic(
    initial_reply,
    step2_prompt,
    step3_prompt,
    step4_prompt,
    client,
    user_model,
    creativity,
    remove_numbering
):
    """
    Run the Chain of Verification (CoVe) procedure against the Anthropic API.

    The flow mirrors the OpenAI variant: generate verification questions from
    ``initial_reply`` (``step2_prompt``), answer each one individually
    (``step3_prompt``), then request a corrected final reply given the Q/A
    evidence (``step4_prompt``).  Responses come through the Messages API
    (``client.messages.create`` with ``max_tokens=4096``); ``creativity``,
    when not ``None``, is passed as the temperature.  On any exception the
    original ``initial_reply`` is returned unchanged.
    """
    try:
        # Shared helper: one Messages-API call returning the plain text reply.
        def _ask(text):
            options = {"temperature": creativity} if creativity is not None else {}
            message = client.messages.create(
                model=user_model,
                messages=[{'role': 'user', 'content': text}],
                max_tokens=4096,
                **options
            )
            return message.content[0].text

        # STEP 2: draft verification questions from the initial reply.
        raw_questions = _ask(
            step2_prompt.replace('<<INITIAL_REPLY>>', initial_reply))

        questions_list = [
            remove_numbering(q)
            for q in raw_questions.split('\n')
            if q.strip()
        ]
        print(f"Verification questions:\n{questions_list}\n")

        # STEP 3: answer each question independently.
        verification_qa = [
            f"Q: {question}\nA: {_ask(step3_prompt.replace('<<QUESTION>>', question))}"
            for question in questions_list
        ]

        # STEP 4: produce the corrected final categorization.
        step4_filled = (step4_prompt
                        .replace('<<INITIAL_REPLY>>', initial_reply)
                        .replace('<<VERIFICATION_QA>>', "\n\n".join(verification_qa)))

        print(f"Final prompt:\n{step4_filled}\n")

        verified_reply = _ask(step4_filled)
        print("Chain of verification completed. Final response generated.\n")

        return verified_reply

    except Exception as e:
        print(f"ERROR in Chain of Verification: {str(e)}")
        print("Falling back to initial response.\n")
        return initial_reply
152
+
153
# google chain of verification calls
def chain_of_verification_google(
    initial_reply,
    prompt,
    step2_prompt,
    step3_prompt,
    step4_prompt,
    url,
    headers,
    creativity,
    remove_numbering,
    make_google_request,
    question_delay=2
):
    """
    Execute Chain of Verification (CoVe) process for Google Gemini.

    Generates verification questions from ``initial_reply`` (step 2), answers
    each question individually (step 3), then requests a corrected final
    reply using the Q/A evidence (step 4).  Returns the verified reply, or
    ``initial_reply`` if any step fails.

    Parameters
    ----------
    initial_reply : str
        The model's first-pass answer to verify.
    prompt : str
        The original task prompt; substituted for ``<<PROMPT>>`` in step 4.
    step2_prompt, step3_prompt, step4_prompt : str
        Templates using the ``<<INITIAL_REPLY>>``, ``<<QUESTION>>``,
        ``<<VERIFICATION_QA>>`` (and ``<<PROMPT>>``) placeholders.
    url, headers
        Endpoint and HTTP headers forwarded verbatim to ``make_google_request``.
    creativity : float or None
        Optional temperature, sent via ``generationConfig`` when not ``None``.
    remove_numbering : callable
        Strips list numbering from each generated question line.
    make_google_request : callable
        ``(url, headers, payload) -> dict`` performing the HTTP call and
        returning the parsed Gemini JSON response.
    question_delay : float, optional
        Seconds to sleep before each step-3 request (crude rate limiting;
        previously hard-coded to 2 — now configurable, default unchanged).
    """
    # NOTE: in the previous version ``import time`` preceded this docstring,
    # so the triple-quoted string was a bare expression and __doc__ was None.
    import time

    # Shared helper: one generateContent-style request returning plain text.
    def _generate(text):
        payload = {
            "contents": [{
                "parts": [{"text": text}]
            }],
            **({"generationConfig": {"temperature": creativity}}
               if creativity is not None else {})
        }
        result = make_google_request(url, headers, payload)
        return result["candidates"][0]["content"]["parts"][0]["text"]

    try:
        # STEP 2: Generate verification questions
        verification_questions = _generate(
            step2_prompt.replace('<<INITIAL_REPLY>>', initial_reply))

        # STEP 3: Answer verification questions
        questions_list = [
            remove_numbering(q)
            for q in verification_questions.split('\n')
            if q.strip()
        ]
        verification_qa = []

        for question in questions_list:
            time.sleep(question_delay)  # temporary rate limit handling
            answer = _generate(step3_prompt.replace('<<QUESTION>>', question))
            verification_qa.append(f"Q: {question}\nA: {answer}")

        # STEP 4: Final corrected categorization
        step4_filled = (step4_prompt
                        .replace('<<PROMPT>>', prompt)
                        .replace('<<INITIAL_REPLY>>', initial_reply)
                        .replace('<<VERIFICATION_QA>>', "\n\n".join(verification_qa)))

        verified_reply = _generate(step4_filled)

        print("Chain of verification completed. Final response generated.\n")
        return verified_reply

    except Exception as e:
        print(f"ERROR in Chain of Verification: {str(e)}")
        print("Falling back to initial response.\n")
        return initial_reply
233
+
234
# mistral chain of verification calls

def chain_of_verification_mistral(
    initial_reply,
    step2_prompt,
    step3_prompt,
    step4_prompt,
    client,
    user_model,
    creativity,
    remove_numbering
):
    """
    Run the Chain of Verification (CoVe) procedure against the Mistral AI API.

    Generates verification questions from ``initial_reply`` (``step2_prompt``),
    answers each question in isolation (``step3_prompt``), then asks for a
    corrected final reply given the Q/A evidence (``step4_prompt``).  Calls go
    through ``client.chat.complete``; ``creativity``, when not ``None``, is
    forwarded as the sampling temperature.  On any exception the original
    ``initial_reply`` is returned unchanged (best-effort fallback).
    """
    try:
        # Shared helper: one chat completion returning the plain text reply.
        def _chat(text):
            options = {"temperature": creativity} if creativity is not None else {}
            result = client.chat.complete(
                model=user_model,
                messages=[{'role': 'user', 'content': text}],
                **options
            )
            return result.choices[0].message.content

        # STEP 2: draft verification questions from the initial reply.
        raw_questions = _chat(
            step2_prompt.replace('<<INITIAL_REPLY>>', initial_reply))

        # STEP 3: answer each non-blank question independently.
        verification_qa = []
        for raw_line in raw_questions.split('\n'):
            if not raw_line.strip():
                continue
            question = remove_numbering(raw_line)
            answer = _chat(step3_prompt.replace('<<QUESTION>>', question))
            verification_qa.append(f"Q: {question}\nA: {answer}")

        # STEP 4: produce the corrected final categorization.
        final_prompt = (step4_prompt
                        .replace('<<INITIAL_REPLY>>', initial_reply)
                        .replace('<<VERIFICATION_QA>>', "\n\n".join(verification_qa)))

        verified_reply = _chat(final_prompt)
        print("Chain of verification completed. Final response generated.\n")

        return verified_reply

    except Exception as e:
        print(f"ERROR in Chain of Verification: {str(e)}")
        print("Falling back to initial response.\n")
        return initial_reply
@@ -0,0 +1,25 @@
1
+ # SPDX-FileCopyrightText: 2025-present Christopher Soria <chrissoria@berkeley.edu>
2
+ #
3
+ # SPDX-License-Identifier: MIT
4
+
5
+ from .all_calls import (
6
+ get_stepback_insight_openai,
7
+ get_stepback_insight_anthropic,
8
+ get_stepback_insight_google,
9
+ get_stepback_insight_mistral,
10
+ chain_of_verification_openai,
11
+ chain_of_verification_google,
12
+ chain_of_verification_anthropic,
13
+ chain_of_verification_mistral
14
+ )
15
+
16
+ __all__ = [
17
+ 'get_stepback_insight_openai',
18
+ 'get_stepback_insight_anthropic',
19
+ 'get_stepback_insight_google',
20
+ 'get_stepback_insight_mistral',
21
+ 'chain_of_verification_openai',
22
+ 'chain_of_verification_anthropic',
23
+ 'chain_of_verification_google',
24
+ 'chain_of_verification_mistral',
25
+ ]