cat-llm 0.0.67.tar.gz → 0.0.69.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {cat_llm-0.0.67 → cat_llm-0.0.69}/PKG-INFO +2 -2
- {cat_llm-0.0.67 → cat_llm-0.0.69}/README.md +1 -1
- {cat_llm-0.0.67 → cat_llm-0.0.69}/src/catllm/__about__.py +1 -1
- cat_llm-0.0.69/src/catllm/calls/CoVe.py +304 -0
- cat_llm-0.0.69/src/catllm/calls/__init__.py +25 -0
- cat_llm-0.0.69/src/catllm/calls/all_calls.py +433 -0
- cat_llm-0.0.69/src/catllm/model_reference_list.py +94 -0
- {cat_llm-0.0.67 → cat_llm-0.0.69}/src/catllm/text_functions.py +335 -42
- {cat_llm-0.0.67 → cat_llm-0.0.69}/.gitignore +0 -0
- {cat_llm-0.0.67 → cat_llm-0.0.69}/LICENSE +0 -0
- {cat_llm-0.0.67 → cat_llm-0.0.69}/pyproject.toml +0 -0
- {cat_llm-0.0.67 → cat_llm-0.0.69}/src/catllm/CERAD_functions.py +0 -0
- {cat_llm-0.0.67 → cat_llm-0.0.69}/src/catllm/__init__.py +0 -0
- {cat_llm-0.0.67 → cat_llm-0.0.69}/src/catllm/build_web_research.py +0 -0
- {cat_llm-0.0.67 → cat_llm-0.0.69}/src/catllm/image_functions.py +0 -0
- {cat_llm-0.0.67 → cat_llm-0.0.69}/src/catllm/images/circle.png +0 -0
- {cat_llm-0.0.67 → cat_llm-0.0.69}/src/catllm/images/cube.png +0 -0
- {cat_llm-0.0.67 → cat_llm-0.0.69}/src/catllm/images/diamond.png +0 -0
- {cat_llm-0.0.67 → cat_llm-0.0.69}/src/catllm/images/overlapping_pentagons.png +0 -0
- {cat_llm-0.0.67 → cat_llm-0.0.69}/src/catllm/images/rectangles.png +0 -0
{cat_llm-0.0.67 → cat_llm-0.0.69}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cat-llm
-Version: 0.0.67
+Version: 0.0.69
 Summary: A tool for categorizing text data and images using LLMs and vision models
 Project-URL: Documentation, https://github.com/chrissoria/cat-llm#readme
 Project-URL: Issues, https://github.com/chrissoria/cat-llm/issues
@@ -29,7 +29,7 @@ Description-Content-Type: text/markdown
 
 
 
-#
+# cat-llm
 
 [](https://pypi.org/project/cat-llm)
 [](https://pypi.org/project/cat-llm)
{cat_llm-0.0.67 → cat_llm-0.0.69}/README.md
@@ -1,6 +1,6 @@
 
 
-#
+# cat-llm
 
 [](https://pypi.org/project/cat-llm)
 [](https://pypi.org/project/cat-llm)
cat_llm-0.0.69/src/catllm/calls/CoVe.py (new file)
@@ -0,0 +1,304 @@
+# openai chain of verification calls
+
+def chain_of_verification_openai(
+    initial_reply,
+    step2_prompt,
+    step3_prompt,
+    step4_prompt,
+    client,
+    user_model,
+    creativity,
+    remove_numbering
+):
+    """
+    Execute Chain of Verification (CoVe) process.
+    Returns the verified reply or initial reply if error occurs.
+    """
+    try:
+        # STEP 2: Generate verification questions
+        step2_filled = step2_prompt.replace('<<INITIAL_REPLY>>', initial_reply)
+
+        verification_response = client.chat.completions.create(
+            model=user_model,
+            messages=[{'role': 'user', 'content': step2_filled}],
+            **({"temperature": creativity} if creativity is not None else {})
+        )
+
+        verification_questions = verification_response.choices[0].message.content
+
+        # STEP 3: Answer verification questions
+        questions_list = [
+            remove_numbering(q)
+            for q in verification_questions.split('\n')
+            if q.strip()
+        ]
+        verification_qa = []
+
+        # Prompting each question individually
+        for question in questions_list:
+            step3_filled = step3_prompt.replace('<<QUESTION>>', question)
+
+            answer_response = client.chat.completions.create(
+                model=user_model,
+                messages=[{'role': 'user', 'content': step3_filled}],
+                **({"temperature": creativity} if creativity is not None else {})
+            )
+
+            answer = answer_response.choices[0].message.content
+            verification_qa.append(f"Q: {question}\nA: {answer}")
+
+        # STEP 4: Final corrected categorization
+        verification_qa_text = "\n\n".join(verification_qa)
+
+        step4_filled = (step4_prompt
+                        .replace('<<INITIAL_REPLY>>', initial_reply)
+                        .replace('<<VERIFICATION_QA>>', verification_qa_text))
+
+        print(f"Final prompt:\n{step4_filled}\n")
+
+        final_response = client.chat.completions.create(
+            model=user_model,
+            messages=[{'role': 'user', 'content': step4_filled}],
+            **({"temperature": creativity} if creativity is not None else {})
+        )
+
+        verified_reply = final_response.choices[0].message.content
+        print("Chain of verification completed. Final response generated.\n")
+
+        return verified_reply
+
+    except Exception as e:
+        print(f"ERROR in Chain of Verification: {str(e)}")
+        print("Falling back to initial response.\n")
+        return initial_reply
+
+# anthropic chain of verification calls
+
+def chain_of_verification_anthropic(
+    initial_reply,
+    step2_prompt,
+    step3_prompt,
+    step4_prompt,
+    client,
+    user_model,
+    creativity,
+    remove_numbering
+):
+    """
+    Execute Chain of Verification (CoVe) process for Anthropic Claude.
+    Returns the verified reply or initial reply if error occurs.
+    """
+    try:
+        # STEP 2: Generate verification questions
+        step2_filled = step2_prompt.replace('<<INITIAL_REPLY>>', initial_reply)
+
+        verification_response = client.messages.create(
+            model=user_model,
+            messages=[{'role': 'user', 'content': step2_filled}],
+            max_tokens=4096,
+            **({"temperature": creativity} if creativity is not None else {})
+        )
+
+        verification_questions = verification_response.content[0].text
+
+        # STEP 3: Answer verification questions
+        questions_list = [
+            remove_numbering(q)
+            for q in verification_questions.split('\n')
+            if q.strip()
+        ]
+        print(f"Verification questions:\n{questions_list}\n")
+        verification_qa = []
+
+        # Prompting each question individually
+        for question in questions_list:
+            step3_filled = step3_prompt.replace('<<QUESTION>>', question)
+
+            answer_response = client.messages.create(
+                model=user_model,
+                messages=[{'role': 'user', 'content': step3_filled}],
+                max_tokens=4096,
+                **({"temperature": creativity} if creativity is not None else {})
+            )
+
+            answer = answer_response.content[0].text
+            verification_qa.append(f"Q: {question}\nA: {answer}")
+
+        # STEP 4: Final corrected categorization
+        verification_qa_text = "\n\n".join(verification_qa)
+
+        step4_filled = (step4_prompt
+                        .replace('<<INITIAL_REPLY>>', initial_reply)
+                        .replace('<<VERIFICATION_QA>>', verification_qa_text))
+
+        print(f"Final prompt:\n{step4_filled}\n")
+
+        final_response = client.messages.create(
+            model=user_model,
+            messages=[{'role': 'user', 'content': step4_filled}],
+            max_tokens=4096,
+            **({"temperature": creativity} if creativity is not None else {})
+        )
+
+        verified_reply = final_response.content[0].text
+        print("Chain of verification completed. Final response generated.\n")
+
+        return verified_reply
+
+    except Exception as e:
+        print(f"ERROR in Chain of Verification: {str(e)}")
+        print("Falling back to initial response.\n")
+        return initial_reply
+
+# google chain of verification calls
+def chain_of_verification_google(
+    initial_reply,
+    prompt,
+    step2_prompt,
+    step3_prompt,
+    step4_prompt,
+    url,
+    headers,
+    creativity,
+    remove_numbering,
+    make_google_request
+):
+    """
+    Execute Chain of Verification (CoVe) process for Google Gemini.
+    Returns the verified reply or initial reply if error occurs.
+    """
+    import time
+    try:
+        # STEP 2: Generate verification questions
+        step2_filled = step2_prompt.replace('<<INITIAL_REPLY>>', initial_reply)
+
+        payload_step2 = {
+            "contents": [{
+                "parts": [{"text": step2_filled}]
+            }],
+            **({"generationConfig": {"temperature": creativity}} if creativity is not None else {})
+        }
+
+        result_step2 = make_google_request(url, headers, payload_step2)
+        verification_questions = result_step2["candidates"][0]["content"]["parts"][0]["text"]
+
+        # STEP 3: Answer verification questions
+        questions_list = [
+            remove_numbering(q)
+            for q in verification_questions.split('\n')
+            if q.strip()
+        ]
+        verification_qa = []
+
+        for question in questions_list:
+            time.sleep(2)  # temporary rate limit handling
+            step3_filled = step3_prompt.replace('<<QUESTION>>', question)
+
+            payload_step3 = {
+                "contents": [{
+                    "parts": [{"text": step3_filled}]
+                }],
+                **({"generationConfig": {"temperature": creativity}} if creativity is not None else {})
+            }
+
+            result_step3 = make_google_request(url, headers, payload_step3)
+            answer = result_step3["candidates"][0]["content"]["parts"][0]["text"]
+            verification_qa.append(f"Q: {question}\nA: {answer}")
+
+        # STEP 4: Final corrected categorization
+        verification_qa_text = "\n\n".join(verification_qa)
+
+        step4_filled = (step4_prompt
+                        .replace('<<PROMPT>>', prompt)
+                        .replace('<<INITIAL_REPLY>>', initial_reply)
+                        .replace('<<VERIFICATION_QA>>', verification_qa_text))
+
+        payload_step4 = {
+            "contents": [{
+                "parts": [{"text": step4_filled}]
+            }],
+            **({"generationConfig": {"temperature": creativity}} if creativity is not None else {})
+        }
+
+        result_step4 = make_google_request(url, headers, payload_step4)
+        verified_reply = result_step4["candidates"][0]["content"]["parts"][0]["text"]
+
+        print("Chain of verification completed. Final response generated.\n")
+        return verified_reply
+
+    except Exception as e:
+        print(f"ERROR in Chain of Verification: {str(e)}")
+        print("Falling back to initial response.\n")
+        return initial_reply
+
+# mistral chain of verification calls
+
+def chain_of_verification_mistral(
+    initial_reply,
+    step2_prompt,
+    step3_prompt,
+    step4_prompt,
+    client,
+    user_model,
+    creativity,
+    remove_numbering
+):
+    """
+    Execute Chain of Verification (CoVe) process for Mistral AI.
+    Returns the verified reply or initial reply if error occurs.
+    """
+    try:
+        # STEP 2: Generate verification questions
+        step2_filled = step2_prompt.replace('<<INITIAL_REPLY>>', initial_reply)
+
+        verification_response = client.chat.complete(
+            model=user_model,
+            messages=[{'role': 'user', 'content': step2_filled}],
+            **({"temperature": creativity} if creativity is not None else {})
+        )
+
+        verification_questions = verification_response.choices[0].message.content
+
+        # STEP 3: Answer verification questions
+        questions_list = [
+            remove_numbering(q)
+            for q in verification_questions.split('\n')
+            if q.strip()
+        ]
+        verification_qa = []
+
+        # Prompting each question individually
+        for question in questions_list:
+            step3_filled = step3_prompt.replace('<<QUESTION>>', question)
+
+            answer_response = client.chat.complete(
+                model=user_model,
+                messages=[{'role': 'user', 'content': step3_filled}],
+                **({"temperature": creativity} if creativity is not None else {})
+            )
+
+            answer = answer_response.choices[0].message.content
+            verification_qa.append(f"Q: {question}\nA: {answer}")
+
+        # STEP 4: Final corrected categorization
+        verification_qa_text = "\n\n".join(verification_qa)
+
+        step4_filled = (step4_prompt
+                        .replace('<<INITIAL_REPLY>>', initial_reply)
+                        .replace('<<VERIFICATION_QA>>', verification_qa_text))
+
+        final_response = client.chat.complete(
+            model=user_model,
+            messages=[{'role': 'user', 'content': step4_filled}],
+            **({"temperature": creativity} if creativity is not None else {})
+        )
+
+        verified_reply = final_response.choices[0].message.content
+        print("Chain of verification completed. Final response generated.\n")
+
+        return verified_reply
+
+    except Exception as e:
+        print(f"ERROR in Chain of Verification: {str(e)}")
+        print("Falling back to initial response.\n")
+        return initial_reply
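The four chain_of_verification_* functions above share one three-call CoVe loop: a first call drafts verification questions from the initial reply (step 2), one call answers each question individually (step 3), and a final call produces the corrected answer (step 4). Below is a minimal usage sketch for the OpenAI variant; it assumes cat-llm 0.0.69 and the official openai client are installed with OPENAI_API_KEY set, and the prompt templates, the gpt-4o-mini model name, and the remove_numbering helper are illustrative stand-ins, not the package's built-in templates.

import re
from openai import OpenAI
from catllm.calls import chain_of_verification_openai

def remove_numbering(line):
    # Strip leading "1.", "2)", or "-" markers from a generated question line.
    return re.sub(r'^\s*(?:\d+[.)]\s*|-\s*)', '', line).strip()

# Illustrative templates using the placeholders CoVe.py expects.
step2 = ("Initial answer:\n<<INITIAL_REPLY>>\n"
         "Write three short questions that would verify this answer, one per line.")
step3 = "Answer concisely: <<QUESTION>>"
step4 = ("Initial answer:\n<<INITIAL_REPLY>>\n\n"
         "Verification Q&A:\n<<VERIFICATION_QA>>\n\n"
         "Return the corrected final answer.")

client = OpenAI()  # reads OPENAI_API_KEY from the environment

verified = chain_of_verification_openai(
    initial_reply="Category: positive",
    step2_prompt=step2,
    step3_prompt=step3,
    step4_prompt=step4,
    client=client,
    user_model="gpt-4o-mini",  # example model name
    creativity=None,           # None omits temperature from each request
    remove_numbering=remove_numbering,
)
print(verified)

The Google variant differs from the other three: it takes a raw REST url, a headers dict, and a make_google_request helper instead of an SDK client, sleeps two seconds between question calls as temporary rate-limit handling, and its step 4 prompt additionally supports a <<PROMPT>> placeholder.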
cat_llm-0.0.69/src/catllm/calls/__init__.py (new file)
@@ -0,0 +1,25 @@
+# SPDX-FileCopyrightText: 2025-present Christopher Soria <chrissoria@berkeley.edu>
+#
+# SPDX-License-Identifier: MIT
+
+from .all_calls import (
+    get_stepback_insight_openai,
+    get_stepback_insight_anthropic,
+    get_stepback_insight_google,
+    get_stepback_insight_mistral,
+    chain_of_verification_openai,
+    chain_of_verification_google,
+    chain_of_verification_anthropic,
+    chain_of_verification_mistral
+)
+
+__all__ = [
+    'get_stepback_insight_openai',
+    'get_stepback_insight_anthropic',
+    'get_stepback_insight_google',
+    'get_stepback_insight_mistral',
+    'chain_of_verification_openai',
+    'chain_of_verification_anthropic',
+    'chain_of_verification_google',
+    'chain_of_verification_mistral',
+]
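Note that calls/__init__.py re-exports the chain_of_verification_* names from .all_calls rather than from CoVe.py directly, so downstream code imports them from the subpackage namespace. A short sketch, assuming the 0.0.69 layout shown above is installed:

import catllm.calls as calls

# The names listed in __all__ above are importable from the subpackage.
print(calls.__all__)

from catllm.calls import chain_of_verification_anthropic
print(chain_of_verification_anthropic.__doc__)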