themefinder 0.6.2__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of themefinder might be problematic.
- themefinder/__init__.py +8 -2
- themefinder/core.py +217 -39
- themefinder/llm_batch_processor.py +33 -81
- themefinder/models.py +371 -94
- themefinder/prompts/agentic_theme_clustering.txt +31 -0
- themefinder/prompts/detail_detection.txt +19 -0
- themefinder/prompts/sentiment_analysis.txt +0 -14
- themefinder/prompts/theme_condensation.txt +2 -22
- themefinder/prompts/theme_generation.txt +6 -38
- themefinder/prompts/theme_mapping.txt +6 -23
- themefinder/prompts/theme_refinement.txt +7 -16
- themefinder/prompts/theme_target_alignment.txt +2 -10
- themefinder/theme_clustering_agent.py +332 -0
- {themefinder-0.6.2.dist-info → themefinder-0.7.0.dist-info}/METADATA +24 -9
- themefinder-0.7.0.dist-info/RECORD +19 -0
- {themefinder-0.6.2.dist-info → themefinder-0.7.0.dist-info}/WHEEL +1 -1
- themefinder-0.6.2.dist-info/RECORD +0 -16
- {themefinder-0.6.2.dist-info → themefinder-0.7.0.dist-info}/LICENCE +0 -0
themefinder/models.py
CHANGED
@@ -1,138 +1,415 @@
+from typing import List, Optional
+from enum import Enum
 from pydantic import BaseModel, Field, model_validator
 
 
-
-    """
-    Validate that all string fields in the model are non-empty (after stripping)
-    and that list fields are not empty.
+class Position(str, Enum):
+    """Enum for valid position values"""
 
-
-
+    AGREEMENT = "AGREEMENT"
+    DISAGREEMENT = "DISAGREEMENT"
+    UNCLEAR = "UNCLEAR"
 
-    Returns:
-        BaseModel: The same model if validation passes.
 
-
-
-    """
-    for field_name, value in model.__dict__.items():
-        if isinstance(value, str) and not value.strip():
-            raise ValueError(f"{field_name} cannot be empty or only whitespace")
-        if isinstance(value, list) and not value:
-            raise ValueError(f"{field_name} cannot be an empty list")
-    return model
+class Stance(str, Enum):
+    """Enum for valid stance values"""
 
+    POSITIVE = "POSITIVE"
+    NEGATIVE = "NEGATIVE"
 
-def validate_position(model: BaseModel) -> BaseModel:
-    """
-    Validate that the model's 'position' field is one of the allowed values.
 
-
-
+class EvidenceRich(str, Enum):
+    """Enum for valid evidence_rich values"""
 
-
-
+    YES = "YES"
+    NO = "NO"
 
-    Raises:
-        ValueError: If the 'position' field is not one of the allowed values.
-    """
-    allowed_positions = {"AGREEMENT", "DISAGREEMENT", "UNCLEAR"}
-    if model.position not in allowed_positions:
-        raise ValueError(f"position must be one of {allowed_positions}")
-    return model
 
+class ValidatedModel(BaseModel):
+    """Base model with common validation methods"""
 
-def
-
-
+    def validate_non_empty_fields(self) -> "ValidatedModel":
+        """
+        Validate that all string fields are non-empty and all list fields are not empty.
+        """
+        for field_name, value in self.__dict__.items():
+            if isinstance(value, str) and not value.strip():
+                raise ValueError(f"{field_name} cannot be empty or only whitespace")
+            if isinstance(value, list) and not value:
+                raise ValueError(f"{field_name} cannot be an empty list")
+            if isinstance(value, list):
+                for i, item in enumerate(value):
+                    if isinstance(item, str) and not item.strip():
+                        raise ValueError(
+                            f"Item {i} in {field_name} cannot be empty or only whitespace"
+                        )
+        return self
 
-
-
+    def validate_unique_items(
+        self, field_name: str, transform_func: Optional[callable] = None
+    ) -> "ValidatedModel":
+        """
+        Validate that a field contains unique values.
 
-
-
+        Args:
+            field_name: The name of the field to check for uniqueness
+            transform_func: Optional function to transform items before checking uniqueness
+                (e.g., lowercasing strings)
+        """
+        if not hasattr(self, field_name):
+            raise ValueError(f"Field '{field_name}' does not exist")
+        items = getattr(self, field_name)
+        if not isinstance(items, list):
+            raise ValueError(f"Field '{field_name}' is not a list")
+        if transform_func:
+            transformed_items = [transform_func(item) for item in items]
+        else:
+            transformed_items = items
+        if len(transformed_items) != len(set(transformed_items)):
+            raise ValueError(f"'{field_name}' must contain unique values")
+        return self
 
-
-
-    ""
-
-
-    if stance not in allowed_stances:
-        raise ValueError(f"stances must be one of {allowed_stances}")
-    return model
+    def validate_unique_attribute_in_list(
+        self, list_field: str, attr_name: str
+    ) -> "ValidatedModel":
+        """
+        Validate that an attribute across all objects in a list field is unique.
 
+        Args:
+            list_field: The name of the list field containing objects
+            attr_name: The attribute within each object to check for uniqueness
+        """
+        if not hasattr(self, list_field):
+            raise ValueError(f"Field '{list_field}' does not exist")
+
+        items = getattr(self, list_field)
+        if not isinstance(items, list):
+            raise ValueError(f"Field '{list_field}' is not a list")
+
+        attr_values = []
+        for item in items:
+            if not hasattr(item, attr_name):
+                raise ValueError(
+                    f"Item in '{list_field}' does not have attribute '{attr_name}'"
+                )
+            attr_values.append(getattr(item, attr_name))
+        if len(attr_values) != len(set(attr_values)):
+            raise ValueError(
+                f"'{attr_name}' must be unique across all items in '{list_field}'"
+            )
+        return self
 
-def
-
-
+    def validate_equal_lengths(self, *field_names) -> "ValidatedModel":
+        """
+        Validate that multiple list fields have the same length.
 
-
-
+        Args:
+            *field_names: Variable number of field names to check for equal lengths
+        """
+        if len(field_names) < 2:
+            return self
+        lengths = []
+        for field_name in field_names:
+            if not hasattr(self, field_name):
+                raise ValueError(f"Field '{field_name}' does not exist")
+
+            items = getattr(self, field_name)
+            if not isinstance(items, list):
+                raise ValueError(f"Field '{field_name}' is not a list")
+
+            lengths.append(len(items))
+        if len(set(lengths)) > 1:
+            raise ValueError(
+                f"Fields {', '.join(field_names)} must all have the same length"
+            )
+        return self
 
-
-
+    @model_validator(mode="after")
+    def run_validations(self) -> "ValidatedModel":
+        """
+        Run common validations. Override in subclasses to add specific validations.
+        """
+        return self.validate_non_empty_fields()
 
-    Raises:
-        ValueError: If the lengths of 'stances' and 'labels' do not match.
-    """
-    if len(model.stances) != len(model.labels):
-        raise ValueError("'stances' must have the same length as 'labels'")
-    return model
 
+class SentimentAnalysisOutput(ValidatedModel):
+    """Model for sentiment analysis output"""
 
-
-
-    Validate that the model's 'labels' field contains unique values.
+    response_id: int = Field(gt=0)
+    position: Position
 
-    Args:
-        model (BaseModel): A Pydantic model instance with a 'labels' attribute.
 
-
-
+class SentimentAnalysisResponses(ValidatedModel):
+    """Container for all sentiment analysis responses"""
 
-
-        ValueError: If 'labels' contains duplicate values.
-    """
-    if len(model.labels) != len(set(model.labels)):
-        raise ValueError("'labels' must be unique")
-    return model
+    responses: List[SentimentAnalysisOutput]
 
+    @model_validator(mode="after")
+    def run_validations(self) -> "SentimentAnalysisResponses":
+        """Validate that response_ids are unique"""
+        self.validate_non_empty_fields()
+        response_ids = [resp.response_id for resp in self.responses]
+        if len(response_ids) != len(set(response_ids)):
+            raise ValueError("Response IDs must be unique")
+        return self
 
-
-
-
+
+class Theme(ValidatedModel):
+    """Model for a single extracted theme"""
+
+    topic_label: str = Field(
+        ..., description="Short label summarizing the topic in a few words"
+    )
+    topic_description: str = Field(
+        ..., description="More detailed description of the topic in 1-2 sentences"
+    )
+    position: Position = Field(
+        ...,
+        description="SENTIMENT ABOUT THIS TOPIC (AGREEMENT, DISAGREEMENT, OR UNCLEAR)",
+    )
+
+
+class ThemeGenerationResponses(ValidatedModel):
+    """Container for all extracted themes"""
+
+    responses: List[Theme] = Field(..., description="List of extracted themes")
+
+    @model_validator(mode="after")
+    def run_validations(self) -> "ThemeGenerationResponses":
+        """Ensure there are no duplicate themes"""
+        self.validate_non_empty_fields()
+        labels = [theme.topic_label.lower().strip() for theme in self.responses]
+        if len(labels) != len(set(labels)):
+            raise ValueError("Duplicate topic labels detected")
+        return self
+
+
+class CondensedTheme(ValidatedModel):
+    """Model for a single condensed theme"""
+
+    topic_label: str = Field(
+        ..., description="Representative label for the condensed topic"
+    )
+    topic_description: str = Field(
+        ...,
+        description="Concise description incorporating key insights from constituent topics",
+    )
+    source_topic_count: int = Field(
+        ..., gt=0, description="Sum of source_topic_counts from combined topics"
+    )
+
+
+class ThemeCondensationResponses(ValidatedModel):
+    """Container for all condensed themes"""
+
+    responses: List[CondensedTheme] = Field(..., description="List of condensed themes")
+
+    @model_validator(mode="after")
+    def run_validations(self) -> "ThemeCondensationResponses":
+        """Ensure there are no duplicate themes"""
+        self.validate_non_empty_fields()
+        labels = [theme.topic_label.lower().strip() for theme in self.responses]
+        if len(labels) != len(set(labels)):
+            raise ValueError("Duplicate topic labels detected")
+        return self
+
+
+class RefinedTheme(ValidatedModel):
+    """Model for a single refined theme"""
+
+    topic_id: str = Field(
+        ..., description="Single uppercase letter ID (A-Z, then AA, AB, etc.)"
+    )
+    topic: str = Field(
+        ..., description="Topic label and description combined with a colon separator"
+    )
+    source_topic_count: int = Field(
+        ..., gt=0, description="Count of source topics combined"
+    )
 
     @model_validator(mode="after")
-    def run_validations(self) -> "
+    def run_validations(self) -> "RefinedTheme":
+        """Run all validations for RefinedTheme"""
+        self.validate_non_empty_fields()
+        self.validate_topic_id_format()
+        self.validate_topic_format()
+        return self
+
+    def validate_topic_id_format(self) -> "RefinedTheme":
+        """
+        Validate that topic_id follows the expected format (A-Z, then AA, AB, etc.).
         """
-
+        topic_id = self.topic_id.strip()
+        if not topic_id.isupper() or not topic_id.isalpha():
+            raise ValueError(f"topic_id must be uppercase letters only: {topic_id}")
+        return self
 
-
-        - 'position' is one of the allowed values.
-        - No fields are empty or only whitespace (for strings) and no lists are empty.
+    def validate_topic_format(self) -> "RefinedTheme":
         """
-
-
+        Validate that topic contains a label and description separated by a colon.
+        """
+        if ":" not in self.topic:
+            raise ValueError(
+                "Topic must contain a label and description separated by a colon"
+            )
+
+        label, description = self.topic.split(":", 1)
+        if not label.strip() or not description.strip():
+            raise ValueError("Both label and description must be non-empty")
+
+        word_count = len(label.strip().split())
+        if word_count > 10:
+            raise ValueError(f"Topic label must be under 10 words (found {word_count})")
+
         return self
 
 
-class
-
-
-
-
+class ThemeRefinementResponses(ValidatedModel):
+    """Container for all refined themes"""
+
+    responses: List[RefinedTheme] = Field(..., description="List of refined themes")
+
+    @model_validator(mode="after")
+    def run_validations(self) -> "ThemeRefinementResponses":
+        """Ensure there are no duplicate themes"""
+        self.validate_non_empty_fields()
+        topic_ids = [theme.topic_id for theme in self.responses]
+        if len(topic_ids) != len(set(topic_ids)):
+            raise ValueError("Duplicate topic_ids detected")
+        topics = [theme.topic.lower().strip() for theme in self.responses]
+        if len(topics) != len(set(topics)):
+            raise ValueError("Duplicate topics detected")
+
+        return self
+
+
+class ThemeMappingOutput(ValidatedModel):
+    """Model for theme mapping output"""
+
+    response_id: int = Field(gt=0, description="Response ID, must be greater than 0")
+    labels: List[str] = Field(..., description="List of theme labels")
+    reasons: List[str] = Field(..., description="List of reasons for mapping")
+    stances: List[Stance] = Field(
+        ..., description="List of stances (POSITIVE or NEGATIVE)"
+    )
 
     @model_validator(mode="after")
     def run_validations(self) -> "ThemeMappingOutput":
         """
         Run all validations for ThemeMappingOutput.
+        """
+        self.validate_non_empty_fields()
+        self.validate_equal_lengths("stances", "labels", "reasons")
+        self.validate_unique_items("labels")
+        return self
+
+
+class ThemeMappingResponses(ValidatedModel):
+    """Container for all theme mapping responses"""
+
+    responses: List[ThemeMappingOutput] = Field(
+        ..., description="List of theme mapping outputs"
+    )
+
+    @model_validator(mode="after")
+    def run_validations(self) -> "ThemeMappingResponses":
+        """
+        Validate that response_ids are unique.
+        """
+        self.validate_non_empty_fields()
+        response_ids = [resp.response_id for resp in self.responses]
+        if len(response_ids) != len(set(response_ids)):
+            raise ValueError("Response IDs must be unique")
+        return self
+
 
-
-
-
-
+class DetailDetectionOutput(ValidatedModel):
+    """Model for detail detection output"""
+
+    response_id: int = Field(gt=0, description="Response ID, must be greater than 0")
+    evidence_rich: EvidenceRich = Field(
+        ..., description="Whether the response is evidence-rich (YES or NO)"
+    )
+
+
+class DetailDetectionResponses(ValidatedModel):
+    """Container for all detail detection responses"""
+
+    responses: List[DetailDetectionOutput] = Field(
+        ..., description="List of detail detection outputs"
+    )
+
+    @model_validator(mode="after")
+    def run_validations(self) -> "DetailDetectionResponses":
         """
-
-
-
+        Validate that response_ids are unique.
+        """
+        self.validate_non_empty_fields()
+        response_ids = [resp.response_id for resp in self.responses]
+        if len(response_ids) != len(set(response_ids)):
+            raise ValueError("Response IDs must be unique")
+        return self
+
+
+class ThemeNode(ValidatedModel):
+    """Model for topic nodes created during hierarchical clustering"""
+
+    topic_id: str = Field(
+        ...,
+        description="Short alphabetic ID (e.g. 'A', 'B', 'C') - iteration prefix will be added automatically",
+    )
+    topic_label: str = Field(
+        ..., description="4-5 word label encompassing merged child topics"
+    )
+    topic_description: str = Field(
+        ..., description="1-2 sentences combining key aspects of child topics"
+    )
+    source_topic_count: int = Field(gt=0, description="Sum of all child topic counts")
+    parent_id: Optional[str] = Field(
+        default=None,
+        description="Internal field: ID of parent topic node, managed by clustering agent, not set by LLM",
+    )
+    children: List[str] = Field(
+        default_factory=list, description="List of topic_ids of merged child topics"
+    )
+
+    @model_validator(mode="after")
+    def run_validations(self) -> "ThemeNode":
+        """Validate topic node constraints"""
+        if self.children:
+            # Each parent must have at least 2 children
+            if len(self.children) < 2:
+                raise ValueError("Each topic node must have at least 2 children")
+            # Validate children are unique
+            if len(self.children) != len(set(self.children)):
+                raise ValueError("Child topic IDs must be unique")
+
+        return self
+
+
+class HierarchicalClusteringResponse(ValidatedModel):
+    """Model for hierarchical clustering agent response"""
+
+    parent_themes: List[ThemeNode] = Field(
+        default=[],
+        description="List of parent themes created by merging similar themes",
+    )
+    should_terminate: bool = Field(
+        ...,
+        description="True if no more meaningful clustering is possible, false otherwise",
+    )
+
+    @model_validator(mode="after")
+    def run_validations(self) -> "HierarchicalClusteringResponse":
+        """Validate clustering response constraints"""
+        self.validate_non_empty_fields()
+
+        # Validate that no child appears in multiple parents
+        all_children = []
+        for parent in self.parent_themes:
+            all_children.extend(parent.children)
+
+        if len(all_children) != len(set(all_children)):
+            raise ValueError("Each child theme can have at most one parent")
+
         return self
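As a quick orientation (not part of the diff), here is a minimal usage sketch of the new validation behaviour, assuming the classes are imported from themefinder.models exactly as defined in the hunk above:

from pydantic import ValidationError
from themefinder.models import Position, SentimentAnalysisOutput, SentimentAnalysisResponses

# A valid batch: unique response_ids, positions drawn from the Position enum.
batch = SentimentAnalysisResponses(
    responses=[
        SentimentAnalysisOutput(response_id=1, position=Position.AGREEMENT),
        SentimentAnalysisOutput(response_id=2, position="DISAGREEMENT"),  # plain strings coerce via the str-based enum
    ]
)

# Duplicate response_ids are rejected by run_validations on the container.
try:
    SentimentAnalysisResponses(
        responses=[
            SentimentAnalysisOutput(response_id=3, position=Position.UNCLEAR),
            SentimentAnalysisOutput(response_id=3, position=Position.UNCLEAR),
        ]
    )
except ValidationError as err:
    print(err)  # wraps the ValueError("Response IDs must be unique")

Because the checks run in @model_validator(mode="after") hooks, the ValueError raised inside run_validations surfaces to callers as a pydantic ValidationError.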
themefinder/prompts/agentic_theme_clustering.txt
ADDED
@@ -0,0 +1,31 @@
+Analyze these topics and identify which ones should be merged based on semantic similarity.
+Your goal is to significantly reduce the number of topics by creating meaningful parent topics.
+Be aggressive in finding opportunities to merge topics that share any semantic relationship.
+
+TOPICS:
+{themes_json}
+
+For each group of similar topics that should be merged, create a new parent topic.
+
+Guidelines:
+- Each parent topic must have at least 2 children, it can have more than 2 if appropriate
+- Each child topic can have at most 1 parent
+- topic_id should be a simple alphabetic ID (e.g. 'A', 'B', 'C') - the iteration prefix will be added automatically
+- Be creative and look for higher-level abstractions that can combine seemingly different topics
+- When creating parent topics, follow these naming rules:
+  * The label should read naturally as a single coherent topic
+  * Choose labels that can encompass broader categories of topics
+  * If merging different topics, the topic with the higher source_topic_count should dominate the label
+  * Never combine different topics with "and" or "/" in the label
+- topic_description must be 1 or 2 sentences that:
+  * preserves key information from the child topics
+- source_topic_count must be the sum of all child topic counts
+- children must be a list of valid topic_ids from the input
+- should_terminate should only be true if ALL of these conditions are met:
+  * There are fewer than 10 active topics remaining
+  * The remaining topics are fundamentally incompatible semantically
+  * Any further merging would create meaninglessly broad categories
+
+If no topics should be merged in this iteration but future iterations might still yield meaningful merges, set should_terminate to false with an empty parent_themes list.
+
+If no topics should be merged and the termination conditions are met, set should_terminate to true with an empty parent_themes list.
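A hedged sketch (not part of the diff) of the structured result this prompt appears to target, expressed with the ThemeNode and HierarchicalClusteringResponse models from models.py above; the actual wiring presumably lives in the new theme_clustering_agent.py, and the labels and IDs below are hypothetical:

from themefinder.models import HierarchicalClusteringResponse, ThemeNode

merge_round = HierarchicalClusteringResponse(
    parent_themes=[
        ThemeNode(
            topic_id="A",  # hypothetical ID; the clustering agent adds the iteration prefix
            topic_label="Food affordability and access",
            topic_description="Combines concerns about grocery prices with access to fresh food.",
            source_topic_count=5,  # sum of the merged child topic counts
            children=["1_B", "1_C"],  # hypothetical child topic_ids taken from the input JSON
        )
    ],
    should_terminate=False,  # further merge iterations may still be useful
)

The ThemeNode validator enforces the prompt's rules mechanically: at least 2 unique children per parent, and the container rejects any child that appears under more than one parent.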
themefinder/prompts/detail_detection.txt
ADDED
@@ -0,0 +1,19 @@
+{system_prompt}
+
+You will receive a list of RESPONSES, each containing a response_id and a response.
+Your job is to analyze each response to the QUESTION below and decide if a response contains rich evidence.
+You MUST include every response ID in the output.
+
+Evidence-rich responses contain one or more of the following:
+- Specific facts or figures that shed new light on the issue (e.g., statistics, percentages, measurements, dates)
+- Concrete examples and specific insights that could inform decision-making
+- Detailed personal or professional experiences with clear contextual information or specific incidents
+In addition to the above an evidence rich response should answer the question and provide deeper insights than an average response.
+
+For each response, determine:
+EVIDENCE_RICH - does the response contain significant evidence as defined above?
+Choose one from ['YES', 'NO']
+
+
+QUESTION: \n {question}
+RESPONSES: \n {responses}
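A small sketch (not part of the diff) of how a reply to this prompt could be checked against the DetailDetectionResponses model added in models.py above; the payload is illustrative:

from themefinder.models import DetailDetectionResponses

llm_reply = {
    "responses": [
        {"response_id": 1, "evidence_rich": "YES"},
        {"response_id": 2, "evidence_rich": "NO"},
    ]
}

# model_validate parses the dict, coerces "YES"/"NO" into the EvidenceRich enum,
# and rejects duplicate response_ids.
parsed = DetailDetectionResponses.model_validate(llm_reply)
print([(r.response_id, r.evidence_rich) for r in parsed.responses])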
themefinder/prompts/sentiment_analysis.txt
CHANGED
@@ -6,20 +6,6 @@ Your job is to analyze each response to the QUESTION below and decide:
 POSITION - is the response AGREEING or DISAGREEING or is it UNCLEAR about the change being proposed in the question.
 Choose one from [AGREEMENT, DISAGREEMENT, UNCLEAR]
 
-The final output should be in the following JSON format:
-
-{{"responses": [
-{{
-"response_id": "{{response_id_1}}",
-"position": {{position_1}},
-}},
-{{
-"response_id": "{{response_id_2}}",
-"position": {{position_2}},
-}}
-...
-]}}
-
 You MUST include every response ID in the output.
 If the response can not be labelled return empty sections where appropriate but you MUST return an entry
 with the correct response ID for each input object
themefinder/prompts/theme_condensation.txt
CHANGED
@@ -23,28 +23,8 @@ For each topic in your output:
 2. Write a concise description that incorporates key insights from all constituent topics, this should only be a single sentence
 3. Include the total count of original topics combined by summing the source_topic_counts of merged topics (or 1 for topics without a count)
 
-
-
-{{"responses": [
-{{"topic_label": "{{label for condensed topic 1}}",
-"topic_description": "{{description for condensed topic 1}}",
-"source_topic_count": {{sum of source_topic_counts from combined topics}}
-}},
-{{"topic_label": "{{label for condensed topic 2}}",
-"topic_description": "{{description for condensed topic 2}}",
-"source_topic_count": {{sum of source_topic_counts from combined topics}}
-}},
-{{"topic_label": "{{label for condensed topic 3}}",
-"topic_description": "{{description for condensed topic 3}}",
-"source_topic_count": {{sum of source_topic_counts from combined topics}}
-}},
-// Additional topics as necessary
-]}}
-
-[Question]
-
+QUESTION:
 {question}
 
-
-
+TOPICS:
 {responses}
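For reference (not part of the diff), a condensation result in the shape this prompt now implies can be validated with the ThemeCondensationResponses model from models.py above; the topics below are made up:

from themefinder.models import ThemeCondensationResponses

condensed = ThemeCondensationResponses.model_validate({
    "responses": [
        {
            "topic_label": "Cost of living",
            "topic_description": "Concerns that the proposals would raise everyday costs for households.",
            "source_topic_count": 3,  # sum of the source_topic_counts of the merged topics
        },
        {
            "topic_label": "Public health",
            "topic_description": "Expected improvements to diets and long-term health outcomes.",
            "source_topic_count": 2,
        },
    ]
})

The container validator lower-cases and strips labels before checking for duplicates, so "Cost of living" and "cost of living " would be rejected as the same topic.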
themefinder/prompts/theme_generation.txt
CHANGED
@@ -7,28 +7,12 @@ Your task is to analyze the RESPONSES below and extract TOPICS such that:
 2. Every distinct and relevant point of view in the responses should be captured by a topic
 3. Each topic has a topic_label which summarizes the topic in a few words
 4. Each topic has a topic_description which gives more detail about the topic in one or two sentences
-5. The position field should just be the sentiment stated, and is either "
+5. The position field should just be the sentiment stated, and is either "AGREEMENT" or "DISAGREEMENT" or "UNCLEAR"
 6. There should be no duplicate topics
 
 The topics identified will be used by policy makers to understand what the public like and don't like about the proposals.
 
-Here is an example of how to extract topics from some responses
-
-The final output should be in the following JSON format:
-
-{{"responses": [
-{{
-"topic_label": "{{label_1}}",
-"topic_description": "{{description_1}}",
-"position": "{{position_1}}"
-}},
-{{
-"topic_label": "{{label_2}}",
-"topic_description": "{{description_2}}",
-"position": "{{position_2}}"
-}},
-// Additional topics as necessary
-]}}
+Here is an example of how to extract topics from some responses:
 
 ## EXAMPLE
 
@@ -42,26 +26,10 @@ RESPONSES
 {{"response": "I hate grapes", "position": "disagreement"}},
 ]
 
-
-
-
-
-"topic_label": "Government overreach",
-"topic_description": "The proposals would result in government interfering too much with citizen's lives",
-"position": "disagreement"
-}},
-{{
-"topic_label": "Regressive change",
-"topic_description": "The change would have a larger negative impact on poorer people",
-"position": "disagreement"
-}},
-{{
-"topic_label": "Health",
-"topic_description": "The change would result in people eating healthier diets",
-"position": "disagreement"
-}},
-]}}
-
+EXAMPLE OUTPUT (showing the structure)
+- Topic 1: Government overreach (The proposals would result in government interfering too much with citizen's lives) - DISAGREEMENT
+- Topic 2: Regressive change (The change would have a larger negative impact on poorer people) - DISAGREEMENT
+- Topic 3: Health (The change would result in people eating healthier diets) - DISAGREEMENT
 
 QUESTION:
 {question}