fabricatio-0.2.9.dev2-cp312-cp312-win_amd64.whl → fabricatio-0.2.9.dev4-cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,12 +1,14 @@
  """A class that provides the capability to check strings and objects against rules and guidelines."""

- from typing import Optional, Unpack
+ from asyncio import gather
+ from typing import List, Optional, Unpack

  from fabricatio import TEMPLATE_MANAGER
  from fabricatio.capabilities.advanced_judge import AdvancedJudge
  from fabricatio.capabilities.propose import Propose
  from fabricatio.config import configs
- from fabricatio.models.extra.patches import RuleSetBriefingPatch
+ from fabricatio.journal import logger
+ from fabricatio.models.extra.patches import RuleSetMetadata
  from fabricatio.models.extra.problem import Improvement
  from fabricatio.models.extra.rule import Rule, RuleSet
  from fabricatio.models.generic import Display, WithBriefing
@@ -40,23 +42,35 @@ class Check(AdvancedJudge, Propose):
  - Returns None if any step in rule generation fails
  - Uses `alist_str` for requirement breakdown and iterative rule proposal
  """
- rule_reqs = await self.alist_str(
- TEMPLATE_MANAGER.render_template(
- configs.templates.ruleset_requirement_breakdown_template, {"ruleset_requirement": ruleset_requirement}
- ),
- rule_count,
- **override_kwargs(kwargs, default=None),
+ rule_reqs = (
+ await self.alist_str(
+ TEMPLATE_MANAGER.render_template(
+ configs.templates.ruleset_requirement_breakdown_template,
+ {"ruleset_requirement": ruleset_requirement},
+ ),
+ rule_count,
+ **override_kwargs(kwargs, default=None),
+ )
+ if rule_count > 1
+ else [ruleset_requirement]
  )

  if rule_reqs is None:
  return None

- rules = await self.propose(Rule, [TEMPLATE_MANAGER.render_template(configs.templates.rule_requirement_template, {"rule_requirement": r}) for r in rule_reqs], **kwargs)
+ rules = await self.propose(
+ Rule,
+ [
+ TEMPLATE_MANAGER.render_template(configs.templates.rule_requirement_template, {"rule_requirement": r})
+ for r in rule_reqs
+ ],
+ **kwargs,
+ )
  if any(r for r in rules if r is None):
  return None

  ruleset_patch = await self.propose(
- RuleSetBriefingPatch,
+ RuleSetMetadata,
  f"{ruleset_requirement}\n\nYou should use `{detect_language(ruleset_requirement)}`!",
  **override_kwargs(kwargs, default=None),
  )
@@ -93,6 +107,7 @@ class Check(AdvancedJudge, Propose):
  f"# Content to exam\n{input_text}\n\n# Rule Must to follow\n{rule.display()}\nDoes `Content to exam` provided above violate the `Rule Must to follow` provided above?",
  **override_kwargs(kwargs, default=None),
  ):
+ logger.info(f"Rule `{rule.name}` violated: \n{judge.display()}")
  return await self.propose(
  Improvement,
  TEMPLATE_MANAGER.render_template(
@@ -141,7 +156,7 @@ class Check(AdvancedJudge, Propose):
  ruleset: RuleSet,
  reference: str = "",
  **kwargs: Unpack[ValidateKwargs[Improvement]],
- ) -> Optional[Improvement]:
+ ) -> Optional[List[Improvement]]:
  """Validate text against full ruleset.

  Args:
@@ -158,12 +173,13 @@ class Check(AdvancedJudge, Propose):
  - Halts validation after first successful improvement proposal
  - Maintains rule execution order from ruleset.rules list
  """
- imp_seq = [
- await self.check_string_against_rule(input_text, rule, reference, **kwargs) for rule in ruleset.rules
- ]
- if all(isinstance(i, Improvement) for i in imp_seq):
- return Improvement.gather(*imp_seq) # pyright: ignore [reportArgumentType]
- return None
+ imp_seq = await gather(
+ *[self.check_string_against_rule(input_text, rule, reference, **kwargs) for rule in ruleset.rules]
+ )
+ if imp_seq is None:
+ logger.warning(f"Generation failed for string check against `{ruleset.name}`")
+ return None
+ return [imp for imp in imp_seq if imp]

  async def check_obj[M: (Display, WithBriefing)](
  self,
@@ -171,7 +187,7 @@ class Check(AdvancedJudge, Propose):
  ruleset: RuleSet,
  reference: str = "",
  **kwargs: Unpack[ValidateKwargs[Improvement]],
- ) -> Optional[Improvement]:
+ ) -> Optional[List[Improvement]]:
  """Validate object against full ruleset.

  Args:
@@ -188,7 +204,9 @@ class Check(AdvancedJudge, Propose):
  - Maintains same early termination behavior as check_string
  - Validates object through text conversion mechanism
  """
- imp_seq = [await self.check_obj_against_rule(obj, rule, reference, **kwargs) for rule in ruleset.rules]
- if all(isinstance(i, Improvement) for i in imp_seq):
- return Improvement.gather(*imp_seq) # pyright: ignore [reportArgumentType]
- return None
+ imp_seq = await gather(*[self.check_obj_against_rule(obj, rule, reference, **kwargs) for rule in ruleset.rules])
+
+ if imp_seq is None:
+ logger.warning(f"Generation Failed for `{obj.__class__.__name__}` against Ruleset `{ruleset.name}`")
+ return None
+ return [i for i in imp_seq if i]
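The two hunks above change `check_string` and `check_obj` to fan the per-rule checks out concurrently with `asyncio.gather` and to return a list of `Improvement`s (one per violated rule) rather than a single merged `Improvement`. Below is a rough usage sketch of the new contract; it is not code from this package: the import path and the `MyChecker` subclass are assumptions, and a concrete subclass needs a configured LLM backend.

```python
# Usage sketch for the new Optional[List[Improvement]] return type of check_string.
# The module path and MyChecker are assumptions; only check_string/RuleSet come from the diff.
import asyncio

from fabricatio.capabilities.check import Check  # assumed module path
from fabricatio.models.extra.rule import RuleSet


class MyChecker(Check):
    """Placeholder subclass; a real one needs a working LLM configuration."""


async def main() -> None:
    checker = MyChecker()
    ruleset: RuleSet = ...  # elided: obtain or draft a ruleset beforehand
    improvements = await checker.check_string("text to validate", ruleset)
    if improvements is None:
        print("generation failed")
    else:
        # One Improvement per violated rule; rules that passed are filtered out.
        print(f"{len(improvements)} rule(s) violated")


asyncio.run(main())
```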
@@ -1,5 +1,6 @@
  """A module containing the Correct capability for reviewing, validating, and improving objects."""

+ from asyncio import gather
  from typing import Optional, Type, Unpack, cast

  from fabricatio.capabilities.propose import Propose
@@ -14,7 +15,7 @@ from fabricatio.models.kwargs_types import (
  ValidateKwargs,
  )
  from fabricatio.rust_instances import TEMPLATE_MANAGER
- from fabricatio.utils import ok, override_kwargs
+ from fabricatio.utils import fallback_kwargs, ok, override_kwargs


  class Correct(Rating, Propose):
@@ -33,8 +34,9 @@ class Correct(Rating, Propose):
  ProblemSolutions: The problem solutions with the best solution selected.
  """
  if (leng := len(problem_solutions.solutions)) == 0:
- logger.error(f"No solutions found in ProblemSolutions, Skip: {problem_solutions.problem}")
+ logger.error(f"No solutions found in ProblemSolutions, Skip: `{problem_solutions.problem.name}`")
  if leng > 1:
+ logger.info(f"{leng} solutions found in Problem `{problem_solutions.problem.name}`, select the best.")
  problem_solutions.solutions = await self.best(problem_solutions.solutions, **kwargs)
  return problem_solutions

@@ -48,11 +50,25 @@ class Correct(Rating, Propose):
  Returns:
  Improvement: The improvement with the best solutions selected for each problem solution.
  """
- if (leng := len(improvement.problem_solutions)) == 0:
+ if leng := len(improvement.problem_solutions):
+ logger.debug(f"{leng} problem_solutions found in Improvement, decide solution for each of them.")
+ await gather(
+ *[
+ self.decide_solution(
+ ps,
+ **fallback_kwargs(
+ kwargs, topic=f"which solution is better to deal this problem {ps.problem.description}\n\n"
+ ),
+ )
+ for ps in improvement.problem_solutions
+ ],
+ )
+ if any(not (violated := ps).decided() for ps in improvement.problem_solutions):
+ logger.error(f"Some problem_solutions are not decided: {violated}")
+ else:
+ logger.success(f"All problem_solutions are decided '{improvement.focused_on}'")
+ else:
  logger.error(f"No problem_solutions found in Improvement, Skip: {improvement}")
- if leng > 1:
- for ps in improvement.problem_solutions:
- ps.solutions = await self.best(ps.solutions, **kwargs)
  return improvement

  async def fix_troubled_obj[M: SketchedAble](
@@ -81,7 +97,7 @@ class Correct(Rating, Propose):
  "problem": problem_solutions.problem.display(),
  "solution": ok(
  problem_solutions.final_solution(),
- f"No solution found for problem: {problem_solutions.problem}",
+ f"{len(problem_solutions.solutions)} solution Found for `{problem_solutions.problem.name}`.",
  ).display(),
  "reference": reference,
  },
@@ -148,14 +164,15 @@ class Correct(Rating, Propose):
  TypeError: If the provided object doesn't implement Display or WithBriefing interfaces.
  """
  if not improvement.decided():
+ logger.info(f"Improvement {improvement.focused_on} not decided, start deciding...")
  improvement = await self.decide_improvement(improvement, **override_kwargs(kwargs, default=None))

- for ps in improvement.problem_solutions:
+ total = len(improvement.problem_solutions)
+ for idx, ps in enumerate(improvement.problem_solutions):
+ logger.info(f"[{idx + 1}/{total}] Fixing {obj.__class__.__name__} for problem `{ps.problem.name}`")
  fixed_obj = await self.fix_troubled_obj(obj, ps, reference, **kwargs)
  if fixed_obj is None:
- logger.error(
- f"Failed to fix troubling obj {obj.__class__.__name__} when deal with problem: {ps.problem}",
- )
+ logger.error(f"[{idx + 1}/{total}] Failed to fix problem `{ps.problem.name}`")
  return None
  obj = fixed_obj
  return obj
@@ -178,6 +195,8 @@ class Correct(Rating, Propose):
  Optional[str]: A corrected version of the input string, or None if correction fails.
  """
  if not improvement.decided():
+ logger.info(f"Improvement {improvement.focused_on} not decided, start deciding...")
+
  improvement = await self.decide_improvement(improvement, **override_kwargs(kwargs, default=None))

  for ps in improvement.problem_solutions:
@@ -5,19 +5,19 @@ from random import sample
  from typing import Dict, List, Optional, Set, Tuple, Union, Unpack, overload

  from more_itertools import flatten, windowed
- from pydantic import NonNegativeInt, PositiveInt
+ from pydantic import Field, NonNegativeInt, PositiveInt, create_model

+ from fabricatio.capabilities.propose import Propose
  from fabricatio.config import configs
  from fabricatio.journal import logger
- from fabricatio.models.generic import Display
+ from fabricatio.models.generic import Display, ProposedAble
  from fabricatio.models.kwargs_types import CompositeScoreKwargs, ValidateKwargs
- from fabricatio.models.usages import LLMUsage
  from fabricatio.parser import JsonCapture
  from fabricatio.rust_instances import TEMPLATE_MANAGER
- from fabricatio.utils import ok, override_kwargs
+ from fabricatio.utils import fallback_kwargs, ok, override_kwargs


- class Rating(LLMUsage):
+ class Rating(Propose):
  """A class that provides functionality to rate tasks based on a rating manual and score range.

  References:
@@ -30,7 +30,7 @@ class Rating(LLMUsage):
  rating_manual: Dict[str, str],
  score_range: Tuple[float, float],
  **kwargs: Unpack[ValidateKwargs[Dict[str, float]]],
- ) -> Optional[Dict[str, float] | List[Dict[str, float]]]:
+ ) -> Dict[str, float] | List[Dict[str, float]] | List[Optional[Dict[str, float]]] | None:
  """Rate a given string based on a rating manual and score range.

  Args:
@@ -42,45 +42,49 @@ class Rating(LLMUsage):
  Returns:
  Dict[str, float]: A dictionary with the ratings for each dimension.
  """
-
- def _validator(response: str) -> Dict[str, float] | None:
- if (
- (json_data := JsonCapture.validate_with(response, dict, str))
- and json_data.keys() == rating_manual.keys()
- and all(score_range[0] <= v <= score_range[1] for v in json_data.values())
- ):
- return json_data
- return None
-
- logger.info(f"Rating for {to_rate}")
- return await self.aask_validate(
- question=(
- TEMPLATE_MANAGER.render_template(
- configs.templates.rate_fine_grind_template,
- {
- "to_rate": to_rate,
- "min_score": score_range[0],
- "max_score": score_range[1],
- "rating_manual": rating_manual,
- },
+ min_score, max_score = score_range
+ tip = (max_score - min_score) / 9
+
+ model = create_model( # pyright: ignore [reportCallIssue]
+ "RatingResult",
+ __base__=ProposedAble,
+ __doc__=f"The rating result contains the scores against each criterion, with min_score={min_score} and max_score={max_score}.",
+ **{ # pyright: ignore [reportArgumentType]
+ criterion: (
+ float,
+ Field(
+ ge=min_score,
+ le=max_score,
+ description=desc,
+ examples=[round(min_score + tip * i, 2) for i in range(10)],
+ ),
  )
+ for criterion, desc in rating_manual.items()
+ },
+ )
+
+ res = await self.propose(
+ model,
+ TEMPLATE_MANAGER.render_template(
+ configs.templates.rate_fine_grind_template,
+ {"to_rate": to_rate, "min_score": min_score, "max_score": max_score},
  )
  if isinstance(to_rate, str)
  else [
  TEMPLATE_MANAGER.render_template(
  configs.templates.rate_fine_grind_template,
- {
- "to_rate": item,
- "min_score": score_range[0],
- "max_score": score_range[1],
- "rating_manual": rating_manual,
- },
+ {"to_rate": t, "min_score": min_score, "max_score": max_score},
  )
- for item in to_rate
+ for t in to_rate
  ],
- validator=_validator,
- **kwargs,
+ **override_kwargs(kwargs, default=None),
  )
+ default = kwargs.get("default")
+ if isinstance(res, list):
+ return [r.model_dump() if r else default for r in res]
+ if res is None:
+ return default
+ return res.model_dump()

  @overload
  async def rate(
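The `rate_fine_grind` rewrite above drops the hand-written JSON validator and instead builds a pydantic model at call time, with one range-constrained float field per criterion, and proposes that model to the LLM. The sketch below isolates that `create_model` pattern with a plain `BaseModel` base and made-up criteria; fabricatio's actual `ProposedAble` base and `propose` call are not reproduced here.

```python
# Minimal sketch of the dynamic-model pattern used by the new rate_fine_grind:
# one constrained float field per criterion, built at call time with create_model.
# Uses a plain BaseModel base and invented criteria purely for illustration.
from pydantic import BaseModel, Field, create_model

rating_manual = {"clarity": "Is the text clear?", "accuracy": "Is it factually correct?"}
min_score, max_score = 0.0, 1.0
tip = (max_score - min_score) / 9  # spacing used for the example scores

RatingResult = create_model(
    "RatingResult",
    __base__=BaseModel,
    **{
        criterion: (
            float,
            Field(
                ge=min_score,
                le=max_score,
                description=desc,
                examples=[round(min_score + tip * i, 2) for i in range(10)],
            ),
        )
        for criterion, desc in rating_manual.items()
    },
)

# The model's JSON schema now carries the score bounds, so out-of-range or missing
# scores fail validation instead of needing a bespoke validator function.
parsed = RatingResult.model_validate({"clarity": 0.8, "accuracy": 0.6})
print(parsed.model_dump())  # {'clarity': 0.8, 'accuracy': 0.6}
```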
@@ -88,7 +92,7 @@ class Rating(LLMUsage):
  to_rate: str,
  topic: str,
  criteria: Set[str],
- manual: Optional[Dict[str, str]],
+ manual: Optional[Dict[str, str]] = None,
  score_range: Tuple[float, float] = (0.0, 1.0),
  **kwargs: Unpack[ValidateKwargs],
  ) -> Dict[str, float]: ...
@@ -99,7 +103,7 @@ class Rating(LLMUsage):
  to_rate: List[str],
  topic: str,
  criteria: Set[str],
- manual: Optional[Dict[str, str]],
+ manual: Optional[Dict[str, str]] = None,
  score_range: Tuple[float, float] = (0.0, 1.0),
  **kwargs: Unpack[ValidateKwargs],
  ) -> List[Dict[str, float]]: ...
@@ -109,10 +113,10 @@ class Rating(LLMUsage):
  to_rate: Union[str, List[str]],
  topic: str,
  criteria: Set[str],
- manual: Optional[Dict[str, str]],
+ manual: Optional[Dict[str, str]] = None,
  score_range: Tuple[float, float] = (0.0, 1.0),
  **kwargs: Unpack[ValidateKwargs],
- ) -> Optional[Dict[str, float] | List[Dict[str, float]]]:
+ ) -> Dict[str, float] | List[Dict[str, float]] | List[Optional[Dict[str, float]]] | None:
  """Rate a given string or a sequence of strings based on a topic, criteria, and score range.

  Args:
@@ -133,7 +137,7 @@ class Rating(LLMUsage):
  or dict(zip(criteria, criteria, strict=True))
  )

- return await self.rate_fine_grind(to_rate, manual, score_range, **kwargs)
+ return await self.rate_fine_grind(to_rate, manual, score_range, **fallback_kwargs(kwargs, co_extractor={}))

  async def draft_rating_manual(
  self, topic: str, criteria: Optional[Set[str]] = None, **kwargs: Unpack[ValidateKwargs[Dict[str, str]]]
@@ -170,7 +174,7 @@ class Rating(LLMUsage):
  configs.templates.draft_rating_manual_template,
  {
  "topic": topic,
- "criteria": criteria,
+ "criteria": list(criteria),
  },
  )
  ),
@@ -244,7 +248,7 @@ class Rating(LLMUsage):

  # extract reasons from the comparison of ordered pairs of extracted from examples
  reasons = flatten(
- await self.aask_validate(
+ await self.aask_validate( # pyright: ignore [reportArgumentType]
  question=[
  TEMPLATE_MANAGER.render_template(
  configs.templates.extract_reasons_from_examples_template,
@@ -319,9 +323,11 @@ class Rating(LLMUsage):
  validator=lambda resp: JsonCapture.validate_with(resp, target_type=float),
  **kwargs,
  )
+ if not all(relative_weights):
+ raise ValueError(f"found illegal weight: {relative_weights}")
  weights = [1.0]
  for rw in relative_weights:
- weights.append(weights[-1] * rw)
+ weights.append(weights[-1] * rw) # pyright: ignore [reportOperatorIssue]
  total = sum(weights)
  return dict(zip(criteria_seq, [w / total for w in weights], strict=True))

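The weight hunk above now rejects zero or missing relative weights and then turns the chain of relative weights into normalized absolute weights via a running product. A small worked sketch with made-up numbers follows; the pairwise-comparison reading of `relative_weights` is an assumption based on the surrounding code, and the arithmetic itself mirrors the hunk.

```python
# Worked sketch of the weight arithmetic in the hunk above, with invented inputs.
# Each relative weight rw_i is read here as "criterion i+1 relative to criterion i",
# so absolute weights form a running product which is then normalized to sum to 1.
criteria_seq = ["clarity", "accuracy", "depth"]  # hypothetical criteria
relative_weights = [2.0, 0.5]                    # accuracy vs clarity, depth vs accuracy

weights = [1.0]
for rw in relative_weights:
    weights.append(weights[-1] * rw)             # -> [1.0, 2.0, 1.0]

total = sum(weights)                             # 4.0
print(dict(zip(criteria_seq, [w / total for w in weights], strict=True)))
# {'clarity': 0.25, 'accuracy': 0.5, 'depth': 0.25}
```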
@@ -360,14 +366,14 @@ class Rating(LLMUsage):
  return [sum(ratings[c] * weights[c] for c in criteria) for ratings in ratings_seq]

  @overload
- async def best(self, candidates: List[str], k: int=1, **kwargs: Unpack[CompositeScoreKwargs]) -> List[str]: ...
+ async def best(self, candidates: List[str], k: int = 1, **kwargs: Unpack[CompositeScoreKwargs]) -> List[str]: ...
  @overload
  async def best[T: Display](
- self, candidates: List[T], k: int=1, **kwargs: Unpack[CompositeScoreKwargs]
+ self, candidates: List[T], k: int = 1, **kwargs: Unpack[CompositeScoreKwargs]
  ) -> List[T]: ...

  async def best[T: Display](
- self, candidates: List[str] | List[T], k: int=1, **kwargs: Unpack[CompositeScoreKwargs]
+ self, candidates: List[str] | List[T], k: int = 1, **kwargs: Unpack[CompositeScoreKwargs]
  ) -> Optional[List[str] | List[T]]:
  """Choose the best candidates from the list of candidates based on the composite score.

fabricatio/config.py CHANGED
@@ -44,7 +44,7 @@ class LLMConfig(BaseModel):
  top_p (NonNegativeFloat): The top p of the LLM model. Controls diversity via nucleus sampling. Set to 0.35 as per request.
  generation_count (PositiveInt): The number of generations to generate. Default is 1.
  stream (bool): Whether to stream the LLM model's response. Default is False.
- max_tokens (PositiveInt): The maximum number of tokens to generate. Set to 8192 as per request.
+ max_tokens (PositiveInt): The maximum number of tokens to generate.
  """

  model_config = ConfigDict(use_attribute_docstrings=True)
@@ -79,7 +79,7 @@ class LLMConfig(BaseModel):
  """Whether to stream the LLM model's response. Default is False."""

  max_tokens: Optional[PositiveInt] = Field(default=None)
- """The maximum number of tokens to generate. Set to 8192 as per request."""
+ """The maximum number of tokens to generate."""

  rpm: Optional[PositiveInt] = Field(default=100)
  """The rate limit of the LLM model in requests per minute. None means not checked."""
fabricatio/fs/readers.py CHANGED
@@ -1,9 +1,10 @@
  """Filesystem readers for Fabricatio."""

  from pathlib import Path
- from typing import Dict
+ from typing import Dict, List, Tuple

  import orjson
+ import regex
  from magika import Magika

  from fabricatio.config import configs
@@ -44,3 +45,21 @@ def safe_json_read(path: Path | str) -> Dict:
  except (orjson.JSONDecodeError, IsADirectoryError, FileNotFoundError) as e:
  logger.error(f"Failed to read file {path}: {e!s}")
  return {}
+
+
+ def extract_sections(string: str, level: int, section_char: str = "#") -> List[Tuple[str, str]]:
+ """Extract sections from markdown-style text by header level.
+
+ Args:
+ string (str): Input text to parse
+ level (int): Header level (e.g., 1 for '#', 2 for '##')
+ section_char (str, optional): The character used for headers (default: '#')
+
+ Returns:
+ List[Tuple[str, str]]: List of (header_text, section_content) tuples
+ """
+ return regex.findall(
+ r"^%s{%d}\s+(.+?)\n((?:(?!^%s{%d}\s).|\n)*)" % (section_char, level, section_char, level),
+ string,
+ regex.MULTILINE,
+ )
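A short usage sketch for the new `extract_sections` helper follows; only the function itself comes from this diff, the sample markdown is made up.

```python
# Usage sketch for the new extract_sections helper; the sample markdown is invented.
from fabricatio.fs.readers import extract_sections

doc = """# Intro
Some opening text.
## Setup
Install the package.
## Usage
Call the API.
"""

# Level-2 headers ('##'): each match is a (header_text, body) tuple, where the body
# runs until the next header of the same level or the end of the string.
for title, body in extract_sections(doc, level=2):
    print(title, "->", body.strip())
# Setup -> Install the package.
# Usage -> Call the API.
```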
@@ -177,27 +177,27 @@ class WorkFlow(WithBriefing, ToolBoxUsage):
  current_action = None
  try:
  # Process each action in sequence
- for step in self._instances:
+ for i,step in enumerate(self._instances):
  current_action = step.name
- logger.info(f"Executing step >> {current_action}")
+ logger.info(f"Executing step [{i}] >> {current_action}")

  # Get current context and execute action
  context = await self._context.get()
  act_task = create_task(step.act(context))
  # Handle task cancellation
  if task.is_cancelled():
- logger.warning(f"Task cancelled by task: {task.name}")
+ logger.warning(f"Workflow cancelled by task: {task.name}")
  act_task.cancel(f"Cancelled by task: {task.name}")
  break

  # Update context with modified values
  modified_ctx = await act_task
- logger.success(f"Step execution finished: {current_action}")
+ logger.success(f"Step [{i}] `{current_action}` execution finished.")
  if step.output_key:
- logger.success(f"Setting output to `{step.output_key}`")
+ logger.success(f"Setting action `{current_action}` output to `{step.output_key}`")
  await self._context.put(modified_ctx)

- logger.success(f"Workflow execution finished: {self.name}")
+ logger.success(f"Workflow `{self.name}` execution finished.")

  # Get final context and extract result
  final_ctx = await self._context.get()
@@ -2,17 +2,17 @@

  from typing import List

- from fabricatio.models.generic import Display, ProposedAble
+ from fabricatio.models.generic import SketchedAble


- class JudgeMent(ProposedAble, Display):
+ class JudgeMent(SketchedAble):
  """Represents a judgment result containing supporting/denying evidence and final verdict.

  The class stores both affirmative and denies evidence, truth and reasons lists along with the final boolean judgment.


  issue_to_judge: str
- """The issue to be judged"""
+ """The issue to be judged, true for affirmation, false for denial."""

  deny_evidence: List[str]
  """List of clues supporting the denial."""