fabricatio 0.2.5.dev3__cp312-cp312-manylinux_2_34_x86_64.whl → 0.2.5.dev5__cp312-cp312-manylinux_2_34_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fabricatio/actions/rag.py CHANGED
@@ -14,7 +14,7 @@ class InjectToDB(Action, RAG):
 
     async def _execute[T: PrepareVectorization](
         self, to_inject: T | List[T], collection_name: Optional[str] = "my_collection", **cxt: Unpack
-    ) -> str:
+    ) -> Optional[str]:
         if not isinstance(to_inject, list):
             to_inject = [to_inject]
 
@@ -1,37 +1,37 @@
 """A module for the task capabilities of the Fabricatio library."""
 
-from typing import List, Type, Unpack, overload
+from typing import List, Optional, Type, Unpack, overload
 
 from fabricatio.models.generic import ProposedAble
-from fabricatio.models.kwargs_types import GenerateKwargs
+from fabricatio.models.kwargs_types import ValidateKwargs
 from fabricatio.models.usages import LLMUsage
 
 
-class Propose[M: ProposedAble](LLMUsage):
+class Propose(LLMUsage):
     """A class that proposes an Obj based on a prompt."""
 
     @overload
-    async def propose(
+    async def propose[M: ProposedAble](
         self,
         cls: Type[M],
         prompt: List[str],
-        **kwargs: Unpack[GenerateKwargs[M]],
-    ) -> List[M]: ...
+        **kwargs: Unpack[ValidateKwargs[M]],
+    ) -> Optional[List[M]]: ...
 
     @overload
-    async def propose(
+    async def propose[M: ProposedAble](
         self,
         cls: Type[M],
         prompt: str,
-        **kwargs: Unpack[GenerateKwargs[M]],
-    ) -> M: ...
+        **kwargs: Unpack[ValidateKwargs[M]],
+    ) -> Optional[M]: ...
 
-    async def propose(
+    async def propose[M: ProposedAble](
         self,
         cls: Type[M],
         prompt: List[str] | str,
-        **kwargs: Unpack[GenerateKwargs[M]],
-    ) -> List[M] | M:
+        **kwargs: Unpack[ValidateKwargs[M]],
+    ) -> Optional[List[M] | M]:
         """Asynchronously proposes a task based on a given prompt and parameters.
 
         Parameters:
@@ -42,14 +42,8 @@ class Propose[M: ProposedAble](LLMUsage):
         Returns:
             A Task object based on the proposal result.
         """
-        if isinstance(prompt, str):
-            return await self.aask_validate(
-                question=cls.create_json_prompt(prompt),
-                validator=cls.instantiate_from_string,
-                **kwargs,
-            )
-        return await self.aask_validate_batch(
-            questions=[cls.create_json_prompt(p) for p in prompt],
+        return await self.aask_validate(
+            question=cls.create_json_prompt(prompt),
             validator=cls.instantiate_from_string,
             **kwargs,
         )
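Downstream of this change, `Propose.propose` binds its type parameter per call and may return None when the LLM output fails validation, instead of batching through `aask_validate_batch`. A minimal call-site sketch under stated assumptions: the `Drafter` subclass and the prompt are made up for illustration, and an LLM endpoint is assumed to be configured for the instance.

import asyncio

from fabricatio.capabilities.propose import Propose
from fabricatio.models.task import Task


class Drafter(Propose):
    """Hypothetical class that only mixes in the Propose capability."""


async def demo() -> None:
    drafter = Drafter()
    # The result is now Optional: None signals that validation of the LLM output failed.
    task = await drafter.propose(Task, "Draft a task that summarizes a README file.")
    if task is None:
        print("proposal could not be validated; fall back here")
    else:
        print(task.briefing)


if __name__ == "__main__":
    asyncio.run(demo())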
@@ -1,15 +1,14 @@
 """A module that provides functionality to rate tasks based on a rating manual and score range."""
 
-from asyncio import gather
 from itertools import permutations
 from random import sample
-from typing import Dict, List, Set, Tuple, Union, Unpack, overload
+from typing import Dict, List, Optional, Set, Tuple, Union, Unpack, overload
 
 from fabricatio._rust_instances import template_manager
 from fabricatio.config import configs
 from fabricatio.journal import logger
 from fabricatio.models.generic import WithBriefing
-from fabricatio.models.kwargs_types import GenerateKwargs, ValidateKwargs
+from fabricatio.models.kwargs_types import ValidateKwargs
 from fabricatio.models.usages import LLMUsage
 from fabricatio.parser import JsonCapture
 from more_itertools import flatten, windowed
@@ -25,11 +24,11 @@ class GiveRating(WithBriefing, LLMUsage):
 
     async def rate_fine_grind(
         self,
-        to_rate: str,
+        to_rate: str | List[str],
         rating_manual: Dict[str, str],
         score_range: Tuple[float, float],
-        **kwargs: Unpack[ValidateKwargs],
-    ) -> Dict[str, float]:
+        **kwargs: Unpack[ValidateKwargs[Dict[str, float]]],
+    ) -> Optional[Dict[str, float] | List[Dict[str, float]]]:
         """Rate a given string based on a rating manual and score range.
 
         Args:
@@ -63,9 +62,21 @@ class GiveRating(WithBriefing, LLMUsage):
                         "rating_manual": rating_manual,
                     },
                 )
-            ),
+            )
+            if isinstance(to_rate, str)
+            else [
+                template_manager.render_template(
+                    configs.templates.rate_fine_grind_template,
+                    {
+                        "to_rate": item,
+                        "min_score": score_range[0],
+                        "max_score": score_range[1],
+                        "rating_manual": rating_manual,
+                    },
+                )
+                for item in to_rate
+            ],
             validator=_validator,
-            system_message=f"# your personal briefing: \n{self.briefing}",
             **kwargs,
         )
 
@@ -96,7 +107,7 @@ class GiveRating(WithBriefing, LLMUsage):
         criteria: Set[str],
         score_range: Tuple[float, float] = (0.0, 1.0),
         **kwargs: Unpack[ValidateKwargs],
-    ) -> Union[Dict[str, float], List[Dict[str, float]]]:
+    ) -> Optional[Dict[str, float] | List[Dict[str, float]]]:
         """Rate a given string or a sequence of strings based on a topic, criteria, and score range.
 
         Args:
@@ -110,16 +121,13 @@ class GiveRating(WithBriefing, LLMUsage):
             Union[Dict[str, float], List[Dict[str, float]]]: A dictionary with the ratings for each criterion if a single string is provided,
                 or a list of dictionaries with the ratings for each criterion if a sequence of strings is provided.
         """
-        manual = await self.draft_rating_manual(topic, criteria, **kwargs)
-        if isinstance(to_rate, str):
-            return await self.rate_fine_grind(to_rate, manual, score_range, **kwargs)
-        if isinstance(to_rate, list):
-            return await gather(*[self.rate_fine_grind(item, manual, score_range, **kwargs) for item in to_rate])
-        raise ValueError("to_rate must be a string or a list of strings")
+        manual = await self.draft_rating_manual(topic, criteria, **kwargs) or dict(zip(criteria, criteria, strict=True))
+
+        return await self.rate_fine_grind(to_rate, manual, score_range, **kwargs)
 
     async def draft_rating_manual(
-        self, topic: str, criteria: Set[str], **kwargs: Unpack[ValidateKwargs]
-    ) -> Dict[str, str]:
+        self, topic: str, criteria: Set[str], **kwargs: Unpack[ValidateKwargs[Dict[str, str]]]
+    ) -> Optional[Dict[str, str]]:
         """Drafts a rating manual based on a topic and dimensions.
 
         Args:
@@ -151,16 +159,15 @@ class GiveRating(WithBriefing, LLMUsage):
                 )
             ),
             validator=_validator,
-            system_message=f"# your personal briefing: \n{self.briefing}",
-            **kwargs,
+            **self.prepend(kwargs),
         )
 
     async def draft_rating_criteria(
         self,
         topic: str,
         criteria_count: NonNegativeInt = 0,
-        **kwargs: Unpack[ValidateKwargs],
-    ) -> Set[str]:
+        **kwargs: Unpack[ValidateKwargs[Set[str]]],
+    ) -> Optional[Set[str]]:
         """Drafts rating dimensions based on a topic.
 
         Args:
@@ -182,10 +189,9 @@ class GiveRating(WithBriefing, LLMUsage):
                 )
             ),
             validator=lambda resp: set(out)
-            if (out := JsonCapture.validate_with(resp, list, str, criteria_count))
+            if (out := JsonCapture.validate_with(resp, list, str, criteria_count)) is not None
             else out,
-            system_message=f"# your personal briefing: \n{self.briefing}",
-            **kwargs,
+            **self.prepend(kwargs),
         )
 
     async def draft_rating_criteria_from_examples(
@@ -196,7 +202,7 @@ class GiveRating(WithBriefing, LLMUsage):
         reasons_count: PositiveInt = 2,
         criteria_count: PositiveInt = 5,
         **kwargs: Unpack[ValidateKwargs],
-    ) -> Set[str]:
+    ) -> Optional[Set[str]]:
         """Asynchronously drafts a set of rating criteria based on provided examples.
 
         This function generates rating criteria by analyzing examples and extracting reasons for comparison,
@@ -220,11 +226,10 @@ class GiveRating(WithBriefing, LLMUsage):
         if m:
             examples = sample(examples, m)
 
-        kwargs = GenerateKwargs(system_message=f"# your personal briefing: \n{self.briefing}", **kwargs)
         # extract reasons from the comparison of ordered pairs of extracted from examples
         reasons = flatten(
-            await self.aask_validate_batch(
-                questions=[
+            await self.aask_validate(
+                question=[
                     template_manager.render_template(
                         configs.templates.extract_reasons_from_examples_template,
                         {
@@ -239,7 +244,7 @@ class GiveRating(WithBriefing, LLMUsage):
                 validator=lambda resp: JsonCapture.validate_with(
                     resp, target_type=list, elements_type=str, length=reasons_count
                 ),
-                **kwargs,
+                **self.prepend(kwargs),
             )
         )
         # extract certain mount of criteria from reasons according to their importance and frequency
@@ -264,7 +269,7 @@ class GiveRating(WithBriefing, LLMUsage):
         self,
         topic: str,
         criteria: Set[str],
-        **kwargs: Unpack[ValidateKwargs],
+        **kwargs: Unpack[ValidateKwargs[float]],
     ) -> Dict[str, float]:
         """Drafts rating weights for a given topic and criteria using the Klee method.
 
@@ -279,12 +284,12 @@ class GiveRating(WithBriefing, LLMUsage):
         if len(criteria) < 2:  # noqa: PLR2004
             raise ValueError("At least two criteria are required to draft rating weights")
 
-        criteria = list(criteria)  # freeze the order
-        windows = windowed(criteria, 2)
+        criteria_seq = list(criteria)  # freeze the order
+        windows = windowed(criteria_seq, 2)
 
         # get the importance multiplier indicating how important is second criterion compared to the first one
-        relative_weights = await self.aask_validate_batch(
-            questions=[
+        relative_weights = await self.aask_validate(
+            question=[
                 template_manager.render_template(
                     configs.templates.draft_rating_weights_klee_template,
                     {
@@ -296,13 +301,13 @@ class GiveRating(WithBriefing, LLMUsage):
                 for pair in windows
             ],
             validator=lambda resp: JsonCapture.validate_with(resp, target_type=float),
-            **GenerateKwargs(system_message=f"# your personal briefing: \n{self.briefing}", **kwargs),
+            **self.prepend(kwargs),
         )
         weights = [1]
         for rw in relative_weights:
             weights.append(weights[-1] * rw)
         total = sum(weights)
-        return dict(zip(criteria, [w / total for w in weights], strict=True))
+        return dict(zip(criteria_seq, [w / total for w in weights], strict=True))
 
     async def composite_score(
         self,
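`rate_fine_grind` now accepts either a single string or a list, renders one prompt per item inside a single `aask_validate` call (the old path went through `asyncio.gather`), and can return None. A sketch under stated assumptions: `Judge` is a made-up subclass, the rating manual is illustrative, and a configured LLM endpoint is assumed.

from fabricatio.capabilities.rating import GiveRating


class Judge(GiveRating):
    """Hypothetical rater used only for illustration."""


async def score_drafts() -> None:
    judge = Judge(name="judge", description="scores drafts for quality")
    manual = {"clarity": "Is the text easy to follow?", "accuracy": "Are the claims correct?"}
    # A list input is now handled by the same method instead of a gather() over per-item calls.
    scores = await judge.rate_fine_grind(["draft A", "draft B"], manual, (0.0, 1.0))
    if scores is None:
        print("rating could not be validated")
    else:
        print(scores)  # one score dict per rated string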
@@ -1,15 +1,14 @@
 """A module that provides functionality to rate tasks based on a rating manual and score range."""
 
-from typing import List, Optional, Self, Set, Unpack
+from typing import List, Optional, Self, Set, Unpack, cast
 
 from fabricatio import template_manager
 from fabricatio.capabilities.propose import Propose
 from fabricatio.capabilities.rating import GiveRating
 from fabricatio.config import configs
 from fabricatio.models.generic import Base, Display, ProposedAble, WithBriefing
-from fabricatio.models.kwargs_types import GenerateKwargs, ReviewKwargs
+from fabricatio.models.kwargs_types import ReviewKwargs, ValidateKwargs
 from fabricatio.models.task import Task
-from pydantic import PrivateAttr
 from questionary import Choice, checkbox
 from rich import print
 
@@ -76,7 +75,7 @@ class ReviewResult[T](ProposedAble, Display):
     problem_solutions: List[ProblemSolutions]
     """Collection of problems identified during review along with their potential solutions."""
 
-    _ref: T = PrivateAttr(None)
+    _ref: T
     """Reference to the original object that was reviewed."""
 
     def update_topic(self, topic: str) -> Self:
@@ -100,8 +99,8 @@ class ReviewResult[T](ProposedAble, Display):
         Returns:
             ReviewResult[K]: The current instance with updated reference type.
         """
-        self._ref = ref
-        return self
+        self._ref = ref  # pyright: ignore [reportAttributeAccessIssue]
+        return cast(ReviewResult[K], self)
 
     def deref(self) -> T:
         """Retrieve the referenced object that was reviewed.
@@ -178,14 +177,14 @@ class Review(GiveRating, Propose):
             ReviewResult[Task[T]]: A review result containing identified problems and proposed solutions,
                 with a reference to the original task.
         """
-        return await self.review_obj(task, **kwargs)
+        return cast(ReviewResult[Task[T]], await self.review_obj(task, **kwargs))
 
     async def review_string(
         self,
         text: str,
         topic: str,
         criteria: Optional[Set[str]] = None,
-        **kwargs: Unpack[GenerateKwargs[ReviewResult[str]]],
+        **kwargs: Unpack[ValidateKwargs[ReviewResult[str]]],
     ) -> ReviewResult[str]:
         """Review a string based on specified topic and criteria.
 
@@ -197,7 +196,7 @@ class Review(GiveRating, Propose):
             topic (str): The subject topic for the review criteria.
             criteria (Optional[Set[str]], optional): A set of criteria for the review.
                 If not provided, criteria will be drafted automatically. Defaults to None.
-            **kwargs (Unpack[GenerateKwargs]): Additional keyword arguments for the LLM usage.
+            **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
 
         Returns:
             ReviewResult[str]: A review result containing identified problems and proposed solutions,
@@ -1,7 +1,7 @@
 """A module for the task capabilities of the Fabricatio library."""
 
 from types import CodeType
-from typing import Any, Dict, List, Optional, Tuple, Unpack
+from typing import Any, Dict, List, Optional, Tuple, Unpack, cast
 
 import orjson
 from fabricatio._rust_instances import template_manager
@@ -22,7 +22,7 @@ class ProposeTask(WithBriefing, Propose):
     async def propose_task[T](
         self,
         prompt: str,
-        **kwargs: Unpack[ValidateKwargs],
+        **kwargs: Unpack[ValidateKwargs[Task[T]]],
     ) -> Task[T]:
         """Asynchronously proposes a task based on a given prompt and parameters.
 
@@ -37,7 +37,7 @@ class ProposeTask(WithBriefing, Propose):
             logger.error(err := f"{self.name}: Prompt must be provided.")
             raise ValueError(err)
 
-        return await self.propose(Task, prompt, system_message=f"# your personal briefing: \n{self.briefing}", **kwargs)
+        return await self.propose(Task, prompt, **self.prepend(cast(Dict[str, Any], kwargs)))
 
 
 class HandleTask(WithBriefing, ToolBoxUsage):
@@ -49,7 +49,7 @@ class HandleTask(WithBriefing, ToolBoxUsage):
         tools: List[Tool],
         data: Dict[str, Any],
         **kwargs: Unpack[ValidateKwargs],
-    ) -> Tuple[CodeType, List[str]]:
+    ) -> Optional[Tuple[CodeType, List[str]]]:
         """Asynchronously drafts the tool usage code for a task based on a given task object and tools."""
         logger.info(f"Drafting tool usage code for task: {task.briefing}")
 
@@ -81,8 +81,7 @@ class HandleTask(WithBriefing, ToolBoxUsage):
         return await self.aask_validate(
             question=q,
             validator=_validator,
-            system_message=f"# your personal briefing: \n{self.briefing}",
-            **kwargs,
+            **self.prepend(cast(Dict[str, Any], kwargs)),
         )
 
     async def handle_fin_grind(
@@ -99,10 +98,10 @@ class HandleTask(WithBriefing, ToolBoxUsage):
         tools = await self.gather_tools_fine_grind(task, box_choose_kwargs, tool_choose_kwargs)
         logger.info(f"{self.name} have gathered {[t.name for t in tools]}")
 
-        if tools:
+        if tools and (pack := await self.draft_tool_usage_code(task, tools, data, **kwargs)):
             executor = ToolExecutor(candidates=tools, data=data)
-            code, to_extract = await self.draft_tool_usage_code(task, tools, data, **kwargs)
 
+            code, to_extract = pack
             cxt = executor.execute(code)
             if to_extract:
                 return tuple(cxt.get(k) for k in to_extract)
fabricatio/config.py CHANGED
@@ -1,5 +1,6 @@
 """Configuration module for the Fabricatio application."""
 
+from pathlib import Path
 from typing import List, Literal, Optional
 
 from appdirs import user_config_dir
@@ -67,7 +68,7 @@ class LLMConfig(BaseModel):
     temperature: NonNegativeFloat = Field(default=1.0)
     """The temperature of the LLM model. Controls randomness in generation. Set to 1.0 as per request."""
 
-    stop_sign: str | List[str] = Field(default=("\n\n\n", "User:"))
+    stop_sign: str | List[str] = Field(default_factory=lambda :["\n\n\n", "User:"])
     """The stop sign of the LLM model. No default stop sign specified."""
 
     top_p: NonNegativeFloat = Field(default=0.35)
@@ -143,7 +144,7 @@ class DebugConfig(BaseModel):
     log_level: Literal["DEBUG", "INFO", "SUCCESS", "WARNING", "ERROR", "CRITICAL"] = Field(default="INFO")
     """The log level of the application."""
 
-    log_file: FilePath = Field(default=rf"{ROAMING_DIR}\fabricatio.log")
+    log_file: FilePath = Field(default=Path(rf"{ROAMING_DIR}\fabricatio.log"))
     """The log file of the application."""
 
     rotation: int = Field(default=1)
@@ -161,7 +162,7 @@ class TemplateConfig(BaseModel):
 
     model_config = ConfigDict(use_attribute_docstrings=True)
     template_dir: List[DirectoryPath] = Field(
-        default_factory=lambda: [DirectoryPath(r".\templates"), DirectoryPath(rf"{ROAMING_DIR}\templates")]
+        default_factory=lambda: [Path(r".\templates"), Path(rf"{ROAMING_DIR}\templates")]
     )
     """The directory containing the templates."""
     active_loading: bool = Field(default=False)
@@ -234,12 +235,15 @@ class GeneralConfig(BaseModel):
     """Global configuration class."""
 
     model_config = ConfigDict(use_attribute_docstrings=True)
-    workspace: DirectoryPath = Field(default=DirectoryPath(r"."))
+    workspace: DirectoryPath = Field(default=Path())
     """The workspace directory for the application."""
 
     confirm_on_ops: bool = Field(default=True)
     """Whether to confirm on operations."""
 
+    use_json_repair: bool = Field(default=True)
+    """Whether to use JSON repair."""
+
 
 
 class ToolBoxConfig(BaseModel):
fabricatio/fs/readers.py CHANGED
@@ -3,8 +3,8 @@
 from pathlib import Path
 from typing import Dict
 
+import orjson
 from magika import Magika
-from orjson import orjson
 
 from fabricatio.config import configs
 from fabricatio.journal import logger
fabricatio/journal.py CHANGED
@@ -18,6 +18,7 @@ logger.add(
 )
 logger.add(sys.stderr, level=configs.debug.log_level)
 
+__all__ = ["logger"]
 if __name__ == "__main__":
     logger.debug("This is a trace message.")
     logger.info("This is an information message.")
@@ -75,7 +75,7 @@ class WorkFlow(WithBriefing, ToolBoxUsage):
     _context: Queue[Dict[str, Any]] = PrivateAttr(default_factory=lambda: Queue(maxsize=1))
     """ The context dictionary to be used for workflow execution."""
 
-    _instances: Tuple[Action, ...] = PrivateAttr(...)
+    _instances: Tuple[Action, ...] = PrivateAttr(default_factory=tuple)
     """ The instances of the workflow steps."""
 
     steps: Tuple[Union[Type[Action], Action], ...] = Field(...)
@@ -18,7 +18,7 @@ class Event(BaseModel):
     """ The segments of the namespaces."""
 
     @classmethod
-    def instantiate_from(cls, event: EventLike) -> Self:
+    def instantiate_from(cls, event: EventLike) -> "Event":
         """Create an Event instance from a string or list of strings or an Event instance.
 
         Args:
@@ -35,7 +35,7 @@ class Event(BaseModel):
         return cls(segments=event)
 
     @classmethod
-    def quick_instantiate(cls, event: EventLike) -> Self:
+    def quick_instantiate(cls, event: EventLike) -> "Event":
         """Create an Event instance from a string or list of strings or an Event instance and push a wildcard and pending segment.
 
         Args:
@@ -59,7 +59,7 @@ class Event(BaseModel):
 
     def clone(self) -> Self:
         """Clone the event."""
-        return Event(segments=list(self.segments))
+        return self.__class__(segments=list(self.segments))
 
     def push(self, segment: str) -> Self:
         """Push a segment to the event."""
@@ -113,6 +113,8 @@ class Event(BaseModel):
         """Return the hash of the event, using the collapsed string."""
         return hash(self.collapse())
 
-    def __eq__(self, other: str | List[str] | Self) -> bool:
+    def __eq__(self, other: object) -> bool:
         """Check if the event is equal to another event or a string."""
+        if not isinstance(other, (str , list , Event)):
+            return False
         return self.collapse() == Event.instantiate_from(other).collapse()
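Event equality is now total over arbitrary objects (non event-like values compare unequal instead of failing inside `instantiate_from`), and `clone()` builds `self.__class__` so subclasses survive cloning. A local sketch that needs no LLM; the `fabricatio.models.events` import path is assumed:

from fabricatio.models.events import Event  # import path assumed

event = Event(segments=["task", "draft", "pending"])

print(event == ["task", "draft", "pending"])  # True: list input is still coerced via instantiate_from
print(event == 42)                            # False: no longer errors out on non event-like values
print(type(event.clone()))                    # clone() now returns self.__class__, preserving subclasses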
@@ -9,23 +9,23 @@ from pydantic import Field
 class Equation(Base):
     """Structured representation of mathematical equations (including their physical or conceptual meanings)."""
 
-    description: str = Field(...)
+    description: str
     """A concise explanation of the equation's meaning, purpose, and relevance in the context of the research."""
 
-    latex_code: str = Field(...)
+    latex_code: str
     """The LaTeX code used to represent the equation in a publication-ready format."""
 
 
 class Figure(Base):
     """Structured representation of figures (including their academic significance and explanatory captions)."""
 
-    description: str = Field(...)
+    description: str
     """A detailed explanation of the figure's content and its role in conveying key insights."""
 
-    figure_caption: str = Field(...)
+    figure_caption: str
     """The caption accompanying the figure, summarizing its main points and academic value."""
 
-    figure_path: str = Field(...)
+    figure_path: str
     """The file path to the figure"""
 
 
@@ -34,7 +34,7 @@ class Highlightings(Base):
 
     # Academic Achievements Showcase
     highlighted_equations: List[Equation] = Field(default_factory=list)
-    """Core mathematical equations that represent breakthroughs in the field, accompanied by explanations of their physical or conceptual significance."""
+    """Core mathematical equations that represent breakthroughs in the field, accompanied by explanations of their physical or conceptual significance,Should always be in LaTeX format wrapped in $ or $$ signs."""
 
     highlighted_algorithms: List[str] = Field(default_factory=list)
     """Pseudocode for key algorithms, annotated to highlight innovative components."""
@@ -53,39 +53,42 @@ class ArticleEssence(ProposedAble, Display, PrepareVectorization):
     title: str = Field(...)
     """The full title of the paper, including any subtitles if applicable."""
 
-    authors: List[str] = Field(default_factory=list)
+    authors: List[str]
     """A list of the paper's authors, typically in the order of contribution."""
 
-    keywords: List[str] = Field(default_factory=list)
+    keywords: List[str]
     """A list of keywords that summarize the paper's focus and facilitate indexing."""
 
-    publication_year: int = Field(None)
+    publication_year: int
     """The year in which the paper was published."""
 
     # Core Content Elements
-    domain: List[str] = Field(default_factory=list)
+    highlightings: Highlightings = Field(default_factory=Highlightings)
+    """A collection of highlighted elements in the paper, including equations, algorithms, figures, and tables."""
+
+    domain: List[str]
     """The research domains or fields addressed by the paper (e.g., ['Natural Language Processing', 'Computer Vision'])."""
 
     abstract: str = Field(...)
     """A structured abstract that outlines the research problem, methodology, and conclusions in three distinct sections."""
 
-    core_contributions: List[str] = Field(default_factory=list)
+    core_contributions: List[str]
     """Key academic contributions that distinguish the paper from prior work in the field."""
 
-    technical_novelty: List[str] = Field(default_factory=list)
+    technical_novelty: List[str]
     """Specific technical innovations introduced by the research, listed as individual points."""
 
     # Academic Discussion Dimensions
-    research_problem: str = Field("")
+    research_problems: List[str]
     """A clearly defined research question or problem addressed by the study."""
 
-    limitations: List[str] = Field(default_factory=list)
+    limitations: List[str]
     """An analysis of the methodological or experimental limitations of the research."""
 
-    future_work: List[str] = Field(default_factory=list)
+    future_work: List[str]
     """Suggestions for potential directions or topics for follow-up studies."""
 
-    impact_analysis: str = Field("")
+    impact_analysis: List[str]
     """An assessment of the paper's potential influence on the development of the field."""
 
     def _prepare_vectorization_inner(self) -> str:
@@ -2,7 +2,7 @@
 
 from abc import abstractmethod
 from pathlib import Path
-from typing import Callable, Iterable, List, Optional, Self, Union, final
+from typing import Any, Callable, Dict, Iterable, List, Optional, Self, Union, final
 
 import orjson
 from fabricatio._rust import blake3_hash
@@ -67,6 +67,18 @@ class WithBriefing(Named, Described):
         """
         return f"{self.name}: {self.description}" if self.description else self.name
 
+    def prepend[D: Dict[str, Any]](self, kwargs: D) -> D:
+        """Prepend the briefing to the system message in the kwargs.
+
+        Args:
+            kwargs (Dict[str, Any]): The keyword arguments to modify.
+
+        Returns:
+            Dict[str, Any]: The modified keyword arguments.
+        """
+        kwargs["system_message"] = f"# your personal briefing: \n{self.briefing}\n" + kwargs.get("system_message", "")
+        return kwargs
+
 
 class WithFormatedJsonSchema(Base):
     """Class that provides a formatted JSON schema of the model."""
@@ -361,3 +373,4 @@ class ScopedConfig(Base):
         for attr_name in ScopedConfig.model_fields:
             if (attr := getattr(self, attr_name)) is not None and getattr(other, attr_name) is None:
                 setattr(other, attr_name, attr)
+        return self