fabricatio 0.3.14.dev0__cp312-cp312-win_amd64.whl → 0.3.14.dev2__cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. fabricatio/__init__.py +3 -5
  2. fabricatio/actions/article.py +31 -31
  3. fabricatio/actions/article_rag.py +58 -58
  4. fabricatio/actions/output.py +58 -24
  5. fabricatio/actions/rag.py +2 -3
  6. fabricatio/capabilities/advanced_judge.py +4 -7
  7. fabricatio/capabilities/advanced_rag.py +2 -1
  8. fabricatio/capabilities/censor.py +5 -4
  9. fabricatio/capabilities/check.py +27 -27
  10. fabricatio/capabilities/correct.py +22 -22
  11. fabricatio/capabilities/extract.py +33 -33
  12. fabricatio/capabilities/persist.py +103 -0
  13. fabricatio/capabilities/propose.py +2 -2
  14. fabricatio/capabilities/rag.py +37 -37
  15. fabricatio/capabilities/rating.py +66 -70
  16. fabricatio/capabilities/review.py +12 -11
  17. fabricatio/capabilities/task.py +19 -18
  18. fabricatio/decorators.py +9 -9
  19. fabricatio/{core.py → emitter.py} +17 -19
  20. fabricatio/journal.py +2 -4
  21. fabricatio/models/action.py +10 -12
  22. fabricatio/models/extra/aricle_rag.py +15 -12
  23. fabricatio/models/extra/article_base.py +4 -5
  24. fabricatio/models/extra/article_essence.py +2 -1
  25. fabricatio/models/extra/article_main.py +12 -12
  26. fabricatio/models/extra/article_outline.py +2 -1
  27. fabricatio/models/extra/article_proposal.py +1 -1
  28. fabricatio/models/extra/rag.py +2 -2
  29. fabricatio/models/extra/rule.py +2 -1
  30. fabricatio/models/generic.py +53 -136
  31. fabricatio/models/kwargs_types.py +1 -9
  32. fabricatio/models/role.py +15 -16
  33. fabricatio/models/task.py +3 -4
  34. fabricatio/models/tool.py +4 -4
  35. fabricatio/models/usages.py +139 -146
  36. fabricatio/parser.py +59 -99
  37. fabricatio/rust.cp312-win_amd64.pyd +0 -0
  38. fabricatio/rust.pyi +40 -60
  39. fabricatio/utils.py +37 -170
  40. fabricatio-0.3.14.dev2.data/scripts/tdown.exe +0 -0
  41. {fabricatio-0.3.14.dev0.data → fabricatio-0.3.14.dev2.data}/scripts/ttm.exe +0 -0
  42. {fabricatio-0.3.14.dev0.dist-info → fabricatio-0.3.14.dev2.dist-info}/METADATA +7 -7
  43. fabricatio-0.3.14.dev2.dist-info/RECORD +64 -0
  44. fabricatio-0.3.14.dev0.data/scripts/tdown.exe +0 -0
  45. fabricatio-0.3.14.dev0.dist-info/RECORD +0 -63
  46. {fabricatio-0.3.14.dev0.dist-info → fabricatio-0.3.14.dev2.dist-info}/WHEEL +0 -0
  47. {fabricatio-0.3.14.dev0.dist-info → fabricatio-0.3.14.dev2.dist-info}/licenses/LICENSE +0 -0
fabricatio/parser.py CHANGED
@@ -1,152 +1,112 @@
1
- """A module to parse text using regular expressions."""
1
+ """A module for capturing patterns in text using regular expressions."""
2
2
 
3
3
  import re
4
+ from dataclasses import dataclass, field
4
5
  from functools import lru_cache
5
- from re import Pattern, compile
6
- from typing import Any, Callable, Iterable, List, Optional, Self, Tuple, Type
6
+ from typing import Any, Callable, Iterable, List, Optional, Self, Tuple, Type, Union
7
7
 
8
8
  import ujson
9
- from fabricatio.rust import CONFIG
10
9
  from json_repair import repair_json
11
- from pydantic import BaseModel, ConfigDict, Field, PositiveInt, PrivateAttr, ValidationError
12
10
 
13
11
  from fabricatio.journal import logger
12
+ from fabricatio.rust import CONFIG
14
13
 
15
14
 
16
- class Capture(BaseModel):
15
+ @dataclass(frozen=True)
16
+ class Capture:
17
17
  """A class to capture patterns in text using regular expressions.
18
18
 
19
19
  Attributes:
20
- pattern (str): The regular expression pattern to search for.
21
- _compiled (Pattern): The compiled regular expression pattern.
20
+ target_groups (Tuple[int, ...]): The target groups to extract from the match.
21
+ pattern (str): The regex pattern to search for.
22
+ flags (int): Flags to apply when compiling the regex.
23
+ capture_type (Optional[str]): Optional hint for post-processing (e.g., 'json').
22
24
  """
23
25
 
24
- model_config = ConfigDict(use_attribute_docstrings=True)
25
- target_groups: Tuple[int, ...] = Field(default_factory=tuple)
26
- """The target groups to capture from the pattern."""
27
- pattern: str = Field(frozen=True)
26
+ pattern: str = field()
28
27
  """The regular expression pattern to search for."""
29
- flags: PositiveInt = Field(default=re.DOTALL | re.MULTILINE | re.IGNORECASE, frozen=True)
30
- """The flags to use when compiling the regular expression pattern."""
28
+ flags: int = re.DOTALL | re.MULTILINE | re.IGNORECASE
29
+ """Flags to control regex behavior (DOTALL, MULTILINE, IGNORECASE by default)."""
31
30
  capture_type: Optional[str] = None
32
- """The type of capture to perform, e.g., 'json', which is used to dispatch the fixer accordingly."""
33
- _compiled: Pattern = PrivateAttr()
34
-
35
- def model_post_init(self, __context: Any) -> None:
36
- """Initialize the compiled pattern."""
37
- self._compiled = compile(self.pattern, self.flags)
38
-
39
- def fix[T](self, text: str | Iterable[str] | T) -> str | List[str] | T:
40
- """Fix the text using the pattern.
31
+ """Optional type identifier for post-processing (e.g., 'json' for JSON repair)."""
32
+ target_groups: Tuple[int, ...] = field(default_factory=tuple)
33
+ """Tuple of group indices to extract from the match (1-based indexing)."""
41
34
 
42
- Args:
43
- text (str | List[str]): The text to fix.
44
-
45
- Returns:
46
- str | List[str]: The fixed text with the same type as input.
47
- """
35
+ def fix(self, text: Union[str, Iterable[str], Any]) -> Union[str, List[str], Any]:
36
+ """Fix the text based on capture_type (e.g., JSON repair)."""
48
37
  match self.capture_type:
49
38
  case "json" if CONFIG.general.use_json_repair:
50
- logger.debug("Applying json repair to text.")
39
+ logger.debug("Applying JSON repair to text.")
51
40
  if isinstance(text, str):
52
- return repair_json(text, ensure_ascii=False) # pyright: ignore [reportReturnType]
53
- return [repair_json(item, ensure_ascii=False) for item in
54
- text] # pyright: ignore [reportReturnType, reportGeneralTypeIssues]
41
+ return repair_json(text, ensure_ascii=False)
42
+ return [repair_json(item, ensure_ascii=False) for item in text]
55
43
  case _:
56
- return text # pyright: ignore [reportReturnType]
57
-
58
- def capture(self, text: str) -> Tuple[str, ...] | str | None:
59
- """Capture the first occurrence of the pattern in the given text.
60
-
61
- Args:
62
- text (str): The text to search the pattern in.
63
-
64
- Returns:
65
- str | None: The captured text if the pattern is found, otherwise None.
66
-
67
- """
68
- if (match := self._compiled.match(text) or self._compiled.search(text)) is None:
69
- logger.debug(f"Capture Failed {type(text)}: \n{text}")
44
+ return text
45
+
46
+ def capture(self, text: str) -> Optional[Union[str, Tuple[str, ...]]]:
47
+ """Capture the first match of the pattern in the text."""
48
+ compiled = re.compile(self.pattern, self.flags)
49
+ match = compiled.match(text) or compiled.search(text)
50
+ if match is None:
51
+ logger.debug(f"Capture Failed: {text}")
70
52
  return None
53
+
71
54
  groups = self.fix(match.groups())
72
55
  if self.target_groups:
73
56
  cap = tuple(groups[g - 1] for g in self.target_groups)
74
- logger.debug(f"Captured text: {'\n\n'.join(cap)}")
57
+ logger.debug(f"Captured texts: {'\n==\n'.join(cap)}")
75
58
  return cap
76
59
  cap = groups[0]
77
60
  logger.debug(f"Captured text: \n{cap}")
78
61
  return cap
79
62
 
80
- def convert_with[T](self, text: str, convertor: Callable[[Tuple[str, ...]], T] | Callable[[str], T]) -> T | None:
81
- """Convert the given text using the pattern.
82
-
83
- Args:
84
- text (str): The text to search the pattern in.
85
- convertor (Callable[[Tuple[str, ...]], T] | Callable[[str], T]): The function to convert the captured text.
86
-
87
- Returns:
88
- str | None: The converted text if the pattern is found, otherwise None.
89
- """
63
+ def convert_with(
64
+ self,
65
+ text: str,
66
+ convertor: Callable[[Union[str, Tuple[str, ...]]], Any],
67
+ ) -> Optional[Any]:
68
+ """Convert captured text using a provided function."""
90
69
  if (cap := self.capture(text)) is None:
91
70
  return None
92
71
  try:
93
- return convertor(cap) # pyright: ignore [reportArgumentType]
94
- except (ValueError, SyntaxError, ValidationError) as e:
95
- logger.error(f"Failed to convert text using {convertor.__name__} to convert.\nerror: {e}\n {cap}")
72
+ return convertor(cap)
73
+ except Exception as e: # noqa: BLE001
74
+ logger.error(f"Failed to convert text using {convertor.__name__}: {e}\n{cap}")
96
75
  return None
97
76
 
98
- def validate_with[K, T, E](
99
- self,
100
- text: str,
101
- target_type: Type[T],
102
- elements_type: Optional[Type[E]] = None,
103
- length: Optional[int] = None,
104
- deserializer: Callable[[Tuple[str, ...]], K] | Callable[[str], K] = ujson.loads,
105
- ) -> T | None:
106
- """Validate the given text using the pattern.
107
-
108
- Args:
109
- text (str): The text to search the pattern in.
110
- target_type (Type[T]): The expected type of the output, dict or list.
111
- elements_type (Optional[Type[E]]): The expected type of the elements in the output dict keys or list elements.
112
- length (Optional[int]): The expected length of the output, bool(length)==False means no length validation.
113
- deserializer (Callable[[Tuple[str, ...]], K] | Callable[[str], K]): The function to deserialize the captured text.
114
-
115
- Returns:
116
- T | None: The validated text if the pattern is found and the output is of the expected type, otherwise None.
117
- """
118
- judges = [lambda output_obj: isinstance(output_obj, target_type)]
77
+ def validate_with[T, K, E](
78
+ self,
79
+ text: str,
80
+ target_type: Type[T],
81
+ elements_type: Optional[Type[E]] = None,
82
+ length: Optional[int] = None,
83
+ deserializer: Callable[[Union[str, Tuple[str, ...]]], K] = lambda x: ujson.loads(x) if isinstance(x, str) else ujson.loads(x[0]),
84
+ ) -> Optional[T]:
85
+ """Deserialize and validate the captured text against expected types."""
86
+ judges = [lambda obj: isinstance(obj, target_type)]
119
87
  if elements_type:
120
- judges.append(lambda output_obj: all(isinstance(e, elements_type) for e in output_obj))
88
+ judges.append(lambda obj: all(isinstance(e, elements_type) for e in obj))
121
89
  if length:
122
- judges.append(lambda output_obj: len(output_obj) == length)
90
+ judges.append(lambda obj: len(obj) == length)
123
91
 
124
92
  if (out := self.convert_with(text, deserializer)) and all(j(out) for j in judges):
125
- return out # pyright: ignore [reportReturnType]
93
+ return out # type: ignore
126
94
  return None
127
95
 
128
96
  @classmethod
129
97
  @lru_cache(32)
130
98
  def capture_code_block(cls, language: str) -> Self:
131
- """Capture the first occurrence of a code block in the given text.
132
-
133
- Args:
134
- language (str): The text containing the code block.
135
-
136
- Returns:
137
- Self: The instance of the class with the captured code block.
138
- """
99
+ """Capture a code block of the given language."""
139
100
  return cls(pattern=f"```{language}(.*?)```", capture_type=language)
140
101
 
141
102
  @classmethod
142
103
  @lru_cache(32)
143
104
  def capture_generic_block(cls, language: str) -> Self:
144
- """Capture the first occurrence of a generic code block in the given text.
145
-
146
- Returns:
147
- Self: The instance of the class with the captured code block.
148
- """
149
- return cls(pattern=f"--- Start of {language} ---(.*?)--- end of {language} ---", capture_type=language)
105
+ """Capture a generic block of the given language."""
106
+ return cls(
107
+ pattern=f"--- Start of {language} ---(.*?)--- End of {language} ---",
108
+ capture_type=language,
109
+ )
150
110
 
151
111
 
152
112
  JsonCapture = Capture.capture_code_block("json")
Binary file
fabricatio/rust.pyi CHANGED
@@ -10,12 +10,12 @@ Key Features:
10
10
  - Cryptographic utilities: BLAKE3 hashing.
11
11
  - Text utilities: Word boundary splitting and word counting.
12
12
  """
13
+
13
14
  from enum import StrEnum
14
- from typing import Any, Dict, List, Optional, Self, Tuple, overload, Union
15
+ from typing import Any, Dict, List, Literal, Optional, Self, Tuple, Union, overload
15
16
 
16
17
  from pydantic import JsonValue
17
18
 
18
-
19
19
  class TemplateManager:
20
20
  """Template rendering engine using Handlebars templates.
21
21
 
@@ -47,10 +47,8 @@ class TemplateManager:
47
47
 
48
48
  @overload
49
49
  def render_template(self, name: str, data: Dict[str, Any]) -> str: ...
50
-
51
50
  @overload
52
51
  def render_template(self, name: str, data: List[Dict[str, Any]]) -> List[str]: ...
53
-
54
52
  def render_template(self, name: str, data: Dict[str, Any] | List[Dict[str, Any]]) -> str | List[str]:
55
53
  """Render a template with context data.
56
54
 
@@ -67,10 +65,8 @@ class TemplateManager:
67
65
 
68
66
  @overload
69
67
  def render_template_raw(self, template: str, data: Dict[str, Any]) -> str: ...
70
-
71
68
  @overload
72
69
  def render_template_raw(self, template: str, data: List[Dict[str, Any]]) -> List[str]: ...
73
-
74
70
  def render_template_raw(self, template: str, data: Dict[str, Any] | List[Dict[str, Any]]) -> str | List[str]:
75
71
  """Render a template with context data.
76
72
 
@@ -82,7 +78,6 @@ class TemplateManager:
82
78
  Rendered template content as string or list of strings
83
79
  """
84
80
 
85
-
86
81
  class BibManager:
87
82
  """BibTeX bibliography manager for parsing and querying citation data."""
88
83
 
@@ -191,7 +186,6 @@ class BibManager:
191
186
  Field value if found, None otherwise
192
187
  """
193
188
 
194
-
195
189
  def blake3_hash(content: bytes) -> str:
196
190
  """Calculate the BLAKE3 cryptographic hash of data.
197
191
 
@@ -202,11 +196,9 @@ def blake3_hash(content: bytes) -> str:
202
196
  Hex-encoded BLAKE3 hash string
203
197
  """
204
198
 
205
-
206
199
  def detect_language(string: str) -> str:
207
200
  """Detect the language of a given string."""
208
201
 
209
-
210
202
  def split_word_bounds(string: str) -> List[str]:
211
203
  """Split the string into words based on word boundaries.
212
204
 
@@ -217,7 +209,6 @@ def split_word_bounds(string: str) -> List[str]:
217
209
  A list of words extracted from the string.
218
210
  """
219
211
 
220
-
221
212
  def split_sentence_bounds(string: str) -> List[str]:
222
213
  """Split the string into sentences based on sentence boundaries.
223
214
 
@@ -228,7 +219,6 @@ def split_sentence_bounds(string: str) -> List[str]:
228
219
  A list of sentences extracted from the string.
229
220
  """
230
221
 
231
-
232
222
  def split_into_chunks(string: str, max_chunk_size: int, max_overlapping_rate: float = 0.3) -> List[str]:
233
223
  """Split the string into chunks of a specified size.
234
224
 
@@ -241,7 +231,6 @@ def split_into_chunks(string: str, max_chunk_size: int, max_overlapping_rate: fl
241
231
  A list of chunks extracted from the string.
242
232
  """
243
233
 
244
-
245
234
  def word_count(string: str) -> int:
246
235
  """Count the number of words in the string.
247
236
 
@@ -252,67 +241,51 @@ def word_count(string: str) -> int:
252
241
  The number of words in the string.
253
242
  """
254
243
 
255
-
256
244
  def is_chinese(string: str) -> bool:
257
245
  """Check if the given string is in Chinese."""
258
246
 
259
-
260
247
  def is_english(string: str) -> bool:
261
248
  """Check if the given string is in English."""
262
249
 
263
-
264
250
  def is_japanese(string: str) -> bool:
265
251
  """Check if the given string is in Japanese."""
266
252
 
267
-
268
253
  def is_korean(string: str) -> bool:
269
254
  """Check if the given string is in Korean."""
270
255
 
271
-
272
256
  def is_arabic(string: str) -> bool:
273
257
  """Check if the given string is in Arabic."""
274
258
 
275
-
276
259
  def is_russian(string: str) -> bool:
277
260
  """Check if the given string is in Russian."""
278
261
 
279
-
280
262
  def is_german(string: str) -> bool:
281
263
  """Check if the given string is in German."""
282
264
 
283
-
284
265
  def is_french(string: str) -> bool:
285
266
  """Check if the given string is in French."""
286
267
 
287
-
288
268
  def is_hindi(string: str) -> bool:
289
269
  """Check if the given string is in Hindi."""
290
270
 
291
-
292
271
  def is_italian(string: str) -> bool:
293
272
  """Check if the given string is in Italian."""
294
273
 
295
-
296
274
  def is_dutch(string: str) -> bool:
297
275
  """Check if the given string is in Dutch."""
298
276
 
299
-
300
277
  def is_portuguese(string: str) -> bool:
301
278
  """Check if the given string is in Portuguese."""
302
279
 
303
-
304
280
  def is_swedish(string: str) -> bool:
305
281
  """Check if the given string is in Swedish."""
306
282
 
307
-
308
283
  def is_turkish(string: str) -> bool:
309
284
  """Check if the given string is in Turkish."""
310
285
 
311
-
312
286
  def is_vietnamese(string: str) -> bool:
313
287
  """Check if the given string is in Vietnamese."""
314
288
 
315
-
316
289
  def tex_to_typst(string: str) -> str:
317
290
  """Convert TeX to Typst.
318
291
 
@@ -323,7 +296,6 @@ def tex_to_typst(string: str) -> str:
323
296
  The converted Typst string.
324
297
  """
325
298
 
326
-
327
299
  def convert_all_inline_tex(string: str) -> str:
328
300
  """Convert all inline TeX code in the string.
329
301
 
@@ -334,7 +306,6 @@ def convert_all_inline_tex(string: str) -> str:
334
306
  The converted string with inline TeX code replaced.
335
307
  """
336
308
 
337
-
338
309
  def convert_all_block_tex(string: str) -> str:
339
310
  """Convert all block TeX code in the string.
340
311
 
@@ -345,7 +316,6 @@ def convert_all_block_tex(string: str) -> str:
345
316
  The converted string with block TeX code replaced.
346
317
  """
347
318
 
348
-
349
319
  def fix_misplaced_labels(string: str) -> str:
350
320
  """A func to fix labels in a string.
351
321
 
@@ -356,7 +326,6 @@ def fix_misplaced_labels(string: str) -> str:
356
326
  The fixed string with labels properly placed.
357
327
  """
358
328
 
359
-
360
329
  def comment(string: str) -> str:
361
330
  """Add comment to the string.
362
331
 
@@ -367,7 +336,6 @@ def comment(string: str) -> str:
367
336
  The string with each line prefixed by '// '.
368
337
  """
369
338
 
370
-
371
339
  def uncomment(string: str) -> str:
372
340
  """Remove comment from the string.
373
341
 
@@ -378,7 +346,6 @@ def uncomment(string: str) -> str:
378
346
  The string with comments (lines starting with '// ' or '//') removed.
379
347
  """
380
348
 
381
-
382
349
  def split_out_metadata(string: str) -> Tuple[Optional[JsonValue], str]:
383
350
  """Split out metadata from a string.
384
351
 
@@ -389,7 +356,6 @@ def split_out_metadata(string: str) -> Tuple[Optional[JsonValue], str]:
389
356
  A tuple containing the metadata as a Python object (if parseable) and the remaining string.
390
357
  """
391
358
 
392
-
393
359
  def to_metadata(data: JsonValue) -> str:
394
360
  """Convert a Python object to a YAML string.
395
361
 
@@ -400,15 +366,12 @@ def to_metadata(data: JsonValue) -> str:
400
366
  The YAML string representation of the input data.
401
367
  """
402
368
 
403
-
404
369
  def convert_to_inline_formula(string: str) -> str:
405
370
  r"""Convert `$...$` to inline formula `\(...\)` and trim spaces."""
406
371
 
407
-
408
372
  def convert_to_block_formula(string: str) -> str:
409
373
  r"""Convert `$$...$$` to block formula `\[...\]` and trim spaces."""
410
374
 
411
-
412
375
  def inplace_update(string: str, wrapper: str, new_body: str) -> Optional[str]:
413
376
  """Replace content between wrapper strings.
414
377
 
@@ -422,7 +385,6 @@ def inplace_update(string: str, wrapper: str, new_body: str) -> Optional[str]:
422
385
 
423
386
  """
424
387
 
425
-
426
388
  def extract_body(string: str, wrapper: str) -> Optional[str]:
427
389
  """Extract the content between two occurrences of a wrapper string.
428
390
 
@@ -434,7 +396,6 @@ def extract_body(string: str, wrapper: str) -> Optional[str]:
434
396
  The content between the first two occurrences of the wrapper string if found, otherwise None.
435
397
  """
436
398
 
437
-
438
399
  class LLMConfig:
439
400
  """LLM configuration structure.
440
401
 
@@ -486,7 +447,6 @@ class LLMConfig:
486
447
  frequency_penalty: Optional[float]
487
448
  """Penalizes new tokens based on their frequency in text so far (-2.0-2.0)."""
488
449
 
489
-
490
450
  class EmbeddingConfig:
491
451
  """Embedding configuration structure."""
492
452
 
@@ -511,7 +471,6 @@ class EmbeddingConfig:
511
471
  api_key: Optional[SecretStr]
512
472
  """The API key."""
513
473
 
514
-
515
474
  class RagConfig:
516
475
  """RAG (Retrieval Augmented Generation) configuration structure."""
517
476
 
@@ -527,14 +486,12 @@ class RagConfig:
527
486
  milvus_dimensions: Optional[int]
528
487
  """The dimensions for Milvus vectors."""
529
488
 
530
-
531
489
  class DebugConfig:
532
490
  """Debug configuration structure."""
533
491
 
534
492
  log_level: Optional[str]
535
493
  """The logging level to use."""
536
494
 
537
-
538
495
  class TemplateManagerConfig:
539
496
  """Template manager configuration structure."""
540
497
 
@@ -547,7 +504,6 @@ class TemplateManagerConfig:
547
504
  template_suffix: Optional[str]
548
505
  """The suffix of the templates."""
549
506
 
550
-
551
507
  class TemplateConfig:
552
508
  """Template configuration structure."""
553
509
 
@@ -632,7 +588,6 @@ class TemplateConfig:
632
588
  chap_summary_template: str
633
589
  """The name of the chap summary template which will be used to generate a chapter summary."""
634
590
 
635
-
636
591
  class RoutingConfig:
637
592
  """Routing configuration structure for controlling request dispatching behavior."""
638
593
 
@@ -648,7 +603,6 @@ class RoutingConfig:
648
603
  cooldown_time: Optional[int]
649
604
  """Time to cooldown a deployment after failure in seconds."""
650
605
 
651
-
652
606
  class GeneralConfig:
653
607
  """General configuration structure for application-wide settings."""
654
608
 
@@ -658,7 +612,6 @@ class GeneralConfig:
658
612
  use_json_repair: bool
659
613
  """Whether to automatically repair malformed JSON."""
660
614
 
661
-
662
615
  class ToolBoxConfig:
663
616
  """Configuration for toolbox functionality."""
664
617
 
@@ -668,7 +621,6 @@ class ToolBoxConfig:
668
621
  data_module_name: str
669
622
  """The name of the module containing the data."""
670
623
 
671
-
672
624
  class PymitterConfig:
673
625
  """Pymitter configuration structure for controlling event emission and listener behavior."""
674
626
 
@@ -681,7 +633,6 @@ class PymitterConfig:
681
633
  max_listeners: int
682
634
  """The maximum number of listeners per event. -1 means unlimited."""
683
635
 
684
-
685
636
  class Config:
686
637
  """Configuration structure containing all system components."""
687
638
 
@@ -715,27 +666,23 @@ class Config:
715
666
  pymitter: PymitterConfig
716
667
  """Pymitter configuration."""
717
668
 
718
-
719
669
  CONFIG: Config
720
670
 
721
-
722
671
  class SecretStr:
723
672
  """A string that should not be exposed."""
724
673
 
725
674
  def __init__(self, source: str) -> None: ...
726
-
727
- def expose(self) -> str:
675
+ def get_secret_value(self) -> str:
728
676
  """Expose the secret string."""
729
677
 
730
-
731
678
  TEMPLATE_MANAGER: TemplateManager
732
679
 
733
-
734
680
  class Event:
735
681
  """Event class that represents a hierarchical event with segments.
736
682
 
737
683
  Events can be constructed from strings, lists of strings, or other Events.
738
684
  """
685
+
739
686
  segments: List[str]
740
687
 
741
688
  def __init__(self, segments: Optional[List[str]] = None) -> None:
@@ -841,12 +788,9 @@ class Event:
841
788
  """
842
789
 
843
790
  def __hash__(self) -> int: ...
844
-
845
791
  def __eq__(self, other: object) -> bool: ...
846
-
847
792
  def __ne__(self, other: object) -> bool: ...
848
793
 
849
-
850
794
  class TaskStatus(StrEnum, str):
851
795
  """Enumeration of possible task statuses."""
852
796
 
@@ -864,3 +808,39 @@ class TaskStatus(StrEnum, str):
864
808
 
865
809
  Cancelled: TaskStatus
866
810
  """Task has been cancelled."""
811
+
812
+ class TEIClient:
813
+ """Client for TEI reranking service.
814
+
815
+ Handles communication with a TEI reranking service to reorder text snippets
816
+ based on their relevance to a query.
817
+ """
818
+
819
+ def __init__(self, base_url: str) -> None:
820
+ """Initialize the TEI client.
821
+
822
+ Args:
823
+ base_url: URL to the TEI reranking service
824
+ """
825
+
826
+ async def arerank(
827
+ self,
828
+ query: str,
829
+ texts: List[str],
830
+ truncate: bool = False,
831
+ truncation_direction: Literal["Left", "Right"] = "Left",
832
+ ) -> List[Tuple[int, float]]:
833
+ """Rerank texts based on relevance to query.
834
+
835
+ Args:
836
+ query: The query to match texts against
837
+ texts: List of text snippets to rerank
838
+ truncate: Whether to truncate texts to fit model context
839
+ truncation_direction: Direction to truncate from ("Left" or "Right")
840
+
841
+ Returns:
842
+ List of tuples containing (original_index, relevance_score)
843
+
844
+ Raises:
845
+ RuntimeError: If reranking fails or truncation_direction is invalid
846
+ """