fabricatio-0.2.6.dev2-cp312-cp312-win_amd64.whl → fabricatio-0.2.7.dev3-cp312-cp312-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fabricatio/__init__.py +7 -24
- fabricatio/_rust.cp312-win_amd64.pyd +0 -0
- fabricatio/_rust.pyi +22 -0
- fabricatio/actions/article.py +150 -19
- fabricatio/actions/article_rag.py +35 -0
- fabricatio/actions/output.py +21 -6
- fabricatio/actions/rag.py +51 -3
- fabricatio/capabilities/correct.py +34 -4
- fabricatio/capabilities/rag.py +67 -16
- fabricatio/capabilities/rating.py +15 -6
- fabricatio/capabilities/review.py +7 -4
- fabricatio/capabilities/task.py +5 -5
- fabricatio/config.py +29 -21
- fabricatio/decorators.py +32 -0
- fabricatio/models/action.py +117 -43
- fabricatio/models/extra/article_essence.py +226 -0
- fabricatio/models/extra/article_main.py +359 -0
- fabricatio/models/extra/article_outline.py +276 -0
- fabricatio/models/extra/article_proposal.py +37 -0
- fabricatio/models/generic.py +95 -9
- fabricatio/models/kwargs_types.py +40 -10
- fabricatio/models/role.py +30 -6
- fabricatio/models/tool.py +6 -2
- fabricatio/models/usages.py +94 -47
- fabricatio/models/utils.py +29 -2
- fabricatio/parser.py +2 -0
- fabricatio/workflows/articles.py +12 -1
- fabricatio-0.2.7.dev3.data/scripts/tdown.exe +0 -0
- {fabricatio-0.2.6.dev2.dist-info → fabricatio-0.2.7.dev3.dist-info}/METADATA +6 -2
- fabricatio-0.2.7.dev3.dist-info/RECORD +46 -0
- {fabricatio-0.2.6.dev2.dist-info → fabricatio-0.2.7.dev3.dist-info}/WHEEL +1 -1
- fabricatio/models/extra.py +0 -171
- fabricatio-0.2.6.dev2.data/scripts/tdown.exe +0 -0
- fabricatio-0.2.6.dev2.dist-info/RECORD +0 -42
- {fabricatio-0.2.6.dev2.dist-info → fabricatio-0.2.7.dev3.dist-info}/licenses/LICENSE +0 -0
fabricatio/models/usages.py
CHANGED
@@ -7,14 +7,15 @@ import asyncstdlib
 import litellm
 from fabricatio._rust_instances import TEMPLATE_MANAGER
 from fabricatio.config import configs
+from fabricatio.decorators import logging_exec_time
 from fabricatio.journal import logger
 from fabricatio.models.generic import ScopedConfig, WithBriefing
 from fabricatio.models.kwargs_types import ChooseKwargs, EmbeddingKwargs, GenerateKwargs, LLMKwargs, ValidateKwargs
 from fabricatio.models.task import Task
 from fabricatio.models.tool import Tool, ToolBox
-from fabricatio.models.utils import Messages
+from fabricatio.models.utils import Messages, ok
 from fabricatio.parser import GenericCapture, JsonCapture
-from litellm import Router, stream_chunk_builder
+from litellm import RateLimitError, Router, stream_chunk_builder  # pyright: ignore [reportPrivateImportUsage]
 from litellm.types.router import Deployment, LiteLLM_Params, ModelInfo
 from litellm.types.utils import (
     Choices,
@@ -23,7 +24,7 @@ from litellm.types.utils import (
     StreamingChoices,
     TextChoices,
 )
-from litellm.utils import CustomStreamWrapper  # pyright: ignore [reportPrivateImportUsage]
+from litellm.utils import CustomStreamWrapper, token_counter  # pyright: ignore [reportPrivateImportUsage]
 from more_itertools import duplicates_everseen
 from pydantic import Field, NonNegativeInt, PositiveInt
 
@@ -33,6 +34,7 @@ if configs.cache.enabled and configs.cache.type:
 
 ROUTER = Router(
     routing_strategy="usage-based-routing-v2",
+    default_max_parallel_requests=configs.routing.max_parallel_requests,
     allowed_fails=configs.routing.allowed_fails,
     retry_after=configs.routing.retry_after,
     cooldown_time=configs.routing.cooldown_time,
@@ -70,14 +72,22 @@ class LLMUsage(ScopedConfig):
         """
         # Call the underlying asynchronous completion function with the provided and default parameters
         # noinspection PyTypeChecker,PydanticTypeChecker
-
         return await self._deploy(
             Deployment(
-                model_name=(
+                model_name=(
+                    m_name := ok(
+                        kwargs.get("model") or self.llm_model or configs.llm.model, "model name is not set at any place"
+                    )
+                ),  # pyright: ignore [reportCallIssue]
                 litellm_params=(
                     p := LiteLLM_Params(
-                        api_key=(
-
+                        api_key=ok(
+                            self.llm_api_key or configs.llm.api_key, "llm api key is not set at any place"
+                        ).get_secret_value(),
+                        api_base=ok(
+                            self.llm_api_endpoint or configs.llm.api_endpoint,
+                            "llm api endpoint is not set at any place",
+                        ).unicode_string(),
                         model=m_name,
                         tpm=self.llm_tpm or configs.llm.tpm,
                         rpm=self.llm_rpm or configs.llm.rpm,
@@ -88,14 +98,14 @@ class LLMUsage(ScopedConfig):
                 model_info=ModelInfo(id=hash(m_name + p.model_dump_json(exclude_none=True))),
             )
         ).acompletion(
-            messages=messages,
+            messages=messages,  # pyright: ignore [reportArgumentType]
             n=n or self.llm_generation_count or configs.llm.generation_count,
             model=m_name,
             temperature=kwargs.get("temperature") or self.llm_temperature or configs.llm.temperature,
             stop=kwargs.get("stop") or self.llm_stop_sign or configs.llm.stop_sign,
             top_p=kwargs.get("top_p") or self.llm_top_p or configs.llm.top_p,
             max_tokens=kwargs.get("max_tokens") or self.llm_max_tokens or configs.llm.max_tokens,
-            stream=kwargs.get("stream") or self.llm_stream or configs.llm.stream,
+            stream=ok(kwargs.get("stream") or self.llm_stream or configs.llm.stream, "stream is not set at any place"),
             cache={
                 "no-cache": kwargs.get("no_cache"),
                 "no-store": kwargs.get("no_store"),
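
The hunk above replaces bare fallback chains with ok(...), so a missing model name, API key, or endpoint fails fast with a clear message instead of passing None to litellm. Below is a minimal standalone sketch of that pattern; resolve_model and the sample model name are illustrative, only ok mirrors the helper added in fabricatio/models/utils.py further down.

from typing import Optional

from fabricatio.models.utils import ok  # helper added in this release (see the utils.py diff below)


def resolve_model(call_kwarg: Optional[str], instance_model: Optional[str], global_model: Optional[str]) -> str:
    # Same precedence as the hunk above: per-call kwarg, then instance field, then global config;
    # ok() raises ValueError with the given message if every source is None.
    return ok(call_kwarg or instance_model or global_model, "model name is not set at any place")


print(resolve_model(None, None, "openai/gpt-4o"))  # illustrative value -> "openai/gpt-4o"
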
@@ -109,6 +119,7 @@ class LLMUsage(ScopedConfig):
         question: str,
         system_message: str = "",
         n: PositiveInt | None = None,
+        stream_buffer_size: int = 50,
         **kwargs: Unpack[LLMKwargs],
     ) -> Sequence[TextChoices | Choices | StreamingChoices]:
         """Asynchronously invokes the language model with a question and optional system message.
@@ -117,6 +128,7 @@ class LLMUsage(ScopedConfig):
             question (str): The question to ask the model.
             system_message (str): The system message to provide context to the model. Defaults to an empty string.
             n (PositiveInt | None): The number of responses to generate. Defaults to the instance's `llm_generation_count` or the global configuration.
+            stream_buffer_size (int): The buffer size for streaming responses. Defaults to 50.
             **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage.
 
         Returns:
@@ -133,9 +145,14 @@ class LLMUsage(ScopedConfig):
             if not configs.debug.streaming_visible and (pack := stream_chunk_builder(await asyncstdlib.list())):
                 return pack.choices
             chunks = []
+            buffer = ""
             async for chunk in resp:
                 chunks.append(chunk)
-
+                buffer += chunk.choices[0].delta.content or ""
+                if len(buffer) > stream_buffer_size:
+                    print(buffer, end="")  # noqa: T201
+                    buffer = ""
+            print(buffer)  # noqa: T201
             if pack := stream_chunk_builder(chunks):
                 return pack.choices
         logger.critical(err := f"Unexpected response type: {type(resp)}")
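
The new stream_buffer_size parameter batches what gets printed while streaming: delta text accumulates until the buffer exceeds the threshold, is flushed, and whatever remains is printed once the stream ends. A self-contained sketch of that buffering behaviour; fake_stream and print_buffered are illustrative stand-ins, not package APIs.

import asyncio
from collections.abc import AsyncIterator


async def fake_stream() -> AsyncIterator[str]:
    # Stand-in for the delta contents yielded by a streaming response.
    for piece in ["Hello", ", ", "world", "!", " More", " tokens..."]:
        await asyncio.sleep(0)
        yield piece


async def print_buffered(stream: AsyncIterator[str], stream_buffer_size: int = 50) -> str:
    collected = ""
    buffer = ""
    async for delta in stream:
        collected += delta
        buffer += delta
        # Flush only when the buffer exceeds the threshold, as in the diff above.
        if len(buffer) > stream_buffer_size:
            print(buffer, end="")
            buffer = ""
    print(buffer)  # flush whatever is left once the stream ends
    return collected


asyncio.run(print_buffered(fake_stream(), stream_buffer_size=5))
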
@@ -171,6 +188,7 @@ class LLMUsage(ScopedConfig):
         **kwargs: Unpack[LLMKwargs],
     ) -> str: ...
 
+    @logging_exec_time
     async def aask(
         self,
         question: str | List[str],
@@ -187,8 +205,7 @@ class LLMUsage(ScopedConfig):
         Returns:
             str | List[str]: The content of the model's response message.
         """
-
-        match (question, system_message):
+        match (question, system_message or ""):
             case (list(q_seq), list(sm_seq)):
                 res = await gather(
                     *[
@@ -196,25 +213,31 @@ class LLMUsage(ScopedConfig):
                        for q, sm in zip(q_seq, sm_seq, strict=True)
                     ]
                 )
-
+                out = [r[0].message.content for r in res]  # pyright: ignore [reportAttributeAccessIssue]
             case (list(q_seq), str(sm)):
                 res = await gather(*[self.ainvoke(n=1, question=q, system_message=sm, **kwargs) for q in q_seq])
-
+                out = [r[0].message.content for r in res]  # pyright: ignore [reportAttributeAccessIssue]
             case (str(q), list(sm_seq)):
                 res = await gather(*[self.ainvoke(n=1, question=q, system_message=sm, **kwargs) for sm in sm_seq])
-
+                out = [r[0].message.content for r in res]  # pyright: ignore [reportAttributeAccessIssue]
             case (str(q), str(sm)):
-
+                out = ((await self.ainvoke(n=1, question=q, system_message=sm, **kwargs))[0]).message.content  # pyright: ignore [reportAttributeAccessIssue]
             case _:
                 raise RuntimeError("Should not reach here.")
 
+        logger.debug(
+            f"Response Token Count: {token_counter(text=out) if isinstance(out, str) else sum(token_counter(text=o) for o in out)}"  # pyright: ignore [reportOptionalIterable]
+        )
+        return out  # pyright: ignore [reportReturnType]
+
     @overload
     async def aask_validate[T](
         self,
         question: str,
         validator: Callable[[str], T | None],
-        default: T
+        default: T = ...,
         max_validations: PositiveInt = 2,
+        co_extractor: Optional[GenerateKwargs] = None,
         **kwargs: Unpack[GenerateKwargs],
     ) -> T: ...
     @overload
@@ -222,8 +245,9 @@ class LLMUsage(ScopedConfig):
         self,
         question: List[str],
         validator: Callable[[str], T | None],
-        default: T
+        default: T = ...,
         max_validations: PositiveInt = 2,
+        co_extractor: Optional[GenerateKwargs] = None,
         **kwargs: Unpack[GenerateKwargs],
     ) -> List[T]: ...
     @overload
@@ -233,6 +257,7 @@ class LLMUsage(ScopedConfig):
         validator: Callable[[str], T | None],
         default: None = None,
         max_validations: PositiveInt = 2,
+        co_extractor: Optional[GenerateKwargs] = None,
         **kwargs: Unpack[GenerateKwargs],
     ) -> Optional[T]: ...
 
@@ -243,6 +268,7 @@ class LLMUsage(ScopedConfig):
         validator: Callable[[str], T | None],
         default: None = None,
         max_validations: PositiveInt = 2,
+        co_extractor: Optional[GenerateKwargs] = None,
         **kwargs: Unpack[GenerateKwargs],
     ) -> List[Optional[T]]: ...
 
@@ -252,6 +278,7 @@ class LLMUsage(ScopedConfig):
         validator: Callable[[str], T | None],
         default: Optional[T] = None,
         max_validations: PositiveInt = 2,
+        co_extractor: Optional[GenerateKwargs] = None,
         **kwargs: Unpack[GenerateKwargs],
     ) -> Optional[T] | List[Optional[T]] | List[T] | T:
         """Asynchronously asks a question and validates the response using a given validator.
@@ -261,6 +288,7 @@ class LLMUsage(ScopedConfig):
             validator (Callable[[str], T | None]): A function to validate the response.
             default (T | None): Default value to return if validation fails. Defaults to None.
             max_validations (PositiveInt): Maximum number of validation attempts. Defaults to 2.
+            co_extractor (Optional[GenerateKwargs]): Keyword arguments for the co-extractor, if provided will enable co-extraction.
             **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage.
 
         Returns:
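
aask now returns the extracted message content directly, and the match arms above define the batching rules: two equal-length lists are zipped pairwise, a list paired with a single string fans out in parallel, and two strings make a single call. A usage sketch under the assumption that the instance has a resolvable model and working credentials; batch_ask is illustrative, not a package API.

from typing import List

from fabricatio.models.usages import LLMUsage


async def batch_ask(agent: LLMUsage, questions: List[str]) -> List[str]:
    # One shared system message, one answer per question, issued concurrently.
    return await agent.aask(question=questions, system_message="Answer in one sentence.")
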
@@ -271,26 +299,44 @@ class LLMUsage(ScopedConfig):
         async def _inner(q: str) -> Optional[T]:
             for lap in range(max_validations):
                 try:
-                    if (
+                    if (
+                        (response := await self.aask(question=q, **kwargs))
+                        or (
+                            co_extractor
+                            and (
+                                response := await self.aask(
+                                    question=(
+                                        TEMPLATE_MANAGER.render_template(
+                                            configs.templates.co_validation_template,
+                                            {"original_q": q, "original_a": response},
+                                        )
+                                    ),
+                                    **co_extractor,
+                                )
+                            )
+                        )
+                    ) and (validated := validator(response)):
                         logger.debug(f"Successfully validated the response at {lap}th attempt.")
                         return validated
+
+                except RateLimitError as e:
+                    logger.warning(f"Rate limit error: {e}")
+                    continue
                 except Exception as e:  # noqa: BLE001
                     logger.error(f"Error during validation: \n{e}")
                     break
-                kwargs
-
+                if not kwargs.get("no_cache"):
+                    kwargs["no_cache"] = True
+                    logger.debug("Closed the cache for the next attempt")
             if default is None:
                 logger.error(f"Failed to validate the response after {max_validations} attempts.")
             return default
 
-        if isinstance(question,
-            return await _inner(question)
-
-        return await gather(*[_inner(q) for q in question])
+        return await (gather(*[_inner(q) for q in question]) if isinstance(question, list) else _inner(question))
 
     async def aliststr(
         self, requirement: str, k: NonNegativeInt = 0, **kwargs: Unpack[ValidateKwargs[List[str]]]
-    ) -> List[str]:
+    ) -> Optional[List[str]]:
         """Asynchronously generates a list of strings based on a given requirement.
 
         Args:
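
aask_validate drives the loop above: the validator turns raw text into a value or returns None to request another attempt, a RateLimitError now logs a warning and retries instead of aborting, and the response cache is disabled after a failed lap so the retry is not served the same answer. A usage sketch; ask_for_int and parse_int are illustrative, and an LLMUsage instance with working credentials is assumed.

from fabricatio.models.usages import LLMUsage


async def ask_for_int(agent: LLMUsage) -> int:
    # The validator returns None to signal "retry"; aask_validate re-asks up to
    # max_validations times and falls back to `default` if every attempt fails.
    def parse_int(raw: str) -> int | None:
        raw = raw.strip()
        return int(raw) if raw.lstrip("-").isdigit() else None

    return await agent.aask_validate(
        question="Reply with a single integer: how many days are in a week?",
        validator=parse_int,
        default=7,
        max_validations=3,
    )
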
@@ -310,7 +356,7 @@ class LLMUsage(ScopedConfig):
             **kwargs,
         )
 
-    async def apathstr(self, requirement: str, **kwargs: Unpack[ChooseKwargs[List[str]]]) -> List[str]:
+    async def apathstr(self, requirement: str, **kwargs: Unpack[ChooseKwargs[List[str]]]) -> Optional[List[str]]:
         """Asynchronously generates a list of strings based on a given requirement.
 
         Args:
@@ -328,7 +374,7 @@ class LLMUsage(ScopedConfig):
             **kwargs,
         )
 
-    async def awhich_pathstr(self, requirement: str, **kwargs: Unpack[ValidateKwargs[List[str]]]) -> str:
+    async def awhich_pathstr(self, requirement: str, **kwargs: Unpack[ValidateKwargs[List[str]]]) -> Optional[str]:
         """Asynchronously generates a single path string based on a given requirement.
 
         Args:
@@ -338,15 +384,16 @@ class LLMUsage(ScopedConfig):
         Returns:
             str: The validated response as a single string.
         """
-
-
-
-
-
-            )
-        ).pop()
+        if paths := await self.apathstr(
+            requirement,
+            k=1,
+            **kwargs,
+        ):
+            return paths.pop()
 
-
+        return None
+
+    async def ageneric_string(self, requirement: str, **kwargs: Unpack[ValidateKwargs[str]]) -> Optional[str]:
         """Asynchronously generates a generic string based on a given requirement.
 
         Args:
@@ -356,7 +403,7 @@ class LLMUsage(ScopedConfig):
         Returns:
             str: The generated string.
         """
-        return await self.aask_validate(
+        return await self.aask_validate(  # pyright: ignore [reportReturnType]
             TEMPLATE_MANAGER.render_template(
                 configs.templates.generic_string_template,
                 {"requirement": requirement, "language": GenericCapture.capture_type},
@@ -371,7 +418,7 @@ class LLMUsage(ScopedConfig):
         choices: List[T],
         k: NonNegativeInt = 0,
         **kwargs: Unpack[ValidateKwargs[List[T]]],
-    ) -> List[T]:
+    ) -> Optional[List[T]]:
         """Asynchronously executes a multi-choice decision-making process, generating a prompt based on the instruction and options, and validates the returned selection results.
 
         Args:
@@ -436,13 +483,13 @@ class LLMUsage(ScopedConfig):
         Raises:
             ValueError: If validation fails after maximum attempts or if no valid selection is made.
         """
-        return (
+        return ok(
             await self.achoose(
                 instruction=instruction,
                 choices=choices,
                 k=1,
                 **kwargs,
-            )
+            ),
         )[0]
 
     async def ajudge(
@@ -499,7 +546,7 @@ class EmbeddingUsage(LLMUsage):
         """
         # check seq length
         max_len = self.embedding_max_sequence_length or configs.embedding.max_sequence_length
-        if any(len(t) > max_len for t in input_text):
+        if max_len and any(len(t) > max_len for t in input_text):
             logger.error(err := f"Input text exceeds maximum sequence length 56,584.")
             raise ValueError(err)
 
@@ -513,10 +560,10 @@ class EmbeddingUsage(LLMUsage):
             or configs.embedding.timeout
             or self.llm_timeout
             or configs.llm.timeout,
-            api_key=(
+            api_key=ok(
                 self.embedding_api_key or configs.embedding.api_key or self.llm_api_key or configs.llm.api_key
             ).get_secret_value(),
-            api_base=(
+            api_base=ok(
                 self.embedding_api_endpoint
                 or configs.embedding.api_endpoint
                 or self.llm_api_endpoint
@@ -565,7 +612,7 @@ class ToolBoxUsage(LLMUsage):
         self,
         task: Task,
         **kwargs: Unpack[ChooseKwargs[List[ToolBox]]],
-    ) -> List[ToolBox]:
+    ) -> Optional[List[ToolBox]]:
         """Asynchronously executes a multi-choice decision-making process to choose toolboxes.
 
         Args:
@@ -590,7 +637,7 @@ class ToolBoxUsage(LLMUsage):
         task: Task,
         toolbox: ToolBox,
         **kwargs: Unpack[ChooseKwargs[List[Tool]]],
-    ) -> List[Tool]:
+    ) -> Optional[List[Tool]]:
         """Asynchronously executes a multi-choice decision-making process to choose tools.
 
         Args:
@@ -630,11 +677,11 @@ class ToolBoxUsage(LLMUsage):
         tool_choose_kwargs = tool_choose_kwargs or {}
 
         # Choose the toolboxes
-        chosen_toolboxes = await self.choose_toolboxes(task, **box_choose_kwargs)
+        chosen_toolboxes = ok(await self.choose_toolboxes(task, **box_choose_kwargs))
         # Choose the tools
         chosen_tools = []
         for toolbox in chosen_toolboxes:
-            chosen_tools.extend(await self.choose_tools(task, toolbox, **tool_choose_kwargs))
+            chosen_tools.extend(ok(await self.choose_tools(task, toolbox, **tool_choose_kwargs)))
         return chosen_tools
 
     async def gather_tools(self, task: Task, **kwargs: Unpack[ChooseKwargs]) -> List[Tool]:

fabricatio/models/utils.py
CHANGED
@@ -11,11 +11,11 @@ class Message(BaseModel):
     """A class representing a message."""
 
     model_config = ConfigDict(use_attribute_docstrings=True)
-    role: Literal["user", "system", "assistant"]
+    role: Literal["user", "system", "assistant"]
     """
     Who is sending the message.
     """
-    content: str
+    content: str
     """
     The content of the message.
    """
@@ -165,3 +165,30 @@ async def ask_edit(
         if edited:
             res.append(edited)
     return res
+
+
+def override_kwargs[T](kwargs: Dict[str, T], **overrides) -> Dict[str, T]:
+    """Override the values in kwargs with the provided overrides."""
+    kwargs.update({k: v for k, v in overrides.items() if v is not None})
+    return kwargs
+
+
+def fallback_kwargs[T](kwargs: Dict[str, T], **overrides) -> Dict[str, T]:
+    """Fallback the values in kwargs with the provided overrides."""
+    kwargs.update({k: v for k, v in overrides.items() if k not in kwargs})
+    return kwargs
+
+
+def ok[T](val: Optional[T], msg: str = "Value is None") -> T:
+    """Check if a value is None and raise a ValueError with the provided message if it is.
+
+    Args:
+        val: The value to check.
+        msg: The message to include in the ValueError if val is None.
+
+    Returns:
+        T: The value if it is not None.
+    """
+    if val is None:
+        raise ValueError(msg)
+    return val
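
A short usage sketch of the three helpers added above; the sample values are illustrative. override_kwargs replaces existing entries but ignores None overrides, fallback_kwargs only fills keys that are absent, and ok converts a missing value into an immediate ValueError. Note that both kwargs helpers mutate the mapping in place and also return it.

from fabricatio.models.utils import fallback_kwargs, ok, override_kwargs

base = {"temperature": 0.7, "top_p": 0.9}

# Replace existing values, but only with overrides that are not None.
print(override_kwargs(dict(base), temperature=0.2, top_p=None))
# {'temperature': 0.2, 'top_p': 0.9}

# Only fill keys that are missing from the original mapping.
print(fallback_kwargs(dict(base), temperature=0.0, max_tokens=1024))
# {'temperature': 0.7, 'top_p': 0.9, 'max_tokens': 1024}

# Turn a missing setting into an early, descriptive failure.
api_key = ok("sk-example", "llm api key is not set at any place")
print(api_key)
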
fabricatio/parser.py
CHANGED
@@ -52,6 +52,7 @@ class Capture(BaseModel):
             case _:
                 return text
 
+
     def capture(self, text: str) -> Tuple[str, ...] | str | None:
         """Capture the first occurrence of the pattern in the given text.
 
@@ -64,6 +65,7 @@ class Capture(BaseModel):
         """
         match = self._compiled.search(text)
         if match is None:
+            logger.debug(f"Capture Failed: \n{text}")
             return None
         groups = self.fix(match.groups()) if configs.general.use_json_repair else match.groups()
         if self.target_groups:
fabricatio/workflows/articles.py
CHANGED
@@ -1,6 +1,6 @@
 """Store article essence in the database."""
 
-from fabricatio.actions.article import GenerateArticleProposal, GenerateOutline
+from fabricatio.actions.article import CorrectOutline, CorrectProposal, GenerateArticleProposal, GenerateOutline
 from fabricatio.actions.output import DumpFinalizedOutput
 from fabricatio.models.action import WorkFlow
 
@@ -13,3 +13,14 @@ WriteOutlineWorkFlow = WorkFlow(
         DumpFinalizedOutput(output_key="task_output"),
     ),
 )
+WriteOutlineCorrectedWorkFlow = WorkFlow(
+    name="Generate Article Outline",
+    description="Generate an outline for an article. dump the outline to the given path. in typst format.",
+    steps=(
+        GenerateArticleProposal,
+        CorrectProposal(output_key="article_proposal"),
+        GenerateOutline,
+        CorrectOutline(output_key="to_dump"),
+        DumpFinalizedOutput(output_key="task_output"),
+    ),
+)
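
The new workflow interleaves correction steps with generation and keeps the same constructor shape as WriteOutlineWorkFlow: steps mix Action classes and configured instances, and each instance's output_key appears to name the context slot that later steps read. Below is a hypothetical variant composed the same way, assuming the output_key wiring works as in the workflow above; WriteCorrectedProposalWorkFlow is illustrative and not part of the package.

from fabricatio.actions.article import CorrectProposal, GenerateArticleProposal
from fabricatio.actions.output import DumpFinalizedOutput
from fabricatio.models.action import WorkFlow

# Illustrative only: a shorter pipeline that corrects the proposal and dumps it,
# assuming "to_dump" is the slot DumpFinalizedOutput reads, as in the diff above.
WriteCorrectedProposalWorkFlow = WorkFlow(
    name="Generate Corrected Article Proposal",
    description="Generate an article proposal, correct it, and dump the result to the given path.",
    steps=(
        GenerateArticleProposal,
        CorrectProposal(output_key="to_dump"),
        DumpFinalizedOutput(output_key="task_output"),
    ),
)
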
Binary file

{fabricatio-0.2.6.dev2.dist-info → fabricatio-0.2.7.dev3.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fabricatio
-Version: 0.2.6.dev2
+Version: 0.2.7.dev3
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Rust
 Classifier: Programming Language :: Python :: 3.12
@@ -24,9 +24,13 @@ Requires-Dist: questionary>=2.1.0
 Requires-Dist: regex>=2024.11.6
 Requires-Dist: rich>=13.9.4
 Requires-Dist: pymilvus>=2.5.4 ; extra == 'rag'
-Requires-Dist: fabricatio[rag] ; extra == 'full'
+Requires-Dist: fabricatio[calc,plot,rag] ; extra == 'full'
+Requires-Dist: sympy>=1.13.3 ; extra == 'calc'
+Requires-Dist: matplotlib>=3.10.1 ; extra == 'plot'
 Provides-Extra: rag
 Provides-Extra: full
+Provides-Extra: calc
+Provides-Extra: plot
 License-File: LICENSE
 Summary: A LLM multi-agent framework.
 Keywords: ai,agents,multi-agent,llm,pyo3
fabricatio-0.2.7.dev3.dist-info/RECORD
ADDED
@@ -0,0 +1,46 @@
+fabricatio-0.2.7.dev3.dist-info/METADATA,sha256=6O8sm_fbzBC9kYdt9UKzf8ZoXEfFcLGWKHOgJMvC52Q,14236
+fabricatio-0.2.7.dev3.dist-info/WHEEL,sha256=jABKVkLC9kJr8mi_er5jOqpiQUjARSLXDUIIxDqsS50,96
+fabricatio-0.2.7.dev3.dist-info/licenses/LICENSE,sha256=do7J7EiCGbq0QPbMAL_FqLYufXpHnCnXBOuqVPwSV8Y,1088
+fabricatio/actions/article.py,sha256=AgxNKIRLXF9T-TdrhLPE8NWmT8QZXz1QvFnouvuoRBc,7684
+fabricatio/actions/article_rag.py,sha256=PiOFxI6VTmLXm3BK-01g_KH1mTE9uOtnA-CwUjt16AU,1456
+fabricatio/actions/output.py,sha256=K7xsBH8MjXRH6JOy3ZO94KCQzX2jNrwPPK_rRXVkS0E,1161
+fabricatio/actions/rag.py,sha256=QBdzEM8MloM_ahx5pTBZAETm9_631lTe_0ih_he_Iuo,2759
+fabricatio/capabilities/correct.py,sha256=8GOU2VBPUakjG-r59SqsCgCD0QHX-l__IynCLO-ib8Q,6482
+fabricatio/capabilities/propose.py,sha256=y3kge5g6bb8HYuV8e9h4MdqOMTlsfAIZpqE_cagWPTY,1593
+fabricatio/capabilities/rag.py,sha256=XVvfH6rcog-moj1WCgwtR-l0-NdbFR6-fMQFLG7_asY,17690
+fabricatio/capabilities/rating.py,sha256=yEPqL5_DqVMj_AH9cMvKsHdMnSbvm8dN6PaKHLsJUPQ,14904
+fabricatio/capabilities/review.py,sha256=uc4WV9Xu-4rXAXZ-k-32HXAgm4WMLbDRfiZdRP7Nepg,11384
+fabricatio/capabilities/task.py,sha256=vtS0YOe639vN8iTrkP2WK0AJVCr5N_JAaJuvRGyY2Fg,4639
+fabricatio/config.py,sha256=hUv5XMzOkEw8cQjsVHTpPPix52IKwmxjBsZM6Px3xZI,16915
+fabricatio/core.py,sha256=VQ_JKgUGIy2gZ8xsTBZCdr_IP7wC5aPg0_bsOmjQ588,6458
+fabricatio/decorators.py,sha256=C0Gi7wcXC-0sWITqsSv3JdBGcgVJOlRvOt0FfO0aUsA,7554
+fabricatio/fs/curd.py,sha256=N6l2MncjrFfnXBRtteRouXp5Rjy8EAKC_i29_G-zz98,4618
+fabricatio/fs/readers.py,sha256=EZKN_AZdrp8DggJECP53QHw3uHeSDf-AwCAA_V7fNKU,1202
+fabricatio/fs/__init__.py,sha256=PCf0s_9KDjVfNw7AfPoJzGt3jMq4gJOfbcT4pb0D0ZY,588
+fabricatio/journal.py,sha256=stnEP88aUBA_GmU9gfTF2EZI8FS2OyMLGaMSTgK4QgA,476
+fabricatio/models/action.py,sha256=UlflniS__MMrUXglu_U3PDFAtKEjVsKEix17AT9oP3M,8769
+fabricatio/models/events.py,sha256=QvlnS8FEELg6KNabcJMeh2GV_y0ZBzKOPphcteKYWYU,4183
+fabricatio/models/extra/article_essence.py,sha256=DUESuK4CGgkRvIMoJCv4l8MNp5MawRYoNOtLCrFRPXY,9229
+fabricatio/models/extra/article_main.py,sha256=F6rhHMICErzvtRIEWdxS5AoY9thLhUUeDofoJbNF9ZI,13984
+fabricatio/models/extra/article_outline.py,sha256=0t-aI3OtY1O1_dhwDDm1y4kUdqoh4bmQ8voNe6MDU4w,12452
+fabricatio/models/extra/article_proposal.py,sha256=eXtomW88urP9M4aKbVNN9dct0GH-fBwYOM_Rcq3d7j4,1771
+fabricatio/models/generic.py,sha256=TXNPGeVOWJnCKJ6KZU8T-SWQ913woX4Xt1BLJ0x4V9M,16820
+fabricatio/models/kwargs_types.py,sha256=chJ-rHaeBVRUPuORHuGR3DdNxxTUrotz0eflPEh4l4w,5474
+fabricatio/models/role.py,sha256=mmQbJ6GKr2Gx3wtjEz8d-vYoXs09ffcEkT_eCXaDd3E,2782
+fabricatio/models/task.py,sha256=8NaR7ojQWyM740EDTqt9stwHKdrD6axCRpLKo0QzS-I,10492
+fabricatio/models/tool.py,sha256=kD0eB7OxO9geZOxO6JIKvCBeG-KOpRAkfRZqK_WGfW4,7105
+fabricatio/models/usages.py,sha256=BSqTENSva8Flga3bPBfwuc1nHo5Z_29oYzar99NbjLM,31566
+fabricatio/models/utils.py,sha256=yjxPZ6N7QGpGwkI_Vb28Ud3EhkdlB-tyfGRHAZMcGxs,5872
+fabricatio/parser.py,sha256=9Jzw-yV6uKbFvf6sPna-XHdziVGVBZWvPctgX_6ODL8,6251
+fabricatio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+fabricatio/toolboxes/arithmetic.py,sha256=WLqhY-Pikv11Y_0SGajwZx3WhsLNpHKf9drzAqOf_nY,1369
+fabricatio/toolboxes/fs.py,sha256=l4L1CVxJmjw9Ld2XUpIlWfV0_Fu_2Og6d3E13I-S4aE,736
+fabricatio/toolboxes/__init__.py,sha256=KBJi5OG_pExscdlM7Bnt_UF43j4I3Lv6G71kPVu4KQU,395
+fabricatio/workflows/articles.py,sha256=G5HGRr-DHuYuEcfhFdFAuDvTTJ9aSU_UQ2yYXEjTMtM,1047
+fabricatio/workflows/rag.py,sha256=-YYp2tlE9Vtfgpg6ROpu6QVO8j8yVSPa6yDzlN3qVxs,520
+fabricatio/_rust.pyi,sha256=dGTGV7viu3YAGl1cRKIWrdHPc1hlwk3_hbaDaswxdVo,3831
+fabricatio/_rust_instances.py,sha256=2GwF8aVfYNemRI2feBzH1CZfBGno-XJJE5imJokGEYw,314
+fabricatio/__init__.py,sha256=SzBYsRhZeL77jLtfJEjmoHOSwHwUGyvMATX6xfndLDM,1135
+fabricatio/_rust.cp312-win_amd64.pyd,sha256=JqLbZHAa0qBcJyWzAWC4Px5MTCxV9VN_BMsfm4saGus,1840128
+fabricatio-0.2.7.dev3.data/scripts/tdown.exe,sha256=2sqdWL-XEloL8IsQRQ9wZ_4O0aaLO02jgeb8KWCLgdM,3402752
+fabricatio-0.2.7.dev3.dist-info/RECORD,,