fabricatio 0.2.6.dev3__cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. fabricatio/__init__.py +60 -0
  2. fabricatio/_rust.cp39-win_amd64.pyd +0 -0
  3. fabricatio/_rust.pyi +116 -0
  4. fabricatio/_rust_instances.py +10 -0
  5. fabricatio/actions/article.py +81 -0
  6. fabricatio/actions/output.py +19 -0
  7. fabricatio/actions/rag.py +25 -0
  8. fabricatio/capabilities/correct.py +115 -0
  9. fabricatio/capabilities/propose.py +49 -0
  10. fabricatio/capabilities/rag.py +369 -0
  11. fabricatio/capabilities/rating.py +339 -0
  12. fabricatio/capabilities/review.py +278 -0
  13. fabricatio/capabilities/task.py +113 -0
  14. fabricatio/config.py +400 -0
  15. fabricatio/core.py +181 -0
  16. fabricatio/decorators.py +179 -0
  17. fabricatio/fs/__init__.py +29 -0
  18. fabricatio/fs/curd.py +149 -0
  19. fabricatio/fs/readers.py +46 -0
  20. fabricatio/journal.py +21 -0
  21. fabricatio/models/action.py +158 -0
  22. fabricatio/models/events.py +120 -0
  23. fabricatio/models/extra.py +171 -0
  24. fabricatio/models/generic.py +406 -0
  25. fabricatio/models/kwargs_types.py +158 -0
  26. fabricatio/models/role.py +48 -0
  27. fabricatio/models/task.py +299 -0
  28. fabricatio/models/tool.py +189 -0
  29. fabricatio/models/usages.py +682 -0
  30. fabricatio/models/utils.py +167 -0
  31. fabricatio/parser.py +149 -0
  32. fabricatio/py.typed +0 -0
  33. fabricatio/toolboxes/__init__.py +15 -0
  34. fabricatio/toolboxes/arithmetic.py +62 -0
  35. fabricatio/toolboxes/fs.py +31 -0
  36. fabricatio/workflows/articles.py +15 -0
  37. fabricatio/workflows/rag.py +11 -0
  38. fabricatio-0.2.6.dev3.data/scripts/tdown.exe +0 -0
  39. fabricatio-0.2.6.dev3.dist-info/METADATA +432 -0
  40. fabricatio-0.2.6.dev3.dist-info/RECORD +42 -0
  41. fabricatio-0.2.6.dev3.dist-info/WHEEL +4 -0
  42. fabricatio-0.2.6.dev3.dist-info/licenses/LICENSE +21 -0
fabricatio/capabilities/task.py ADDED
@@ -0,0 +1,113 @@
+ """A module for the task capabilities of the Fabricatio library."""
+
+ from types import CodeType
+ from typing import Any, Dict, List, Optional, Tuple, Unpack, cast
+
+ import orjson
+ from fabricatio._rust_instances import TEMPLATE_MANAGER
+ from fabricatio.capabilities.propose import Propose
+ from fabricatio.config import configs
+ from fabricatio.journal import logger
+ from fabricatio.models.generic import WithBriefing
+ from fabricatio.models.kwargs_types import ChooseKwargs, ValidateKwargs
+ from fabricatio.models.task import Task
+ from fabricatio.models.tool import Tool, ToolExecutor
+ from fabricatio.models.usages import ToolBoxUsage
+ from fabricatio.parser import JsonCapture, PythonCapture
+
+
+ class ProposeTask(WithBriefing, Propose):
+     """A class that proposes a task based on a prompt."""
+
+     async def propose_task[T](
+         self,
+         prompt: str,
+         **kwargs: Unpack[ValidateKwargs[Task[T]]],
+     ) -> Task[T]:
+         """Asynchronously proposes a task based on a given prompt and parameters.
+
+         Parameters:
+             prompt: The prompt text for proposing a task, which is a string that must be provided.
+             **kwargs: The keyword arguments for the LLM (Large Language Model) usage.
+
+         Returns:
+             A Task object based on the proposal result.
+         """
+         if not prompt:
+             logger.error(err := f"{self.name}: Prompt must be provided.")
+             raise ValueError(err)
+
+         return await self.propose(Task, prompt, **self.prepend(cast(Dict[str, Any], kwargs)))
+
+
+ class HandleTask(WithBriefing, ToolBoxUsage):
+     """A class that handles a task based on a task object."""
+
+     async def draft_tool_usage_code(
+         self,
+         task: Task,
+         tools: List[Tool],
+         data: Dict[str, Any],
+         **kwargs: Unpack[ValidateKwargs],
+     ) -> Optional[Tuple[CodeType, List[str]]]:
+         """Asynchronously drafts the tool usage code for a task based on a given task object and tools."""
+         logger.info(f"Drafting tool usage code for task: {task.briefing}")
+
+         if not tools:
+             err = f"{self.name}: Tools must be provided to draft the tool usage code."
+             logger.error(err)
+             raise ValueError(err)
+
+         def _validator(response: str) -> Tuple[CodeType, List[str]] | None:
+             if (source := PythonCapture.convert_with(response, lambda resp: compile(resp, "<string>", "exec"))) and (
+                 to_extract := JsonCapture.convert_with(response, orjson.loads)
+             ):
+                 return source, to_extract
+
+             return None
+
+         q = TEMPLATE_MANAGER.render_template(
+             configs.templates.draft_tool_usage_code_template,
+             {
+                 "data_module_name": configs.toolbox.data_module_name,
+                 "tool_module_name": configs.toolbox.tool_module_name,
+                 "task": task.briefing,
+                 "deps": task.dependencies_prompt,
+                 "tools": [{"name": t.name, "briefing": t.briefing} for t in tools],
+                 "data": data,
+             },
+         )
+         logger.debug(f"Code Drafting Question: \n{q}")
+         return await self.aask_validate(
+             question=q,
+             validator=_validator,
+             **self.prepend(cast(Dict[str, Any], kwargs)),
+         )
+
+     async def handle_fin_grind(
+         self,
+         task: Task,
+         data: Dict[str, Any],
+         box_choose_kwargs: Optional[ChooseKwargs] = None,
+         tool_choose_kwargs: Optional[ChooseKwargs] = None,
+         **kwargs: Unpack[ValidateKwargs],
+     ) -> Optional[Tuple]:
+         """Asynchronously handles a task based on a given task object and parameters."""
+         logger.info(f"Handling task: \n{task.briefing}")
+
+         tools = await self.gather_tools_fine_grind(task, box_choose_kwargs, tool_choose_kwargs)
+         logger.info(f"{self.name} have gathered {[t.name for t in tools]}")
+
+         if tools and (pack := await self.draft_tool_usage_code(task, tools, data, **kwargs)):
+             executor = ToolExecutor(candidates=tools, data=data)
+
+             code, to_extract = pack
+             cxt = executor.execute(code)
+             if to_extract:
+                 return tuple(cxt.get(k) for k in to_extract)
+
+         return None
+
+     async def handle(self, task: Task, data: Dict[str, Any], **kwargs: Unpack[ValidateKwargs]) -> Optional[Tuple]:
+         """Asynchronously handles a task based on a given task object and parameters."""
+         return await self.handle_fin_grind(task, data, **kwargs)
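To make the contract enforced by `_validator` in `draft_tool_usage_code` concrete: a usable LLM reply must contain both a Python snippet (captured by `PythonCapture` and compiled) and a JSON list naming the variables to pull out of the execution context afterwards. The following standalone sketch reproduces that round trip with plain `compile`/`exec` in place of fabricatio's `ToolExecutor`; the snippet text and variable names are illustrative only, not part of the package.

# Standalone sketch of the code-plus-extraction contract used by HandleTask.
# No fabricatio imports; ToolExecutor.execute is replaced by a bare exec().
import json
from types import CodeType
from typing import Any, Dict, List, Tuple

# What a well-formed model reply boils down to after PythonCapture/JsonCapture:
source_text = "result = sum(range(1, 11))"          # the drafted tool-usage code
to_extract: List[str] = json.loads('["result"]')     # JSON list of names to return

code: CodeType = compile(source_text, "<string>", "exec")

cxt: Dict[str, Any] = {}   # stands in for the executor's data/tool context
exec(code, cxt)            # ToolExecutor.execute would also inject tools and data here

extracted: Tuple[Any, ...] = tuple(cxt.get(k) for k in to_extract)
print(extracted)           # (55,)

The real `handle_fin_grind` does the same thing, except the context is pre-populated with the gathered tools and the caller's `data` mapping before the compiled code runs.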
fabricatio/config.py ADDED
@@ -0,0 +1,400 @@
+ """Configuration module for the Fabricatio application."""
+
+ from pathlib import Path
+ from typing import List, Literal, Optional
+
+ from appdirs import user_config_dir
+ from litellm.types.caching import LiteLLMCacheType
+ from pydantic import (
+     BaseModel,
+     ConfigDict,
+     DirectoryPath,
+     Field,
+     FilePath,
+     HttpUrl,
+     NonNegativeFloat,
+     PositiveFloat,
+     PositiveInt,
+     SecretStr,
+ )
+ from pydantic_settings import (
+     BaseSettings,
+     PydanticBaseSettingsSource,
+     PyprojectTomlConfigSettingsSource,
+     SettingsConfigDict,
+     TomlConfigSettingsSource,
+ )
+
+ from fabricatio.models.kwargs_types import CacheKwargs
+
+ ROAMING_DIR = user_config_dir("fabricatio", "", roaming=True)
+
+
+ class LLMConfig(BaseModel):
+     """LLM configuration class.
+
+     Attributes:
+         api_endpoint (HttpUrl): OpenAI API Endpoint.
+         api_key (SecretStr): OpenAI API key. Empty by default for security reasons, should be set before use.
+         timeout (PositiveInt): The timeout of the LLM model in seconds. Default is 300 seconds as per request.
+         max_retries (PositiveInt): The maximum number of retries. Default is 3 retries.
+         model (str): The LLM model name. Set to 'gpt-3.5-turbo' as per request.
+         temperature (NonNegativeFloat): The temperature of the LLM model. Controls randomness in generation. Set to 1.0 as per request.
+         stop_sign (str | List[str]): The stop sign(s) of the LLM model. Defaults to ["\n\n\n", "User:"].
+         top_p (NonNegativeFloat): The top p of the LLM model. Controls diversity via nucleus sampling. Set to 0.35 as per request.
+         generation_count (PositiveInt): The number of generations to generate. Default is 1.
+         stream (bool): Whether to stream the LLM model's response. Default is False.
+         max_tokens (PositiveInt): The maximum number of tokens to generate. Set to 8192 as per request.
+     """
+
+     model_config = ConfigDict(use_attribute_docstrings=True)
+     api_endpoint: HttpUrl = Field(default=HttpUrl("https://api.openai.com"))
+     """OpenAI API Endpoint."""
+
+     api_key: SecretStr = Field(default=SecretStr(""))
+     """OpenAI API key. Empty by default for security reasons, should be set before use."""
+
+     timeout: PositiveInt = Field(default=300)
+     """The timeout of the LLM model in seconds. Default is 300 seconds as per request."""
+
+     max_retries: PositiveInt = Field(default=3)
+     """The maximum number of retries. Default is 3 retries."""
+
+     model: str = Field(default="gpt-3.5-turbo")
+     """The LLM model name. Set to 'gpt-3.5-turbo' as per request."""
+
+     temperature: NonNegativeFloat = Field(default=1.0)
+     """The temperature of the LLM model. Controls randomness in generation. Set to 1.0 as per request."""
+
+     stop_sign: str | List[str] = Field(default_factory=lambda: ["\n\n\n", "User:"])
+     """The stop sign(s) of the LLM model. Defaults to ["\n\n\n", "User:"]."""
+
+     top_p: NonNegativeFloat = Field(default=0.35)
+     """The top p of the LLM model. Controls diversity via nucleus sampling. Set to 0.35 as per request."""
+
+     generation_count: PositiveInt = Field(default=1)
+     """The number of generations to generate. Default is 1."""
+
+     stream: bool = Field(default=False)
+     """Whether to stream the LLM model's response. Default is False."""
+
+     max_tokens: PositiveInt = Field(default=8192)
+     """The maximum number of tokens to generate. Set to 8192 as per request."""
+
+     rpm: Optional[PositiveInt] = Field(default=100)
+     """The rate limit of the LLM model in requests per minute. None means not checked."""
+
+     tpm: Optional[PositiveInt] = Field(default=1000000)
+     """The rate limit of the LLM model in tokens per minute. None means not checked."""
+
+
+ class EmbeddingConfig(BaseModel):
+     """Embedding configuration class."""
+
+     model_config = ConfigDict(use_attribute_docstrings=True)
+
+     model: str = Field(default="text-embedding-ada-002")
+     """The embedding model name."""
+
+     dimensions: Optional[PositiveInt] = Field(default=None)
+     """The dimensions of the embedding. None means not checked."""
+
+     timeout: Optional[PositiveInt] = Field(default=None)
+     """The timeout of the embedding model in seconds."""
+
+     max_sequence_length: PositiveInt = Field(default=8192)
+     """The maximum sequence length of the embedding model. Default is 8192 as per request."""
+
+     caching: bool = Field(default=False)
+     """Whether to cache the embedding. Default is False."""
+
+     api_endpoint: Optional[HttpUrl] = None
+     """The OpenAI API endpoint."""
+
+     api_key: Optional[SecretStr] = None
+     """The OpenAI API key."""
+
+
+ class PymitterConfig(BaseModel):
+     """Pymitter configuration class.
+
+     Attributes:
+         delimiter (str): The delimiter used to separate the event name into segments.
+         new_listener_event (bool): If set, a newListener event is emitted when a new listener is added.
+         max_listeners (int): The maximum number of listeners per event.
+     """
+
+     model_config = ConfigDict(use_attribute_docstrings=True)
+     delimiter: str = Field(default="::", frozen=True)
+     """The delimiter used to separate the event name into segments."""
+
+     new_listener_event: bool = Field(default=False, frozen=True)
+     """If set, a newListener event is emitted when a new listener is added."""
+
+     max_listeners: int = Field(default=-1, frozen=True)
+     """The maximum number of listeners per event."""
+
+
+ class DebugConfig(BaseModel):
+     """Debug configuration class.
+
+     Attributes:
+         log_level (Literal["DEBUG", "INFO", "SUCCESS", "WARNING", "ERROR", "CRITICAL"]): The log level of the application.
+         log_file (FilePath): The log file of the application.
+     """
+
+     model_config = ConfigDict(use_attribute_docstrings=True)
+
+     log_level: Literal["DEBUG", "INFO", "SUCCESS", "WARNING", "ERROR", "CRITICAL"] = Field(default="INFO")
+     """The log level of the application."""
+
+     log_file: FilePath = Field(default=Path(rf"{ROAMING_DIR}\fabricatio.log"))
+     """The log file of the application."""
+
+     rotation: int = Field(default=1)
+     """The rotation of the log file, in weeks."""
+
+     retention: int = Field(default=2)
+     """The retention of the log file, in weeks."""
+
+     streaming_visible: bool = Field(default=False)
+     """Whether to print the LLM output when streaming."""
+
+
+ class TemplateConfig(BaseModel):
+     """Template configuration class."""
+
+     model_config = ConfigDict(use_attribute_docstrings=True)
+     template_dir: List[DirectoryPath] = Field(
+         default_factory=lambda: [Path(r".\templates"), Path(rf"{ROAMING_DIR}\templates")]
+     )
+     """The directory containing the templates."""
+     active_loading: bool = Field(default=False)
+     """Whether to enable active loading of templates."""
+
+     template_suffix: str = Field(default="hbs", frozen=True)
+     """The suffix of the templates."""
+
+     create_json_obj_template: str = Field(default="create_json_obj")
+     """The name of the create json object template which will be used to create a json object."""
+
+     draft_tool_usage_code_template: str = Field(default="draft_tool_usage_code")
+     """The name of the draft tool usage code template which will be used to draft tool usage code."""
+
+     make_choice_template: str = Field(default="make_choice")
+     """The name of the make choice template which will be used to make a choice."""
+
+     make_judgment_template: str = Field(default="make_judgment")
+     """The name of the make judgment template which will be used to make a judgment."""
+
+     dependencies_template: str = Field(default="dependencies")
+     """The name of the dependencies template which will be used to manage dependencies."""
+
+     task_briefing_template: str = Field(default="task_briefing")
+     """The name of the task briefing template which will be used to brief a task."""
+
+     rate_fine_grind_template: str = Field(default="rate_fine_grind")
+     """The name of the rate fine grind template which will be used to rate fine grind."""
+
+     draft_rating_manual_template: str = Field(default="draft_rating_manual")
+     """The name of the draft rating manual template which will be used to draft rating manual."""
+
+     draft_rating_criteria_template: str = Field(default="draft_rating_criteria")
+     """The name of the draft rating criteria template which will be used to draft rating criteria."""
+
+     extract_reasons_from_examples_template: str = Field(default="extract_reasons_from_examples")
+     """The name of the extract reasons from examples template which will be used to extract reasons from examples."""
+
+     extract_criteria_from_reasons_template: str = Field(default="extract_criteria_from_reasons")
+     """The name of the extract criteria from reasons template which will be used to extract criteria from reasons."""
+
+     draft_rating_weights_klee_template: str = Field(default="draft_rating_weights_klee")
+     """The name of the draft rating weights klee template which will be used to draft rating weights with Klee method."""
+
+     retrieved_display_template: str = Field(default="retrieved_display")
+     """The name of the retrieved display template which will be used to display retrieved documents."""
+
+     liststr_template: str = Field(default="liststr")
+     """The name of the liststr template which will be used to display a list of strings."""
+
+     refined_query_template: str = Field(default="refined_query")
+     """The name of the refined query template which will be used to refine a query."""
+
+     pathstr_template: str = Field(default="pathstr")
+     """The name of the pathstr template which will be used to acquire a path of strings."""
+
+     review_string_template: str = Field(default="review_string")
+     """The name of the review string template which will be used to review a string."""
+
+     generic_string_template: str = Field(default="generic_string")
+     """The name of the generic string template which will be used to generate a generic string."""
+
+     correct_template: str = Field(default="correct")
+     """The name of the correct template which will be used to correct a string."""
+
+
+ class MagikaConfig(BaseModel):
+     """Magika configuration class."""
+
+     model_config = ConfigDict(use_attribute_docstrings=True)
+     model_dir: Optional[DirectoryPath] = Field(default=None)
+     """The directory containing the models for magika."""
+
+
+ class GeneralConfig(BaseModel):
+     """Global configuration class."""
+
+     model_config = ConfigDict(use_attribute_docstrings=True)
+     workspace: DirectoryPath = Field(default=Path())
+     """The workspace directory for the application."""
+
+     confirm_on_ops: bool = Field(default=True)
+     """Whether to confirm on operations."""
+
+     use_json_repair: bool = Field(default=True)
+     """Whether to use JSON repair."""
+
+
+ class ToolBoxConfig(BaseModel):
+     """Toolbox configuration class."""
+
+     model_config = ConfigDict(use_attribute_docstrings=True)
+
+     tool_module_name: str = Field(default="Toolbox")
+     """The name of the module containing the toolbox."""
+
+     data_module_name: str = Field(default="Data")
+     """The name of the module containing the data."""
+
+
+ class RagConfig(BaseModel):
+     """RAG configuration class."""
+
+     model_config = ConfigDict(use_attribute_docstrings=True)
+
+     milvus_uri: HttpUrl = Field(default=HttpUrl("http://localhost:19530"))
+     """The URI of the Milvus server."""
+     milvus_timeout: Optional[PositiveFloat] = Field(default=None)
+     """The timeout of the Milvus server."""
+     milvus_token: Optional[SecretStr] = Field(default=None)
+     """The token of the Milvus server."""
+     milvus_dimensions: Optional[PositiveInt] = Field(default=None)
+     """The dimensions of the Milvus server."""
+
+
+ class CacheConfig(BaseModel):
+     """Cache configuration class. Uses litellm as the cache backend; see https://docs.litellm.ai/docs/caching/all_caches for more info."""
+
+     model_config = ConfigDict(use_attribute_docstrings=True)
+
+     type: Optional[LiteLLMCacheType] = None
+     """The type of cache to use. If None, the default cache type will be used."""
+     params: CacheKwargs = Field(default_factory=CacheKwargs)
+     """The parameters for the cache. If type is None, the default parameters will be used."""
+     enabled: bool = Field(default=False)
+     """Whether to enable cache."""
+
+
+ class RoutingConfig(BaseModel):
+     """Routing configuration class."""
+
+     model_config = ConfigDict(use_attribute_docstrings=True)
+
+     allowed_fails: Optional[int] = 1
+     """The number of allowed fails before the routing is considered failed."""
+     retry_after: int = 15
+     """The time in seconds to wait before retrying the routing after a fail."""
+     cooldown_time: Optional[int] = 120
+     """The time in seconds to wait before retrying the routing after a cooldown."""
+
+
+ class Settings(BaseSettings):
+     """Application settings class.
+
+     Attributes:
+         llm (LLMConfig): LLM Configuration
+         debug (DebugConfig): Debug Configuration
+         pymitter (PymitterConfig): Pymitter Configuration
+         templates (TemplateConfig): Template Configuration
+         magika (MagikaConfig): Magika Configuration
+     """
+
+     model_config = SettingsConfigDict(
+         env_prefix="FABRIK_",
+         env_nested_delimiter="__",
+         pyproject_toml_depth=1,
+         pyproject_toml_table_header=("tool", "fabricatio"),
+         toml_file=["fabricatio.toml", rf"{ROAMING_DIR}\fabricatio.toml"],
+         env_file=[".env", ".envrc"],
+         use_attribute_docstrings=True,
+         extra="ignore",
+     )
+
+     llm: LLMConfig = Field(default_factory=LLMConfig)
+     """LLM Configuration"""
+
+     routing: RoutingConfig = Field(default_factory=RoutingConfig)
+     """Routing Configuration"""
+
+     embedding: EmbeddingConfig = Field(default_factory=EmbeddingConfig)
+     """Embedding Configuration"""
+
+     debug: DebugConfig = Field(default_factory=DebugConfig)
+     """Debug Configuration"""
+
+     pymitter: PymitterConfig = Field(default_factory=PymitterConfig)
+     """Pymitter Configuration"""
+
+     templates: TemplateConfig = Field(default_factory=TemplateConfig)
+     """Template Configuration"""
+
+     magika: MagikaConfig = Field(default_factory=MagikaConfig)
+     """Magika Configuration"""
+
+     general: GeneralConfig = Field(default_factory=GeneralConfig)
+     """General Configuration"""
+
+     toolbox: ToolBoxConfig = Field(default_factory=ToolBoxConfig)
+     """Toolbox Configuration"""
+
+     rag: RagConfig = Field(default_factory=RagConfig)
+     """RAG Configuration"""
+
+     cache: CacheConfig = Field(default_factory=CacheConfig)
+     """Cache Configuration"""
+
+     @classmethod
+     def settings_customise_sources(
+         cls,
+         settings_cls: type[BaseSettings],
+         init_settings: PydanticBaseSettingsSource,
+         env_settings: PydanticBaseSettingsSource,
+         dotenv_settings: PydanticBaseSettingsSource,
+         file_secret_settings: PydanticBaseSettingsSource,
+     ) -> tuple[PydanticBaseSettingsSource, ...]:
+         """Customize settings sources.
+
+         This method customizes the settings sources used by the application. It returns the init, dotenv, environment,
+         and file secret settings sources, followed by a custom PyprojectTomlConfigSettingsSource and a custom TomlConfigSettingsSource.
+
+         Args:
+             settings_cls (type[BaseSettings]): The settings class.
+             init_settings (PydanticBaseSettingsSource): Initial settings source.
+             env_settings (PydanticBaseSettingsSource): Environment settings source.
+             dotenv_settings (PydanticBaseSettingsSource): Dotenv settings source.
+             file_secret_settings (PydanticBaseSettingsSource): File secret settings source.
+
+         Returns:
+             tuple[PydanticBaseSettingsSource, ...]: A tuple of settings sources.
+         """
+         return (
+             init_settings,
+             dotenv_settings,
+             env_settings,
+             file_secret_settings,
+             PyprojectTomlConfigSettingsSource(settings_cls),
+             TomlConfigSettingsSource(settings_cls),
+         )
+
+
+ configs: Settings = Settings()
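Given the `SettingsConfigDict` above (`FABRIK_` env prefix, `__` nested delimiter, `fabricatio.toml` and the `[tool.fabricatio]` table of `pyproject.toml` as TOML sources), nested fields can be overridden from the environment. A minimal sketch, assuming the package and its dependencies are installed and no config file sets the same keys; the model name used here is only an example value:

# Sketch: overriding nested settings through environment variables.
# FABRIK_ is the env prefix and "__" the nested delimiter declared in SettingsConfigDict.
import os

os.environ["FABRIK_LLM__MODEL"] = "gpt-4o-mini"      # maps to Settings.llm.model
os.environ["FABRIK_DEBUG__LOG_LEVEL"] = "DEBUG"      # maps to Settings.debug.log_level

from fabricatio.config import Settings  # import after the env vars are set

settings = Settings()                    # re-instantiate rather than reuse the module-level `configs`
print(settings.llm.model)                # "gpt-4o-mini"
print(settings.debug.log_level)          # "DEBUG"

Because `env_settings` comes before the two TOML sources in `settings_customise_sources`, environment variables take precedence over values read from `fabricatio.toml` or `pyproject.toml`.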
fabricatio/core.py ADDED
@@ -0,0 +1,181 @@
+ """Core module that contains the Env class for managing event handling."""
+
+ from typing import Callable, Optional, Self, overload
+
+ from pydantic import BaseModel, ConfigDict, PrivateAttr
+ from pymitter import EventEmitter
+
+ from fabricatio.config import configs
+ from fabricatio.models.events import Event
+
+
+ class Env(BaseModel):
+     """Environment class that manages event handling using EventEmitter."""
+
+     model_config = ConfigDict(use_attribute_docstrings=True)
+     _ee: EventEmitter = PrivateAttr(
+         default_factory=lambda: EventEmitter(
+             delimiter=configs.pymitter.delimiter,
+             new_listener=configs.pymitter.new_listener_event,
+             max_listeners=configs.pymitter.max_listeners,
+             wildcard=True,
+         )
+     )
+
+     @overload
+     def on(self, event: str | Event, /, ttl: int = -1) -> Self:
+         """
+         Registers an event listener that listens indefinitely or for a specified number of times.
+
+         Args:
+             event (str | Event): The event to listen for.
+             ttl (int): Time-to-live for the listener. If -1, the listener will listen indefinitely.
+
+         Returns:
+             Self: The current instance of Env.
+         """
+         ...
+
+     @overload
+     def on[**P, R](
+         self,
+         event: str | Event,
+         func: Optional[Callable[P, R]] = None,
+         /,
+         ttl: int = -1,
+     ) -> Callable[[Callable[P, R]], Callable[P, R]]:
+         """
+         Registers an event listener with a specific function that listens indefinitely or for a specified number of times.
+
+         Args:
+             event (str | Event): The event to listen for.
+             func (Callable[P, R]): The function to be called when the event is emitted.
+             ttl (int): Time-to-live for the listener. If -1, the listener will listen indefinitely.
+
+         Returns:
+             Callable[[Callable[P, R]], Callable[P, R]]: A decorator that registers the function as an event listener.
+         """
+         ...
+
+     def on[**P, R](
+         self,
+         event: str | Event,
+         func: Optional[Callable[P, R]] = None,
+         /,
+         ttl=-1,
+     ) -> Callable[[Callable[P, R]], Callable[P, R]] | Self:
+         """Registers an event listener with a specific function that listens indefinitely or for a specified number of times.
+
+         Args:
+             event (str | Event): The event to listen for.
+             func (Callable[P, R]): The function to be called when the event is emitted.
+             ttl (int): Time-to-live for the listener. If -1, the listener will listen indefinitely.
+
+         Returns:
+             Callable[[Callable[P, R]], Callable[P, R]] | Self: A decorator that registers the function as an event listener or the current instance of Env.
+         """
+         if isinstance(event, Event):
+             event = event.collapse()
+         if func is None:
+             return self._ee.on(event, ttl=ttl)
+         self._ee.on(event, func, ttl=ttl)
+         return self
+
+     @overload
+     def once[**P, R](
+         self,
+         event: str | Event,
+     ) -> Callable[[Callable[P, R]], Callable[P, R]]:
+         """
+         Registers an event listener that listens only once.
+
+         Args:
+             event (str | Event): The event to listen for.
+
+         Returns:
+             Callable[[Callable[P, R]], Callable[P, R]]: A decorator that registers the function as an event listener.
+         """
+         ...
+
+     @overload
+     def once[**P, R](
+         self,
+         event: str | Event,
+         func: Callable[[Callable[P, R]], Callable[P, R]],
+     ) -> Self:
+         """
+         Registers an event listener with a specific function that listens only once.
+
+         Args:
+             event (str | Event): The event to listen for.
+             func (Callable[P, R]): The function to be called when the event is emitted.
+
+         Returns:
+             Self: The current instance of Env.
+         """
+         ...
+
+     def once[**P, R](
+         self,
+         event: str | Event,
+         func: Optional[Callable[P, R]] = None,
+     ) -> Callable[[Callable[P, R]], Callable[P, R]] | Self:
+         """Registers an event listener with a specific function that listens only once.
+
+         Args:
+             event (str | Event): The event to listen for.
+             func (Callable[P, R]): The function to be called when the event is emitted.
+
+         Returns:
+             Callable[[Callable[P, R]], Callable[P, R]] | Self: A decorator that registers the function as an event listener or the current instance
+         """
+         if isinstance(event, Event):
+             event = event.collapse()
+         if func is None:
+             return self._ee.once(event)
+
+         self._ee.once(event, func)
+         return self
+
+     def emit[**P](self, event: str | Event, *args: P.args, **kwargs: P.kwargs) -> None:
+         """Emits an event to all registered listeners.
+
+         Args:
+             event (str | Event): The event to emit.
+             *args: Positional arguments to pass to the listeners.
+             **kwargs: Keyword arguments to pass to the listeners.
+         """
+         if isinstance(event, Event):
+             event = event.collapse()
+
+         self._ee.emit(event, *args, **kwargs)
+
+     async def emit_async[**P](self, event: str | Event, *args: P.args, **kwargs: P.kwargs) -> None:
+         """Asynchronously emits an event to all registered listeners.
+
+         Args:
+             event (str | Event): The event to emit.
+             *args: Positional arguments to pass to the listeners.
+             **kwargs: Keyword arguments to pass to the listeners.
+         """
+         if isinstance(event, Event):
+             event = event.collapse()
+         return await self._ee.emit_async(event, *args, **kwargs)
+
+     def emit_future[**P](self, event: str | Event, *args: P.args, **kwargs: P.kwargs) -> None:
+         """Emits an event to all registered listeners and returns a future object.
+
+         Args:
+             event (str | Event): The event to emit.
+             *args: Positional arguments to pass to the listeners.
+             **kwargs: Keyword arguments to pass to the listeners.
+
+         Returns:
+             None: The future object.
+         """
+         if isinstance(event, Event):
+             event = event.collapse()
+         return self._ee.emit_future(event, *args, **kwargs)
+
+
+ env = Env()
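The module-level `env` instance is the event bus used throughout the package. A minimal sketch of registering a listener and emitting to it with plain string event names (the `::` delimiter comes from `PymitterConfig`); the event name and handler below are made up for illustration:

# Sketch: wiring a listener into the global Env event bus.
import asyncio

from fabricatio.core import env


def on_task_finished(payload: str) -> None:
    """Illustrative handler; any callable accepted by pymitter works."""
    print(f"task finished with: {payload}")


# on() returns the Env instance when a function is passed, so calls can be chained.
env.on("task::finished", on_task_finished)

# Synchronous fan-out to all registered listeners.
env.emit("task::finished", "ok")

# Asynchronous fan-out, awaiting async listeners as well.
asyncio.run(env.emit_async("task::finished", "ok"))

Passing an `Event` instead of a string works the same way, since `on`, `once`, and the `emit*` methods all call `event.collapse()` first.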