fabricatio 0.2.6__cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. fabricatio/__init__.py +43 -0
  2. fabricatio/_rust.cp39-win_amd64.pyd +0 -0
  3. fabricatio/_rust.pyi +115 -0
  4. fabricatio/_rust_instances.py +10 -0
  5. fabricatio/actions/article.py +128 -0
  6. fabricatio/actions/output.py +19 -0
  7. fabricatio/actions/rag.py +71 -0
  8. fabricatio/capabilities/correct.py +115 -0
  9. fabricatio/capabilities/propose.py +49 -0
  10. fabricatio/capabilities/rag.py +384 -0
  11. fabricatio/capabilities/rating.py +339 -0
  12. fabricatio/capabilities/review.py +278 -0
  13. fabricatio/capabilities/task.py +113 -0
  14. fabricatio/config.py +405 -0
  15. fabricatio/core.py +181 -0
  16. fabricatio/decorators.py +179 -0
  17. fabricatio/fs/__init__.py +29 -0
  18. fabricatio/fs/curd.py +149 -0
  19. fabricatio/fs/readers.py +46 -0
  20. fabricatio/journal.py +21 -0
  21. fabricatio/models/action.py +230 -0
  22. fabricatio/models/events.py +120 -0
  23. fabricatio/models/extra.py +655 -0
  24. fabricatio/models/generic.py +406 -0
  25. fabricatio/models/kwargs_types.py +169 -0
  26. fabricatio/models/role.py +72 -0
  27. fabricatio/models/task.py +299 -0
  28. fabricatio/models/tool.py +189 -0
  29. fabricatio/models/usages.py +718 -0
  30. fabricatio/models/utils.py +192 -0
  31. fabricatio/parser.py +151 -0
  32. fabricatio/py.typed +0 -0
  33. fabricatio/toolboxes/__init__.py +15 -0
  34. fabricatio/toolboxes/arithmetic.py +62 -0
  35. fabricatio/toolboxes/fs.py +31 -0
  36. fabricatio/workflows/articles.py +26 -0
  37. fabricatio/workflows/rag.py +11 -0
  38. fabricatio-0.2.6.data/scripts/tdown.exe +0 -0
  39. fabricatio-0.2.6.dist-info/METADATA +432 -0
  40. fabricatio-0.2.6.dist-info/RECORD +42 -0
  41. fabricatio-0.2.6.dist-info/WHEEL +4 -0
  42. fabricatio-0.2.6.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,113 @@
1
+ """A module for the task capabilities of the Fabricatio library."""
2
+
3
+ from types import CodeType
4
+ from typing import Any, Dict, List, Optional, Tuple, Unpack, cast
5
+
6
+ import orjson
7
+ from fabricatio._rust_instances import TEMPLATE_MANAGER
8
+ from fabricatio.capabilities.propose import Propose
9
+ from fabricatio.config import configs
10
+ from fabricatio.journal import logger
11
+ from fabricatio.models.generic import WithBriefing
12
+ from fabricatio.models.kwargs_types import ChooseKwargs, ValidateKwargs
13
+ from fabricatio.models.task import Task
14
+ from fabricatio.models.tool import Tool, ToolExecutor
15
+ from fabricatio.models.usages import ToolBoxUsage
16
+ from fabricatio.parser import JsonCapture, PythonCapture
17
+
18
+
19
+ class ProposeTask(WithBriefing, Propose):
20
+ """A class that proposes a task based on a prompt."""
21
+
22
+ async def propose_task[T](
23
+ self,
24
+ prompt: str,
25
+ **kwargs: Unpack[ValidateKwargs[Task[T]]],
26
+ ) -> Task[T]:
27
+ """Asynchronously proposes a task based on a given prompt and parameters.
28
+
29
+ Parameters:
30
+ prompt: The prompt text for proposing a task, which is a string that must be provided.
31
+ **kwargs: The keyword arguments for the LLM (Large Language Model) usage.
32
+
33
+ Returns:
34
+ A Task object based on the proposal result.
35
+ """
36
+ if not prompt:
37
+ logger.error(err := f"{self.name}: Prompt must be provided.")
38
+ raise ValueError(err)
39
+
40
+ return await self.propose(Task, prompt, **self.prepend(cast(Dict[str, Any], kwargs)))
41
+
42
+
43
class HandleTask(WithBriefing, ToolBoxUsage):
    """A class that handles a task based on a task object.

    Gathers suitable tools for a task, asks the LLM to draft Python code that
    uses them, executes that code, and returns the extracted results.
    """

    async def draft_tool_usage_code(
        self,
        task: Task,
        tools: List[Tool],
        data: Dict[str, Any],
        **kwargs: Unpack[ValidateKwargs],
    ) -> Optional[Tuple[CodeType, List[str]]]:
        """Asynchronously drafts the tool usage code for a task based on a given task object and tools.

        Args:
            task: The task whose briefing and dependency prompt are rendered into the question.
            tools: Candidate tools the generated code may call; must be non-empty.
            data: Extra data rendered into the prompt and later exposed to the generated code.
            **kwargs: Validation/LLM keyword arguments forwarded to `aask_validate`.

        Returns:
            A tuple of (compiled code object, list of variable names to extract after
            execution), or None if no LLM response ever passed validation.

        Raises:
            ValueError: If `tools` is empty.
        """
        logger.info(f"Drafting tool usage code for task: {task.briefing}")

        if not tools:
            err = f"{self.name}: Tools must be provided to draft the tool usage code."
            logger.error(err)
            raise ValueError(err)

        def _validator(response: str) -> Tuple[CodeType, List[str]] | None:
            # Accept the response only when it contains BOTH a compilable Python
            # snippet and a JSON payload naming the variables to extract afterwards.
            if (source := PythonCapture.convert_with(response, lambda resp: compile(resp, "<string>", "exec"))) and (
                to_extract := JsonCapture.convert_with(response, orjson.loads)
            ):
                return source, to_extract

            return None

        q = TEMPLATE_MANAGER.render_template(
            configs.templates.draft_tool_usage_code_template,
            {
                "data_module_name": configs.toolbox.data_module_name,
                "tool_module_name": configs.toolbox.tool_module_name,
                "task": task.briefing,
                "deps": task.dependencies_prompt,
                "tools": [{"name": t.name, "briefing": t.briefing} for t in tools],
                "data": data,
            },
        )
        logger.debug(f"Code Drafting Question: \n{q}")
        # aask_validate re-queries the LLM until _validator returns non-None
        # (subject to its retry limits), so a None result means drafting failed.
        return await self.aask_validate(
            question=q,
            validator=_validator,
            **self.prepend(cast(Dict[str, Any], kwargs)),
        )

    async def handle_fine_grind(
        self,
        task: Task,
        data: Dict[str, Any],
        box_choose_kwargs: Optional[ChooseKwargs] = None,
        tool_choose_kwargs: Optional[ChooseKwargs] = None,
        **kwargs: Unpack[ValidateKwargs],
    ) -> Optional[Tuple]:
        """Asynchronously handles a task based on a given task object and parameters.

        Args:
            task: The task to handle.
            data: Extra data exposed to the drafted code via the executor.
            box_choose_kwargs: Options for choosing toolboxes, passed to `gather_tools_fine_grind`.
            tool_choose_kwargs: Options for choosing tools within the chosen toolboxes.
            **kwargs: Forwarded to `draft_tool_usage_code`.

        Returns:
            A tuple of the extracted values (None entries for names the executed code
            never defined), or None if no tools were gathered, drafting failed, or the
            draft requested nothing to extract.
        """
        logger.info(f"Handling task: \n{task.briefing}")

        tools = await self.gather_tools_fine_grind(task, box_choose_kwargs, tool_choose_kwargs)
        logger.info(f"{self.name} have gathered {[t.name for t in tools]}")

        if tools and (pack := await self.draft_tool_usage_code(task, tools, data, **kwargs)):
            executor = ToolExecutor(candidates=tools, data=data)

            code, to_extract = pack
            # NOTE(review): this executes LLM-generated code; ToolExecutor presumably
            # confines the namespace to the gathered tools and data — confirm before
            # exposing this path to untrusted prompts.
            cxt = executor.execute(code)
            if to_extract:
                # dict.get returns None for names absent from the execution context.
                return tuple(cxt.get(k) for k in to_extract)

        return None

    async def handle(self, task: Task, data: Dict[str, Any], **kwargs: Unpack[ValidateKwargs]) -> Optional[Tuple]:
        """Asynchronously handles a task based on a given task object and parameters.

        Thin convenience wrapper over `handle_fine_grind` using default
        toolbox/tool selection options.
        """
        return await self.handle_fine_grind(task, data, **kwargs)
fabricatio/config.py ADDED
@@ -0,0 +1,405 @@
1
+ """Configuration module for the Fabricatio application."""
2
+
3
+ from pathlib import Path
4
+ from typing import List, Literal, Optional
5
+
6
+ from appdirs import user_config_dir
7
+ from litellm.types.caching import LiteLLMCacheType
8
+ from pydantic import (
9
+ BaseModel,
10
+ ConfigDict,
11
+ DirectoryPath,
12
+ Field,
13
+ FilePath,
14
+ HttpUrl,
15
+ NonNegativeFloat,
16
+ PositiveFloat,
17
+ PositiveInt,
18
+ SecretStr,
19
+ )
20
+ from pydantic_settings import (
21
+ BaseSettings,
22
+ PydanticBaseSettingsSource,
23
+ PyprojectTomlConfigSettingsSource,
24
+ SettingsConfigDict,
25
+ TomlConfigSettingsSource,
26
+ )
27
+
28
+ from fabricatio.models.kwargs_types import CacheKwargs
29
+
30
+ ROAMING_DIR = user_config_dir("fabricatio", "", roaming=True)
31
+
32
+
33
class LLMConfig(BaseModel):
    """LLM configuration class.

    Attributes:
        api_endpoint (HttpUrl): OpenAI-compatible API endpoint.
        api_key (SecretStr): API key. Ships with a non-functional placeholder; must be set before use.
        timeout (PositiveInt): The timeout of the LLM model in seconds. Default is 300.
        max_retries (PositiveInt): The maximum number of retries. Default is 3.
        model (str): The LLM model name. Default is 'gpt-3.5-turbo'.
        temperature (NonNegativeFloat): Sampling temperature; controls randomness. Default is 1.0.
        stop_sign (str | List[str]): Stop sequence(s) for generation. None by default.
        top_p (NonNegativeFloat): Nucleus-sampling top-p; controls diversity. Default is 0.35.
        generation_count (PositiveInt): The number of generations to generate. Default is 1.
        stream (bool): Whether to stream the LLM model's response. Default is False.
        max_tokens (PositiveInt): Maximum tokens to generate. None by default (provider default applies).
        rpm (PositiveInt): Rate limit in requests per minute. Default is 100; None means not checked.
        tpm (PositiveInt): Rate limit in tokens per minute. Default is 1000000; None means not checked.
    """

    model_config = ConfigDict(use_attribute_docstrings=True)
    api_endpoint: Optional[HttpUrl] = Field(default=HttpUrl("https://api.openai.com"))
    """OpenAI API Endpoint."""

    api_key: Optional[SecretStr] = Field(default=SecretStr("sk-setyourkey"))
    """OpenAI API key. Defaults to a non-functional placeholder; a real key must be set before use."""

    timeout: Optional[PositiveInt] = Field(default=300)
    """The timeout of the LLM model in seconds. Default is 300 seconds."""

    max_retries: Optional[PositiveInt] = Field(default=3)
    """The maximum number of retries. Default is 3 retries."""

    model: Optional[str] = Field(default="gpt-3.5-turbo")
    """The LLM model name. Default is 'gpt-3.5-turbo'."""

    temperature: Optional[NonNegativeFloat] = Field(default=1.0)
    """The temperature of the LLM model. Controls randomness in generation. Default is 1.0."""

    stop_sign: Optional[str | List[str]] = Field(default=None)
    """The stop sign of the LLM model. No default stop sign specified."""

    top_p: Optional[NonNegativeFloat] = Field(default=0.35)
    """The top p of the LLM model. Controls diversity via nucleus sampling. Default is 0.35."""

    generation_count: Optional[PositiveInt] = Field(default=1)
    """The number of generations to generate. Default is 1."""

    stream: Optional[bool] = Field(default=False)
    """Whether to stream the LLM model's response. Default is False."""

    max_tokens: Optional[PositiveInt] = Field(default=None)
    """The maximum number of tokens to generate. None by default, deferring to the provider's limit."""

    rpm: Optional[PositiveInt] = Field(default=100)
    """The rate limit of the LLM model in requests per minute. None means not checked."""

    tpm: Optional[PositiveInt] = Field(default=1000000)
    """The rate limit of the LLM model in tokens per minute. None means not checked."""
89
+
90
+
91
class EmbeddingConfig(BaseModel):
    """Configuration options for the text-embedding backend."""

    model_config = ConfigDict(use_attribute_docstrings=True)

    # Plain-assignment defaults are equivalent to Field(default=...) in pydantic.
    model: Optional[str] = "text-embedding-ada-002"
    """The embedding model name. """

    dimensions: Optional[PositiveInt] = None
    """The dimensions of the embedding. None means not checked."""

    timeout: Optional[PositiveInt] = None
    """The timeout of the embedding model in seconds."""

    max_sequence_length: Optional[PositiveInt] = 8192
    """The maximum sequence length of the embedding model. Default is 8192 as per request."""

    caching: Optional[bool] = False
    """Whether to cache the embedding. Default is False."""

    api_endpoint: Optional[HttpUrl] = None
    """The OpenAI API endpoint."""

    api_key: Optional[SecretStr] = None
    """The OpenAI API key."""
116
+
117
+
118
class PymitterConfig(BaseModel):
    """Configuration for the pymitter event emitter.

    Attributes:
        delimiter (str): The delimiter used to separate the event name into segments.
        new_listener_event (bool): If set, a newListener event is emitted when a new listener is added.
        max_listeners (int): The maximum number of listeners per event.
    """

    model_config = ConfigDict(use_attribute_docstrings=True)

    delimiter: str = Field(frozen=True, default="::")
    """The delimiter used to separate the event name into segments."""

    new_listener_event: bool = Field(frozen=True, default=False)
    """If set, a newListener event is emitted when a new listener is added."""

    max_listeners: int = Field(frozen=True, default=-1)
    """The maximum number of listeners per event."""
136
+
137
+
138
class DebugConfig(BaseModel):
    """Debug configuration class.

    Attributes:
        log_level (Literal["DEBUG", "INFO", "SUCCESS", "WARNING", "ERROR", "CRITICAL"]): The log level of the application.
        log_file (FilePath): The log file of the application.
        rotation (int): Log rotation interval in weeks.
        retention (int): Log retention period in weeks.
        streaming_visible (bool): Whether to print LLM output while streaming.
    """

    model_config = ConfigDict(use_attribute_docstrings=True)

    log_level: Literal["DEBUG", "INFO", "SUCCESS", "WARNING", "ERROR", "CRITICAL"] = Field(default="INFO")
    """The log level of the application."""

    # Join with pathlib instead of a hard-coded "\" so the default path is
    # valid on POSIX systems as well as Windows.
    log_file: FilePath = Field(default=Path(ROAMING_DIR) / "fabricatio.log", frozen=True)
    """The log file of the application."""

    rotation: int = Field(default=1, frozen=True)
    """The rotation of the log file. in weeks."""

    retention: int = Field(default=2, frozen=True)
    """The retention of the log file. in weeks."""

    streaming_visible: bool = Field(default=False)
    """Whether to print the llm output when streaming."""
162
+
163
+
164
class TemplateConfig(BaseModel):
    """Template configuration class.

    Holds the template search directories and the names of every named
    template the library renders.
    """

    model_config = ConfigDict(use_attribute_docstrings=True)
    # Join with pathlib instead of hard-coded "\" separators so the defaults
    # resolve correctly on POSIX systems as well as Windows.
    template_dir: List[DirectoryPath] = Field(
        default_factory=lambda: [Path("templates"), Path(ROAMING_DIR) / "templates"]
    )
    """The directory containing the templates."""
    active_loading: bool = Field(default=False)
    """Whether to enable active loading of templates."""

    template_suffix: str = Field(default="hbs", frozen=True)
    """The suffix of the templates."""

    create_json_obj_template: str = Field(default="create_json_obj")
    """The name of the create json object template which will be used to create a json object."""

    draft_tool_usage_code_template: str = Field(default="draft_tool_usage_code")
    """The name of the draft tool usage code template which will be used to draft tool usage code."""

    make_choice_template: str = Field(default="make_choice")
    """The name of the make choice template which will be used to make a choice."""

    make_judgment_template: str = Field(default="make_judgment")
    """The name of the make judgment template which will be used to make a judgment."""

    dependencies_template: str = Field(default="dependencies")
    """The name of the dependencies template which will be used to manage dependencies."""

    task_briefing_template: str = Field(default="task_briefing")
    """The name of the task briefing template which will be used to brief a task."""

    rate_fine_grind_template: str = Field(default="rate_fine_grind")
    """The name of the rate fine grind template which will be used to rate fine grind."""

    draft_rating_manual_template: str = Field(default="draft_rating_manual")
    """The name of the draft rating manual template which will be used to draft rating manual."""

    draft_rating_criteria_template: str = Field(default="draft_rating_criteria")
    """The name of the draft rating criteria template which will be used to draft rating criteria."""

    extract_reasons_from_examples_template: str = Field(default="extract_reasons_from_examples")
    """The name of the extract reasons from examples template which will be used to extract reasons from examples."""

    extract_criteria_from_reasons_template: str = Field(default="extract_criteria_from_reasons")
    """The name of the extract criteria from reasons template which will be used to extract criteria from reasons."""

    draft_rating_weights_klee_template: str = Field(default="draft_rating_weights_klee")
    """The name of the draft rating weights klee template which will be used to draft rating weights with Klee method."""

    retrieved_display_template: str = Field(default="retrieved_display")
    """The name of the retrieved display template which will be used to display retrieved documents."""

    liststr_template: str = Field(default="liststr")
    """The name of the liststr template which will be used to display a list of strings."""

    refined_query_template: str = Field(default="refined_query")
    """The name of the refined query template which will be used to refine a query."""

    pathstr_template: str = Field(default="pathstr")
    """The name of the pathstr template which will be used to acquire a path of strings."""

    review_string_template: str = Field(default="review_string")
    """The name of the review string template which will be used to review a string."""

    generic_string_template: str = Field(default="generic_string")
    """The name of the generic string template which will be used to generate a generic string."""

    correct_template: str = Field(default="correct")
    """The name of the correct template which will be used to correct a string."""

    co_validation_template: str = Field(default="co_validation")
    """The name of the co-validation template which will be used to co-validate a string."""
237
+
238
+
239
class MagikaConfig(BaseModel):
    """Configuration options for magika."""

    model_config = ConfigDict(use_attribute_docstrings=True)

    model_dir: Optional[DirectoryPath] = None
    """The directory containing the models for magika."""
245
+
246
+
247
class GeneralConfig(BaseModel):
    """Application-wide general settings."""

    model_config = ConfigDict(use_attribute_docstrings=True)

    # default_factory=Path yields Path(), i.e. the current working directory.
    workspace: DirectoryPath = Field(default_factory=Path)
    """The workspace directory for the application."""

    confirm_on_ops: bool = True
    """Whether to confirm on operations."""

    use_json_repair: bool = True
    """Whether to use JSON repair."""
259
+
260
+
261
class ToolBoxConfig(BaseModel):
    """Settings for the toolbox and data module names."""

    model_config = ConfigDict(use_attribute_docstrings=True)

    tool_module_name: str = "Toolbox"
    """The name of the module containing the toolbox."""

    data_module_name: str = "Data"
    """The name of the module containing the data."""
271
+
272
+
273
class RagConfig(BaseModel):
    """Settings for retrieval-augmented generation (Milvus connection)."""

    model_config = ConfigDict(use_attribute_docstrings=True)

    milvus_uri: Optional[HttpUrl] = HttpUrl("http://localhost:19530")
    """The URI of the Milvus server."""
    milvus_timeout: Optional[PositiveFloat] = 30.0
    """The timeout of the Milvus server."""
    milvus_token: Optional[SecretStr] = None
    """The token of the Milvus server."""
    milvus_dimensions: Optional[PositiveInt] = None
    """The dimensions of the Milvus server."""
286
+
287
+
288
class CacheConfig(BaseModel):
    """Cache configuration, using litellm as the cache backend.

    See https://docs.litellm.ai/docs/caching/all_caches for backend details.
    """

    model_config = ConfigDict(use_attribute_docstrings=True)

    type: Optional[LiteLLMCacheType] = Field(default=None)
    """The type of cache to use. If None, the default cache type will be used."""
    params: CacheKwargs = Field(default_factory=CacheKwargs)
    """The parameters for the cache. If type is None, the default parameters will be used."""
    enabled: bool = False
    """Whether to enable cache."""
299
+
300
+
301
class RoutingConfig(BaseModel):
    """Settings governing request routing, rate limits and retry behaviour."""

    model_config = ConfigDict(use_attribute_docstrings=True)

    max_parallel_requests: Optional[int] = Field(default=60)
    """The maximum number of parallel requests. None means not checked."""
    allowed_fails: Optional[int] = Field(default=3)
    """The number of allowed fails before the routing is considered failed."""
    retry_after: int = Field(default=15)
    """The time in seconds to wait before retrying the routing after a fail."""
    cooldown_time: Optional[int] = Field(default=30)
    """The time in seconds to wait before retrying the routing after a cooldown."""
314
+
315
+
316
class Settings(BaseSettings):
    """Application settings class.

    Aggregates every sub-configuration and loads values from (highest to lowest
    precedence): constructor kwargs, .env files, environment variables (prefix
    ``FABRIK_``, nested delimiter ``__``), secret files, ``pyproject.toml``
    ([tool.fabricatio] table) and ``fabricatio.toml``.

    Attributes:
        llm (LLMConfig): LLM Configuration
        routing (RoutingConfig): Routing Configuration
        embedding (EmbeddingConfig): Embedding Configuration
        debug (DebugConfig): Debug Configuration
        pymitter (PymitterConfig): Pymitter Configuration
        templates (TemplateConfig): Template Configuration
        magika (MagikaConfig): Magika Configuration
        general (GeneralConfig): General Configuration
        toolbox (ToolBoxConfig): Toolbox Configuration
        rag (RagConfig): RAG Configuration
        cache (CacheConfig): Cache Configuration
    """

    model_config = SettingsConfigDict(
        env_prefix="FABRIK_",
        env_nested_delimiter="__",
        pyproject_toml_depth=1,
        pyproject_toml_table_header=("tool", "fabricatio"),
        # Join with pathlib instead of a hard-coded "\" so the roaming-dir
        # config file is located on POSIX systems as well as Windows.
        toml_file=["fabricatio.toml", Path(ROAMING_DIR) / "fabricatio.toml"],
        env_file=[".env", ".envrc"],
        use_attribute_docstrings=True,
        extra="ignore",
    )

    llm: LLMConfig = Field(default_factory=LLMConfig)
    """LLM Configuration"""

    routing: RoutingConfig = Field(default_factory=RoutingConfig)
    """Routing Configuration"""

    embedding: EmbeddingConfig = Field(default_factory=EmbeddingConfig)
    """Embedding Configuration"""

    debug: DebugConfig = Field(default_factory=DebugConfig)
    """Debug Configuration"""

    pymitter: PymitterConfig = Field(default_factory=PymitterConfig)
    """Pymitter Configuration"""

    templates: TemplateConfig = Field(default_factory=TemplateConfig)
    """Template Configuration"""

    magika: MagikaConfig = Field(default_factory=MagikaConfig)
    """Magika Configuration"""

    general: GeneralConfig = Field(default_factory=GeneralConfig)
    """General Configuration"""

    toolbox: ToolBoxConfig = Field(default_factory=ToolBoxConfig)
    """Toolbox Configuration"""

    rag: RagConfig = Field(default_factory=RagConfig)
    """RAG Configuration"""

    cache: CacheConfig = Field(default_factory=CacheConfig)
    """Cache Configuration"""

    @classmethod
    def settings_customise_sources(
        cls,
        settings_cls: type[BaseSettings],
        init_settings: PydanticBaseSettingsSource,
        env_settings: PydanticBaseSettingsSource,
        dotenv_settings: PydanticBaseSettingsSource,
        file_secret_settings: PydanticBaseSettingsSource,
    ) -> tuple[PydanticBaseSettingsSource, ...]:
        """Customize settings sources.

        Returns the settings sources in precedence order: init kwargs, dotenv
        files, environment variables, secret files, then a
        PyprojectTomlConfigSettingsSource and a TomlConfigSettingsSource for
        the TOML configuration files.

        Args:
            settings_cls (type[BaseSettings]): The settings class.
            init_settings (PydanticBaseSettingsSource): Initial settings source.
            env_settings (PydanticBaseSettingsSource): Environment settings source.
            dotenv_settings (PydanticBaseSettingsSource): Dotenv settings source.
            file_secret_settings (PydanticBaseSettingsSource): File secret settings source.

        Returns:
            tuple[PydanticBaseSettingsSource, ...]: A tuple of settings sources.
        """
        return (
            init_settings,
            dotenv_settings,
            env_settings,
            file_secret_settings,
            PyprojectTomlConfigSettingsSource(settings_cls),
            TomlConfigSettingsSource(settings_cls),
        )
403
+
404
+
405
# Module-level singleton settings instance, resolved once at import time from
# all configured sources (init kwargs, .env, environment, secrets, TOML files).
configs: Settings = Settings()