fabricatio 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fabricatio/models/events.py CHANGED
@@ -2,67 +2,65 @@ from typing import List, Self
 
 from pydantic import BaseModel, ConfigDict, Field
 
+from fabricatio.config import configs
+
 
 class Event(BaseModel):
+    """A class representing an event."""
+
     model_config = ConfigDict(use_attribute_docstrings=True)
-    delimiter: str = Field(default=".", frozen=True)
-    """ The delimiter used to separate the event name into segments."""
 
     segments: List[str] = Field(default_factory=list, frozen=True)
     """ The segments of the namespaces."""
 
     @classmethod
-    def from_string(cls, event: str, delimiter: str = ".") -> Self:
-        """
-        Create an Event instance from a string.
+    def from_string(cls, event: str) -> Self:
+        """Create an Event instance from a string.
 
         Args:
            event (str): The event string.
-            delimiter (str): The delimiter used to separate the event name into segments.
 
         Returns:
            Event: The Event instance.
        """
-        return cls(delimiter=delimiter, segments=event.split(delimiter))
+        return cls(segments=event.split(configs.pymitter.delimiter))
 
     def collapse(self) -> str:
-        """
-        Collapse the event into a string.
-        """
-        return self.delimiter.join(self.segments)
+        """Collapse the event into a string."""
+        return configs.pymitter.delimiter.join(self.segments)
 
     def clone(self) -> Self:
-        """
-        Clone the event.
-        """
-        return Event(delimiter=self.delimiter, segments=[segment for segment in self.segments])
+        """Clone the event."""
+        return Event(segments=list(self.segments))
 
     def push(self, segment: str) -> Self:
-        """
-        Push a segment to the event.
-        """
+        """Push a segment to the event."""
         assert segment, "The segment must not be empty."
-        assert self.delimiter not in segment, "The segment must not contain the delimiter."
+        assert configs.pymitter.delimiter not in segment, "The segment must not contain the delimiter."
 
         self.segments.append(segment)
         return self
 
     def pop(self) -> str:
-        """
-        Pop a segment from the event.
-        """
+        """Pop a segment from the event."""
         return self.segments.pop()
 
     def clear(self) -> Self:
-        """
-        Clear the event.
-        """
+        """Clear the event."""
         self.segments.clear()
         return self
 
     def concat(self, event: Self) -> Self:
-        """
-        Concatenate another event to this event.
-        """
+        """Concatenate another event to this event."""
         self.segments.extend(event.segments)
         return self
+
+    def __hash__(self) -> int:
+        """Return the hash of the event, using the collapsed string."""
+        return hash(self.collapse())
+
+    def __eq__(self, other: Self | str) -> bool:
+        """Check if the event is equal to another event or a string."""
+        if isinstance(other, Event):
+            other = other.collapse()
+        return self.collapse() == other
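For orientation, a minimal sketch of the reworked Event API after this change (assuming the configured `configs.pymitter.delimiter` is the conventional `"."`; the delimiter now lives in global config instead of on each instance):

```python
from fabricatio.models.events import Event

# from_string splits on the globally configured delimiter (assumed ".").
event = Event.from_string("fabricatio.task.new")
assert event.segments == ["fabricatio", "task", "new"]

# collapse() joins the segments back with the same configured delimiter.
assert event.collapse() == "fabricatio.task.new"

# The new __eq__ accepts another Event or a plain string, and __hash__
# hashes the collapsed form, so both behave identically as dict keys.
assert event == "fabricatio.task.new"
assert hash(event) == hash(event.clone())
```

Because both hash and equality are derived from `collapse()`, an Event and its collapsed string are interchangeable as mapping keys, which is what the `Role.registry` field further down relies on.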
fabricatio/models/generic.py CHANGED
@@ -1,18 +1,18 @@
 from asyncio import Queue
-from typing import Iterable, Any, Dict, Self, List
+from typing import Any, Dict, Iterable, List, Optional, Self
 
 import litellm
-from litellm.types.utils import StreamingChoices, ModelResponse, Choices
+from litellm.types.utils import Choices, ModelResponse, StreamingChoices
 from pydantic import (
     BaseModel,
-    Field,
-    PositiveInt,
-    NonNegativeInt,
     ConfigDict,
+    Field,
     HttpUrl,
-    SecretStr,
     NonNegativeFloat,
+    NonNegativeInt,
+    PositiveInt,
     PrivateAttr,
+    SecretStr,
 )
 
 from fabricatio.config import configs
@@ -20,40 +20,44 @@ from fabricatio.models.utils import Messages
 
 
 class Base(BaseModel):
+    """Base class for all models with Pydantic configuration."""
+
     model_config = ConfigDict(use_attribute_docstrings=True)
 
 
 class WithToDo(Base):
+    """Class that manages a todo list using an asynchronous queue."""
+
     _todo: Queue[str] = PrivateAttr(default_factory=Queue)
     """
     The todo list of the current instance.
     """
 
     async def add_todo(self, todo_msg: str) -> Self:
-        """
-        Add a todo item to the todo list.
+        """Add a todo item to the todo list.
+
         Args:
-            todo_msg: The todo item to be added to the todo list.
+            todo_msg (str): The todo item to be added to the todo list.
 
         Returns:
             Self: The current instance object to support method chaining.
         """
-
         await self._todo.put(todo_msg)
         return self
 
     async def get_todo(self) -> str:
-        """
-        Get the last todo item from the todo list.
+        """Get the last todo item from the todo list.
+
         Returns:
             str: The last todo item from the todo list.
-
         """
         # Pop the last todo item from the todo list
         return await self._todo.get()
 
 
 class Named(Base):
+    """Class that includes a name attribute."""
+
     name: str = Field(frozen=True)
     """
     Name of the object.
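As a usage sketch for the queue-backed todo list above (hypothetical driver code, not part of the package):

```python
import asyncio

from fabricatio.models.generic import WithToDo


async def main() -> None:
    holder = WithToDo()
    # add_todo awaits the underlying asyncio.Queue and returns self.
    await holder.add_todo("write docs")
    await holder.add_todo("cut release")
    # Note: asyncio.Queue is FIFO, so despite the "last todo item" wording
    # in the docstring, get_todo yields the earliest queued item first.
    assert await holder.get_todo() == "write docs"


asyncio.run(main())
```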
@@ -61,6 +65,8 @@ class Named(Base):
 
 
 class Described(Base):
+    """Class that includes a description attribute."""
+
     description: str = Field(default="", frozen=True)
     """
     Description of the object.
@@ -68,11 +74,12 @@ class Described(Base):
 
 
 class WithBriefing(Named, Described):
+    """Class that provides a briefing based on the name and description."""
 
     @property
     def briefing(self) -> str:
-        """
-        Get the briefing of the object.
+        """Get the briefing of the object.
+
         Returns:
             str: The briefing of the object.
         """
@@ -80,6 +87,8 @@ class WithBriefing(Named, Described):
 
 
 class Memorable(Base):
+    """Class that manages a memory list with a maximum size."""
+
     memory: List[str] = Field(default_factory=list)
     """
     Memory list.
@@ -90,19 +99,13 @@ class Memorable(Base):
     """
 
     def add_memory(self, memories: str | Iterable[str]) -> Self:
-        """
-        Add memory items to the memory list.
-
-        This method appends memory items to the memory list of the current instance.
+        """Add memory items to the memory list.
 
-        Parameters:
-        - memories: str | Iterable[str] - A single memory item as a string or multiple memory items as an iterable.
+        Args:
+            memories (str | Iterable[str]): A single memory item as a string or multiple memory items as an iterable.
 
         Returns:
-        - Returns the current instance object to support method chaining.
-
-        This method design allows users to add memory items to the memory list
-        through a unified interface, enhancing code usability and extensibility.
+            Self: The current instance object to support method chaining.
         """
         # Convert a single memory item to a list
         if isinstance(memories, str):
@@ -111,43 +114,31 @@ class Memorable(Base):
         self.memory.extend(memories)
         # Limit the memory list size if the maximum size is set
         if self.memory_max_size > 0:
-            self.memory = self.memory[-self.memory_max_size:]
+            self.memory = self.memory[-self.memory_max_size :]
         # Return the current instance object to support method chaining
         return self
 
     def top_memories(self, n: PositiveInt = 1) -> List[str]:
-        """
-        Get the top memory items from the memory list.
+        """Get the top memory items from the memory list.
 
-        This method returns the top memory items from the memory list of the current instance.
-
-        Parameters:
-        - n: PositiveInt - The number of top memory items to return.
+        Args:
+            n (PositiveInt): The number of top memory items to return.
 
         Returns:
-        - List[str] - The top memory items from the memory list.
-
-        This method design allows users to get the top memory items from the memory list
-        through a unified interface, enhancing code usability and extensibility.
+            List[str]: The top memory items from the memory list.
         """
         # Get the top memory items from the memory list
         return self.memory[-n:]
 
     def top_memories_as_string(self, n: PositiveInt = 1, separator: str = "\n\n") -> str:
-        """
-        Get the memory items as a string.
+        """Get the memory items as a string.
 
-        This method returns the memory items as a string from the memory list of the current instance.
-
-        Parameters:
-        - n: PositiveInt - The number of memory items to return.
-        - separator: str - The separator to join memory items.
+        Args:
+            n (PositiveInt): The number of memory items to return.
+            separator (str): The separator to join memory items.
 
         Returns:
-        - str - The memory items as a string.
-
-        This method design allows users to get the memory items as a string from the memory list
-        through a unified interface, enhancing code usability and extensibility.
+            str: The memory items as a string.
         """
         # Get the top memory items from the memory list
         memories = self.top_memories(n)
@@ -155,19 +146,10 @@ class Memorable(Base):
         return separator.join(memories)
 
     def clear_memories(self) -> Self:
-        """
-        Clear all memory items.
-
-        This method clears all memory items from the memory list of the current instance.
-
-        Parameters:
-        - self: The current instance object.
+        """Clear all memory items.
 
         Returns:
-        - Returns the current instance object to support method chaining.
-
-        This method design allows users to clear all memory items from the memory list
-        through a unified interface, enhancing code usability and extensibility.
+            Self: The current instance object to support method chaining.
         """
         # Clear all memory items from the memory list
         self.memory.clear()
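A short sketch of the Memorable interface after these docstring cleanups (behavior is unchanged; `memory_max_size` is defined in a part of the class not shown in this diff and is assumed here to default to 0, i.e. unlimited):

```python
from fabricatio.models.generic import Memorable

m = Memorable()
# add_memory accepts a single string or any iterable and returns self.
m.add_memory("saw a task").add_memory(["planned", "executed"])

# top_memories returns the n most recent items; the *_as_string variant
# joins them with the given separator.
assert m.top_memories(2) == ["planned", "executed"]
assert m.top_memories_as_string(2, " | ") == "planned | executed"

# clear_memories empties the list and also chains.
assert m.clear_memories().memory == []
```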
@@ -176,124 +158,148 @@ class Memorable(Base):
 
 
 class LLMUsage(Base):
-    llm_api_endpoint: HttpUrl = Field(default=configs.llm.api_endpoint)
+    """Class that manages LLM (Large Language Model) usage parameters and methods."""
+
+    llm_api_endpoint: Optional[HttpUrl] = None
     """
     The OpenAI API endpoint.
     """
 
-    llm_api_key: SecretStr = Field(default=configs.llm.api_key)
+    llm_api_key: Optional[SecretStr] = None
     """
     The OpenAI API key.
     """
 
-    llm_timeout: PositiveInt = Field(default=configs.llm.timeout)
+    llm_timeout: Optional[PositiveInt] = None
     """
     The timeout of the LLM model.
     """
 
-    llm_max_retries: PositiveInt = Field(default=configs.llm.max_retries)
+    llm_max_retries: Optional[PositiveInt] = None
     """
     The maximum number of retries.
     """
 
-    llm_model: str = Field(default=configs.llm.model)
+    llm_model: Optional[str] = None
     """
     The LLM model name.
     """
 
-    llm_temperature: NonNegativeFloat = Field(default=configs.llm.temperature)
+    llm_temperature: Optional[NonNegativeFloat] = None
     """
     The temperature of the LLM model.
     """
 
-    llm_stop_sign: str = Field(default=configs.llm.stop_sign)
+    llm_stop_sign: Optional[str] = None
     """
     The stop sign of the LLM model.
     """
 
-    llm_top_p: NonNegativeFloat = Field(default=configs.llm.top_p)
+    llm_top_p: Optional[NonNegativeFloat] = None
     """
     The top p of the LLM model.
     """
 
-    llm_generation_count: PositiveInt = Field(default=configs.llm.generation_count)
+    llm_generation_count: Optional[PositiveInt] = None
     """
     The number of generations to generate.
     """
 
-    llm_stream: bool = Field(default=configs.llm.stream)
+    llm_stream: Optional[bool] = None
     """
     Whether to stream the LLM model's response.
     """
 
-    llm_max_tokens: PositiveInt = Field(default=configs.llm.max_tokens)
+    llm_max_tokens: Optional[PositiveInt] = None
     """
     The maximum number of tokens to generate.
     """
 
     def model_post_init(self, __context: Any) -> None:
-        litellm.api_key = self.llm_api_key.get_secret_value()
-        litellm.api_base = self.llm_api_endpoint.unicode_string()
+        """Initialize the LLM model with API key and endpoint.
+
+        Args:
+            __context (Any): The context passed during model initialization.
+        """
+        litellm.api_key = self.llm_api_key.get_secret_value() if self.llm_api_key else configs.llm.api_key
+        litellm.api_base = self.llm_api_endpoint.unicode_string() if self.llm_api_endpoint else configs.llm.api_endpoint
 
     async def aquery(
-            self,
-            messages: List[Dict[str, str]],
-            model: str | None = None,
-            temperature: NonNegativeFloat | None = None,
-            stop: str | None = None,
-            top_p: NonNegativeFloat | None = None,
-            max_tokens: PositiveInt | None = None,
-            n: PositiveInt | None = None,
-            stream: bool | None = None,
-            timeout: PositiveInt | None = None,
-            max_retries: PositiveInt | None = None,
+        self,
+        messages: List[Dict[str, str]],
+        model: str | None = None,
+        temperature: NonNegativeFloat | None = None,
+        stop: str | None = None,
+        top_p: NonNegativeFloat | None = None,
+        max_tokens: PositiveInt | None = None,
+        n: PositiveInt | None = None,
+        stream: bool | None = None,
+        timeout: PositiveInt | None = None,
+        max_retries: PositiveInt | None = None,
     ) -> ModelResponse:
-        """
-        Asynchronously queries the language model to generate a response based on the provided messages and parameters.
-
-        Parameters:
-        - messages (List[Dict[str, str]]): A list of messages, where each message is a dictionary containing the role and content of the message.
-        - model (str | None): The name of the model to use. If not provided, the default model will be used.
-        - temperature (NonNegativeFloat | None): Controls the randomness of the output. Lower values make the output more deterministic.
-        - stop (str | None): A sequence at which to stop the generation of the response.
-        - top_p (NonNegativeFloat | None): Controls the diversity of the output through nucleus sampling.
-        - max_tokens (PositiveInt | None): The maximum number of tokens to generate in the response.
-        - n (PositiveInt | None): The number of responses to generate.
-        - stream (bool | None): Whether to receive the response in a streaming fashion.
-        - timeout (PositiveInt | None): The timeout duration for the request.
-        - max_retries (PositiveInt | None): The maximum number of retries in case of failure.
+        """Asynchronously queries the language model to generate a response based on the provided messages and parameters.
+
+        Args:
+            messages (List[Dict[str, str]]): A list of messages, where each message is a dictionary containing the role and content of the message.
+            model (str | None): The name of the model to use. If not provided, the default model will be used.
+            temperature (NonNegativeFloat | None): Controls the randomness of the output. Lower values make the output more deterministic.
+            stop (str | None): A sequence at which to stop the generation of the response.
+            top_p (NonNegativeFloat | None): Controls the diversity of the output through nucleus sampling.
+            max_tokens (PositiveInt | None): The maximum number of tokens to generate in the response.
+            n (PositiveInt | None): The number of responses to generate.
+            stream (bool | None): Whether to receive the response in a streaming fashion.
+            timeout (PositiveInt | None): The timeout duration for the request.
+            max_retries (PositiveInt | None): The maximum number of retries in case of failure.
 
         Returns:
-        - ModelResponse: An object containing the generated response and other metadata from the model.
+            ModelResponse: An object containing the generated response and other metadata from the model.
         """
         # Call the underlying asynchronous completion function with the provided and default parameters
         return await litellm.acompletion(
             messages=messages,
-            model=model or self.llm_model,
-            temperature=temperature or self.llm_temperature,
-            stop=stop or self.llm_stop_sign,
-            top_p=top_p or self.llm_top_p,
-            max_tokens=max_tokens or self.llm_max_tokens,
-            n=n or self.llm_generation_count,
-            stream=stream or self.llm_stream,
-            timeout=timeout or self.llm_timeout,
-            max_retries=max_retries or self.llm_max_retries,
+            model=model or self.llm_model or configs.llm.model,
+            temperature=temperature or self.llm_temperature or configs.llm.temperature,
+            stop=stop or self.llm_stop_sign or configs.llm.stop_sign,
+            top_p=top_p or self.llm_top_p or configs.llm.top_p,
+            max_tokens=max_tokens or self.llm_max_tokens or configs.llm.max_tokens,
+            n=n or self.llm_generation_count or configs.llm.generation_count,
+            stream=stream or self.llm_stream or configs.llm.stream,
+            timeout=timeout or self.llm_timeout or configs.llm.timeout,
+            max_retries=max_retries or self.llm_max_retries or configs.llm.max_retries,
         )
 
-    async def aask(
-            self,
-            question: str,
-            system_message: str = "",
-            model: str | None = None,
-            temperature: NonNegativeFloat | None = None,
-            stop: str | None = None,
-            top_p: NonNegativeFloat | None = None,
-            max_tokens: PositiveInt | None = None,
-            n: PositiveInt | None = None,
-            stream: bool | None = None,
-            timeout: PositiveInt | None = None,
-            max_retries: PositiveInt | None = None,
+    async def ainvoke(
+        self,
+        question: str,
+        system_message: str = "",
+        model: str | None = None,
+        temperature: NonNegativeFloat | None = None,
+        stop: str | None = None,
+        top_p: NonNegativeFloat | None = None,
+        max_tokens: PositiveInt | None = None,
+        n: PositiveInt | None = None,
+        stream: bool | None = None,
+        timeout: PositiveInt | None = None,
+        max_retries: PositiveInt | None = None,
     ) -> List[Choices | StreamingChoices]:
+        """Asynchronously invokes the language model with a question and optional system message.
+
+        Args:
+            question (str): The question to ask the model.
+            system_message (str): The system message to provide context to the model.
+            model (str | None): The name of the model to use. If not provided, the default model will be used.
+            temperature (NonNegativeFloat | None): Controls the randomness of the output. Lower values make the output more deterministic.
+            stop (str | None): A sequence at which to stop the generation of the response.
+            top_p (NonNegativeFloat | None): Controls the diversity of the output through nucleus sampling.
+            max_tokens (PositiveInt | None): The maximum number of tokens to generate in the response.
+            n (PositiveInt | None): The number of responses to generate.
+            stream (bool | None): Whether to receive the response in a streaming fashion.
+            timeout (PositiveInt | None): The timeout duration for the request.
+            max_retries (PositiveInt | None): The maximum number of retries in case of failure.
+
+        Returns:
+            List[Choices | StreamingChoices]: A list of choices or streaming choices from the model response.
+        """
         return (
             await self.aquery(
                 messages=Messages().add_system_message(system_message).add_user_message(question),
@@ -308,3 +314,86 @@ class LLMUsage(Base):
                 max_retries=max_retries,
             )
         ).choices
+
+    async def aask(
+        self,
+        question: str,
+        system_message: str = "",
+        model: str | None = None,
+        temperature: NonNegativeFloat | None = None,
+        stop: str | None = None,
+        top_p: NonNegativeFloat | None = None,
+        max_tokens: PositiveInt | None = None,
+        stream: bool | None = None,
+        timeout: PositiveInt | None = None,
+        max_retries: PositiveInt | None = None,
+    ) -> str:
+        """Asynchronously asks the language model a question and returns the response content.
+
+        Args:
+            question (str): The question to ask the model.
+            system_message (str): The system message to provide context to the model.
+            model (str | None): The name of the model to use. If not provided, the default model will be used.
+            temperature (NonNegativeFloat | None): Controls the randomness of the output. Lower values make the output more deterministic.
+            stop (str | None): A sequence at which to stop the generation of the response.
+            top_p (NonNegativeFloat | None): Controls the diversity of the output through nucleus sampling.
+            max_tokens (PositiveInt | None): The maximum number of tokens to generate in the response.
+            stream (bool | None): Whether to receive the response in a streaming fashion.
+            timeout (PositiveInt | None): The timeout duration for the request.
+            max_retries (PositiveInt | None): The maximum number of retries in case of failure.
+
+        Returns:
+            str: The content of the model's response message.
+        """
+        return (
+            (
+                await self.ainvoke(
+                    n=1,
+                    question=question,
+                    system_message=system_message,
+                    model=model,
+                    temperature=temperature,
+                    stop=stop,
+                    top_p=top_p,
+                    max_tokens=max_tokens,
+                    stream=stream,
+                    timeout=timeout,
+                    max_retries=max_retries,
+                )
+            )
+            .pop()
+            .message.content
+        )
+
+    def fallback_to(self, other: "LLMUsage") -> Self:
+        """Fallback to another instance's attribute values if the current instance's attributes are None.
+
+        Args:
+            other (LLMUsage): Another instance from which to copy attribute values.
+
+        Returns:
+            Self: The current instance, allowing for method chaining.
+        """
+        # Define the list of attribute names to check and potentially copy
+        attr_names = [
+            "llm_api_endpoint",
+            "llm_api_key",
+            "llm_model",
+            "llm_stop_sign",
+            "llm_temperature",
+            "llm_top_p",
+            "llm_generation_count",
+            "llm_stream",
+            "llm_max_tokens",
+            "llm_timeout",
+            "llm_max_retries",
+        ]
+
+        # Iterate over the attribute names and copy values from 'other' to 'self' where applicable
+        for attr_name in attr_names:
+            # Copy the attribute value from 'other' to 'self' only if 'self' has None and 'other' has a non-None value
+            if getattr(self, attr_name) is None and (attr := getattr(other, attr_name)) is not None:
+                setattr(self, attr_name, attr)
+
+        # Return the current instance to allow for method chaining
+        return self
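The substantive change in LLMUsage: every `llm_*` field is now Optional and defaults to None, and defaults are resolved at call time rather than captured at class definition, with precedence explicit argument → instance field → `configs.llm`. The new `fallback_to` copies non-None settings from another instance, which is how `Role` (below) propagates its settings to workflows. A hedged sketch (field values here are hypothetical):

```python
from fabricatio.models.generic import LLMUsage

role_settings = LLMUsage(llm_model="openai/gpt-4o", llm_temperature=0.2)
workflow_settings = LLMUsage(llm_temperature=0.7)

# fallback_to only fills attributes that are still None, so the workflow's
# explicit temperature survives while the model name is inherited.
workflow_settings.fallback_to(role_settings)
assert workflow_settings.llm_model == "openai/gpt-4o"
assert workflow_settings.llm_temperature == 0.7

# Anything still None at call time falls through to configs.llm, e.g. in
# aquery(): model=model or self.llm_model or configs.llm.model
```

One consequence of resolving with `or` rather than explicit None checks: falsy overrides such as `temperature=0.0` or `stream=False` still fall through to the next level.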
fabricatio/models/role.py CHANGED
@@ -1,14 +1,29 @@
-from typing import List
+from typing import Any
 
 from pydantic import Field
 
+from fabricatio.core import env
+from fabricatio.journal import logger
 from fabricatio.models.action import WorkFlow
-from fabricatio.models.generic import Memorable, WithToDo, WithBriefing, LLMUsage
+from fabricatio.models.events import Event
+from fabricatio.models.generic import LLMUsage, Memorable, WithBriefing, WithToDo
+from fabricatio.models.task import Task
 
 
-class Role[T: WorkFlow](Memorable, WithBriefing, WithToDo, LLMUsage):
-    workflows: List[T] = Field(frozen=True)
-    """A list of action names that the role can perform."""
+class Role(Memorable, WithBriefing, WithToDo, LLMUsage):
+    """Class that represents a role with a registry of events and workflows."""
 
-    async def act(self):
-        pass
+    registry: dict[Event | str, WorkFlow] = Field(...)
+    """ The registry of events and workflows."""
+
+    def model_post_init(self, __context: Any) -> None:
+        """Register the workflows in the role to the event bus."""
+        for event, workflow in self.registry.items():
+            workflow.fallback_to(self)
+            logger.debug(
+                f"Registering workflow: {workflow.name} for event: {event.collapse() if isinstance(event, Event) else event}"
+            )
+            env.on(event, workflow.serve)
+
+    async def propose(self, prompt: str) -> Task:
+        """Propose a task to the role."""