lionagi 0.6.0__py3-none-any.whl → 0.7.0__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
Files changed (70) hide show
  1. lionagi/__init__.py +2 -0
  2. lionagi/libs/token_transform/__init__.py +0 -0
  3. lionagi/libs/token_transform/llmlingua.py +1 -0
  4. lionagi/libs/token_transform/perplexity.py +439 -0
  5. lionagi/libs/token_transform/synthlang.py +409 -0
  6. lionagi/operations/ReAct/ReAct.py +126 -0
  7. lionagi/operations/ReAct/utils.py +28 -0
  8. lionagi/operations/__init__.py +1 -9
  9. lionagi/operations/_act/act.py +73 -0
  10. lionagi/operations/chat/__init__.py +3 -0
  11. lionagi/operations/chat/chat.py +173 -0
  12. lionagi/operations/communicate/__init__.py +0 -0
  13. lionagi/operations/communicate/communicate.py +167 -0
  14. lionagi/operations/instruct/__init__.py +3 -0
  15. lionagi/operations/instruct/instruct.py +29 -0
  16. lionagi/operations/interpret/__init__.py +3 -0
  17. lionagi/operations/interpret/interpret.py +40 -0
  18. lionagi/operations/operate/__init__.py +3 -0
  19. lionagi/operations/operate/operate.py +189 -0
  20. lionagi/operations/parse/__init__.py +3 -0
  21. lionagi/operations/parse/parse.py +125 -0
  22. lionagi/operations/plan/plan.py +3 -3
  23. lionagi/operations/select/__init__.py +0 -4
  24. lionagi/operations/select/select.py +11 -30
  25. lionagi/operations/select/utils.py +13 -2
  26. lionagi/operations/translate/__init__.py +0 -0
  27. lionagi/operations/translate/translate.py +47 -0
  28. lionagi/operations/types.py +25 -3
  29. lionagi/operatives/action/function_calling.py +1 -1
  30. lionagi/operatives/action/manager.py +22 -26
  31. lionagi/operatives/action/tool.py +1 -1
  32. lionagi/operatives/strategies/__init__.py +3 -0
  33. lionagi/{operations → operatives}/strategies/params.py +18 -2
  34. lionagi/protocols/adapters/__init__.py +0 -0
  35. lionagi/protocols/adapters/adapter.py +95 -0
  36. lionagi/protocols/adapters/json_adapter.py +101 -0
  37. lionagi/protocols/adapters/pandas_/__init__.py +0 -0
  38. lionagi/protocols/adapters/pandas_/csv_adapter.py +50 -0
  39. lionagi/protocols/adapters/pandas_/excel_adapter.py +52 -0
  40. lionagi/protocols/adapters/pandas_/pd_dataframe_adapter.py +31 -0
  41. lionagi/protocols/adapters/pandas_/pd_series_adapter.py +17 -0
  42. lionagi/protocols/adapters/types.py +18 -0
  43. lionagi/protocols/generic/pile.py +22 -1
  44. lionagi/protocols/graph/node.py +17 -1
  45. lionagi/protocols/types.py +3 -3
  46. lionagi/service/__init__.py +1 -14
  47. lionagi/service/endpoints/base.py +1 -1
  48. lionagi/service/endpoints/rate_limited_processor.py +2 -1
  49. lionagi/service/manager.py +1 -1
  50. lionagi/service/types.py +18 -0
  51. lionagi/session/branch.py +1098 -929
  52. lionagi/version.py +1 -1
  53. {lionagi-0.6.0.dist-info → lionagi-0.7.0.dist-info}/METADATA +4 -4
  54. {lionagi-0.6.0.dist-info → lionagi-0.7.0.dist-info}/RECORD +66 -38
  55. lionagi/libs/compress/models.py +0 -66
  56. lionagi/libs/compress/utils.py +0 -69
  57. lionagi/operations/select/prompt.py +0 -5
  58. lionagi/protocols/_adapter.py +0 -224
  59. /lionagi/{libs/compress → operations/ReAct}/__init__.py +0 -0
  60. /lionagi/operations/{strategies → _act}/__init__.py +0 -0
  61. /lionagi/{operations → operatives}/strategies/base.py +0 -0
  62. /lionagi/{operations → operatives}/strategies/concurrent.py +0 -0
  63. /lionagi/{operations → operatives}/strategies/concurrent_chunk.py +0 -0
  64. /lionagi/{operations → operatives}/strategies/concurrent_sequential_chunk.py +0 -0
  65. /lionagi/{operations → operatives}/strategies/sequential.py +0 -0
  66. /lionagi/{operations → operatives}/strategies/sequential_chunk.py +0 -0
  67. /lionagi/{operations → operatives}/strategies/sequential_concurrent_chunk.py +0 -0
  68. /lionagi/{operations → operatives}/strategies/utils.py +0 -0
  69. {lionagi-0.6.0.dist-info → lionagi-0.7.0.dist-info}/WHEEL +0 -0
  70. {lionagi-0.6.0.dist-info → lionagi-0.7.0.dist-info}/licenses/LICENSE +0 -0
lionagi/session/branch.py CHANGED
@@ -2,24 +2,20 @@
2
2
  #
3
3
  # SPDX-License-Identifier: Apache-2.0
4
4
 
5
- import logging
6
- from typing import Any, Literal
5
+ from enum import Enum
6
+ from typing import TYPE_CHECKING, Any, Literal
7
7
 
8
8
  import pandas as pd
9
9
  from jinja2 import Template
10
10
  from pydantic import BaseModel, Field, JsonValue, PrivateAttr
11
11
 
12
- from lionagi.libs.validate.fuzzy_validate_mapping import fuzzy_validate_mapping
13
- from lionagi.operatives.models.field_model import FieldModel
14
- from lionagi.operatives.models.model_params import ModelParams
15
12
  from lionagi.operatives.types import (
16
13
  ActionManager,
17
- ActionResponseModel,
18
- FunctionCalling,
14
+ FieldModel,
19
15
  FuncTool,
20
16
  Instruct,
17
+ ModelParams,
21
18
  Operative,
22
- Step,
23
19
  Tool,
24
20
  ToolRef,
25
21
  )
@@ -38,7 +34,6 @@ from lionagi.protocols.types import (
38
34
  LogManagerConfig,
39
35
  Mail,
40
36
  Mailbox,
41
- MessageFlag,
42
37
  MessageManager,
43
38
  MessageRole,
44
39
  Package,
@@ -50,51 +45,67 @@ from lionagi.protocols.types import (
50
45
  SenderRecipient,
51
46
  System,
52
47
  )
53
- from lionagi.service import iModel, iModelManager
48
+ from lionagi.service.types import iModel, iModelManager
54
49
  from lionagi.settings import Settings
55
- from lionagi.utils import (
56
- UNDEFINED,
57
- alcall,
58
- breakdown_pydantic_annotation,
59
- copy,
60
- )
50
+ from lionagi.utils import UNDEFINED, alcall, copy
51
+
52
+ if TYPE_CHECKING:
53
+ # Forward references for type checking (e.g., in operations or extended modules)
54
+ from lionagi.session.branch import Branch
61
55
 
62
56
  __all__ = ("Branch",)
63
57
 
64
58
 
65
59
  class Branch(Element, Communicatable, Relational):
66
- """Manages a conversation 'branch' with messages, tools, and iModels.
60
+ """
61
+ Manages a conversation 'branch' with messages, tools, and iModels.
62
+
63
+ The `Branch` class serves as a high-level interface or orchestrator that:
64
+ - Handles message management (`MessageManager`).
65
+ - Registers and invokes tools/actions (`ActionManager`).
66
+ - Manages model instances (`iModelManager`).
67
+ - Logs activity (`LogManager`).
68
+ - Communicates via mailboxes (`Mailbox`).
67
69
 
68
- The Branch class orchestrates message handling, model invocation,
69
- action (tool) execution, logging, and mailbox-based communication.
70
- It maintains references to a MessageManager, ActionManager, iModelManager,
71
- and a LogManager, providing high-level methods for combined operations.
70
+ **Key responsibilities**:
71
+ 1. Storing and organizing messages, including system instructions, user instructions, and model responses.
72
+ 2. Handling asynchronous or synchronous execution of LLM calls and tool invocations.
73
+ 3. Providing a consistent interface for “operate,” “chat,” “communicate,” “parse,” etc.
72
74
 
73
75
  Attributes:
74
76
  user (SenderRecipient | None):
75
- The user or sender of this branch context (e.g., a session object).
77
+ The user or "owner" of this branch (often tied to a session).
76
78
  name (str | None):
77
79
  A human-readable name for this branch.
78
80
  mailbox (Mailbox):
79
- A mailbox for sending and receiving `Package`s to/from other
80
- branches or components.
81
+ A mailbox for sending and receiving `Package` objects to/from other branches.
82
+
83
+ Note:
84
+ Actual implementations for chat, parse, operate, etc., are referenced
85
+ via lazy loading or modular imports. You typically won't need to
86
+ subclass `Branch`, but you can instantiate it and call the
87
+ associated methods for complex orchestrations.
81
88
  """
82
89
 
83
90
  user: SenderRecipient | None = Field(
84
91
  None,
85
92
  description=(
86
- "The user or sender of the branch, typically a session object or"
87
- "an external user identifier. Please note that this is a distinct"
88
- "concept from `user` parameter in LLM API calls."
93
+ "The user or sender of the branch, often a session object or "
94
+ "an external user identifier. Not to be confused with the "
95
+ "LLM API's user parameter."
89
96
  ),
90
97
  )
91
98
 
92
99
  name: str | None = Field(
93
100
  None,
94
- description="A human readable name of the branch, if any.",
101
+ description="A human-readable name of the branch (optional).",
95
102
  )
96
103
 
97
- mailbox: Mailbox = Field(default_factory=Mailbox, exclude=True)
104
+ mailbox: Mailbox = Field(
105
+ default_factory=Mailbox,
106
+ exclude=True,
107
+ description="Mailbox for cross-branch or external communication.",
108
+ )
98
109
 
99
110
  _message_manager: MessageManager | None = PrivateAttr(None)
100
111
  _action_manager: ActionManager | None = PrivateAttr(None)
@@ -109,52 +120,60 @@ class Branch(Element, Communicatable, Relational):
109
120
  messages: Pile[RoledMessage] = None, # message manager kwargs
110
121
  system: System | JsonValue = None,
111
122
  system_sender: SenderRecipient = None,
112
- chat_model: iModel = None, # imodel manager kwargs
123
+ chat_model: iModel = None, # iModelManager kwargs
113
124
  parse_model: iModel = None,
114
125
  imodel: iModel = None, # deprecated, alias of chat_model
115
- tools: FuncTool | list[FuncTool] = None, # action manager kwargs
116
- log_config: LogManagerConfig | dict = None, # log manager kwargs
126
+ tools: FuncTool | list[FuncTool] = None, # ActionManager kwargs
127
+ log_config: LogManagerConfig | dict = None, # LogManager kwargs
117
128
  system_datetime: bool | str = None,
118
129
  system_template: Template | str = None,
119
130
  system_template_context: dict = None,
120
131
  logs: Pile[Log] = None,
121
132
  **kwargs,
122
133
  ):
123
- """Initializes a Branch with references to managers and mailbox.
134
+ """
135
+ Initializes a `Branch` with references to managers and an optional mailbox.
124
136
 
125
137
  Args:
126
138
  user (SenderRecipient, optional):
127
- The user or sender of the branch context.
139
+ The user or sender context for this branch.
128
140
  name (str | None, optional):
129
141
  A human-readable name for this branch.
130
142
  messages (Pile[RoledMessage], optional):
131
- Initial messages to seed the MessageManager.
143
+ Initial messages for seeding the MessageManager.
132
144
  system (System | JsonValue, optional):
133
- A system message or data to configure system role.
145
+ Optional system-level configuration or message for the LLM.
134
146
  system_sender (SenderRecipient, optional):
135
- Sender to assign if the system message is added.
147
+ Sender to attribute to the system message if it is added.
136
148
  chat_model (iModel, optional):
137
- The chat model used by iModelManager (if not provided,
138
- falls back to defaults).
149
+ The primary "chat" iModel for conversation. If not provided,
150
+ a default from `Settings.iModel.CHAT` is used.
139
151
  parse_model (iModel, optional):
140
- The parse model used by iModelManager.
152
+ The "parse" iModel for structured data parsing.
153
+ Defaults to `Settings.iModel.PARSE`.
141
154
  imodel (iModel, optional):
142
- Deprecated. Alias for chat_model.
155
+ Deprecated. Alias for `chat_model`.
143
156
  tools (FuncTool | list[FuncTool], optional):
144
- Tools for the ActionManager.
157
+ Tools or a list of tools for the ActionManager.
145
158
  log_config (LogManagerConfig | dict, optional):
146
- Configuration for the LogManager.
159
+ Configuration dict or object for the LogManager.
147
160
  system_datetime (bool | str, optional):
148
- Whether to include timestamps in system messages.
161
+ Whether to include timestamps in system messages (True/False)
162
+ or a string format for datetime.
149
163
  system_template (Template | str, optional):
150
- A Jinja2 template or template string for system messages.
164
+ Optional Jinja2 template for system messages.
151
165
  system_template_context (dict, optional):
152
- Context variables for the system template.
153
- **kwargs: Additional parameters passed to the Element parent init.
166
+ Context for rendering the system template.
167
+ logs (Pile[Log], optional):
168
+ Existing logs to seed the LogManager.
169
+ **kwargs:
170
+ Additional parameters passed to `Element` parent init.
154
171
  """
155
172
  super().__init__(user=user, name=name, **kwargs)
156
173
 
174
+ # --- MessageManager ---
157
175
  self._message_manager = MessageManager(messages=messages)
176
+ # If system instructions or templates are provided, add them
158
177
  if any(
159
178
  i is not None
160
179
  for i in [system, system_sender, system_datetime, system_template]
@@ -183,641 +202,419 @@ class Branch(Element, Communicatable, Relational):
183
202
  self._imodel_manager = iModelManager(
184
203
  chat=chat_model, parse=parse_model
185
204
  )
205
+
206
+ # --- ActionManager ---
186
207
  self._action_manager = ActionManager(tools)
187
208
 
209
+ # --- LogManager ---
188
210
  if log_config:
189
- log_config = (
190
- log_config
191
- if isinstance(log_config, LogManagerConfig)
192
- else LogManagerConfig(**log_config)
193
- )
211
+ if isinstance(log_config, dict):
212
+ log_config = LogManagerConfig(**log_config)
194
213
  self._log_manager = LogManager.from_config(log_config, logs=logs)
195
214
  else:
196
215
  self._log_manager = LogManager(**Settings.Config.LOG, logs=logs)
197
216
 
217
+ # -------------------------------------------------------------------------
218
+ # Properties to expose managers and core data
219
+ # -------------------------------------------------------------------------
198
220
  @property
199
221
  def system(self) -> System | None:
200
- """System | None: The system message or configuration."""
222
+ """The system message/configuration, if any."""
201
223
  return self._message_manager.system
202
224
 
203
225
  @property
204
226
  def msgs(self) -> MessageManager:
205
- """MessageManager: Manages the conversation messages."""
227
+ """Returns the associated MessageManager."""
206
228
  return self._message_manager
207
229
 
208
230
  @property
209
231
  def acts(self) -> ActionManager:
210
- """ActionManager: Manages available tools (actions)."""
232
+ """Returns the associated ActionManager for tool management."""
211
233
  return self._action_manager
212
234
 
213
235
  @property
214
236
  def mdls(self) -> iModelManager:
215
- """iModelManager: Manages chat and parse models."""
237
+ """Returns the associated iModelManager."""
216
238
  return self._imodel_manager
217
239
 
218
240
  @property
219
241
  def messages(self) -> Pile[RoledMessage]:
220
- """Pile[RoledMessage]: The collection of messages in this branch."""
242
+ """Convenience property to retrieve all messages from MessageManager."""
221
243
  return self._message_manager.messages
222
244
 
223
245
  @property
224
246
  def logs(self) -> Pile[Log]:
225
- """Pile[Log]: The collection of log entries for this branch."""
247
+ """Convenience property to retrieve all logs from the LogManager."""
226
248
  return self._log_manager.logs
227
249
 
228
250
  @property
229
251
  def chat_model(self) -> iModel:
230
- """iModel: The primary (chat) model in the iModelManager."""
252
+ """
253
+ The primary "chat" model (`iModel`) used for conversational LLM calls.
254
+ """
231
255
  return self._imodel_manager.chat
232
256
 
233
257
  @chat_model.setter
234
258
  def chat_model(self, value: iModel) -> None:
235
- """Sets the chat model in the iModelManager.
259
+ """
260
+ Sets the primary "chat" model in the iModelManager.
236
261
 
237
262
  Args:
238
- value (iModel): The new chat model.
263
+ value (iModel): The new chat model to register.
239
264
  """
240
265
  self._imodel_manager.register_imodel("chat", value)
241
266
 
242
267
  @property
243
268
  def parse_model(self) -> iModel:
244
- """iModel: The parsing model in the iModelManager."""
269
+ """The "parse" model (`iModel`) used for structured data parsing."""
245
270
  return self._imodel_manager.parse
246
271
 
247
272
  @parse_model.setter
248
273
  def parse_model(self, value: iModel) -> None:
249
- """Sets the parse model in the iModelManager.
274
+ """
275
+ Sets the "parse" model in the iModelManager.
250
276
 
251
277
  Args:
252
- value (iModel): The new parse model.
278
+ value (iModel): The new parse model to register.
253
279
  """
254
280
  self._imodel_manager.register_imodel("parse", value)
255
281
 
256
282
  @property
257
283
  def tools(self) -> dict[str, Tool]:
258
- """dict[str, Tool]: All tools registered in the ActionManager."""
284
+ """
285
+ All registered tools (actions) in the ActionManager,
286
+ keyed by their tool names or IDs.
287
+ """
259
288
  return self._action_manager.registry
260
289
 
290
+ # -------------------------------------------------------------------------
291
+ # Cloning
292
+ # -------------------------------------------------------------------------
261
293
  async def aclone(self, sender: ID.Ref = None) -> "Branch":
262
- """Asynchronous clone of this Branch.
294
+ """
295
+ Asynchronously clones this `Branch` with optional new sender ID.
263
296
 
264
297
  Args:
265
298
  sender (ID.Ref, optional):
266
- If provided, sets this sender ID on all cloned messages.
299
+ If provided, this ID is set as the sender for all cloned messages.
267
300
 
268
301
  Returns:
269
- Branch: A new branch instance with cloned messages.
302
+ Branch: A new branch instance, containing cloned state.
270
303
  """
271
304
  async with self.msgs.messages:
272
305
  return self.clone(sender)
273
306
 
274
- async def operate(
275
- self,
276
- *,
277
- instruct: Instruct = None,
278
- instruction: Instruction | JsonValue = None,
279
- guidance: JsonValue = None,
280
- context: JsonValue = None,
281
- sender: SenderRecipient = None,
282
- recipient: SenderRecipient = None,
283
- progression: Progression = None,
284
- imodel: iModel = None, # deprecated, alias of chat_model
285
- chat_model: iModel = None,
286
- invoke_actions: bool = True,
287
- tool_schemas: list[dict] = None,
288
- images: list = None,
289
- image_detail: Literal["low", "high", "auto"] = None,
290
- parse_model: iModel = None,
291
- skip_validation: bool = False,
292
- tools: ToolRef = None,
293
- operative: Operative = None,
294
- response_format: type[
295
- BaseModel
296
- ] = None, # alias of operative.request_type
297
- return_operative: bool = False,
298
- actions: bool = False,
299
- reason: bool = False,
300
- action_kwargs: dict = None,
301
- field_models: list[FieldModel] = None,
302
- exclude_fields: list | dict | None = None,
303
- request_params: ModelParams = None,
304
- request_param_kwargs: dict = None,
305
- response_params: ModelParams = None,
306
- response_param_kwargs: dict = None,
307
- handle_validation: Literal[
308
- "raise", "return_value", "return_none"
309
- ] = "return_value",
310
- operative_model: type[BaseModel] = None,
311
- request_model: type[BaseModel] = None,
312
- **kwargs,
313
- ) -> list | BaseModel | None | dict | str:
314
- """Orchestrates an 'operate' flow with optional tool invocation.
307
+ def clone(self, sender: ID.Ref = None) -> "Branch":
308
+ """
309
+ Clones this `Branch` synchronously, optionally updating the sender ID.
310
+
311
+ Args:
312
+ sender (ID.Ref, optional):
313
+ If provided, all messages in the clone will have this sender ID.
314
+ Otherwise, uses the current branch's ID.
315
+
316
+ Raises:
317
+ ValueError: If `sender` is not a valid ID.Ref.
315
318
 
316
- This method creates or updates an Operative, sends an instruction
317
- to the chat model, optionally parses the response, and invokes
318
- requested tools if `invoke_actions` is True.
319
+ Returns:
320
+ Branch: A new branch object with a copy of the messages, system info, etc.
321
+ """
322
+ if sender is not None:
323
+ if not ID.is_id(sender):
324
+ raise ValueError(
325
+ f"Cannot clone Branch: '{sender}' is not a valid sender ID."
326
+ )
327
+ sender = ID.get_id(sender)
328
+
329
+ system = self.msgs.system.clone() if self.msgs.system else None
330
+ tools = (
331
+ list(self._action_manager.registry.values())
332
+ if self._action_manager.registry
333
+ else None
334
+ )
335
+ branch_clone = Branch(
336
+ system=system,
337
+ user=self.user,
338
+ messages=[msg.clone() for msg in self.msgs.messages],
339
+ tools=tools,
340
+ metadata={"clone_from": self},
341
+ )
342
+ for message in branch_clone.msgs.messages:
343
+ message.sender = sender or self.id
344
+ message.recipient = branch_clone.id
345
+
346
+ return branch_clone
347
+
348
+ # -------------------------------------------------------------------------
349
+ # Conversion / Serialization
350
+ # -------------------------------------------------------------------------
351
+ def to_df(self, *, progression: Progression = None) -> pd.DataFrame:
352
+ """
353
+ Convert branch messages into a `pandas.DataFrame`.
319
354
 
320
355
  Args:
321
- instruct (Instruct):
322
- The instruction containing context, guidance, etc.
323
- sender (SenderRecipient, optional):
324
- The sender of this operation.
325
- recipient (SenderRecipient, optional):
326
- The recipient of this operation.
327
356
  progression (Progression, optional):
328
- Specific progression of messages to use.
329
- imodel (iModel, optional):
330
- Deprecated, alias for chat_model.
331
- chat_model (iModel, optional):
332
- The chat model to invoke.
333
- invoke_actions (bool, optional):
334
- Whether to call requested tools (actions).
335
- tool_schemas (list[dict], optional):
336
- Overridden schemas for the tools to be used.
337
- images (list, optional):
338
- Additional images for the model context.
339
- image_detail (Literal["low", "high", "auto"], optional):
340
- The level of detail for images, if relevant.
341
- parse_model (iModel, optional):
342
- The parse model for validating or refining responses.
343
- skip_validation (bool, optional):
344
- If True, skip post-response validation steps.
345
- tools (ToolRef, optional):
346
- Specific tools to make available if `invoke_actions` is True.
347
- operative (Operative, optional):
348
- The operative describing how to handle the response.
349
- response_format (type[BaseModel], optional):
350
- An expected response schema (alias of `operative.request_type`).
351
- fuzzy_match_kwargs (dict, optional):
352
- Settings for fuzzy validation if used.
353
- operative_kwargs (dict, optional):
354
- Additional arguments for creating an Operative if none is given.
355
- **kwargs: Additional arguments passed to the model invocation.
357
+ A custom message ordering. If `None`, uses the stored progression.
356
358
 
357
359
  Returns:
358
- list | BaseModel | None | dict | str:
359
- The final parsed response, or an Operative object, or the
360
- string/dict if skipping validation or no tools needed.
360
+ pd.DataFrame: Each row represents a message, with columns defined by MESSAGE_FIELDS.
361
361
  """
362
- if operative_model:
363
- logging.warning(
364
- "operative_model is deprecated. Use response_format instead."
365
- )
366
- if (
367
- (operative_model and response_format)
368
- or (operative_model and request_model)
369
- or (response_format and request_model)
370
- ):
371
- raise ValueError(
372
- "Cannot specify both operative_model and response_format"
373
- "or operative_model and request_model as they are aliases"
374
- "for the same parameter."
375
- )
362
+ if progression is None:
363
+ progression = self.msgs.progression
376
364
 
377
- response_format = response_format or operative_model or request_model
378
- chat_model = chat_model or imodel or self.chat_model
379
- parse_model = parse_model or chat_model
365
+ msgs = [
366
+ self.msgs.messages[i]
367
+ for i in progression
368
+ if i in self.msgs.messages
369
+ ]
370
+ p = Pile(collections=msgs)
371
+ return p.to_df(columns=MESSAGE_FIELDS)
380
372
 
381
- if isinstance(instruct, dict):
382
- instruct = Instruct(**instruct)
373
+ # -------------------------------------------------------------------------
374
+ # Mailbox Send / Receive
375
+ # -------------------------------------------------------------------------
376
+ def send(
377
+ self,
378
+ recipient: IDType,
379
+ category: PackageCategory | None,
380
+ item: Any,
381
+ request_source: IDType | None = None,
382
+ ) -> None:
383
+ """
384
+ Sends a `Package` (wrapped in a `Mail` object) to a specified recipient.
383
385
 
384
- instruct = instruct or Instruct(
385
- instruction=instruction,
386
- guidance=guidance,
387
- context=context,
386
+ Args:
387
+ recipient (IDType):
388
+ ID of the recipient branch or component.
389
+ category (PackageCategory | None):
390
+ The category/type of the package (e.g., 'message', 'tool', 'imodel').
391
+ item (Any):
392
+ The payload to send (e.g., a message, tool reference, model, etc.).
393
+ request_source (IDType | None):
394
+ The ID that prompted or requested this send operation (optional).
395
+ """
396
+ package = Package(
397
+ category=category,
398
+ item=item,
399
+ request_source=request_source,
388
400
  )
389
401
 
390
- if reason:
391
- instruct.reason = True
392
- if actions:
393
- instruct.actions = True
394
-
395
- operative: Operative = Step.request_operative(
396
- request_params=request_params,
397
- reason=instruct.reason,
398
- actions=instruct.actions,
399
- exclude_fields=exclude_fields,
400
- base_type=response_format,
401
- field_models=field_models,
402
- **(request_param_kwargs or {}),
403
- )
404
- if instruct.actions:
405
- tools = tools or True
406
- if invoke_actions and tools:
407
- tool_schemas = self.acts.get_tool_schema(tools)
408
-
409
- ins, res = await self.invoke_chat(
410
- instruction=instruct.instruction,
411
- guidance=instruct.guidance,
412
- context=instruct.context,
413
- sender=sender,
402
+ mail = Mail(
403
+ sender=self.id,
414
404
  recipient=recipient,
415
- response_format=operative.request_type,
416
- progression=progression,
417
- imodel=chat_model,
418
- images=images,
419
- image_detail=image_detail,
420
- tool_schemas=tool_schemas,
421
- **kwargs,
405
+ package=package,
422
406
  )
423
- self.msgs.add_message(instruction=ins)
424
- self.msgs.add_message(assistant_response=res)
425
-
426
- operative.response_str_dict = res.response
427
- if skip_validation:
428
- if return_operative:
429
- return operative
430
- return operative.response_str_dict
431
-
432
- response_model = operative.update_response_model(res.response)
433
-
434
- if not isinstance(response_model, BaseModel):
435
- response_model = await self.parse(
436
- text=res.response,
437
- request_type=operative.request_type,
438
- max_retries=operative.max_retries,
439
- handle_validation="return_value",
440
- )
441
- operative.response_model = operative.update_response_model(
442
- response_model
443
- )
407
+ self.mailbox.append_out(mail)
444
408
 
445
- if not isinstance(response_model, BaseModel):
446
- match handle_validation:
447
- case "return_value":
448
- return response_model
449
- case "return_none":
450
- return None
451
- case "raise":
452
- raise ValueError(
453
- "Failed to parse response into request format"
454
- )
409
+ def receive(
410
+ self,
411
+ sender: IDType,
412
+ message: bool = False,
413
+ tool: bool = False,
414
+ imodel: bool = False,
415
+ ) -> None:
416
+ """
417
+ Retrieves and processes mail from a given sender according to the specified flags.
455
418
 
456
- if not invoke_actions:
457
- return operative if return_operative else operative.response_model
419
+ Args:
420
+ sender (IDType):
421
+ The ID of the mail sender.
422
+ message (bool):
423
+ If `True`, process packages categorized as "message".
424
+ tool (bool):
425
+ If `True`, process packages categorized as "tool".
426
+ imodel (bool):
427
+ If `True`, process packages categorized as "imodel".
458
428
 
459
- if (
460
- getattr(response_model, "action_required", None) is True
461
- and getattr(response_model, "action_requests", None) is not None
462
- ):
463
- action_response_models = await self.invoke_action(
464
- response_model.action_requests,
465
- **(action_kwargs or {}),
466
- )
467
- operative = Step.respond_operative(
468
- response_params=response_params,
469
- operative=operative,
470
- additional_data={"action_responses": action_response_models},
471
- **(response_param_kwargs or {}),
472
- )
473
- return operative if return_operative else operative.response_model
429
+ Raises:
430
+ ValueError: If no mail exists from the specified sender,
431
+ or if a package is invalid for the chosen category.
432
+ """
433
+ sender = ID.get_id(sender)
434
+ if sender not in self.mailbox.pending_ins.keys():
435
+ raise ValueError(f"No mail or package found from sender: {sender}")
474
436
 
475
- async def parse(
476
- self,
477
- text: str,
478
- handle_validation: Literal[
479
- "raise", "return_value", "return_none"
480
- ] = "return_value",
481
- max_retries: int = 3,
482
- request_type: type[BaseModel] = None,
483
- operative: Operative = None,
484
- similarity_algo="jaro_winkler",
485
- similarity_threshold: float = 0.85,
486
- fuzzy_match: bool = True,
487
- handle_unmatched: Literal[
488
- "ignore", "raise", "remove", "fill", "force"
489
- ] = "force",
490
- fill_value: Any = None,
491
- fill_mapping: dict[str, Any] | None = None,
492
- strict: bool = False,
493
- suppress_conversion_errors: bool = False,
494
- ):
495
- """Attempts to parse text into a structured Pydantic model.
437
+ skipped_requests = Progression()
438
+ while self.mailbox.pending_ins[sender]:
439
+ mail_id = self.mailbox.pending_ins[sender].popleft()
440
+ mail: Mail = self.mailbox.pile_[mail_id]
496
441
 
497
- Uses optional fuzzy matching to handle partial or unclear fields.
442
+ if mail.category == "message" and message:
443
+ if not isinstance(mail.package.item, RoledMessage):
444
+ raise ValueError(
445
+ "Invalid message package: The item must be a `RoledMessage`."
446
+ )
447
+ new_message = mail.package.item.clone()
448
+ new_message.sender = mail.sender
449
+ new_message.recipient = self.id
450
+ self.msgs.messages.include(new_message)
451
+ self.mailbox.pile_.pop(mail_id)
498
452
 
499
- Args:
500
- text (str): The raw text to parse.
501
- handle_validation (Literal["raise","return_value","return_none"]):
502
- What to do if parsing fails. Defaults to "return_value".
503
- max_retries (int):
504
- How many times to retry parsing if it fails.
505
- request_type (type[BaseModel], optional):
506
- The Pydantic model to parse into.
507
- operative (Operative, optional):
508
- If provided, uses its model and max_retries setting.
509
- similarity_algo (str):
510
- The similarity algorithm for fuzzy field matching.
511
- similarity_threshold (float):
512
- A threshold for fuzzy matching (0.0 - 1.0).
513
- fuzzy_match (bool):
514
- If True, tries to match unrecognized keys to known ones.
515
- handle_unmatched (Literal["ignore","raise","remove","fill","force"]):
516
- How to handle unmatched fields.
517
- fill_value (Any):
518
- A default value used when fill is needed.
519
- fill_mapping (dict[str, Any] | None):
520
- A mapping from field -> fill value override.
521
- strict (bool):
522
- If True, raises errors on ambiguous fields or data types.
523
- suppress_conversion_errors (bool):
524
- If True, logs or ignores errors during data conversion.
453
+ elif mail.category == "tool" and tool:
454
+ if not isinstance(mail.package.item, Tool):
455
+ raise ValueError(
456
+ "Invalid tool package: The item must be a `Tool` instance."
457
+ )
458
+ self._action_manager.register_tools(mail.package.item)
459
+ self.mailbox.pile_.pop(mail_id)
525
460
 
526
- Returns:
527
- BaseModel | Any | None:
528
- The parsed model instance, or a dict/string/None depending
529
- on the handling mode.
530
- """
531
- _should_try = True
532
- num_try = 0
533
- response_model = text
534
- if operative is not None:
535
- max_retries = operative.max_retries
536
- request_type = operative.request_type
537
-
538
- while (
539
- _should_try
540
- and num_try < max_retries
541
- and not isinstance(response_model, BaseModel)
542
- ):
543
- num_try += 1
544
- _, res = await self.invoke_chat(
545
- instruction="reformat text into specified model",
546
- guidane="follow the required response format, using the model schema as a guide",
547
- context=[{"text_to_format": text}],
548
- response_format=request_type,
549
- sender=self.user,
550
- recipient=self.id,
551
- imodel=self.parse_model,
552
- )
553
- if operative is not None:
554
- response_model = operative.update_response_model(res.response)
555
- else:
556
- response_model = fuzzy_validate_mapping(
557
- res.response,
558
- breakdown_pydantic_annotation(request_type),
559
- similarity_algo=similarity_algo,
560
- similarity_threshold=similarity_threshold,
561
- fuzzy_match=fuzzy_match,
562
- handle_unmatched=handle_unmatched,
563
- fill_value=fill_value,
564
- fill_mapping=fill_mapping,
565
- strict=strict,
566
- suppress_conversion_errors=suppress_conversion_errors,
567
- )
568
- response_model = request_type.model_validate(response_model)
569
-
570
- if not isinstance(response_model, BaseModel):
571
- match handle_validation:
572
- case "return_value":
573
- return response_model
574
- case "return_none":
575
- return None
576
- case "raise":
461
+ elif mail.category == "imodel" and imodel:
462
+ if not isinstance(mail.package.item, iModel):
577
463
  raise ValueError(
578
- "Failed to parse response into request format"
464
+ "Invalid iModel package: The item must be an `iModel` instance."
579
465
  )
466
+ self._imodel_manager.register_imodel(
467
+ mail.package.item.name or "chat", mail.package.item
468
+ )
469
+ self.mailbox.pile_.pop(mail_id)
580
470
 
581
- return response_model
471
+ else:
472
+ # If the category doesn't match the flags or is unhandled
473
+ skipped_requests.append(mail)
582
474
 
583
- async def communicate(
475
+ # Requeue any skipped mail
476
+ self.mailbox.pending_ins[sender] = skipped_requests
477
+ if len(self.mailbox.pending_ins[sender]) == 0:
478
+ self.mailbox.pending_ins.pop(sender)
479
+
480
+ async def asend(
584
481
  self,
585
- instruction: Instruction | JsonValue = None,
586
- guidance: JsonValue = None,
587
- context: JsonValue = None,
588
- sender: SenderRecipient = None,
589
- recipient: SenderRecipient = None,
590
- progression: ID.IDSeq = None,
591
- request_model: type[BaseModel] | BaseModel | None = None,
592
- response_format: type[BaseModel] = None,
593
- request_fields: dict | list[str] = None,
594
- imodel: iModel = None, # alias of chat_model
595
- chat_model: iModel = None,
596
- parse_model: iModel = None,
597
- skip_validation: bool = False,
598
- images: list = None,
599
- image_detail: Literal["low", "high", "auto"] = None,
600
- num_parse_retries: int = 3,
601
- fuzzy_match_kwargs: dict = None,
602
- clear_messages: bool = False,
603
- operative_model: type[BaseModel] = None,
604
- **kwargs,
482
+ recipient: IDType,
483
+ category: PackageCategory | None,
484
+ package: Any,
485
+ request_source: IDType | None = None,
605
486
  ):
606
- """Handles a general 'communicate' flow without tool invocation.
607
-
608
- Sends messages to the model, optionally parses the response, and
609
- can handle simpler field-level validation.
487
+ """
488
+ Async version of `send()`.
610
489
 
611
490
  Args:
612
- instruction (Instruction | JsonValue, optional):
613
- The main user query or context.
614
- guidance (JsonValue, optional):
615
- Additional LLM instructions.
616
- context (JsonValue, optional):
617
- Context data to pass to the LLM.
618
- sender (SenderRecipient, optional):
619
- The sender of this message.
620
- recipient (SenderRecipient, optional):
621
- The recipient of this message.
622
- progression (ID.IDSeq, optional):
623
- A custom progression of conversation messages.
624
- request_model (type[BaseModel] | BaseModel | None, optional):
625
- Model for structured responses.
626
- response_format (type[BaseModel], optional):
627
- Alias for request_model if both are not given simultaneously.
628
- request_fields (dict | list[str], optional):
629
- Simpler field-level mapping if no model is used.
630
- imodel (iModel, optional):
631
- Deprecated, alias for chat_model.
632
- chat_model (iModel, optional):
633
- Model used for the conversation.
634
- parse_model (iModel, optional):
635
- Model used for any parsing operation.
636
- skip_validation (bool, optional):
637
- If True, returns the raw response without further checks.
638
- images (list, optional):
639
- Additional images if relevant to the LLM context.
640
- image_detail (Literal["low","high","auto"], optional):
641
- Level of image detail if used.
642
- num_parse_retries (int, optional):
643
- Max times to retry parsing on failure (capped at 5).
644
- fuzzy_match_kwargs (dict, optional):
645
- Settings passed to the fuzzy validation function.
646
- clear_messages (bool, optional):
647
- If True, clears previously stored messages.
648
- **kwargs:
649
- Additional arguments for the LLM call.
650
-
651
- Returns:
652
- Any:
653
- The raw string, a validated Pydantic model, or a dict
654
- of requested fields, depending on the parameters.
491
+ recipient (IDType):
492
+ ID of the recipient branch or component.
493
+ category (PackageCategory | None):
494
+ The category/type of the package.
495
+ package (Any):
496
+ The item(s) to send (message/tool/model).
497
+ request_source (IDType | None):
498
+ The origin request ID (if any).
655
499
  """
656
- if operative_model:
657
- logging.warning(
658
- "operative_model is deprecated. Use response_format instead."
659
- )
660
- if (
661
- (operative_model and response_format)
662
- or (operative_model and request_model)
663
- or (response_format and request_model)
664
- ):
665
- raise ValueError(
666
- "Cannot specify both operative_model and response_format"
667
- "or operative_model and request_model as they are aliases"
668
- "for the same parameter."
669
- )
670
-
671
- response_format = response_format or operative_model or request_model
500
+ async with self.mailbox.pile_:
501
+ self.send(recipient, category, package, request_source)
672
502
 
673
- imodel = imodel or chat_model or self.chat_model
674
- parse_model = parse_model or self.parse_model
503
+ async def areceive(
504
+ self,
505
+ sender: IDType,
506
+ message: bool = False,
507
+ tool: bool = False,
508
+ imodel: bool = False,
509
+ ) -> None:
510
+ """
511
+ Async version of `receive()`.
675
512
 
676
- if clear_messages:
677
- self.msgs.clear_messages()
513
+ Args:
514
+ sender (IDType):
515
+ The ID of the mail sender.
516
+ message (bool):
517
+ If `True`, process packages categorized as "message".
518
+ tool (bool):
519
+ If `True`, process packages categorized as "tool".
520
+ imodel (bool):
521
+ If `True`, process packages categorized as "imodel".
522
+ """
523
+ async with self.mailbox.pile_:
524
+ self.receive(sender, message, tool, imodel)
678
525
 
679
- if num_parse_retries > 5:
680
- logging.warning(
681
- f"Are you sure you want to retry {num_parse_retries} "
682
- "times? lowering retry attempts to 5. Suggestion is under 3"
683
- )
684
- num_parse_retries = 5
526
+ def receive_all(self) -> None:
527
+ """
528
+ Receives mail from all known senders without filtering.
685
529
 
686
- ins, res = await self.invoke_chat(
687
- instruction=instruction,
688
- guidance=guidance,
689
- context=context,
690
- sender=sender,
691
- recipient=recipient,
692
- response_format=response_format,
693
- progression=progression,
694
- imodel=imodel,
695
- images=images,
696
- image_detail=image_detail,
697
- **kwargs,
698
- )
699
- self.msgs.add_message(instruction=ins)
700
- self.msgs.add_message(assistant_response=res)
701
-
702
- if skip_validation:
703
- return res.response
704
-
705
- if response_format is not None:
706
- return await self.parse(
707
- text=res.response,
708
- request_type=response_format,
709
- max_retries=num_parse_retries,
710
- **(fuzzy_match_kwargs or {}),
711
- )
530
+ (Duplicate method included in your snippet; you may unify or remove.)
531
+ """
532
+ for key in self.mailbox.pending_ins:
533
+ self.receive(key)
712
534
 
713
- if request_fields is not None:
714
- _d = fuzzy_validate_mapping(
715
- res.response,
716
- request_fields,
717
- handle_unmatched="force",
718
- fill_value=UNDEFINED,
719
- )
720
- return {k: v for k, v in _d.items() if v != UNDEFINED}
535
+ # -------------------------------------------------------------------------
536
+ # Dictionary Conversion
537
+ # -------------------------------------------------------------------------
538
+ def to_dict(self):
539
+ """
540
+ Serializes the branch to a Python dictionary, including:
541
+ - Messages
542
+ - Logs
543
+ - Chat/Parse models
544
+ - System message
545
+ - LogManager config
546
+ - Metadata
721
547
 
722
- return res.response
548
+ Returns:
549
+ dict: A dictionary representing the branch's internal state.
550
+ """
551
+ meta = {}
552
+ if "clone_from" in self.metadata:
723
553
 
724
- async def invoke_action(
725
- self,
726
- action_request: list | ActionRequest | BaseModel | dict,
727
- /,
728
- suppress_errors: bool = False,
729
- sanitize_input: bool = False,
730
- unique_input: bool = False,
731
- num_retries: int = 0,
732
- initial_delay: float = 0,
733
- retry_delay: float = 0,
734
- backoff_factor: float = 1,
735
- retry_default: Any = UNDEFINED,
736
- retry_timeout: float | None = None,
737
- retry_timing: bool = False,
738
- max_concurrent: int | None = None,
739
- throttle_period: float | None = None,
740
- flatten: bool = True,
741
- dropna: bool = True,
742
- unique_output: bool = False,
743
- flatten_tuple_set: bool = False,
744
- ):
745
- params = locals()
746
- params.pop("self")
747
- params.pop("action_request")
748
- return await alcall(
749
- action_request,
750
- self._invoke_action,
751
- **params,
554
+ # Provide some reference info about the source from which we cloned
555
+ meta["clone_from"] = {
556
+ "id": str(self.metadata["clone_from"].id),
557
+ "user": str(self.metadata["clone_from"].user),
558
+ "created_at": self.metadata["clone_from"].created_at,
559
+ "progression": [
560
+ str(i)
561
+ for i in self.metadata["clone_from"].msgs.progression
562
+ ],
563
+ }
564
+ meta.update(
565
+ copy({k: v for k, v in self.metadata.items() if k != "clone_from"})
752
566
  )
753
567
 
754
- async def _invoke_action(
755
- self,
756
- action_request: ActionRequest | BaseModel | dict,
757
- suppress_errors: bool = False,
758
- ) -> ActionResponse:
759
- """Invokes a tool (action) asynchronously.
568
+ dict_ = super().to_dict()
569
+ dict_["messages"] = self.messages.to_dict()
570
+ dict_["logs"] = self.logs.to_dict()
571
+ dict_["chat_model"] = self.chat_model.to_dict()
572
+ dict_["parse_model"] = self.parse_model.to_dict()
573
+ if self.system:
574
+ dict_["system"] = self.system.to_dict()
575
+ dict_["log_config"] = self._log_manager._config.model_dump()
576
+ dict_["metadata"] = meta
577
+ return dict_
578
+
579
+ @classmethod
580
+ def from_dict(cls, data: dict):
581
+ """
582
+ Creates a `Branch` instance from a serialized dictionary.
760
583
 
761
584
  Args:
762
- action_request (ActionRequest | BaseModel | dict):
763
- Contains the function name (`function`) and arguments.
764
- suppress_errors (bool, optional):
765
- If True, logs errors instead of raising.
585
+ data (dict):
586
+ Must include (or optionally include) `messages`, `logs`,
587
+ `chat_model`, `parse_model`, `system`, and `log_config`.
766
588
 
767
589
  Returns:
768
- ActionResponse: The result of the tool call.
590
+ Branch: A new `Branch` instance based on the deserialized data.
769
591
  """
770
- try:
771
- func, args = None, None
772
- if isinstance(action_request, BaseModel):
773
- if hasattr(action_request, "function") and hasattr(
774
- action_request, "arguments"
775
- ):
776
- func = action_request.function
777
- args = action_request.arguments
778
- elif isinstance(action_request, dict):
779
- if action_request.keys() >= {"function", "arguments"}:
780
- func = action_request["function"]
781
- args = action_request["arguments"]
782
-
783
- func_call: FunctionCalling = await self._action_manager.invoke(
784
- action_request
785
- )
786
- if isinstance(func_call, FunctionCalling):
787
- self._log_manager.log(Log.create(func_call))
788
-
789
- if not isinstance(action_request, ActionRequest):
790
- action_request = ActionRequest.create(
791
- function=func,
792
- arguments=args,
793
- sender=self.id,
794
- recipient=func_call.func_tool.id,
795
- )
796
-
797
- if action_request not in self.messages:
798
- self.msgs.add_message(action_request=action_request)
799
-
800
- self.msgs.add_message(
801
- action_request=action_request,
802
- action_output=func_call.response,
803
- )
804
-
805
- return ActionResponseModel(
806
- function=action_request.function,
807
- arguments=action_request.arguments,
808
- output=func_call.response,
809
- )
810
- if isinstance(func_call, Log):
811
- self._log_manager.log(func_call)
812
- return None
592
+ dict_ = {
593
+ "messages": data.pop("messages", UNDEFINED),
594
+ "logs": data.pop("logs", UNDEFINED),
595
+ "chat_model": data.pop("chat_model", UNDEFINED),
596
+ "parse_model": data.pop("parse_model", UNDEFINED),
597
+ "system": data.pop("system", UNDEFINED),
598
+ "log_config": data.pop("log_config", UNDEFINED),
599
+ }
600
+ params = {}
813
601
 
814
- except Exception as e:
815
- if suppress_errors:
816
- logging.error(f"Error invoking action: {e}")
602
+ # Merge in the rest of the data
603
+ for k, v in data.items():
604
+ # If the item is a dict with an 'id', we expand it
605
+ if isinstance(v, dict) and "id" in v:
606
+ params.update(v)
817
607
  else:
818
- raise e
608
+ params[k] = v
609
+
610
+ params.update(dict_)
611
+ # Remove placeholders (UNDEFINED) so we don't incorrectly assign them
612
+ return cls(**{k: v for k, v in params.items() if v is not UNDEFINED})
819
613
 
820
- async def invoke_chat(
614
+ # -------------------------------------------------------------------------
615
+ # Asynchronous Operations (chat, parse, operate, etc.)
616
+ # -------------------------------------------------------------------------
617
+ async def chat(
821
618
  self,
822
619
  instruction=None,
823
620
  guidance=None,
@@ -831,457 +628,829 @@ class Branch(Element, Communicatable, Relational):
831
628
  tool_schemas=None,
832
629
  images: list = None,
833
630
  image_detail: Literal["low", "high", "auto"] = None,
631
+ plain_content: str = None,
834
632
  **kwargs,
835
633
  ) -> tuple[Instruction, AssistantResponse]:
836
- """Invokes the chat model with the current conversation history.
634
+ """
635
+ Invokes the chat model with the current conversation history.
837
636
 
838
- This method constructs a sequence of messages from the stored
839
- progression, merges any pending action responses into the context,
840
- and calls the model. The result is then wrapped in an
841
- AssistantResponse.
637
+ **High-level flow**:
638
+ 1. Construct a sequence of messages from the stored progression.
639
+ 2. Integrate any pending action responses into the context.
640
+ 3. Invoke the chat model with the combined messages.
641
+ 4. Capture and return the final response as an `AssistantResponse`.
842
642
 
843
643
  Args:
844
644
  instruction (Any):
845
- The main user instruction text or structured data.
645
+ Main user instruction text or structured data.
846
646
  guidance (Any):
847
- Additional system or user guidance.
647
+ Additional system or user guidance text.
848
648
  context (Any):
849
649
  Context data to pass to the model.
850
650
  sender (Any):
851
- The user or entity sending this message.
651
+ The user or entity sending this message (defaults to `Branch.user`).
852
652
  recipient (Any):
853
- The intended recipient of this message (default is self.id).
653
+ The recipient of this message (defaults to `self.id`).
854
654
  request_fields (Any):
855
- A set of fields for partial validation (rarely used).
856
- request_model (type[BaseModel], optional):
857
- A specific Pydantic model to request from the LLM.
655
+ Partial field-level validation reference (rarely used).
656
+ response_format (type[BaseModel], optional):
657
+ A Pydantic model type for structured model responses.
858
658
  progression (Any):
859
- The conversation flow or message ordering.
659
+ Custom ordering of messages in the conversation.
860
660
  imodel (iModel, optional):
861
- The chat model to use.
661
+ An override for the chat model to use. If not provided, uses `self.chat_model`.
862
662
  tool_schemas (Any, optional):
863
- Additional schemas to pass if tools are invoked.
663
+ Additional schemas for tool invocation in function-calling.
864
664
  images (list, optional):
865
- Optional list of images.
866
- image_detail (Literal["low","high","auto"], optional):
867
- The level of detail for images, if relevant.
665
+ Optional images relevant to the model's context.
666
+ image_detail (Literal["low", "high", "auto"], optional):
667
+ Level of detail for image-based context (if relevant).
668
+ plain_content (str, optional):
669
+ Plain text content appended to the instruction.
868
670
  **kwargs:
869
- Additional model invocation parameters.
671
+ Additional parameters for the LLM invocation.
870
672
 
871
673
  Returns:
872
674
  tuple[Instruction, AssistantResponse]:
873
- The instruction object (with context) and the final
874
- AssistantResponse from the model call.
675
+ The `Instruction` object and the final `AssistantResponse`.
875
676
  """
876
- ins: Instruction = self.msgs.create_instruction(
677
+ from lionagi.operations.chat.chat import chat
678
+
679
+ return await chat(
680
+ self,
877
681
  instruction=instruction,
878
682
  guidance=guidance,
879
683
  context=context,
880
- sender=sender or self.user or "user",
881
- recipient=recipient or self.id,
882
- response_format=response_format,
684
+ sender=sender,
685
+ recipient=recipient,
883
686
  request_fields=request_fields,
687
+ response_format=response_format,
688
+ progression=progression,
689
+ chat_model=imodel,
690
+ tool_schemas=tool_schemas,
884
691
  images=images,
885
692
  image_detail=image_detail,
886
- tool_schemas=tool_schemas,
887
- )
888
-
889
- progression = progression or self.msgs.progression
890
- messages: list[RoledMessage] = [
891
- self.msgs.messages[i] for i in progression
892
- ]
893
-
894
- use_ins = None
895
- _to_use = []
896
- _action_responses: set[ActionResponse] = set()
897
-
898
- for i in messages:
899
- if isinstance(i, ActionResponse):
900
- _action_responses.add(i)
901
- if isinstance(i, AssistantResponse):
902
- j = AssistantResponse(
903
- role=i.role,
904
- content=copy(i.content),
905
- sender=i.sender,
906
- recipient=i.recipient,
907
- template=i.template,
908
- )
909
- _to_use.append(j)
910
- if isinstance(i, Instruction):
911
- j = Instruction(
912
- role=i.role,
913
- content=copy(i.content),
914
- sender=i.sender,
915
- recipient=i.recipient,
916
- template=i.template,
917
- )
918
- j.tool_schemas = None
919
- j.respond_schema_info = None
920
- j.request_response_format = None
921
-
922
- if _action_responses:
923
- d_ = [k.content for k in _action_responses]
924
- for z in d_:
925
- if z not in j.context:
926
- j.context.append(z)
927
-
928
- _to_use.append(j)
929
- _action_responses = set()
930
- else:
931
- _to_use.append(j)
932
-
933
- messages = _to_use
934
- if _action_responses:
935
- j = ins.model_copy()
936
- d_ = [k.content for k in _action_responses]
937
- for z in d_:
938
- if z not in j.context:
939
- j.context.append(z)
940
- use_ins = j
941
-
942
- if messages and len(messages) > 1:
943
- _msgs = [messages[0]]
944
-
945
- for i in messages[1:]:
946
- if isinstance(i, AssistantResponse):
947
- if isinstance(_msgs[-1], AssistantResponse):
948
- _msgs[-1].response = (
949
- f"{_msgs[-1].response}\n\n{i.response}"
950
- )
951
- else:
952
- _msgs.append(i)
953
- else:
954
- if isinstance(_msgs[-1], AssistantResponse):
955
- _msgs.append(i)
956
- messages = _msgs
957
-
958
- if self.msgs.system and imodel.sequential_exchange:
959
- messages = [msg for msg in messages if msg.role != "system"]
960
- first_instruction = None
961
-
962
- if len(messages) == 0:
963
- first_instruction = ins.model_copy()
964
- first_instruction.guidance = self.msgs.system.rendered + (
965
- first_instruction.guidance or ""
966
- )
967
- messages.append(first_instruction)
968
- elif len(messages) >= 1:
969
- first_instruction = messages[0]
970
- if not isinstance(first_instruction, Instruction):
971
- raise ValueError(
972
- "First message in progression must be an Instruction or System"
973
- )
974
- first_instruction = first_instruction.model_copy()
975
- first_instruction.guidance = self.msgs.system.rendered + (
976
- first_instruction.guidance or ""
977
- )
978
- messages[0] = first_instruction
979
- messages.append(use_ins or ins)
980
-
981
- else:
982
- messages.append(use_ins or ins)
983
-
984
- kwargs["messages"] = [i.chat_msg for i in messages]
985
- imodel = imodel or self.chat_model
986
-
987
- api_call = await imodel.invoke(**kwargs)
988
- self._log_manager.log(Log.create(api_call))
989
-
990
- res = AssistantResponse.create(
991
- assistant_response=api_call.response,
992
- sender=self.id,
993
- recipient=self.user,
693
+ plain_content=plain_content,
694
+ **kwargs,
994
695
  )
995
696
 
996
- return ins, res
697
+ async def parse(
698
+ self,
699
+ text: str,
700
+ handle_validation: Literal[
701
+ "raise", "return_value", "return_none"
702
+ ] = "return_value",
703
+ max_retries: int = 3,
704
+ request_type: type[BaseModel] = None,
705
+ operative: Operative = None,
706
+ similarity_algo="jaro_winkler",
707
+ similarity_threshold: float = 0.85,
708
+ fuzzy_match: bool = True,
709
+ handle_unmatched: Literal[
710
+ "ignore", "raise", "remove", "fill", "force"
711
+ ] = "force",
712
+ fill_value: Any = None,
713
+ fill_mapping: dict[str, Any] | None = None,
714
+ strict: bool = False,
715
+ suppress_conversion_errors: bool = False,
716
+ response_format: type[BaseModel] = None,
717
+ ):
718
+ """
719
+ Attempts to parse text into a structured Pydantic model using parse model logic.
997
720
 
998
- def clone(self, sender: ID.Ref = None) -> "Branch":
999
- """Clones this Branch, creating a new instance with the same data.
721
+ If fuzzy matching is enabled, tries to map partial or uncertain keys
722
+ to the known fields of the model. Retries are performed if initial parsing fails.
1000
723
 
1001
724
  Args:
1002
- sender (ID.Ref, optional):
1003
- If provided, sets this sender ID on the cloned messages.
1004
- Otherwise, uses self.id.
725
+ text (str):
726
+ The raw text to parse.
727
+ handle_validation (Literal["raise","return_value","return_none"]):
728
+ What to do if parsing fails (default: "return_value").
729
+ max_retries (int):
730
+ Number of times to retry parsing on failure (default: 3).
731
+ request_type (type[BaseModel], optional):
732
+ The Pydantic model to parse into.
733
+ operative (Operative, optional):
734
+ An `Operative` object with known request model and settings.
735
+ similarity_algo (str):
736
+ Algorithm name for fuzzy field matching.
737
+ similarity_threshold (float):
738
+ Threshold for matching (0.0 - 1.0).
739
+ fuzzy_match (bool):
740
+ Whether to attempt fuzzy matching for unmatched fields.
741
+ handle_unmatched (Literal["ignore","raise","remove","fill","force"]):
742
+ Policy for unrecognized fields (default: "force").
743
+ fill_value (Any):
744
+ Default placeholder for missing fields (if fill is used).
745
+ fill_mapping (dict[str, Any] | None):
746
+ A mapping of specific fields to fill values.
747
+ strict (bool):
748
+ If True, raises errors on ambiguous fields or data types.
749
+ suppress_conversion_errors (bool):
750
+ If True, logs or ignores conversion errors instead of raising.
1005
751
 
1006
752
  Returns:
1007
- Branch: A new branch with cloned messages and the same tools.
753
+ BaseModel | dict | str | None:
754
+ Parsed model instance, or a fallback based on `handle_validation`.
1008
755
  """
1009
- if sender is not None:
1010
- if not ID.is_id(sender):
1011
- raise ValueError(
1012
- "Input value for branch.clone sender is not a valid sender"
1013
- )
1014
- sender = ID.get_id(sender)
1015
-
1016
- system = self.msgs.system.clone() if self.msgs.system else None
1017
- tools = (
1018
- list(self._action_manager.registry.values())
1019
- if self._action_manager.registry
1020
- else None
756
+ from lionagi.operations.parse.parse import parse
757
+
758
+ return await parse(
759
+ self,
760
+ text=text,
761
+ handle_validation=handle_validation,
762
+ max_retries=max_retries,
763
+ request_type=request_type,
764
+ operative=operative,
765
+ similarity_algo=similarity_algo,
766
+ similarity_threshold=similarity_threshold,
767
+ fuzzy_match=fuzzy_match,
768
+ handle_unmatched=handle_unmatched,
769
+ fill_value=fill_value,
770
+ fill_mapping=fill_mapping,
771
+ strict=strict,
772
+ suppress_conversion_errors=suppress_conversion_errors,
773
+ response_format=response_format,
1021
774
  )
1022
- branch_clone = Branch(
1023
- system=system,
1024
- user=self.user,
1025
- messages=[i.clone() for i in self.msgs.messages],
1026
- tools=tools,
1027
- metadata={"clone_from": self},
1028
- )
1029
- for message in branch_clone.msgs.messages:
1030
- message.sender = sender or self.id
1031
- message.recipient = branch_clone.id
1032
- return branch_clone
1033
775
 
1034
- def to_df(self, *, progression: Progression = None) -> pd.DataFrame:
1035
- """Converts messages in the branch to a Pandas DataFrame.
776
+ async def operate(
777
+ self,
778
+ *,
779
+ instruct: Instruct = None,
780
+ instruction: Instruction | JsonValue = None,
781
+ guidance: JsonValue = None,
782
+ context: JsonValue = None,
783
+ sender: SenderRecipient = None,
784
+ recipient: SenderRecipient = None,
785
+ progression: Progression = None,
786
+ imodel: iModel = None, # deprecated, alias of chat_model
787
+ chat_model: iModel = None,
788
+ invoke_actions: bool = True,
789
+ tool_schemas: list[dict] = None,
790
+ images: list = None,
791
+ image_detail: Literal["low", "high", "auto"] = None,
792
+ parse_model: iModel = None,
793
+ skip_validation: bool = False,
794
+ tools: ToolRef = None,
795
+ operative: Operative = None,
796
+ response_format: type[
797
+ BaseModel
798
+ ] = None, # alias of operative.request_type
799
+ return_operative: bool = False,
800
+ actions: bool = False,
801
+ reason: bool = False,
802
+ action_kwargs: dict = None,
803
+ field_models: list[FieldModel] = None,
804
+ exclude_fields: list | dict | None = None,
805
+ request_params: ModelParams = None,
806
+ request_param_kwargs: dict = None,
807
+ response_params: ModelParams = None,
808
+ response_param_kwargs: dict = None,
809
+ handle_validation: Literal[
810
+ "raise", "return_value", "return_none"
811
+ ] = "return_value",
812
+ operative_model: type[BaseModel] = None,
813
+ request_model: type[BaseModel] = None,
814
+ **kwargs,
815
+ ) -> list | BaseModel | None | dict | str:
816
+ """
817
+ Orchestrates an "operate" flow with optional tool invocation and
818
+ structured response validation.
819
+
820
+ **Workflow**:
821
+ 1) Builds or updates an `Operative` object to specify how the LLM should respond.
822
+ 2) Sends an instruction (`instruct`) or direct `instruction` text to `branch.chat()`.
823
+ 3) Optionally validates/parses the result into a model or dictionary.
824
+ 4) If `invoke_actions=True`, any requested tool calls are automatically invoked.
825
+ 5) Returns either the final structure, raw response, or an updated `Operative`.
1036
826
 
1037
827
  Args:
828
+ branch (Branch):
829
+ The active branch that orchestrates messages, models, and logs.
830
+ instruct (Instruct, optional):
831
+ Contains the instruction, guidance, context, etc. If not provided,
832
+ uses `instruction`, `guidance`, `context` directly.
833
+ instruction (Instruction | JsonValue, optional):
834
+ The main user instruction or content for the LLM.
835
+ guidance (JsonValue, optional):
836
+ Additional system or user instructions.
837
+ context (JsonValue, optional):
838
+ Extra context data.
839
+ sender (SenderRecipient, optional):
840
+ The sender ID for newly added messages.
841
+ recipient (SenderRecipient, optional):
842
+ The recipient ID for newly added messages.
1038
843
  progression (Progression, optional):
1039
- A custom progression of messages to include. If None, uses
1040
- the existing stored progression.
844
+ Custom ordering of conversation messages.
845
+ imodel (iModel, deprecated):
846
+ Alias of `chat_model`.
847
+ chat_model (iModel, optional):
848
+ The LLM used for the main chat operation. Defaults to `branch.chat_model`.
849
+ invoke_actions (bool, optional):
850
+ If `True`, executes any requested tools found in the LLM's response.
851
+ tool_schemas (list[dict], optional):
852
+ Additional schema definitions for tool-based function-calling.
853
+ images (list, optional):
854
+ Optional images appended to the LLM context.
855
+ image_detail (Literal["low","high","auto"], optional):
856
+ The level of image detail, if relevant.
857
+ parse_model (iModel, optional):
858
+ Model used for deeper or specialized parsing, if needed.
859
+ skip_validation (bool, optional):
860
+ If `True`, bypasses final validation and returns raw text or partial structure.
861
+ tools (ToolRef, optional):
862
+ Tools to be registered or made available if `invoke_actions` is True.
863
+ operative (Operative, optional):
864
+ If provided, reuses an existing operative's config for parsing/validation.
865
+ response_format (type[BaseModel], optional):
866
+ Expected Pydantic model for the final response (alias for `operative.request_type`).
867
+ return_operative (bool, optional):
868
+ If `True`, returns the entire `Operative` object after processing
869
+ rather than the structured or raw output.
870
+ actions (bool, optional):
871
+ If `True`, signals that function-calling or "action" usage is expected.
872
+ reason (bool, optional):
873
+ If `True`, signals that the LLM should provide chain-of-thought or reasoning (where applicable).
874
+ action_kwargs (dict | None, optional):
875
+ Additional parameters for the `branch.act()` call if tools are invoked.
876
+ field_models (list[FieldModel] | None, optional):
877
+ Field-level definitions or overrides for the model schema.
878
+ exclude_fields (list|dict|None, optional):
879
+ Which fields to exclude from final validation or model building.
880
+ request_params (ModelParams | None, optional):
881
+ Extra config for building the request model in the operative.
882
+ request_param_kwargs (dict|None, optional):
883
+ Additional kwargs passed to the `ModelParams` constructor for the request.
884
+ response_params (ModelParams | None, optional):
885
+ Config for building the response model after actions.
886
+ response_param_kwargs (dict|None, optional):
887
+ Additional kwargs passed to the `ModelParams` constructor for the response.
888
+ handle_validation (Literal["raise","return_value","return_none"], optional):
889
+ How to handle parsing failures (default: "return_value").
890
+ operative_model (type[BaseModel], deprecated):
891
+ Alias for `response_format`.
892
+ request_model (type[BaseModel], optional):
893
+ Another alias for `response_format`.
894
+ **kwargs:
895
+ Additional keyword arguments passed to the LLM via `branch.chat()`.
1041
896
 
1042
897
  Returns:
1043
- pd.DataFrame:
1044
- A DataFrame containing message data for easy inspection
1045
- or serialization.
898
+ list | BaseModel | None | dict | str:
899
+ - The parsed or raw response from the LLM,
900
+ - `None` if validation fails and `handle_validation='return_none'`,
901
+ - or the entire `Operative` object if `return_operative=True`.
902
+
903
+ Raises:
904
+ ValueError:
905
+ - If both `operative_model` and `response_format` or `request_model` are given.
906
+ - If the LLM's response cannot be parsed into the expected format and `handle_validation='raise'`.
1046
907
  """
1047
- if progression is None:
1048
- progression = self.msgs.progression
908
+ from lionagi.operations.operate.operate import operate
1049
909
 
1050
- msgs = [
1051
- self.msgs.messages[i]
1052
- for i in progression
1053
- if i in self.msgs.messages
1054
- ]
1055
- p = Pile(collections=msgs)
1056
- return p.to_df(columns=MESSAGE_FIELDS)
910
+ return await operate(
911
+ self,
912
+ instruct=instruct,
913
+ instruction=instruction,
914
+ guidance=guidance,
915
+ context=context,
916
+ sender=sender,
917
+ recipient=recipient,
918
+ progression=progression,
919
+ chat_model=chat_model,
920
+ invoke_actions=invoke_actions,
921
+ tool_schemas=tool_schemas,
922
+ images=images,
923
+ image_detail=image_detail,
924
+ parse_model=parse_model,
925
+ skip_validation=skip_validation,
926
+ tools=tools,
927
+ operative=operative,
928
+ response_format=response_format,
929
+ return_operative=return_operative,
930
+ actions=actions,
931
+ reason=reason,
932
+ action_kwargs=action_kwargs,
933
+ field_models=field_models,
934
+ exclude_fields=exclude_fields,
935
+ request_params=request_params,
936
+ request_param_kwargs=request_param_kwargs,
937
+ response_params=response_params,
938
+ response_param_kwargs=response_param_kwargs,
939
+ handle_validation=handle_validation,
940
+ operative_model=operative_model,
941
+ request_model=request_model,
942
+ imodel=imodel,
943
+ **kwargs,
944
+ )
1057
945
 
1058
- async def _instruct(self, instruct: Instruct, /, **kwargs) -> Any:
1059
- """Convenience method for handling an 'Instruct'.
946
+ async def communicate(
947
+ self,
948
+ instruction: Instruction | JsonValue = None,
949
+ *,
950
+ guidance: JsonValue = None,
951
+ context: JsonValue = None,
952
+ plain_content: str = None,
953
+ sender: SenderRecipient = None,
954
+ recipient: SenderRecipient = None,
955
+ progression: ID.IDSeq = None,
956
+ request_model: type[BaseModel] | BaseModel | None = None,
957
+ response_format: type[BaseModel] = None,
958
+ request_fields: dict | list[str] = None,
959
+ imodel: iModel = None, # alias of chat_model
960
+ chat_model: iModel = None,
961
+ parse_model: iModel = None,
962
+ skip_validation: bool = False,
963
+ images: list = None,
964
+ image_detail: Literal["low", "high", "auto"] = None,
965
+ num_parse_retries: int = 3,
966
+ fuzzy_match_kwargs: dict = None,
967
+ clear_messages: bool = False,
968
+ operative_model: type[BaseModel] = None,
969
+ **kwargs,
970
+ ):
971
+ """
972
+ A simpler orchestration than `operate()`, typically without tool invocation.
1060
973
 
1061
- Checks if the instruct uses reserved kwargs for an 'operate' flow
1062
- (e.g., actions and a response format). Otherwise, falls back to a
1063
- simpler 'communicate' flow.
974
+ **Flow**:
975
+ 1. Sends an instruction (or conversation) to the chat model.
976
+ 2. Optionally parses the response into a structured model or fields.
977
+ 3. Returns either the raw string, the parsed model, or a dict of fields.
1064
978
 
1065
979
  Args:
1066
- instruct (Instruct):
1067
- The instruction context and guidance.
980
+ instruction (Instruction | dict, optional):
981
+ The user's main query or data.
982
+ guidance (JsonValue, optional):
983
+ Additional instructions or context for the LLM.
984
+ context (JsonValue, optional):
985
+ Extra data or context.
986
+ plain_content (str, optional):
987
+ Plain text content appended to the instruction.
988
+ sender (SenderRecipient, optional):
989
+ Sender ID (defaults to `Branch.user`).
990
+ recipient (SenderRecipient, optional):
991
+ Recipient ID (defaults to `self.id`).
992
+ progression (ID.IDSeq, optional):
993
+ Custom ordering of messages.
994
+ request_model (type[BaseModel] | BaseModel | None, optional):
995
+ Model for validating or structuring the LLM's response.
996
+ response_format (type[BaseModel], optional):
997
+ Alias for `request_model`. If both are provided, raises ValueError.
998
+ request_fields (dict|list[str], optional):
999
+ If you only need certain fields from the LLM's response.
1000
+ imodel (iModel, optional):
1001
+ Deprecated alias for `chat_model`.
1002
+ chat_model (iModel, optional):
1003
+ An alternative to the default chat model.
1004
+ parse_model (iModel, optional):
1005
+ If parsing is needed, you can override the default parse model.
1006
+ skip_validation (bool, optional):
1007
+ If True, returns the raw response string unvalidated.
1008
+ images (list, optional):
1009
+ Any relevant images.
1010
+ image_detail (Literal["low","high","auto"], optional):
1011
+ Image detail level (if used).
1012
+ num_parse_retries (int, optional):
1013
+ Maximum parsing retries (capped at 5).
1014
+ fuzzy_match_kwargs (dict, optional):
1015
+ Additional settings for fuzzy field matching (if used).
1016
+ clear_messages (bool, optional):
1017
+ Whether to clear stored messages before sending.
1018
+ operative_model (type[BaseModel], optional):
1019
+ Deprecated, alias for `response_format`.
1068
1020
  **kwargs:
1069
- Additional arguments for operate or communicate.
1021
+ Additional arguments for the underlying LLM call.
1070
1022
 
1071
1023
  Returns:
1072
- Any: The result of the chosen flow, e.g., a validated response.
1024
+ Any:
1025
+ - Raw string (if `skip_validation=True`),
1026
+ - A validated Pydantic model,
1027
+ - A dict of the requested fields,
1028
+ - or `None` if parsing fails and `handle_validation='return_none'`.
1073
1029
  """
1074
- config = {**instruct.to_dict(), **kwargs}
1075
- if any(i in config and config[i] for i in Instruct.reserved_kwargs):
1076
- if "response_format" in config or "request_model" in config:
1077
- return await self.operate(**config)
1078
- for i in Instruct.reserved_kwargs:
1079
- config.pop(i, None)
1030
+ from lionagi.operations.communicate.communicate import communicate
1080
1031
 
1081
- return await self.communicate(**config)
1032
+ return await communicate(
1033
+ self,
1034
+ instruction=instruction,
1035
+ guidance=guidance,
1036
+ context=context,
1037
+ plain_content=plain_content,
1038
+ sender=sender,
1039
+ recipient=recipient,
1040
+ progression=progression,
1041
+ request_model=request_model,
1042
+ response_format=response_format,
1043
+ request_fields=request_fields,
1044
+ chat_model=chat_model or imodel,
1045
+ parse_model=parse_model,
1046
+ skip_validation=skip_validation,
1047
+ images=images,
1048
+ image_detail=image_detail,
1049
+ num_parse_retries=num_parse_retries,
1050
+ fuzzy_match_kwargs=fuzzy_match_kwargs,
1051
+ clear_messages=clear_messages,
1052
+ operative_model=operative_model,
1053
+ **kwargs,
1054
+ )
1082
1055
 
1083
- def send(
1056
+ async def _act(
1084
1057
  self,
1085
- recipient: IDType,
1086
- category: PackageCategory | None,
1087
- item: Any,
1088
- request_source: IDType | None = None,
1089
- ) -> None:
1090
- """Sends a package (wrapped in Mail) to a specific recipient.
1058
+ action_request: ActionRequest | BaseModel | dict,
1059
+ suppress_errors: bool = False,
1060
+ ) -> ActionResponse:
1061
+ """
1062
+ Internal method to invoke a tool (action) asynchronously.
1091
1063
 
1092
1064
  Args:
1093
- recipient (IDType):
1094
- The ID of the recipient entity.
1095
- category (PackageCategory | None):
1096
- The category of the package (e.g., 'message', 'tool', etc.).
1097
- package (Any):
1098
- The payload to send (could be a message, tool, model, etc.).
1099
- request_source (IDType | None):
1100
- The ID that requested this send (if any).
1065
+ action_request (ActionRequest|BaseModel|dict):
1066
+ Must contain `function` and `arguments`.
1067
+ suppress_errors (bool, optional):
1068
+ If True, errors are logged instead of raised.
1069
+
1070
+ Returns:
1071
+ ActionResponse: Result of the tool invocation or `None` if suppressed.
1101
1072
  """
1102
- package = Package(
1103
- category=category,
1104
- item=item,
1105
- request_source=request_source,
1106
- )
1073
+ from lionagi.operations._act.act import _act
1107
1074
 
1108
- mail = Mail(
1109
- sender=self.id,
1110
- recipient=recipient,
1111
- package=package,
1112
- )
1113
- self.mailbox.append_out(mail)
1075
+ return await _act(self, action_request, suppress_errors)
1114
1076
 
1115
- def receive(
1077
+ async def act(
1116
1078
  self,
1117
- sender: IDType,
1118
- message: bool = False,
1119
- tool: bool = False,
1120
- imodel: bool = False,
1121
- ) -> None:
1122
- """Retrieves mail from a sender, processing it if enabled by parameters.
1079
+ action_request: list | ActionRequest | BaseModel | dict,
1080
+ *,
1081
+ suppress_errors: bool = True,
1082
+ sanitize_input: bool = False,
1083
+ unique_input: bool = False,
1084
+ num_retries: int = 0,
1085
+ initial_delay: float = 0,
1086
+ retry_delay: float = 0,
1087
+ backoff_factor: float = 1,
1088
+ retry_default: Any = UNDEFINED,
1089
+ retry_timeout: float | None = None,
1090
+ retry_timing: bool = False,
1091
+ max_concurrent: int | None = None,
1092
+ throttle_period: float | None = None,
1093
+ flatten: bool = True,
1094
+ dropna: bool = True,
1095
+ unique_output: bool = False,
1096
+ flatten_tuple_set: bool = False,
1097
+ ) -> list[ActionResponse] | ActionResponse | Any:
1098
+ """
1099
+ Public, potentially batched, asynchronous interface to run one or multiple action requests.
1123
1100
 
1124
1101
  Args:
1125
- sender (IDType):
1126
- The ID of the sender.
1127
- message (bool):
1128
- If True, processes mails categorized as "message".
1129
- tool (bool):
1130
- If True, processes mails categorized as "tool".
1131
- imodel (bool):
1132
- If True, processes mails categorized as "imodel".
1102
+ action_request (list|ActionRequest|BaseModel|dict):
1103
+ A single or list of action requests, each requiring
1104
+ `function` and `arguments`.
1105
+ suppress_errors (bool):
1106
+ If True, log errors instead of raising exceptions.
1107
+ sanitize_input (bool):
1108
+ Reserved. Potentially sanitize the action arguments.
1109
+ unique_input (bool):
1110
+ Reserved. Filter out duplicate requests.
1111
+ num_retries (int):
1112
+ Number of times to retry on failure (default 0).
1113
+ initial_delay (float):
1114
+ Delay before first attempt (seconds).
1115
+ retry_delay (float):
1116
+ Base delay between retries.
1117
+ backoff_factor (float):
1118
+ Multiplier for the `retry_delay` after each attempt.
1119
+ retry_default (Any):
1120
+ Fallback value if all retries fail (if suppressing errors).
1121
+ retry_timeout (float|None):
1122
+ Overall timeout for all attempts (None = no limit).
1123
+ retry_timing (bool):
1124
+ If True, track time used for retries.
1125
+ max_concurrent (int|None):
1126
+ Maximum concurrent tasks (if batching).
1127
+ throttle_period (float|None):
1128
+ Minimum spacing (in seconds) between requests.
1129
+ flatten (bool):
1130
+ If a list of results is returned, flatten them if possible.
1131
+ dropna (bool):
1132
+ Remove `None` or invalid results from final output if True.
1133
+ unique_output (bool):
1134
+ Only return unique results if True.
1135
+ flatten_tuple_set (bool):
1136
+ Flatten nested tuples in results if True.
1133
1137
 
1134
- Raises:
1135
- ValueError:
1136
- If the sender doesn't exist or if the mail category is invalid
1137
- for the chosen processing options.
1138
+ Returns:
1139
+ Any:
1140
+ The result or results from the invoked tool(s).
1138
1141
  """
1139
- skipped_requests = Progression()
1140
- sender = ID.get_id(sender)
1141
- if sender not in self.mailbox.pending_ins.keys():
1142
- raise ValueError(f"No package from {sender}")
1143
- while self.mailbox.pending_ins[sender]:
1144
- mail_id = self.mailbox.pending_ins[sender].popleft()
1145
- mail: Mail = self.mailbox.pile_[mail_id]
1146
-
1147
- if mail.category == "message" and message:
1148
- if not isinstance(mail.package.item, RoledMessage):
1149
- raise ValueError("Invalid message format")
1150
- new_message = mail.package.item.clone()
1151
- new_message.sender = mail.sender
1152
- new_message.recipient = self.id
1153
- self.msgs.messages.include(new_message)
1154
- self.mailbox.pile_.pop(mail_id)
1155
-
1156
- elif mail.category == "tool" and tool:
1157
- if not isinstance(mail.package.item, Tool):
1158
- raise ValueError("Invalid tools format")
1159
- self._action_manager.register_tools(mail.package.item)
1160
- self.mailbox.pile_.pop(mail_id)
1161
-
1162
- elif mail.category == "imodel" and imodel:
1163
- if not isinstance(mail.package.item, iModel):
1164
- raise ValueError("Invalid iModel format")
1165
- self._imodel_manager.register_imodel(
1166
- imodel.name or "chat", mail.package.item
1167
- )
1168
- self.mailbox.pile_.pop(mail_id)
1142
+ params = locals()
1143
+ params.pop("self")
1144
+ params.pop("action_request")
1145
+ return await alcall(action_request, self._act, **params)
1169
1146
 
1170
- else:
1171
- skipped_requests.append(mail)
1147
+ async def translate(
1148
+ self,
1149
+ text: str,
1150
+ technique: Literal["SynthLang"] = "SynthLang",
1151
+ technique_kwargs: dict = None,
1152
+ compress: bool = False,
1153
+ chat_model: iModel = None,
1154
+ compress_model: iModel = None,
1155
+ compression_ratio: float = 0.2,
1156
+ compress_kwargs=None,
1157
+ verbose: bool = True,
1158
+ new_branch: bool = True,
1159
+ **kwargs,
1160
+ ) -> str:
1161
+ """
1162
+ An example "translate" operation that transforms text using a chosen technique
1163
+ (e.g., "SynthLang"). Optionally compresses text with a custom `compress_model`.
1172
1164
 
1173
- self.mailbox.pending_ins[sender] = skipped_requests
1165
+ Args:
1166
+ text (str):
1167
+ The text to be translated or transformed.
1168
+ technique (Literal["SynthLang"]):
1169
+ The translation/transform technique (currently only "SynthLang").
1170
+ technique_kwargs (dict, optional):
1171
+ Additional parameters for the chosen technique.
1172
+ compress (bool):
1173
+ Whether to compress the resulting text further.
1174
+ chat_model (iModel, optional):
1175
+ A custom model for the translation step (defaults to self.chat_model).
1176
+ compress_model (iModel, optional):
1177
+ A separate model for compression (if `compress=True`).
1178
+ compression_ratio (float):
1179
+ Desired compression ratio if compressing text (0.0 - 1.0).
1180
+ compress_kwargs (dict, optional):
1181
+ Additional arguments for the compression step.
1182
+ verbose (bool):
1183
+ If True, prints debug/logging info.
1184
+ new_branch (bool):
1185
+ If True, performs the translation in a new branch context.
1186
+ **kwargs:
1187
+ Additional parameters passed through to the technique function.
1174
1188
 
1175
- if len(self.mailbox.pending_ins[sender]) == 0:
1176
- self.mailbox.pending_ins.pop(sender)
1189
+ Returns:
1190
+ str: The transformed (and optionally compressed) text.
1191
+ """
1192
+ from lionagi.operations.translate.translate import translate
1193
+
1194
+ return await translate(
1195
+ branch=self,
1196
+ text=text,
1197
+ technique=technique,
1198
+ technique_kwargs=technique_kwargs,
1199
+ compress=compress,
1200
+ chat_model=chat_model,
1201
+ compress_model=compress_model,
1202
+ compression_ratio=compression_ratio,
1203
+ compress_kwargs=compress_kwargs,
1204
+ verbose=verbose,
1205
+ new_branch=new_branch,
1206
+ **kwargs,
1207
+ )
1177
1208
 
1178
- async def asend(
1209
+ async def select(
1179
1210
  self,
1180
- recipient: IDType,
1181
- category: PackageCategory | None,
1182
- package: Any,
1183
- request_source: IDType | None = None,
1211
+ instruct: Instruct | dict[str, Any],
1212
+ choices: list[str] | type[Enum] | dict[str, Any],
1213
+ max_num_selections: int = 1,
1214
+ branch_kwargs: dict[str, Any] | None = None,
1215
+ verbose: bool = False,
1216
+ **kwargs: Any,
1184
1217
  ):
1185
- """Asynchronous version of send().
1218
+ """
1219
+ Performs a selection operation from given choices using an LLM-driven approach.
1186
1220
 
1187
1221
  Args:
1188
- recipient (IDType): The ID of the recipient.
1189
- category (PackageCategory | None): The category of the package.
1190
- package (Any): The item(s) to send.
1191
- request_source (IDType | None): The origin of this request.
1222
+ instruct (Instruct|dict[str, Any]):
1223
+ The instruction model or dictionary for the LLM call.
1224
+ choices (list[str]|type[Enum]|dict[str,Any]):
1225
+ The set of options to choose from.
1226
+ max_num_selections (int):
1227
+ Maximum allowed selections (default = 1).
1228
+ branch_kwargs (dict[str, Any]|None):
1229
+ Extra arguments to create or configure a new branch if needed.
1230
+ verbose (bool):
1231
+ If True, prints debug info.
1232
+ **kwargs:
1233
+ Additional arguments for the underlying `operate()` call.
1234
+
1235
+ Returns:
1236
+ Any:
1237
+ A `SelectionModel` or similar that indicates the user's choice(s).
1192
1238
  """
1193
- async with self.mailbox.pile_:
1194
- self.send(recipient, category, package, request_source)
1239
+ from lionagi.operations.select.select import select
1240
+
1241
+ return await select(
1242
+ branch=self,
1243
+ instruct=instruct,
1244
+ choices=choices,
1245
+ max_num_selections=max_num_selections,
1246
+ branch_kwargs=branch_kwargs,
1247
+ verbose=verbose,
1248
+ **kwargs,
1249
+ )
1195
1250
 
1196
- async def areceive(
1251
    async def compress(
        self,
        text: str,
        system_msg: str = None,
        target_ratio: float = 0.2,
        n_samples: int = 5,
        max_tokens_per_sample: int = 80,
        verbose: bool = True,
    ) -> str:
        """
        Uses the `chat_model`'s built-in compression routine to shorten text.

        Args:
            text (str):
                The text to compress.
            system_msg (str, optional):
                System-level instructions, appended to the prompt.
            target_ratio (float):
                Desired compression ratio (0.0-1.0).
            n_samples (int):
                How many compression attempts to combine or evaluate.
            max_tokens_per_sample (int):
                Max token count per sample chunk.
            verbose (bool):
                If True, logs or prints progress.

        Returns:
            str: The compressed text.
        """
        # Thin wrapper: delegates entirely to the chat model's own
        # `compress_text` implementation.
        return await self.chat_model.compress_text(
            text=text,
            system_msg=system_msg,
            target_ratio=target_ratio,
            n_samples=n_samples,
            max_tokens_per_sample=max_tokens_per_sample,
            verbose=verbose,
        )
 
1214
- def receive_all(self) -> None:
1215
- """Receives mail from all senders."""
1216
- for key in list(self.mailbox.pending_ins.keys()):
1217
- self.receive(key)
1289
    async def interpret(
        self,
        text: str,
        domain: str | None = None,
        style: str | None = None,
        **kwargs,
    ) -> str:
        """
        Interprets (rewrites) a user's raw input into a more formal or
        structured LLM prompt. This can be seen as a "prompt translator,"
        which ensures the original query is clarified or enhanced for
        better LLM responses.

        Delegates to `lionagi.operations.interpret.interpret`, which per
        its documentation calls `communicate()` on this branch with a
        system prompt instructing the LLM to rewrite the input. You can
        provide additional parameters in `**kwargs` (e.g., `parse_model`,
        `skip_validation`) to shape how the rewriting is done.

        Args:
            text (str):
                The raw user input or question that needs interpreting.
            domain (str | None, optional):
                Optional domain hint (e.g. "finance", "marketing",
                "devops"). The LLM can use this hint to tailor its
                rewriting approach.
            style (str | None, optional):
                Optional style hint (e.g. "concise", "detailed").
            **kwargs:
                Additional arguments passed through to the underlying
                operation, such as `parse_model`, `skip_validation`,
                `temperature`, etc.

        Returns:
            str:
                A refined or "improved" user prompt string, suitable for
                feeding back into the LLM as a clearer instruction.

        Example:
            refined = await branch.interpret(
                "How do I do marketing analytics?",
                domain="marketing",
                style="detailed",
            )
            # refined might be "Explain step-by-step how to set up a
            # marketing analytics pipeline to track campaign performance..."
        """
        from lionagi.operations.interpret.interpret import interpret

        return await interpret(
            self, text=text, domain=domain, style=style, **kwargs
        )
1339
 
1236
- dict_ = super().to_dict()
1237
- dict_["messages"] = self.messages.to_dict()
1238
- dict_["logs"] = self.logs.to_dict()
1239
- dict_["chat_model"] = self.chat_model.to_dict()
1240
- dict_["parse_model"] = self.parse_model.to_dict()
1241
- if self.system:
1242
- dict_["system"] = self.system.to_dict()
1243
- dict_["log_config"] = self._log_manager._config.model_dump()
1244
- dict_["metadata"] = meta
1340
+ async def instruct(
1341
+ self,
1342
+ instruct: Instruct,
1343
+ /,
1344
+ **kwargs,
1345
+ ):
1346
+ """
1347
+ A convenience method that chooses between `operate()` and `communicate()`
1348
+ based on the contents of an `Instruct` object.
1245
1349
 
1246
- return dict_
1350
+ If the `Instruct` indicates tool usage or advanced response format,
1351
+ `operate()` is used. Otherwise, it defaults to `communicate()`.
1247
1352
 
1248
- @classmethod
1249
- def from_dict(cls, data: dict):
1250
- dict_ = {
1251
- "messages": data.pop("messages", UNDEFINED),
1252
- "logs": data.pop("logs", UNDEFINED),
1253
- "chat_model": data.pop("chat_model", UNDEFINED),
1254
- "parse_model": data.pop("parse_model", UNDEFINED),
1255
- "system": data.pop("system", UNDEFINED),
1256
- "log_config": data.pop("log_config", UNDEFINED),
1257
- }
1258
- params = {}
1259
- for k, v in data.items():
1260
- if isinstance(v, dict) and "id" in v:
1261
- params.update(v)
1262
- else:
1263
- params[k] = v
1353
+ Args:
1354
+ instruct (Instruct):
1355
+ An object containing `instruction`, `guidance`, `context`, etc.
1356
+ **kwargs:
1357
+ Additional args forwarded to `operate()` or `communicate()`.
1264
1358
 
1265
- params.update(dict_)
1266
- return cls(**{k: v for k, v in params.items() if v is not UNDEFINED})
1359
+ Returns:
1360
+ Any:
1361
+ The result of the underlying call (structured object, raw text, etc.).
1362
+ """
1363
+ from lionagi.operations.instruct.instruct import instruct as _ins
1267
1364
 
1268
- def receive_all(self) -> None:
1269
- """Receives mail from all senders."""
1270
- for key in self.mailbox.pending_ins:
1271
- self.receive(key)
1365
+ return await _ins(self, instruct, **kwargs)
1272
1366
 
1273
- def flagged_messages(
1367
    async def ReAct(
        self,
        instruct: Instruct | dict[str, Any],
        interpret: bool = False,
        tools: Any = None,
        tool_schemas: Any = None,
        response_format: type[BaseModel] = None,
        extension_allowed: bool = False,
        max_extensions: int = None,
        response_kwargs: dict | None = None,
        return_analysis: bool = False,
        analysis_model: iModel | None = None,
        **kwargs,
    ):
        """
        Performs a multi-step "ReAct" flow (inspired by the ReAct paradigm
        in LLM usage), which may include:
        1) Optionally interpreting the user's original instructions via
           `self.interpret()`.
        2) Generating chain-of-thought analysis or reasoning using a
           specialized schema (`ReActAnalysis`).
        3) Optionally expanding the conversation multiple times if the
           analysis indicates more steps (extensions).
        4) Producing a final answer via this branch's `instruct()` method.

        Args:
            instruct (Instruct | dict[str, Any]):
                The user's instruction object or a dict with equivalent keys.
            interpret (bool, optional):
                If `True`, first interprets (`self.interpret`) the
                instructions to refine them before proceeding.
                Defaults to `False`.
            tools (Any, optional):
                Tools to be made available for the ReAct process. If omitted
                or `None`, and if no `tool_schemas` are provided, it defaults
                to `True` (all tools).
            tool_schemas (Any, optional):
                Additional or custom schemas for tools if function calling
                is needed.
            response_format (type[BaseModel], optional):
                The final schema for the user-facing output after the ReAct
                expansions. If `None`, the output is raw text or an
                unstructured response.
            extension_allowed (bool, optional):
                Whether to allow multiple expansions if the analysis
                indicates more steps. Defaults to `False`.
            max_extensions (int | None, optional):
                The max number of expansions if `extension_allowed` is
                `True`. If omitted, no upper limit is enforced (other than
                logic).
            response_kwargs (dict | None, optional):
                Extra kwargs passed into the final call that produces the
                final output (the `instruct()` step). Defaults to `None`.
            return_analysis (bool, optional):
                If `True`, returns both the final output and the list of
                analysis objects produced throughout expansions.
                Defaults to `False`.
            analysis_model (iModel | None, optional):
                A custom LLM model for generating the ReAct analysis steps.
                If `None`, uses the branch's default `chat_model`.
            **kwargs:
                Additional keyword arguments passed into the initial
                `operate()` call.

        Returns:
            Any | tuple[Any, list]:
                - If `return_analysis=False`, returns only the final output
                  (which may be a raw string, dict, or structured model
                  depending on `response_format`).
                - If `return_analysis=True`, returns a tuple of
                  (final_output, list_of_analyses), where list_of_analyses
                  is a sequence of the intermediate or extended
                  ReActAnalysis objects.

        Notes:
            - If `max_extensions` is greater than 5, a warning is logged and
              it is set to 5.
            - If `interpret=True`, the user instruction is replaced by the
              interpreted string before proceeding.
            - The expansions loop continues until either
              `analysis.extension_needed` is `False` or `extensions` (the
              remaining allowed expansions) is `0`.
        """
        # Deferred import of the implementation; this method only forwards
        # its arguments to the standalone `ReAct` operation.
        from lionagi.operations.ReAct.ReAct import ReAct

        return await ReAct(
            self,
            instruct,
            interpret=interpret,
            tools=tools,
            tool_schemas=tool_schemas,
            response_format=response_format,
            extension_allowed=extension_allowed,
            max_extensions=max_extensions,
            response_kwargs=response_kwargs,
            return_analysis=return_analysis,
            analysis_model=analysis_model,
            **kwargs,
        )
1454
 
1286
1455
 
1287
1456
  # File: lionagi/session/branch.py