lionagi 0.17.11__py3-none-any.whl → 0.18.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (109)
  1. lionagi/_errors.py +0 -5
  2. lionagi/fields.py +83 -0
  3. lionagi/libs/schema/minimal_yaml.py +98 -0
  4. lionagi/ln/__init__.py +3 -1
  5. lionagi/ln/concurrency/primitives.py +4 -4
  6. lionagi/ln/concurrency/task.py +1 -0
  7. lionagi/ln/types.py +32 -5
  8. lionagi/models/field_model.py +21 -4
  9. lionagi/models/hashable_model.py +2 -3
  10. lionagi/operations/ReAct/ReAct.py +475 -238
  11. lionagi/operations/ReAct/utils.py +3 -0
  12. lionagi/operations/act/act.py +206 -0
  13. lionagi/operations/builder.py +5 -7
  14. lionagi/operations/chat/chat.py +130 -114
  15. lionagi/operations/communicate/communicate.py +101 -42
  16. lionagi/operations/fields.py +380 -0
  17. lionagi/operations/flow.py +8 -10
  18. lionagi/operations/interpret/interpret.py +65 -20
  19. lionagi/operations/node.py +4 -4
  20. lionagi/operations/operate/operate.py +216 -108
  21. lionagi/{protocols/operatives → operations/operate}/operative.py +4 -5
  22. lionagi/{protocols/operatives → operations/operate}/step.py +34 -39
  23. lionagi/operations/parse/parse.py +170 -142
  24. lionagi/operations/select/select.py +79 -18
  25. lionagi/operations/select/utils.py +8 -2
  26. lionagi/operations/types.py +119 -23
  27. lionagi/protocols/action/manager.py +5 -6
  28. lionagi/protocols/contracts.py +2 -2
  29. lionagi/protocols/generic/__init__.py +22 -0
  30. lionagi/protocols/generic/element.py +36 -127
  31. lionagi/protocols/generic/log.py +3 -2
  32. lionagi/protocols/generic/pile.py +9 -10
  33. lionagi/protocols/generic/progression.py +23 -22
  34. lionagi/protocols/graph/edge.py +6 -5
  35. lionagi/protocols/ids.py +6 -49
  36. lionagi/protocols/messages/__init__.py +29 -0
  37. lionagi/protocols/messages/action_request.py +86 -184
  38. lionagi/protocols/messages/action_response.py +73 -131
  39. lionagi/protocols/messages/assistant_response.py +130 -159
  40. lionagi/protocols/messages/base.py +31 -22
  41. lionagi/protocols/messages/instruction.py +280 -625
  42. lionagi/protocols/messages/manager.py +112 -62
  43. lionagi/protocols/messages/message.py +87 -197
  44. lionagi/protocols/messages/system.py +52 -123
  45. lionagi/protocols/types.py +1 -13
  46. lionagi/service/connections/__init__.py +3 -0
  47. lionagi/service/connections/endpoint.py +0 -8
  48. lionagi/service/connections/providers/claude_code_cli.py +3 -2
  49. lionagi/service/connections/providers/oai_.py +29 -94
  50. lionagi/service/connections/providers/ollama_.py +3 -2
  51. lionagi/service/hooks/_types.py +1 -1
  52. lionagi/service/hooks/_utils.py +1 -1
  53. lionagi/service/hooks/hook_event.py +3 -8
  54. lionagi/service/hooks/hook_registry.py +5 -5
  55. lionagi/service/hooks/hooked_event.py +63 -3
  56. lionagi/service/imodel.py +24 -20
  57. lionagi/service/third_party/claude_code.py +3 -3
  58. lionagi/service/third_party/openai_models.py +435 -0
  59. lionagi/service/token_calculator.py +1 -94
  60. lionagi/session/branch.py +190 -400
  61. lionagi/session/session.py +8 -99
  62. lionagi/tools/file/reader.py +2 -2
  63. lionagi/version.py +1 -1
  64. {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/METADATA +6 -6
  65. lionagi-0.18.1.dist-info/RECORD +164 -0
  66. lionagi/fields/__init__.py +0 -47
  67. lionagi/fields/action.py +0 -188
  68. lionagi/fields/base.py +0 -153
  69. lionagi/fields/code.py +0 -239
  70. lionagi/fields/file.py +0 -234
  71. lionagi/fields/instruct.py +0 -135
  72. lionagi/fields/reason.py +0 -55
  73. lionagi/fields/research.py +0 -52
  74. lionagi/operations/_act/act.py +0 -86
  75. lionagi/operations/brainstorm/__init__.py +0 -2
  76. lionagi/operations/brainstorm/brainstorm.py +0 -498
  77. lionagi/operations/brainstorm/prompt.py +0 -11
  78. lionagi/operations/instruct/__init__.py +0 -2
  79. lionagi/operations/instruct/instruct.py +0 -28
  80. lionagi/operations/plan/__init__.py +0 -6
  81. lionagi/operations/plan/plan.py +0 -386
  82. lionagi/operations/plan/prompt.py +0 -25
  83. lionagi/operations/utils.py +0 -45
  84. lionagi/protocols/forms/__init__.py +0 -2
  85. lionagi/protocols/forms/base.py +0 -85
  86. lionagi/protocols/forms/flow.py +0 -79
  87. lionagi/protocols/forms/form.py +0 -86
  88. lionagi/protocols/forms/report.py +0 -48
  89. lionagi/protocols/mail/__init__.py +0 -2
  90. lionagi/protocols/mail/exchange.py +0 -220
  91. lionagi/protocols/mail/mail.py +0 -51
  92. lionagi/protocols/mail/mailbox.py +0 -103
  93. lionagi/protocols/mail/manager.py +0 -218
  94. lionagi/protocols/mail/package.py +0 -101
  95. lionagi/protocols/messages/templates/README.md +0 -28
  96. lionagi/protocols/messages/templates/action_request.jinja2 +0 -5
  97. lionagi/protocols/messages/templates/action_response.jinja2 +0 -9
  98. lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -6
  99. lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -61
  100. lionagi/protocols/messages/templates/system_message.jinja2 +0 -11
  101. lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -7
  102. lionagi/protocols/operatives/__init__.py +0 -2
  103. lionagi/service/connections/providers/types.py +0 -28
  104. lionagi/service/third_party/openai_model_names.py +0 -198
  105. lionagi/service/types.py +0 -58
  106. lionagi-0.17.11.dist-info/RECORD +0 -199
  107. /lionagi/operations/{_act → act}/__init__.py +0 -0
  108. {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/WHEEL +0 -0
  109. {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/licenses/LICENSE +0 -0
@@ -1,386 +0,0 @@
- # Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
- # SPDX-License-Identifier: Apache-2.0
-
- from typing import Any, Literal
-
- from pydantic import BaseModel
-
- from lionagi.fields.instruct import (
-     LIST_INSTRUCT_FIELD_MODEL,
-     Instruct,
-     InstructResponse,
- )
- from lionagi.ln import alcall
- from lionagi.protocols.types import ID
- from lionagi.session.branch import Branch
- from lionagi.session.session import Session
-
- from ..utils import prepare_instruct, prepare_session
- from .prompt import EXPANSION_PROMPT, PLAN_PROMPT
-
- # ---------------------------------------------------------------------
- # Data Model
- # ---------------------------------------------------------------------
-
-
- class PlanOperation(BaseModel):
-     """
-     Stores all relevant outcomes for a multi-step Plan:
-         * initial: The result of the initial plan prompt
-         * plan: A list of plan steps (Instruct objects) generated from the initial planning
-         * execute: Any responses from executing those plan steps
-     """
-
-     initial: Any
-     plan: list[Instruct] | None = None
-     execute: list[InstructResponse] | None = None
-
-
- # ---------------------------------------------------------------------
- # Utilities
- # ---------------------------------------------------------------------
-
-
- def chunked(iterable, n):
-     """
-     Yield successive n-sized chunks from an iterable.
-     Example:
-         >>> list(chunked([1,2,3,4,5], 2))
-         [[1,2],[3,4],[5]]
-     """
-     for i in range(0, len(iterable), n):
-         yield iterable[i : i + n]
-
-
- # ---------------------------------------------------------------------
- # Single-Step Runner
- # ---------------------------------------------------------------------
-
-
- async def run_step(
-     ins: Instruct,
-     session: Session,
-     branch: Branch,
-     verbose: bool = True,
-     **kwargs: Any,
- ) -> Any:
-     """
-     Execute a single step of the plan with an 'expansion' or guidance prompt.
-
-     Args:
-         ins: The instruction model for the step.
-         session: The current session context.
-         branch: The branch to operate on for this step.
-         verbose: Whether to enable verbose output.
-         **kwargs: Additional keyword arguments passed to the branch operation.
-
-     Returns:
-         The result of the branch operation (which may contain more instructions).
-     """
-     if verbose:
-         snippet = (
-             ins.instruction[:100] + "..."
-             if len(ins.instruction) > 100
-             else ins.instruction
-         )
-         print(f"Further planning: {snippet}")
-
-     # Incorporate the EXPANSION_PROMPT into guidance
-     config = {**ins.model_dump(), **kwargs}
-     guidance_text = config.pop("guidance", "")
-     config["guidance"] = f"{EXPANSION_PROMPT}\n{guidance_text}"
-
-     # Run the step
-     result = await branch.operate(**config)
-     branch.dump_logs() # Dump logs if needed
-     return result
-
-
- # ---------------------------------------------------------------------
- # Main Plan Function (with Multiple Execution Strategies)
- # ---------------------------------------------------------------------
-
-
- async def plan(
-     instruct: Instruct | dict[str, Any],
-     num_steps: int = 2,
-     session: Session | None = None,
-     branch: Branch | ID.Ref | None = None,
-     auto_run: bool = True,
-     auto_execute: bool = False,
-     execution_strategy: Literal[
-         "sequential",
-         "concurrent",
-         "sequential_concurrent_chunk",
-         "concurrent_sequential_chunk",
-     ] = "sequential",
-     execution_kwargs: dict[str, Any] | None = None,
-     branch_kwargs: dict[str, Any] | None = None,
-     return_session: bool = False,
-     verbose: bool = True,
-     **kwargs: Any,
- ) -> PlanOperation | tuple[PlanOperation, Session]:
-     """
-     Create and optionally execute a multi-step plan with up to `num_steps`.
-
-     Steps:
-         1. Generate an initial plan with up to `num_steps`.
-         2. Optionally (auto_run=True) expand on each planned step
-            to refine or further clarify them.
-         3. Optionally (auto_execute=True) execute those refined steps
-            according to `execution_strategy`.
-
-     Args:
-         instruct: Initial instruction or a dict describing it.
-         num_steps: Maximum number of plan steps (must be <= 5).
-         session: An existing Session, or None to create a new one.
-         branch: An existing Branch, or None to create a new one.
-         auto_run: If True, automatically run the intermediate plan steps.
-         auto_execute: If True, automatically execute the fully refined steps.
-         execution_strategy:
-             - "sequential" (default) runs steps one by one
-             - "concurrent" runs all steps in parallel
-             - "sequential_concurrent_chunk" processes chunks sequentially, each chunk in parallel
-             - "concurrent_sequential_chunk" processes all chunks in parallel, each chunk sequentially
-         execution_kwargs: Extra kwargs used during execution calls.
-         branch_kwargs: Extra kwargs for branch/session creation.
-         return_session: Whether to return (PlanOperation, Session) instead of just PlanOperation.
-         verbose: If True, prints verbose logs.
-         **kwargs: Additional arguments for the initial plan operation.
-
-     Returns:
-         A PlanOperation object containing:
-             - initial plan
-             - (optional) plan expansions
-             - (optional) execution responses
-         Optionally returns the session as well, if `return_session=True`.
-     """
-
-     # -----------------------------------------------------------------
-     # 0. Basic Validation & Setup
-     # -----------------------------------------------------------------
-     if num_steps > 5:
-         raise ValueError("Number of steps must be 5 or less")
-
-     if verbose:
-         print(f"Planning execution with {num_steps} steps...")
-
-     # Ensure the correct field model
-     field_models: list = kwargs.get("field_models", [])
-     if LIST_INSTRUCT_FIELD_MODEL not in field_models:
-         field_models.append(LIST_INSTRUCT_FIELD_MODEL)
-     kwargs["field_models"] = field_models
-
-     # Prepare session/branch
-     session, branch = prepare_session(session, branch, branch_kwargs)
-     execute_branch: Branch = session.split(
-         branch
-     ) # a separate branch for execution
-
-     # -----------------------------------------------------------------
-     # 1. Run the Initial Plan Prompt
-     # -----------------------------------------------------------------
-     plan_prompt = PLAN_PROMPT.format(num_steps=num_steps)
-     instruct = prepare_instruct(instruct, plan_prompt)
-     initial_res = await branch.operate(**instruct, **kwargs)
-
-     # Wrap initial result in the PlanOperation
-     out = PlanOperation(initial=initial_res)
-
-     if verbose:
-         print("Initial planning complete. Starting step planning...")
-
-     # If we aren't auto-running the steps, just return the initial plan
-     if not auto_run:
-         return (out, session) if return_session else out
-
-     # -----------------------------------------------------------------
-     # 2. Expand Each Step (auto_run=True)
-     # -----------------------------------------------------------------
-     results = []
-     if hasattr(initial_res, "instruct_models"):
-         instructs: list[Instruct] = initial_res.instruct_models
-         for i, step_ins in enumerate(instructs, start=1):
-             if verbose:
-                 print(f"\n----- Planning step {i}/{len(instructs)} -----")
-             expanded_res = await run_step(
-                 step_ins, session, branch, verbose=verbose, **kwargs
-             )
-             results.append(expanded_res)
-
-         if verbose:
-             print("\nAll planning steps expanded/refined successfully!")
-
-     # Gather all newly created plan instructions
-     refined_plans = []
-     for step_result in results:
-         if hasattr(step_result, "instruct_models"):
-             for model in step_result.instruct_models:
-                 if model and model not in refined_plans:
-                     refined_plans.append(model)
-
-     out.plan = refined_plans
-
-     # -----------------------------------------------------------------
-     # 3. Execute the Plan Steps (auto_execute=True)
-     # -----------------------------------------------------------------
-     if auto_execute:
-         if verbose:
-             print("\nStarting execution of all plan steps...")
-
-         # We now handle multiple strategies:
-         match execution_strategy:
-             # ---------------------------------------------------------
-             # Strategy A: SEQUENTIAL
-             # ---------------------------------------------------------
-             case "sequential":
-                 seq_results = []
-                 for i, plan_step in enumerate(refined_plans, start=1):
-                     if verbose:
-                         snippet = (
-                             plan_step.instruction[:100] + "..."
-                             if len(plan_step.instruction) > 100
-                             else plan_step.instruction
-                         )
-                         print(
-                             f"\n------ Executing step {i}/{len(refined_plans)} ------"
-                         )
-                         print(f"Instruction: {snippet}")
-
-                     step_response = await execute_branch.instruct(
-                         plan_step, **(execution_kwargs or {})
-                     )
-                     seq_results.append(
-                         InstructResponse(
-                             instruct=plan_step, response=step_response
-                         )
-                     )
-
-                 out.execute = seq_results
-                 if verbose:
-                     print("\nAll steps executed successfully (sequential)!")
-
-             # ---------------------------------------------------------
-             # Strategy B: CONCURRENT
-             # ---------------------------------------------------------
-             case "concurrent":
-
-                 async def execute_step_concurrently(plan_step: Instruct):
-                     if verbose:
-                         snippet = (
-                             plan_step.instruction[:100] + "..."
-                             if len(plan_step.instruction) > 100
-                             else plan_step.instruction
-                         )
-                         print(f"\n------ Executing step (concurrently) ------")
-                         print(f"Instruction: {snippet}")
-                     local_branch = session.split(execute_branch)
-                     resp = await local_branch.instruct(
-                         plan_step, **(execution_kwargs or {})
-                     )
-                     return InstructResponse(instruct=plan_step, response=resp)
-
-                 # Launch all steps in parallel
-                 concurrent_res = await alcall(
-                     refined_plans, execute_step_concurrently
-                 )
-                 out.execute = concurrent_res
-                 if verbose:
-                     print("\nAll steps executed successfully (concurrent)!")
-
-             # ---------------------------------------------------------
-             # Strategy C: SEQUENTIAL_CONCURRENT_CHUNK
-             #  - process plan steps in chunks (one chunk after another),
-             #  - each chunk's steps run in parallel.
-             # ---------------------------------------------------------
-             case "sequential_concurrent_chunk":
-                 chunk_size = (execution_kwargs or {}).get("chunk_size", 5)
-                 all_exec_responses = []
-
-                 async def execute_chunk_concurrently(
-                     sub_steps: list[Instruct],
-                 ):
-                     if verbose:
-                         print(
-                             f"\n--- Executing a chunk of size {len(sub_steps)} concurrently ---"
-                         )
-
-                     async def _execute(plan_step: Instruct):
-                         local_branch = session.split(execute_branch)
-                         resp = await local_branch.instruct(
-                             plan_step, **(execution_kwargs or {})
-                         )
-                         return InstructResponse(
-                             instruct=plan_step, response=resp
-                         )
-
-                     # run each chunk in parallel
-                     return await alcall(sub_steps, _execute)
-
-                 # process each chunk sequentially
-                 for chunk in chunked(refined_plans, chunk_size):
-                     chunk_responses = await execute_chunk_concurrently(chunk)
-                     all_exec_responses.extend(chunk_responses)
-
-                 out.execute = all_exec_responses
-                 if verbose:
-                     print(
-                         "\nAll steps executed successfully (sequential concurrent chunk)!"
-                     )
-
-             # ---------------------------------------------------------
-             # Strategy D: CONCURRENT_SEQUENTIAL_CHUNK
-             #  - split plan steps into chunks,
-             #  - run all chunks in parallel,
-             #  - but each chunk's steps run sequentially.
-             # ---------------------------------------------------------
-             case "concurrent_sequential_chunk":
-                 chunk_size = (execution_kwargs or {}).get("chunk_size", 5)
-                 all_chunks = list(chunked(refined_plans, chunk_size))
-
-                 async def execute_chunk_sequentially(
-                     sub_steps: list[Instruct],
-                 ):
-                     chunk_result = []
-                     local_branch = session.split(execute_branch)
-                     for plan_step in sub_steps:
-                         if verbose:
-                             snippet = (
-                                 plan_step.instruction[:100] + "..."
-                                 if len(plan_step.instruction) > 100
-                                 else plan_step.instruction
-                             )
-                             print(
-                                 f"\n--- Executing step (sequential in chunk) ---\nInstruction: {snippet}"
-                             )
-                         resp = await local_branch.instruct(
-                             plan_step, **(execution_kwargs or {})
-                         )
-                         chunk_result.append(
-                             InstructResponse(instruct=plan_step, response=resp)
-                         )
-                     return chunk_result
-
-                 # run all chunks in parallel, each chunk sequentially
-                 parallel_chunk_results = await alcall(
-                     all_chunks,
-                     execute_chunk_sequentially,
-                     output_flatten=True,
-                     output_dropna=True,
-                 )
-
-                 out.execute = parallel_chunk_results
-                 if verbose:
-                     print(
-                         "\nAll steps executed successfully (concurrent sequential chunk)!"
-                     )
-
-             case _:
-                 raise ValueError(
-                     f"Invalid execution strategy: {execution_strategy}"
-                 )
-
-     # -----------------------------------------------------------------
-     # 4. Final Return
-     # -----------------------------------------------------------------
-     return (out, session) if return_session else out
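The removed module above is evidently lionagi/operations/plan/plan.py, dropped in 0.18.1 along with the brainstorm and instruct operations. For reference, a minimal sketch of how its plan() entry point was typically invoked under 0.17.11, based only on the signature and docstring shown; the instruction text and keyword choices are illustrative, not taken from the package:

# Hedged usage sketch for the removed 0.17.11 API; not part of the diff above.
import asyncio

from lionagi.fields.instruct import Instruct          # also removed in 0.18.1
from lionagi.operations.plan.plan import plan          # removed in 0.18.1


async def main():
    result = await plan(
        Instruct(instruction="Outline a migration from 0.17.x to 0.18.x"),
        num_steps=3,                      # must be <= 5, per the validation above
        auto_run=True,                    # expand each planned step
        auto_execute=True,                # run the refined steps
        execution_strategy="concurrent",  # one of the four strategies listed above
    )
    print(result.plan)     # refined Instruct steps
    print(result.execute)  # InstructResponse objects when auto_execute=True


asyncio.run(main())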
@@ -1,25 +0,0 @@
- # Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
- # SPDX-License-Identifier: Apache-2.0
-
- PLAN_PROMPT = """
- Develop a high-level plan containing {num_steps} distinct steps. Each step must:
- 1. Represent a clear milestone or phase.
- 2. Follow a logical sequence, respecting inter-step dependencies.
- 3. Differ clearly from other steps.
- 4. Have measurable completion criteria.
- 5. Be open to further breakdown if needed.
-
- Keep each step concise yet actionable, ensuring the overall plan remains coherent.
- """
-
- EXPANSION_PROMPT = """
- Transform each high-level plan step into detailed, executable actions. For every step:
-
- 1. Keep actions atomic, verifiable, and clearly scoped.
- 2. Include essential context and preconditions.
- 3. Define expected outcomes, success criteria, and validations.
- 4. Respect sequential dependencies and error handling.
- 5. Provide all necessary parameters and specify outputs.
-
- Ensure each action is self-contained yet fits within the larger plan.
- """
@@ -1,45 +0,0 @@
- # Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
- # SPDX-License-Identifier: Apache-2.0
-
- from typing import TYPE_CHECKING
-
- from lionagi.fields.instruct import Instruct
-
- if TYPE_CHECKING:
-     from lionagi.session.session import Branch, Session
-
-
- def prepare_session(
-     session: "Session" = None,
-     branch: "Branch" = None,
-     branch_kwargs=None,
- ) -> tuple["Session", "Branch"]:
-     from lionagi.session.session import Branch, Session
-
-     if session is not None:
-         if branch is not None:
-             branch: "Branch" = session.branches[branch]
-         else:
-             branch = session.new_branch(**(branch_kwargs or {}))
-     else:
-         session = Session()
-         if isinstance(branch, Branch):
-             session.branches.include(branch)
-             session.default_branch = branch
-         if branch is None:
-             branch = session.new_branch(**(branch_kwargs or {}))
-
-     return session, branch
-
-
- def prepare_instruct(instruct: Instruct | dict, prompt: str):
-     if isinstance(instruct, Instruct):
-         instruct = instruct.to_dict()
-     if not isinstance(instruct, dict):
-         raise ValueError(
-             "instruct needs to be an InstructModel object or a dictionary of valid parameters"
-         )
-
-     guidance = instruct.get("guidance", "")
-     instruct["guidance"] = f"\n{prompt}\n{guidance}"
-     return instruct
@@ -1,2 +0,0 @@
- # Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
- # SPDX-License-Identifier: Apache-2.0
@@ -1,85 +0,0 @@
- # Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
- # SPDX-License-Identifier: Apache-2.0
-
- from typing import Any, Literal
-
- from pydantic import ConfigDict, Field
- from pydantic_core import PydanticUndefined
-
- from lionagi.utils import UNDEFINED
-
- from ..generic.element import Element
-
-
- class BaseForm(Element):
-     """
-     A minimal base form class to store fields and define output logic.
-     Typically, you'll inherit from this for domain-specific forms.
-     """
-
-     model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
-
-     # A short "assignment" describing input->output
-     assignment: str | None = Field(
-         default=None,
-         description="A small DSL describing transformation, e.g. 'a,b -> c'.",
-     )
-     # Which fields are produced as 'final' or 'required' outputs.
-     output_fields: list[str] = Field(
-         default_factory=list,
-         description="Which fields are considered mandatory outputs.",
-     )
-     # Whether None counts as valid or incomplete
-     none_as_valid: bool = Field(
-         default=False,
-         description="If True, None is accepted as a valid value for completion checks.",
-     )
-     has_processed: bool = Field(
-         default=False,
-         description="Marks if the form is considered completed or 'processed'.",
-     )
-
-     def is_completed(self) -> bool:
-         """Check if all required output fields are set (and not UNDEFINED/None if not allowed)."""
-         missing = self.check_completeness()
-         return not missing
-
-     def check_completeness(
-         self, how: Literal["raise", "return_missing"] = "return_missing"
-     ) -> list[str]:
-         """
-         Return a list of any 'required' output fields that are missing or invalid.
-         If how='raise', raise an exception if missing any.
-         """
-         invalid_vals = [UNDEFINED, PydanticUndefined]
-         if not self.none_as_valid:
-             invalid_vals.append(None)
-
-         missing = []
-         for f in self.output_fields:
-             val = getattr(self, f, UNDEFINED)
-             if val in invalid_vals:
-                 missing.append(f)
-
-         if missing and how == "raise":
-             raise ValueError(f"Form missing required fields: {missing}")
-         return missing
-
-     def get_results(self, valid_only: bool = False) -> dict[str, Any]:
-         """
-         Return a dict of all `output_fields`, optionally skipping invalid/None if `valid_only`.
-         """
-         results = {}
-         invalid_vals = [UNDEFINED, PydanticUndefined]
-         if not self.none_as_valid:
-             invalid_vals.append(None)
-
-         for f in self.output_fields:
-             val = getattr(self, f, UNDEFINED)
-             if valid_only and val in invalid_vals:
-                 continue
-             results[f] = val
-         return results
-
-
- # File: lionagi/protocols/forms/base.py
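The removed BaseForm above determined completion by checking every name in output_fields against UNDEFINED, PydanticUndefined, and (unless none_as_valid) None. A minimal sketch of that contract as it stood in 0.17.11; the SummaryForm subclass and its fields are hypothetical, invented only to illustrate the methods shown:

# Hypothetical subclass illustrating the removed BaseForm completion contract.
from lionagi.protocols.forms.base import BaseForm  # removed in 0.18.1


class SummaryForm(BaseForm):          # hypothetical example form
    source_text: str | None = None
    summary: str | None = None


form = SummaryForm(output_fields=["summary"])
print(form.is_completed())            # False: 'summary' is still None
print(form.check_completeness())      # ['summary']

form.summary = "Two-line digest of the source text."
print(form.is_completed())            # True
print(form.get_results())             # {'summary': 'Two-line digest of the source text.'}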
@@ -1,79 +0,0 @@
- # Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
- # SPDX-License-Identifier: Apache-2.0
-
- from pydantic import BaseModel, ConfigDict, Field
-
-
- class FlowStep(BaseModel):
-     """
-     A minimal 'step' describing one transformation from some input fields to some output fields.
-     """
-
-     model_config = ConfigDict(arbitrary_types_allowed=True)
-
-     name: str = Field(..., description="Identifier for the step.")
-     inputs: list[str] = Field(
-         ..., description="Which fields are needed for this step."
-     )
-     outputs: list[str] = Field(
-         ..., description="Which fields are produced by this step."
-     )
-     description: str | None = None # optional text doc
-
-
- class FlowDefinition(BaseModel):
-     """
-     A minimal DSL-based multi-step flow, e.g. 'a,b->c; c->d' to yield two steps.
-     """
-
-     model_config = ConfigDict(arbitrary_types_allowed=True)
-
-     steps: list[FlowStep] = Field(default_factory=list)
-
-     def parse_flow_string(self, flow_str: str):
-         """
-         Parse a string like 'a,b->c; c->d' into multiple FlowSteps.
-         We'll store them in self.steps in order.
-         """
-         if not flow_str:
-             return
-         segments = [seg.strip() for seg in flow_str.split(";") if seg.strip()]
-         for i, seg in enumerate(segments):
-             # seg might be like 'a,b->c' or 'a->b, c' etc
-             if "->" not in seg:
-                 raise ValueError(f"Invalid DSL segment (no '->'): '{seg}'")
-             ins_str, outs_str = seg.split("->", 1)
-             inputs = [x.strip() for x in ins_str.split(",") if x.strip()]
-             outputs = [y.strip() for y in outs_str.split(",") if y.strip()]
-             step = FlowStep(
-                 name=f"step_{i + 1}", inputs=inputs, outputs=outputs
-             )
-             self.steps.append(step)
-
-     def get_required_fields(self) -> set[str]:
-         """
-         Return all fields that are used as inputs in the earliest steps but not produced by prior steps.
-         This is a minimal approach; or we can do more advanced logic if needed.
-         """
-         produced = set()
-         required = set()
-         for step in self.steps:
-             # anything not yet produced is needed
-             for i in step.inputs:
-                 if i not in produced:
-                     required.add(i)
-             for o in step.outputs:
-                 produced.add(o)
-         return required
-
-     def get_produced_fields(self) -> set[str]:
-         """
-         Return all fields that eventually get produced by any step.
-         """
-         result = set()
-         for st in self.steps:
-             result.update(st.outputs)
-         return result
-
-
- # File: lionagi/protocols/forms/flow.py
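Similarly, the removed FlowDefinition parsed a small "a,b->c; c->d" DSL into ordered FlowStep objects. A minimal sketch of that behaviour under 0.17.11, based on the code above; the field names in the flow string are illustrative:

# Hedged usage sketch for the removed 0.17.11 FlowDefinition DSL.
from lionagi.protocols.forms.flow import FlowDefinition  # removed in 0.18.1

flow = FlowDefinition()
flow.parse_flow_string("topic,notes->draft; draft->summary")

print([s.name for s in flow.steps])  # ['step_1', 'step_2']
print(flow.get_required_fields())    # inputs not produced by an earlier step: {'topic', 'notes'}
print(flow.get_produced_fields())    # {'draft', 'summary'}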