lionagi 0.5.2__py3-none-any.whl → 0.5.4__py3-none-any.whl
- lionagi/__init__.py +4 -1
- lionagi/core/session/branch.py +3 -2
- lionagi/core/session/session.py +3 -3
- lionagi/libs/func/async_calls/alcall.py +7 -0
- lionagi/operations/brainstorm/brainstorm.py +319 -94
- lionagi/operations/brainstorm/prompt.py +8 -1
- lionagi/operations/plan/plan.py +280 -67
- lionagi/operations/plan/prompt.py +17 -16
- lionagi/protocols/operatives/instruct.py +0 -6
- lionagi/protocols/operatives/prompts.py +50 -203
- lionagi/version.py +1 -1
- {lionagi-0.5.2.dist-info → lionagi-0.5.4.dist-info}/METADATA +2 -2
- {lionagi-0.5.2.dist-info → lionagi-0.5.4.dist-info}/RECORD +15 -15
- {lionagi-0.5.2.dist-info → lionagi-0.5.4.dist-info}/WHEEL +0 -0
- {lionagi-0.5.2.dist-info → lionagi-0.5.4.dist-info}/licenses/LICENSE +0 -0
lionagi/operations/plan/plan.py
CHANGED
@@ -2,10 +2,11 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-
 from lionagi.core.session.branch import Branch
 from lionagi.core.session.session import Session
 from lionagi.core.typing import ID, Any, BaseModel, Literal
+from lionagi.libs.func.types import alcall
+from lionagi.libs.parse import to_flat_list
 from lionagi.protocols.operatives.instruct import (
     INSTRUCT_FIELD_MODEL,
     Instruct,
@@ -15,13 +16,45 @@ from lionagi.protocols.operatives.instruct import (
 from ..utils import prepare_instruct, prepare_session
 from .prompt import EXPANSION_PROMPT, PLAN_PROMPT
 
+# ---------------------------------------------------------------------
+# Data Model
+# ---------------------------------------------------------------------
+
 
 class PlanOperation(BaseModel):
+    """
+    Stores all relevant outcomes for a multi-step Plan:
+        * initial: The result of the initial plan prompt
+        * plan: A list of plan steps (Instruct objects) generated from the initial planning
+        * execute: Any responses from executing those plan steps
+    """
+
     initial: Any
     plan: list[Instruct] | None = None
     execute: list[InstructResponse] | None = None
 
 
+# ---------------------------------------------------------------------
+# Utilities
+# ---------------------------------------------------------------------
+
+
+def chunked(iterable, n):
+    """
+    Yield successive n-sized chunks from an iterable.
+    Example:
+        >>> list(chunked([1,2,3,4,5], 2))
+        [[1,2],[3,4],[5]]
+    """
+    for i in range(0, len(iterable), n):
+        yield iterable[i : i + n]
+
+
+# ---------------------------------------------------------------------
+# Single-Step Runner
+# ---------------------------------------------------------------------
+
+
 async def run_step(
     ins: Instruct,
     session: Session,
@@ -29,33 +62,41 @@ async def run_step(
     verbose: bool = True,
     **kwargs: Any,
 ) -> Any:
-    """
+    """
+    Execute a single step of the plan with an 'expansion' or guidance prompt.
 
     Args:
         ins: The instruction model for the step.
-        session: The current session.
-        branch: The branch to operate on.
+        session: The current session context.
+        branch: The branch to operate on for this step.
         verbose: Whether to enable verbose output.
-        **kwargs: Additional keyword arguments.
+        **kwargs: Additional keyword arguments passed to the branch operation.
 
     Returns:
-        The result of the branch operation.
+        The result of the branch operation (which may contain more instructions).
     """
     if verbose:
-
+        snippet = (
             ins.instruction[:100] + "..."
             if len(ins.instruction) > 100
             else ins.instruction
         )
-        print(f"Further planning: {
+        print(f"Further planning: {snippet}")
 
+    # Incorporate the EXPANSION_PROMPT into guidance
     config = {**ins.model_dump(), **kwargs}
-
-    config["guidance"] = EXPANSION_PROMPT
+    guidance_text = config.pop("guidance", "")
+    config["guidance"] = f"{EXPANSION_PROMPT}\n{guidance_text}"
+
+    # Run the step
+    result = await branch.operate(**config)
+    branch.msgs.logger.dump()  # Dump logs if needed
+    return result
 
-
-
-
+
+# ---------------------------------------------------------------------
+# Main Plan Function (with Multiple Execution Strategies)
+# ---------------------------------------------------------------------
 
 
 async def plan(
@@ -65,108 +106,280 @@ async def plan(
     branch: Branch | ID.Ref | None = None,
     auto_run: bool = True,
     auto_execute: bool = False,
-    execution_strategy: Literal[
+    execution_strategy: Literal[
+        "sequential",
+        "concurrent",
+        "sequential_concurrent_chunk",
+        "concurrent_sequential_chunk",
+    ] = "sequential",
     execution_kwargs: dict[str, Any] | None = None,
     branch_kwargs: dict[str, Any] | None = None,
     return_session: bool = False,
     verbose: bool = True,
     **kwargs: Any,
-) -> PlanOperation | tuple[
-    """
+) -> PlanOperation | tuple[PlanOperation, Session]:
+    """
+    Create and optionally execute a multi-step plan with up to `num_steps`.
+
+    Steps:
+      1. Generate an initial plan with up to `num_steps`.
+      2. Optionally (auto_run=True) expand on each planned step
+         to refine or further clarify them.
+      3. Optionally (auto_execute=True) execute those refined steps
+         according to `execution_strategy`.
 
     Args:
-        instruct:
-        num_steps:
-        session:
-        branch:
-        auto_run: If True, automatically run the steps.
-
-
-
-
+        instruct: Initial instruction or a dict describing it.
+        num_steps: Maximum number of plan steps (must be <= 5).
+        session: An existing Session, or None to create a new one.
+        branch: An existing Branch, or None to create a new one.
+        auto_run: If True, automatically run the intermediate plan steps.
+        auto_execute: If True, automatically execute the fully refined steps.
+        execution_strategy:
+            - "sequential" (default) runs steps one by one
+            - "concurrent" runs all steps in parallel
+            - "sequential_concurrent_chunk" processes chunks sequentially, each chunk in parallel
+            - "concurrent_sequential_chunk" processes all chunks in parallel, each chunk sequentially
+        execution_kwargs: Extra kwargs used during execution calls.
+        branch_kwargs: Extra kwargs for branch/session creation.
+        return_session: Whether to return (PlanOperation, Session) instead of just PlanOperation.
+        verbose: If True, prints verbose logs.
+        **kwargs: Additional arguments for the initial plan operation.
 
     Returns:
-
+        A PlanOperation object containing:
+          - initial plan
+          - (optional) plan expansions
+          - (optional) execution responses
+        Optionally returns the session as well, if `return_session=True`.
     """
+
+    # -----------------------------------------------------------------
+    # 0. Basic Validation & Setup
+    # -----------------------------------------------------------------
     if num_steps > 5:
         raise ValueError("Number of steps must be 5 or less")
 
     if verbose:
         print(f"Planning execution with {num_steps} steps...")
 
+    # Ensure the correct field model
     field_models: list = kwargs.get("field_models", [])
     if INSTRUCT_FIELD_MODEL not in field_models:
         field_models.append(INSTRUCT_FIELD_MODEL)
     kwargs["field_models"] = field_models
+
+    # Prepare session/branch
     session, branch = prepare_session(session, branch, branch_kwargs)
-    execute_branch: Branch = session.split(
-
-
-    )
+    execute_branch: Branch = session.split(
+        branch
+    )  # a separate branch for execution
 
-
-
+    # -----------------------------------------------------------------
+    # 1. Run the Initial Plan Prompt
+    # -----------------------------------------------------------------
+    plan_prompt = PLAN_PROMPT.format(num_steps=num_steps)
+    instruct = prepare_instruct(instruct, plan_prompt)
+    initial_res = await branch.operate(**instruct, **kwargs)
+
+    # Wrap initial result in the PlanOperation
+    out = PlanOperation(initial=initial_res)
 
     if verbose:
         print("Initial planning complete. Starting step planning...")
 
+    # If we aren't auto-running the steps, just return the initial plan
     if not auto_run:
-        if return_session
-            return res1, session
-        return res1
+        return (out, session) if return_session else out
 
+    # -----------------------------------------------------------------
+    # 2. Expand Each Step (auto_run=True)
+    # -----------------------------------------------------------------
     results = []
-    if hasattr(
-        instructs: list[Instruct] =
-        for i,
+    if hasattr(initial_res, "instruct_models"):
+        instructs: list[Instruct] = initial_res.instruct_models
+        for i, step_ins in enumerate(instructs, start=1):
             if verbose:
                 print(f"\n----- Planning step {i}/{len(instructs)} -----")
-
-
+            expanded_res = await run_step(
+                step_ins, session, branch, verbose=verbose, **kwargs
             )
-            results.append(
+            results.append(expanded_res)
 
         if verbose:
-            print("\nAll planning
+            print("\nAll planning steps expanded/refined successfully!")
+
+    # Gather all newly created plan instructions
+    refined_plans = []
+    for step_result in results:
+        if hasattr(step_result, "instruct_models"):
+            for model in step_result.instruct_models:
+                if model and model not in refined_plans:
+                    refined_plans.append(model)
 
-
-    for res in results:
-        if hasattr(res, "instruct_models"):
-            for i in res.instruct_models:
-                if i and i not in all_plans:
-                    all_plans.append(i)
-    out.plan = all_plans
+    out.plan = refined_plans
 
+    # -----------------------------------------------------------------
+    # 3. Execute the Plan Steps (auto_execute=True)
+    # -----------------------------------------------------------------
     if auto_execute:
         if verbose:
-            print("\nStarting execution of all steps...")
-
+            print("\nStarting execution of all plan steps...")
+
+        # We now handle multiple strategies:
         match execution_strategy:
+
+            # ---------------------------------------------------------
+            # Strategy A: SEQUENTIAL
+            # ---------------------------------------------------------
             case "sequential":
-
+                seq_results = []
+                for i, plan_step in enumerate(refined_plans, start=1):
+                    if verbose:
+                        snippet = (
+                            plan_step.instruction[:100] + "..."
+                            if len(plan_step.instruction) > 100
+                            else plan_step.instruction
+                        )
+                        print(
+                            f"\n------ Executing step {i}/{len(refined_plans)} ------"
+                        )
+                        print(f"Instruction: {snippet}")
+
+                    step_response = await execute_branch.instruct(
+                        plan_step, **(execution_kwargs or {})
+                    )
+                    seq_results.append(
+                        InstructResponse(
+                            instruct=plan_step, response=step_response
+                        )
+                    )
+
+                out.execute = seq_results
+                if verbose:
+                    print("\nAll steps executed successfully (sequential)!")
+
+            # ---------------------------------------------------------
+            # Strategy B: CONCURRENT
+            # ---------------------------------------------------------
+            case "concurrent":
+
+                async def execute_step_concurrently(plan_step: Instruct):
+                    if verbose:
+                        snippet = (
+                            plan_step.instruction[:100] + "..."
+                            if len(plan_step.instruction) > 100
+                            else plan_step.instruction
+                        )
+                        print(f"\n------ Executing step (concurrently) ------")
+                        print(f"Instruction: {snippet}")
+                    local_branch = session.split(execute_branch)
+                    resp = await local_branch.instruct(
+                        plan_step, **(execution_kwargs or {})
+                    )
+                    return InstructResponse(instruct=plan_step, response=resp)
+
+                # Launch all steps in parallel
+                concurrent_res = await alcall(
+                    refined_plans, execute_step_concurrently
+                )
+                out.execute = concurrent_res
+                if verbose:
+                    print("\nAll steps executed successfully (concurrent)!")
+
+            # ---------------------------------------------------------
+            # Strategy C: SEQUENTIAL_CONCURRENT_CHUNK
+            #   - process plan steps in chunks (one chunk after another),
+            #   - each chunk’s steps run in parallel.
+            # ---------------------------------------------------------
+            case "sequential_concurrent_chunk":
+                chunk_size = (execution_kwargs or {}).get("chunk_size", 5)
+                all_exec_responses = []
+
+                async def execute_chunk_concurrently(
+                    sub_steps: list[Instruct],
+                ):
                     if verbose:
                         print(
-                            f"\n
+                            f"\n--- Executing a chunk of size {len(sub_steps)} concurrently ---"
                         )
-
-
-
-
+
+                    async def _execute(plan_step: Instruct):
+                        local_branch = session.split(execute_branch)
+                        resp = await local_branch.instruct(
+                            plan_step, **(execution_kwargs or {})
+                        )
+                        return InstructResponse(
+                            instruct=plan_step, response=resp
                         )
-
-
-
+
+                    # run each chunk in parallel
+                    return await alcall(sub_steps, _execute)
+
+                # process each chunk sequentially
+                for chunk in chunked(refined_plans, chunk_size):
+                    chunk_responses = await execute_chunk_concurrently(chunk)
+                    all_exec_responses.extend(chunk_responses)
+
+                out.execute = all_exec_responses
+                if verbose:
+                    print(
+                        "\nAll steps executed successfully (sequential concurrent chunk)!"
                     )
-
-
-
+
+            # ---------------------------------------------------------
+            # Strategy D: CONCURRENT_SEQUENTIAL_CHUNK
+            #   - split plan steps into chunks,
+            #   - run all chunks in parallel,
+            #   - but each chunk’s steps run sequentially.
+            # ---------------------------------------------------------
+            case "concurrent_sequential_chunk":
+                chunk_size = (execution_kwargs or {}).get("chunk_size", 5)
+                all_chunks = list(chunked(refined_plans, chunk_size))
+
+                async def execute_chunk_sequentially(
+                    sub_steps: list[Instruct],
+                ):
+                    chunk_result = []
+                    local_branch = session.split(execute_branch)
+                    for plan_step in sub_steps:
+                        if verbose:
+                            snippet = (
+                                plan_step.instruction[:100] + "..."
+                                if len(plan_step.instruction) > 100
+                                else plan_step.instruction
+                            )
+                            print(
+                                f"\n--- Executing step (sequential in chunk) ---\nInstruction: {snippet}"
+                            )
+                        resp = await local_branch.instruct(
+                            plan_step, **(execution_kwargs or {})
+                        )
+                        chunk_result.append(
+                            InstructResponse(instruct=plan_step, response=resp)
+                        )
+                    return chunk_result
+
+                # run all chunks in parallel, each chunk sequentially
+                parallel_chunk_results = await alcall(
+                    all_chunks,
+                    execute_chunk_sequentially,
+                    flatten=True,
+                    dropna=True,
+                )
+
+                out.execute = parallel_chunk_results
                 if verbose:
-                    print(
+                    print(
+                        "\nAll steps executed successfully (concurrent sequential chunk)!"
+                    )
+
             case _:
                 raise ValueError(
                     f"Invalid execution strategy: {execution_strategy}"
                 )
 
-
-
-
+    # -----------------------------------------------------------------
+    # 4. Final Return
+    # -----------------------------------------------------------------
+    return (out, session) if return_session else out
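Taken together, 0.5.4 turns plan() into a four-strategy runner. Below is a minimal usage sketch, assuming the 0.5.4 signature shown in this diff and a session whose branches are already configured with a working model provider; the instruction text and the chosen strategy are illustrative only, not taken from the package.

import asyncio

from lionagi.operations.plan.plan import plan  # module path as listed in the file list above


async def main():
    # Generate a 3-step plan, expand each step (auto_run), then execute the
    # refined steps in parallel (auto_execute + "concurrent" strategy).
    result = await plan(
        instruct={"instruction": "Draft a data-cleaning pipeline"},  # hypothetical task
        num_steps=3,
        auto_run=True,
        auto_execute=True,
        execution_strategy="concurrent",
        verbose=True,
    )
    print(result.initial)   # raw response from the initial PLAN_PROMPT call
    print(result.plan)      # refined Instruct steps (None if auto_run=False)
    print(result.execute)   # InstructResponse objects (None if auto_execute=False)


asyncio.run(main())

Per the diff, the two chunked strategies additionally read chunk_size from execution_kwargs (defaulting to 5), and return_session=True switches the return value to a (PlanOperation, Session) tuple.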
lionagi/operations/plan/prompt.py
CHANGED
@@ -1,21 +1,22 @@
 PLAN_PROMPT = """
-Develop a high-level plan
-1. Represent a
-2.
-3.
-4. Have measurable completion criteria
-5. Be
+Develop a high-level plan containing {num_steps} distinct steps. Each step must:
+1. Represent a clear milestone or phase.
+2. Follow a logical sequence, respecting inter-step dependencies.
+3. Differ clearly from other steps.
+4. Have measurable completion criteria.
+5. Be open to further breakdown if needed.
+
+Keep each step concise yet actionable, ensuring the overall plan remains coherent.
 """
 
 EXPANSION_PROMPT = """
-
-
-
-
-
-
-
-
--
-- Define expected outputs
+Transform each high-level plan step into detailed, executable actions. For every step:
+
+1. Keep actions atomic, verifiable, and clearly scoped.
+2. Include essential context and preconditions.
+3. Define expected outcomes, success criteria, and validations.
+4. Respect sequential dependencies and error handling.
+5. Provide all necessary parameters and specify outputs.
+
+Ensure each action is self-contained yet fits within the larger plan.
 """
lionagi/protocols/operatives/instruct.py
CHANGED
@@ -14,11 +14,8 @@ from lionagi.libs.parse import validate_boolean
 
 from .prompts import (
     actions_field_description,
-    context_examples,
     context_field_description,
-    guidance_examples,
     guidance_field_description,
-    instruction_examples,
     instruction_field_description,
     reason_field_description,
 )
@@ -62,7 +59,6 @@ INSTRUCTION_FIELD = FieldModel(
     default=None,
     title="Primary Instruction",
     description=instruction_field_description,
-    examples=instruction_examples,
     validator=validate_instruction,
     validator_kwargs={"mode": "before"},
 )
@@ -73,7 +69,6 @@ GUIDANCE_FIELD = FieldModel(
     default=None,
     title="Execution Guidance",
     description=guidance_field_description,
-    examples=guidance_examples,
 )
 
 CONTEXT_FIELD = FieldModel(
@@ -82,7 +77,6 @@ CONTEXT_FIELD = FieldModel(
     default=None,
     title="Task Context",
     description=context_field_description,
-    examples=context_examples,
 )
 
 REASON_FIELD = FieldModel(