markdown-flow 0.2.17__tar.gz → 0.2.18__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of markdown-flow has been flagged as potentially problematic.
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/PKG-INFO +1 -1
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow/__init__.py +5 -5
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow/core.py +44 -44
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow/llm.py +4 -5
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow.egg-info/PKG-INFO +1 -1
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/tests/test_preserved_simple.py +4 -5
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/LICENSE +0 -0
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/README.md +0 -0
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow/constants.py +0 -0
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow/enums.py +0 -0
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow/exceptions.py +0 -0
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow/models.py +0 -0
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow/utils.py +0 -0
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow.egg-info/SOURCES.txt +0 -0
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow.egg-info/dependency_links.txt +0 -0
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow.egg-info/top_level.txt +0 -0
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/pyproject.toml +0 -0
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/setup.cfg +0 -0
- {markdown_flow-0.2.17 → markdown_flow-0.2.18}/tests/test_dynamic_interaction.py +0 -0
{markdown_flow-0.2.17 → markdown_flow-0.2.18}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: markdown-flow
-Version: 0.2.17
+Version: 0.2.18
 Summary: An agent library designed to parse and process MarkdownFlow documents
 Project-URL: Homepage, https://github.com/ai-shifu/markdown-flow-agent-py
 Project-URL: Bug Tracker, https://github.com/ai-shifu/markdown-flow-agent-py/issues
{markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow/__init__.py

@@ -32,12 +32,12 @@ Basic Usage:
     blocks = mf.get_all_blocks()

     # Process blocks using unified interface
-    result = await mf.process(0, variables={'name': 'John'}, mode=ProcessMode.COMPLETE)
+    result = mf.process(0, variables={'name': 'John'}, mode=ProcessMode.COMPLETE)

     # Different processing modes
-    prompt_result = await mf.process(0, mode=ProcessMode.PROMPT_ONLY)
-    complete_result = await mf.process(0, mode=ProcessMode.COMPLETE)
-    stream_result = await mf.process(0, mode=ProcessMode.STREAM)
+    prompt_result = mf.process(0, mode=ProcessMode.PROMPT_ONLY)
+    complete_result = mf.process(0, mode=ProcessMode.COMPLETE)
+    stream_result = mf.process(0, mode=ProcessMode.STREAM)

 Variable System:
     - {{variable}} - Regular variables, replaced with actual values
@@ -83,4 +83,4 @@ __all__ = [
     "replace_variables_in_text",
 ]

-__version__ = "0.2.17"
+__version__ = "0.2.18"
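Taken together, the two `__init__.py` hunks drop the `await` from every docstring example and bump `__version__`. For orientation, here is a minimal sketch of the new synchronous usage; the constructor call and the `document` value are assumptions, since the hunks only show the `process()` lines:

```python
from markdown_flow import MarkdownFlow, ProcessMode

document = "..."  # some MarkdownFlow document text (assumed)
mf = MarkdownFlow(document)  # constructor shape assumed

blocks = mf.get_all_blocks()

# As of 0.2.18, no event loop is needed: process() is a plain method call.
result = mf.process(0, variables={"name": "John"}, mode=ProcessMode.COMPLETE)
print(result.content)
```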
{markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow/core.py

@@ -6,7 +6,7 @@ Refactored MarkdownFlow class with built-in LLM processing capabilities and unified interface.

 import json
 import re
-from collections.abc import AsyncGenerator
+from collections.abc import Generator
 from copy import copy
 from typing import Any

@@ -177,14 +177,14 @@ class MarkdownFlow:

     # Core unified interface

-    async def process(
+    def process(
         self,
         block_index: int,
         mode: ProcessMode = ProcessMode.COMPLETE,
         context: list[dict[str, str]] | None = None,
         variables: dict[str, str | list[str]] | None = None,
         user_input: dict[str, list[str]] | None = None,
-    ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    ):
         """
         Unified block processing interface.

@@ -196,7 +196,7 @@ class MarkdownFlow:
             user_input: User input (for interaction blocks)

         Returns:
-            LLMResult or AsyncGenerator[LLMResult, None]
+            LLMResult or Generator[LLMResult, None, None]
         """
         # Process document_prompt variable replacement
         if self._document_prompt:
@@ -205,31 +205,31 @@ class MarkdownFlow:
         block = self.get_block(block_index)

         if block.block_type == BlockType.CONTENT:
-            return await self._process_content(block_index, mode, context, variables)
+            return self._process_content(block_index, mode, context, variables)

         if block.block_type == BlockType.INTERACTION:
             if user_input is None:
                 # Render interaction content
-                return await self._process_interaction_render(block_index, mode, variables)
+                return self._process_interaction_render(block_index, mode, variables)
             # Process user input
-            return await self._process_interaction_input(block_index, user_input, mode, context, variables)
+            return self._process_interaction_input(block_index, user_input, mode, context, variables)

         if block.block_type == BlockType.PRESERVED_CONTENT:
             # Preserved content output as-is, no LLM call
-            return await self._process_preserved_content(block_index, variables)
+            return self._process_preserved_content(block_index, variables)

         # Handle other types as content
-        return await self._process_content(block_index, mode, context, variables)
+        return self._process_content(block_index, mode, context, variables)

     # Internal processing methods

-    async def _process_content(
+    def _process_content(
         self,
         block_index: int,
         mode: ProcessMode,
         context: list[dict[str, str]] | None,
         variables: dict[str, str | list[str]] | None,
-    ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    ):
         """Process content block."""
         # Build messages
         messages = self._build_content_messages(block_index, variables)
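With the `async` keywords removed, `process()` dispatches to plain synchronous helpers and, per the Returns note above, STREAM mode hands back an ordinary generator. A sketch of consuming it, assuming `mf` was constructed with an LLM provider attached:

```python
# STREAM mode returns Generator[LLMResult, None, None];
# each yielded LLMResult carries one chunk of text in .content.
for chunk in mf.process(0, mode=ProcessMode.STREAM):
    print(chunk.content, end="", flush=True)
```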
@@ -241,20 +241,20 @@ class MarkdownFlow:
             if not self._llm_provider:
                 raise ValueError(LLM_PROVIDER_REQUIRED_ERROR)

-            content = await self._llm_provider.complete(messages)
+            content = self._llm_provider.complete(messages)
             return LLMResult(content=content, prompt=messages[-1]["content"])

         if mode == ProcessMode.STREAM:
             if not self._llm_provider:
                 raise ValueError(LLM_PROVIDER_REQUIRED_ERROR)

-            async def stream_generator():
-                async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+            def stream_generator():
+                for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
                     yield LLMResult(content=chunk, prompt=messages[-1]["content"])

             return stream_generator()

-    async def _process_preserved_content(self, block_index: int, variables: dict[str, str | list[str]] | None) -> LLMResult:
+    def _process_preserved_content(self, block_index: int, variables: dict[str, str | list[str]] | None) -> LLMResult:
         """Process preserved content block, output as-is without LLM call."""
         block = self.get_block(block_index)

@@ -266,7 +266,7 @@ class MarkdownFlow:

         return LLMResult(content=content)

-    async def _process_interaction_render(self, block_index: int, mode: ProcessMode, variables: dict[str, str | list[str]] | None = None):
+    def _process_interaction_render(self, block_index: int, mode: ProcessMode, variables: dict[str, str | list[str]] | None = None):
         """Process interaction content rendering."""
         block = self.get_block(block_index)

@@ -299,7 +299,7 @@ class MarkdownFlow:
         if not self._llm_provider:
             return LLMResult(content=processed_block.content)  # Fallback processing

-        rendered_question = await self._llm_provider.complete(messages)
+        rendered_question = self._llm_provider.complete(messages)
         rendered_content = self._reconstruct_interaction_content(processed_block.content, rendered_question)

         return LLMResult(
@@ -316,7 +316,7 @@ class MarkdownFlow:
             # For interaction blocks, return reconstructed content (one-time output)
             rendered_content = self._reconstruct_interaction_content(processed_block.content, question_text or "")

-            async def stream_generator():
+            def stream_generator():
                 yield LLMResult(
                     content=rendered_content,
                     prompt=messages[-1]["content"],
@@ -325,9 +325,9 @@ class MarkdownFlow:
             return stream_generator()

         # With LLM provider, collect full response then return once
-        async def stream_generator():
+        def stream_generator():
             full_response = ""
-            async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+            for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
                 full_response += chunk

             # Reconstruct final interaction content
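This generator keeps the collect-then-emit shape of the old coroutine: it drains the provider stream into `full_response` and only afterwards yields a single reconstructed result. A standalone sketch of that pattern, with illustrative names:

```python
from collections.abc import Generator, Iterable


def collect_then_emit(chunks: Iterable[str]) -> Generator[str, None, None]:
    """Drain every chunk first, then yield one combined value."""
    full_response = ""
    for piece in chunks:
        full_response += piece  # nothing is emitted to the caller yet
    yield full_response  # single final emission


assert list(collect_then_emit(["a", "b", "c"])) == ["abc"]
```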
@@ -341,14 +341,14 @@ class MarkdownFlow:

         return stream_generator()

-    async def _process_interaction_input(
+    def _process_interaction_input(
         self,
         block_index: int,
         user_input: dict[str, list[str]],
         mode: ProcessMode,
         context: list[dict[str, str]] | None,
         variables: dict[str, str | list[str]] | None = None,
-    ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    ) -> LLMResult | Generator[LLMResult, None, None]:
         """Process interaction user input."""
         block = self.get_block(block_index)
         target_variable = block.variables[0] if block.variables else "user_input"
@@ -356,7 +356,7 @@ class MarkdownFlow:
         # Basic validation
         if not user_input or not any(values for values in user_input.values()):
             error_msg = INPUT_EMPTY_ERROR
-            return await self._render_error(error_msg, mode)
+            return self._render_error(error_msg, mode)

         # Get the target variable value from user_input
         target_values = user_input.get(target_variable, [])
@@ -370,7 +370,7 @@ class MarkdownFlow:

         if "error" in parse_result:
             error_msg = INTERACTION_PARSE_ERROR.format(error=parse_result["error"])
-            return await self._render_error(error_msg, mode)
+            return self._render_error(error_msg, mode)

         interaction_type = parse_result.get("type")

@@ -382,7 +382,7 @@ class MarkdownFlow:
             InteractionType.BUTTONS_MULTI_WITH_TEXT,
         ]:
             # All button types: validate user input against available buttons
-            return await self._process_button_validation(
+            return self._process_button_validation(
                 parse_result,
                 target_values,
                 target_variable,
@@ -415,16 +415,16 @@ class MarkdownFlow:
             },
         )
         error_msg = f"No input provided for variable '{target_variable}'"
-        return await self._render_error(error_msg, mode)
+        return self._render_error(error_msg, mode)

-    async def _process_button_validation(
+    def _process_button_validation(
         self,
         parse_result: dict[str, Any],
         target_values: list[str],
         target_variable: str,
         mode: ProcessMode,
         interaction_type: InteractionType,
-    ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    ) -> LLMResult | Generator[LLMResult, None, None]:
         """
         Simplified button validation with new input format.

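The signatures above pin down the new input format: `user_input` maps a variable name to a list of selected values, and the handler looks up `target_values` by the block's variable. A hedged example of a button-interaction call; the block index, variable name, and values are made up:

```python
# Hypothetical interaction block whose variable is "color".
result = mf.process(
    3,  # assumed index of an interaction block
    user_input={"color": ["Red"]},
    mode=ProcessMode.COMPLETE,
)
# On success, the validated values come back on the result.
print(result.variables)
```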
@@ -459,7 +459,7 @@ class MarkdownFlow:
             # Pure button mode requires input
             button_displays = [btn["display"] for btn in buttons]
             error_msg = f"Please select from: {', '.join(button_displays)}"
-            return await self._render_error(error_msg, mode)
+            return self._render_error(error_msg, mode)

         # Validate input values against available buttons
         valid_values = []
@@ -484,7 +484,7 @@ class MarkdownFlow:
         if invalid_values and not allow_text_input:
             button_displays = [btn["display"] for btn in buttons]
             error_msg = f"Invalid options: {', '.join(invalid_values)}. Please select from: {', '.join(button_displays)}"
-            return await self._render_error(error_msg, mode)
+            return self._render_error(error_msg, mode)

         # Success: return validated values
         return LLMResult(
@@ -499,13 +499,13 @@ class MarkdownFlow:
             },
         )

-    async def _process_llm_validation(
+    def _process_llm_validation(
         self,
         block_index: int,
         user_input: dict[str, list[str]],
         target_variable: str,
         mode: ProcessMode,
-    ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    ) -> LLMResult | Generator[LLMResult, None, None]:
         """Process LLM validation."""
         # Build validation messages
         messages = self._build_validation_messages(block_index, user_input, target_variable)
@@ -524,7 +524,7 @@ class MarkdownFlow:
             # Fallback processing, return variables directly
             return LLMResult(content="", variables=user_input)  # type: ignore[arg-type]

-        llm_response = await self._llm_provider.complete(messages)
+        llm_response = self._llm_provider.complete(messages)

         # Parse validation response and convert to LLMResult
         # Use joined target values for fallback; avoids JSON string injection
@@ -536,9 +536,9 @@ class MarkdownFlow:
         if not self._llm_provider:
             return LLMResult(content="", variables=user_input)  # type: ignore[arg-type]

-        async def stream_generator():
+        def stream_generator():
             full_response = ""
-            async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+            for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
                 full_response += chunk

             # Parse complete response and convert to LLMResult
@@ -552,7 +552,7 @@ class MarkdownFlow:

         return stream_generator()

-    async def _process_llm_validation_with_options(
+    def _process_llm_validation_with_options(
         self,
         block_index: int,
         user_input: dict[str, list[str]],
@@ -560,7 +560,7 @@ class MarkdownFlow:
         options: list[str],
         question: str,
         mode: ProcessMode,
-    ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    ) -> LLMResult | Generator[LLMResult, None, None]:
         """Process LLM validation with button options (third case)."""
         # Build special validation messages containing button option information
         messages = self._build_validation_messages_with_options(user_input, target_variable, options, question)
@@ -581,7 +581,7 @@ class MarkdownFlow:
             # Fallback processing, return variables directly
             return LLMResult(content="", variables=user_input)  # type: ignore[arg-type]

-        llm_response = await self._llm_provider.complete(messages)
+        llm_response = self._llm_provider.complete(messages)

         # Parse validation response and convert to LLMResult
         # Use joined target values for fallback; avoids JSON string injection
@@ -593,9 +593,9 @@ class MarkdownFlow:
         if not self._llm_provider:
             return LLMResult(content="", variables=user_input)  # type: ignore[arg-type]

-        async def stream_generator():
+        def stream_generator():
             full_response = ""
-            async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+            for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
                 full_response += chunk
                 # For validation scenario, don't output chunks in real-time, only final result

@@ -612,7 +612,7 @@ class MarkdownFlow:

         return stream_generator()

-    async def _render_error(self, error_message: str, mode: ProcessMode) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    def _render_error(self, error_message: str, mode: ProcessMode) -> LLMResult | Generator[LLMResult, None, None]:
         """Render user-friendly error message."""
         messages = self._build_error_render_messages(error_message)

@@ -626,15 +626,15 @@ class MarkdownFlow:
             if not self._llm_provider:
                 return LLMResult(content=error_message)  # Fallback processing

-            friendly_error = await self._llm_provider.complete(messages)
+            friendly_error = self._llm_provider.complete(messages)
             return LLMResult(content=friendly_error, prompt=messages[-1]["content"])

         if mode == ProcessMode.STREAM:
             if not self._llm_provider:
                 return LLMResult(content=error_message)

-            async def stream_generator():
-                async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+            def stream_generator():
+                for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
                     yield LLMResult(content=chunk, prompt=messages[-1]["content"])

             return stream_generator()
{markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow/llm.py

@@ -5,7 +5,6 @@ Provides LLM provider interfaces and related data models, supporting multiple providers.
 """

 from abc import ABC, abstractmethod
-from collections.abc import AsyncGenerator
 from dataclasses import dataclass
 from enum import Enum
 from typing import Any
@@ -39,7 +38,7 @@ class LLMProvider(ABC):
     """Abstract LLM provider interface."""

     @abstractmethod
-    async def complete(self, messages: list[dict[str, str]]) -> str:
+    def complete(self, messages: list[dict[str, str]]) -> str:
         """
         Non-streaming LLM call.

@@ -54,7 +53,7 @@ class LLMProvider(ABC):
         """

     @abstractmethod
-    async def stream(self, messages: list[dict[str, str]]) -> AsyncGenerator[str, None]:
+    def stream(self, messages: list[dict[str, str]]):
         """
         Streaming LLM call.

@@ -72,8 +71,8 @@ class LLMProvider(ABC):
 class NoLLMProvider(LLMProvider):
     """Empty LLM provider for prompt-only scenarios."""

-    async def complete(self, messages: list[dict[str, str]]) -> str:
+    def complete(self, messages: list[dict[str, str]]) -> str:
         raise NotImplementedError(NO_LLM_PROVIDER_ERROR)

-    async def stream(self, messages: list[dict[str, str]]):
+    def stream(self, messages: list[dict[str, str]]):
         raise NotImplementedError(NO_LLM_PROVIDER_ERROR)
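Since both abstract hooks are now synchronous, a concrete provider needs only a blocking `complete()` and a generator-style `stream()`. A minimal sketch against the interface above, with `fake_backend` standing in for a real LLM client:

```python
from markdown_flow.llm import LLMProvider


def fake_backend(messages: list[dict[str, str]]) -> list[str]:
    """Placeholder for a real client; echoes the last message's content."""
    return [messages[-1]["content"]]


class EchoProvider(LLMProvider):
    def complete(self, messages: list[dict[str, str]]) -> str:
        # Non-streaming call: return the full response text at once.
        return "".join(fake_backend(messages))

    def stream(self, messages: list[dict[str, str]]):
        # Streaming call: yield text chunks as plain strings.
        yield from fake_backend(messages)
```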
{markdown_flow-0.2.17 → markdown_flow-0.2.18}/markdown_flow.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: markdown-flow
-Version: 0.2.17
+Version: 0.2.18
 Summary: An agent library designed to parse and process MarkdownFlow documents
 Project-URL: Homepage, https://github.com/ai-shifu/markdown-flow-agent-py
 Project-URL: Bug Tracker, https://github.com/ai-shifu/markdown-flow-agent-py/issues
{markdown_flow-0.2.17 → markdown_flow-0.2.18}/tests/test_preserved_simple.py

@@ -14,7 +14,6 @@
 - Check that the LLM output contains no XML markers
 """

-import asyncio
 import os
 import sys

@@ -29,7 +28,7 @@ from markdown_flow import MarkdownFlow, ProcessMode  # noqa: E402
 from markdown_flow.llm import LLMResult  # noqa: E402


-async def test_preserved_output():
+def test_preserved_output():
     """Test the preserved-output feature"""
     print("\n" + "=" * 60)
     print("🔖 Preserved output test")
@@ -92,7 +91,7 @@ async def test_preserved_output():
     print("\n📝 Testing PROMPT_ONLY mode")
     print("-" * 60)

-    result_prompt_raw = await mf.process(
+    result_prompt_raw = mf.process(
         block_index=block_index,
         mode=ProcessMode.PROMPT_ONLY,
         variables=variables if variables else None,
@@ -134,7 +133,7 @@ async def test_preserved_output():
     print("\n📝 Testing COMPLETE mode")
     print("-" * 60)

-    result_complete_raw = await mf.process(
+    result_complete_raw = mf.process(
         block_index=block_index,
         mode=ProcessMode.COMPLETE,
         variables=variables if variables else None,
@@ -168,4 +167,4 @@ async def test_preserved_output():


 if __name__ == "__main__":
-    asyncio.run(test_preserved_output())
+    test_preserved_output()