markdown-flow 0.2.19.tar.gz → 0.2.30.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/PKG-INFO +45 -52
  2. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/README.md +44 -51
  3. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/markdown_flow/__init__.py +4 -4
  4. markdown_flow-0.2.30/markdown_flow/constants.py +337 -0
  5. markdown_flow-0.2.30/markdown_flow/core.py +1202 -0
  6. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/markdown_flow/llm.py +4 -3
  7. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/markdown_flow/models.py +1 -17
  8. markdown_flow-0.2.30/markdown_flow/parser/__init__.py +38 -0
  9. markdown_flow-0.2.30/markdown_flow/parser/code_fence_utils.py +190 -0
  10. markdown_flow-0.2.30/markdown_flow/parser/interaction.py +354 -0
  11. markdown_flow-0.2.30/markdown_flow/parser/json_parser.py +50 -0
  12. markdown_flow-0.2.30/markdown_flow/parser/output.py +215 -0
  13. markdown_flow-0.2.30/markdown_flow/parser/preprocessor.py +151 -0
  14. markdown_flow-0.2.30/markdown_flow/parser/validation.py +100 -0
  15. markdown_flow-0.2.30/markdown_flow/parser/variable.py +95 -0
  16. markdown_flow-0.2.30/markdown_flow/providers/__init__.py +16 -0
  17. markdown_flow-0.2.30/markdown_flow/providers/config.py +46 -0
  18. markdown_flow-0.2.30/markdown_flow/providers/openai.py +369 -0
  19. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/markdown_flow/utils.py +43 -43
  20. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/markdown_flow.egg-info/PKG-INFO +45 -52
  21. markdown_flow-0.2.30/markdown_flow.egg-info/SOURCES.txt +32 -0
  22. markdown_flow-0.2.30/tests/test_markdownflow_basic.py +232 -0
  23. markdown_flow-0.2.30/tests/test_parser_interaction.py +124 -0
  24. markdown_flow-0.2.30/tests/test_parser_variable.py +111 -0
  25. markdown_flow-0.2.30/tests/test_preprocessor.py +288 -0
  26. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/tests/test_preserved_simple.py +99 -5
  27. markdown_flow-0.2.19/markdown_flow/constants.py +0 -226
  28. markdown_flow-0.2.19/markdown_flow/core.py +0 -806
  29. markdown_flow-0.2.19/markdown_flow.egg-info/SOURCES.txt +0 -17
  30. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/LICENSE +0 -0
  31. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/markdown_flow/enums.py +0 -0
  32. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/markdown_flow/exceptions.py +0 -0
  33. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/markdown_flow.egg-info/dependency_links.txt +0 -0
  34. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/markdown_flow.egg-info/top_level.txt +0 -0
  35. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/pyproject.toml +0 -0
  36. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/setup.cfg +0 -0
  37. {markdown_flow-0.2.19 → markdown_flow-0.2.30}/tests/test_dynamic_interaction.py +0 -0
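The hunks below all reflect one API change: 0.2.30 replaces the async processing API (and `ProcessMode.PROMPT_ONLY`) with synchronous calls, and moves parsing utilities into a new `markdown_flow.parser` package. A minimal caller-side migration sketch, assembled from the README/PKG-INFO hunks in this diff; the echo provider and document text are illustrative, not part of the package:

```python
# Migration sketch for 0.2.19 -> 0.2.30, inferred from the hunks below.
# EchoProvider is a toy stand-in implementing the new sync, messages-based
# LLMProvider interface shown in the README diff.
from typing import Generator

from markdown_flow import LLMProvider, MarkdownFlow, ProcessMode


class EchoProvider(LLMProvider):
    """Toy provider for illustration only."""

    def complete(self, messages: list[dict[str, str]]) -> str:
        return messages[-1]["content"]

    def stream(self, messages: list[dict[str, str]]) -> Generator[str, None, None]:
        yield from messages[-1]["content"]


mf = MarkdownFlow("Explain {{topic}} to {{name}}.", llm_provider=EchoProvider())
variables = {"topic": "generators", "name": "Alice"}

# 0.2.19: result = await mf.process(0, mode=ProcessMode.COMPLETE, variables=variables)
result = mf.process(0, mode=ProcessMode.COMPLETE, variables=variables)
print(result.content)

# 0.2.19: async for chunk in mf.process(0, mode=ProcessMode.STREAM, ...)
for chunk in mf.process(0, mode=ProcessMode.STREAM, variables=variables):
    print(chunk.content, end="")
```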
{markdown_flow-0.2.19 → markdown_flow-0.2.30}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: markdown-flow
- Version: 0.2.19
+ Version: 0.2.30
  Summary: An agent library designed to parse and process MarkdownFlow documents
  Project-URL: Homepage, https://github.com/ai-shifu/markdown-flow-agent-py
  Project-URL: Bug Tracker, https://github.com/ai-shifu/markdown-flow-agent-py/issues
@@ -73,7 +73,7 @@ llm_provider = YourLLMProvider(api_key="your-key")
  mf = MarkdownFlow(document, llm_provider=llm_provider)

  # Process with different modes
- result = await mf.process(
+ result = mf.process(
      block_index=0,
      mode=ProcessMode.COMPLETE,
      variables={'name': 'Alice', 'level': 'Intermediate'}
@@ -84,7 +84,7 @@ result = await mf.process(

  ```python
  # Stream processing for real-time responses
- async for chunk in mf.process(
+ for chunk in mf.process(
      block_index=0,
      mode=ProcessMode.STREAM,
      variables={'name': 'Bob'}
@@ -122,7 +122,7 @@ user_input = {
      'skills': ['Python', 'JavaScript', 'Go']  # Multi-selection
  }

- result = await mf.process(
+ result = mf.process(
      block_index=1,  # Process skills interaction
      user_input=user_input,
      mode=ProcessMode.COMPLETE
@@ -148,13 +148,13 @@ class MarkdownFlow:
      def get_all_blocks(self) -> List[Block]: ...
      def extract_variables(self) -> Set[str]: ...

-     async def process(
+     def process(
          self,
          block_index: int,
          mode: ProcessMode = ProcessMode.COMPLETE,
          variables: Optional[Dict[str, str]] = None,
          user_input: Optional[str] = None
-     ) -> LLMResult: ...
+     ) -> LLMResult | Generator[LLMResult, None, None]: ...
  ```

  **Methods:**
@@ -184,24 +184,19 @@ Processing mode enumeration for different use cases.

  ```python
  class ProcessMode(Enum):
-     PROMPT_ONLY = "prompt_only"  # Generate prompts without LLM calls
-     COMPLETE = "complete"        # Non-streaming LLM processing
-     STREAM = "stream"            # Streaming LLM responses
+     COMPLETE = "complete"  # Non-streaming LLM processing
+     STREAM = "stream"      # Streaming LLM responses
  ```

  **Usage:**

  ```python
- # Generate prompt only
- prompt_result = await mf.process(0, ProcessMode.PROMPT_ONLY)
- print(prompt_result.content)  # Raw prompt text
-
  # Complete response
- complete_result = await mf.process(0, ProcessMode.COMPLETE)
+ complete_result = mf.process(0, ProcessMode.COMPLETE)
  print(complete_result.content)  # Full LLM response

  # Streaming response
- async for chunk in mf.process(0, ProcessMode.STREAM):
+ for chunk in mf.process(0, ProcessMode.STREAM):
      print(chunk.content, end='')
  ```

@@ -211,14 +206,14 @@ Abstract base class for implementing LLM providers.

  ```python
  from abc import ABC, abstractmethod
- from typing import AsyncGenerator
+ from typing import Generator

  class LLMProvider(ABC):
      @abstractmethod
-     async def complete(self, prompt: str) -> LLMResult: ...
+     def complete(self, messages: list[dict[str, str]]) -> str: ...

      @abstractmethod
-     async def stream(self, prompt: str) -> AsyncGenerator[str, None]: ...
+     def stream(self, messages: list[dict[str, str]]) -> Generator[str, None, None]: ...
  ```

  **Custom Implementation:**
@@ -226,25 +221,24 @@ class LLMProvider(ABC):
  ```python
  class OpenAIProvider(LLMProvider):
      def __init__(self, api_key: str):
-         self.client = openai.AsyncOpenAI(api_key=api_key)
+         self.client = openai.OpenAI(api_key=api_key)

-     async def complete(self, prompt: str) -> LLMResult:
-         response = await self.client.completions.create(
+     def complete(self, messages: list[dict[str, str]]) -> str:
+         response = self.client.chat.completions.create(
              model="gpt-3.5-turbo",
-             prompt=prompt,
-             max_tokens=500
+             messages=messages
          )
-         return LLMResult(content=response.choices[0].text.strip())
+         return response.choices[0].message.content

-     async def stream(self, prompt: str):
-         stream = await self.client.completions.create(
+     def stream(self, messages: list[dict[str, str]]):
+         stream = self.client.chat.completions.create(
              model="gpt-3.5-turbo",
-             prompt=prompt,
+             messages=messages,
              stream=True
          )
-         async for chunk in stream:
-             if chunk.choices[0].text:
-                 yield chunk.choices[0].text
+         for chunk in stream:
+             if chunk.choices[0].delta.content:
+                 yield chunk.choices[0].delta.content
  ```

  ### Block Types
@@ -402,7 +396,7 @@ The new version introduces multi-select interaction support with improvements to
  user_input = "Python"

  # Process interaction
- result = await mf.process(
+ result = mf.process(
      block_index=1,
      user_input=user_input,
      mode=ProcessMode.COMPLETE
@@ -419,7 +413,7 @@ user_input = {
  }

  # Process interaction
- result = await mf.process(
+ result = mf.process(
      block_index=1,
      user_input=user_input,
      mode=ProcessMode.COMPLETE
@@ -462,10 +456,10 @@ class CustomAPIProvider(LLMProvider):
      def __init__(self, base_url: str, api_key: str):
          self.base_url = base_url
          self.api_key = api_key
-         self.client = httpx.AsyncClient()
+         self.client = httpx.Client()

-     async def complete(self, prompt: str) -> LLMResult:
-         response = await self.client.post(
+     def complete(self, prompt: str) -> LLMResult:
+         response = self.client.post(
              f"{self.base_url}/complete",
              headers={"Authorization": f"Bearer {self.api_key}"},
              json={"prompt": prompt, "max_tokens": 1000}
@@ -473,14 +467,14 @@ class CustomAPIProvider(LLMProvider):
          data = response.json()
          return LLMResult(content=data["text"])

-     async def stream(self, prompt: str):
-         async with self.client.stream(
+     def stream(self, prompt: str):
+         with self.client.stream(
              "POST",
              f"{self.base_url}/stream",
              headers={"Authorization": f"Bearer {self.api_key}"},
              json={"prompt": prompt}
          ) as response:
-             async for chunk in response.aiter_text():
+             for chunk in response.iter_text():
                  if chunk.strip():
                      yield chunk

@@ -492,7 +486,7 @@ mf = MarkdownFlow(document, llm_provider=provider)
  ### Multi-Block Document Processing

  ```python
- async def process_conversation():
+ def process_conversation():
      conversation = """
  # AI Assistant

@@ -529,7 +523,7 @@ Would you like to start with the basics?
      for i, block in enumerate(blocks):
          if block.block_type == BlockType.CONTENT:
              print(f"\n--- Processing Block {i} ---")
-             result = await mf.process(
+             result = mf.process(
                  block_index=i,
                  mode=ProcessMode.COMPLETE,
                  variables=variables
@@ -544,9 +538,8 @@ Would you like to start with the basics?

  ```python
  from markdown_flow import MarkdownFlow, ProcessMode
- import asyncio

- async def stream_with_progress():
+ def stream_with_progress():
      document = """
  Generate a comprehensive Python tutorial for {{user_name}}
  focusing on {{topic}} with practical examples.
@@ -560,12 +553,12 @@ Include code samples, explanations, and practice exercises.
      content = ""
      chunk_count = 0

-     async for chunk in mf.process(
+     for chunk in mf.process(
          block_index=0,
          mode=ProcessMode.STREAM,
          variables={
              'user_name': 'developer',
-             'topic': 'async programming'
+             'topic': 'synchronous programming'
          }
      ):
          content += chunk.content
@@ -599,13 +592,13 @@ class InteractiveDocumentBuilder:
          self.user_responses = {}
          self.current_block = 0

-     async def start_interaction(self):
+     def start_interaction(self):
          blocks = self.mf.get_all_blocks()

          for i, block in enumerate(blocks):
              if block.block_type == BlockType.CONTENT:
                  # Process content block with current variables
-                 result = await self.mf.process(
+                 result = self.mf.process(
                      block_index=i,
                      mode=ProcessMode.COMPLETE,
                      variables=self.user_responses
@@ -614,14 +607,14 @@ class InteractiveDocumentBuilder:

          elif block.block_type == BlockType.INTERACTION:
              # Handle user interaction
-             response = await self.handle_interaction(block.content)
+             response = self.handle_interaction(block.content)
              if response:
                  self.user_responses.update(response)

-     async def handle_interaction(self, interaction_content: str):
-         from markdown_flow.utils import InteractionParser
+     def handle_interaction(self, interaction_content: str):
+         from markdown_flow.parser import InteractionParser

-         interaction = InteractionParser.parse(interaction_content)
+         interaction = InteractionParser().parse(interaction_content)
          print(f"\n{interaction_content}")

          if interaction.name == "BUTTONS_ONLY":
@@ -635,7 +628,7 @@ class InteractiveDocumentBuilder:
                  return {interaction.variable: selected}
              except (ValueError, IndexError):
                  print("Invalid choice")
-                 return await self.handle_interaction(interaction_content)
+                 return self.handle_interaction(interaction_content)

          elif interaction.name == "TEXT_ONLY":
              response = input(f"{interaction.question}: ")
@@ -657,7 +650,7 @@ Great choice, {{name}}! {{subject}} is an excellent field to study.
  """

  builder = InteractiveDocumentBuilder(template, your_llm_provider)
- await builder.start_interaction()
+ builder.start_interaction()
  ```

  ### Variable System Deep Dive

{markdown_flow-0.2.19 → markdown_flow-0.2.30}/README.md
@@ -55,7 +55,7 @@ llm_provider = YourLLMProvider(api_key="your-key")
  mf = MarkdownFlow(document, llm_provider=llm_provider)

  # Process with different modes
- result = await mf.process(
+ result = mf.process(
      block_index=0,
      mode=ProcessMode.COMPLETE,
      variables={'name': 'Alice', 'level': 'Intermediate'}
@@ -66,7 +66,7 @@ result = await mf.process(

  ```python
  # Stream processing for real-time responses
- async for chunk in mf.process(
+ for chunk in mf.process(
      block_index=0,
      mode=ProcessMode.STREAM,
      variables={'name': 'Bob'}
@@ -104,7 +104,7 @@ user_input = {
      'skills': ['Python', 'JavaScript', 'Go']  # Multi-selection
  }

- result = await mf.process(
+ result = mf.process(
      block_index=1,  # Process skills interaction
      user_input=user_input,
      mode=ProcessMode.COMPLETE
@@ -130,13 +130,13 @@ class MarkdownFlow:
      def get_all_blocks(self) -> List[Block]: ...
      def extract_variables(self) -> Set[str]: ...

-     async def process(
+     def process(
          self,
          block_index: int,
          mode: ProcessMode = ProcessMode.COMPLETE,
          variables: Optional[Dict[str, str]] = None,
          user_input: Optional[str] = None
-     ) -> LLMResult: ...
+     ) -> LLMResult | Generator[LLMResult, None, None]: ...
  ```

  **Methods:**
@@ -166,24 +166,19 @@ Processing mode enumeration for different use cases.

  ```python
  class ProcessMode(Enum):
-     PROMPT_ONLY = "prompt_only"  # Generate prompts without LLM calls
-     COMPLETE = "complete"        # Non-streaming LLM processing
-     STREAM = "stream"            # Streaming LLM responses
+     COMPLETE = "complete"  # Non-streaming LLM processing
+     STREAM = "stream"      # Streaming LLM responses
  ```

  **Usage:**

  ```python
- # Generate prompt only
- prompt_result = await mf.process(0, ProcessMode.PROMPT_ONLY)
- print(prompt_result.content)  # Raw prompt text
-
  # Complete response
- complete_result = await mf.process(0, ProcessMode.COMPLETE)
+ complete_result = mf.process(0, ProcessMode.COMPLETE)
  print(complete_result.content)  # Full LLM response

  # Streaming response
- async for chunk in mf.process(0, ProcessMode.STREAM):
+ for chunk in mf.process(0, ProcessMode.STREAM):
      print(chunk.content, end='')
  ```

@@ -193,14 +188,14 @@ Abstract base class for implementing LLM providers.

  ```python
  from abc import ABC, abstractmethod
- from typing import AsyncGenerator
+ from typing import Generator

  class LLMProvider(ABC):
      @abstractmethod
-     async def complete(self, prompt: str) -> LLMResult: ...
+     def complete(self, messages: list[dict[str, str]]) -> str: ...

      @abstractmethod
-     async def stream(self, prompt: str) -> AsyncGenerator[str, None]: ...
+     def stream(self, messages: list[dict[str, str]]) -> Generator[str, None, None]: ...
  ```

  **Custom Implementation:**
@@ -208,25 +203,24 @@ class LLMProvider(ABC):
  ```python
  class OpenAIProvider(LLMProvider):
      def __init__(self, api_key: str):
-         self.client = openai.AsyncOpenAI(api_key=api_key)
+         self.client = openai.OpenAI(api_key=api_key)

-     async def complete(self, prompt: str) -> LLMResult:
-         response = await self.client.completions.create(
+     def complete(self, messages: list[dict[str, str]]) -> str:
+         response = self.client.chat.completions.create(
              model="gpt-3.5-turbo",
-             prompt=prompt,
-             max_tokens=500
+             messages=messages
          )
-         return LLMResult(content=response.choices[0].text.strip())
+         return response.choices[0].message.content

-     async def stream(self, prompt: str):
-         stream = await self.client.completions.create(
+     def stream(self, messages: list[dict[str, str]]):
+         stream = self.client.chat.completions.create(
              model="gpt-3.5-turbo",
-             prompt=prompt,
+             messages=messages,
              stream=True
          )
-         async for chunk in stream:
-             if chunk.choices[0].text:
-                 yield chunk.choices[0].text
+         for chunk in stream:
+             if chunk.choices[0].delta.content:
+                 yield chunk.choices[0].delta.content
  ```

  ### Block Types
@@ -384,7 +378,7 @@ The new version introduces multi-select interaction support with improvements to
  user_input = "Python"

  # Process interaction
- result = await mf.process(
+ result = mf.process(
      block_index=1,
      user_input=user_input,
      mode=ProcessMode.COMPLETE
@@ -401,7 +395,7 @@ user_input = {
  }

  # Process interaction
- result = await mf.process(
+ result = mf.process(
      block_index=1,
      user_input=user_input,
      mode=ProcessMode.COMPLETE
@@ -444,10 +438,10 @@ class CustomAPIProvider(LLMProvider):
      def __init__(self, base_url: str, api_key: str):
          self.base_url = base_url
          self.api_key = api_key
-         self.client = httpx.AsyncClient()
+         self.client = httpx.Client()

-     async def complete(self, prompt: str) -> LLMResult:
-         response = await self.client.post(
+     def complete(self, prompt: str) -> LLMResult:
+         response = self.client.post(
              f"{self.base_url}/complete",
              headers={"Authorization": f"Bearer {self.api_key}"},
              json={"prompt": prompt, "max_tokens": 1000}
@@ -455,14 +449,14 @@ class CustomAPIProvider(LLMProvider):
          data = response.json()
          return LLMResult(content=data["text"])

-     async def stream(self, prompt: str):
-         async with self.client.stream(
+     def stream(self, prompt: str):
+         with self.client.stream(
              "POST",
              f"{self.base_url}/stream",
              headers={"Authorization": f"Bearer {self.api_key}"},
              json={"prompt": prompt}
          ) as response:
-             async for chunk in response.aiter_text():
+             for chunk in response.iter_text():
                  if chunk.strip():
                      yield chunk

@@ -474,7 +468,7 @@ mf = MarkdownFlow(document, llm_provider=provider)
  ### Multi-Block Document Processing

  ```python
- async def process_conversation():
+ def process_conversation():
      conversation = """
  # AI Assistant

@@ -511,7 +505,7 @@ Would you like to start with the basics?
      for i, block in enumerate(blocks):
          if block.block_type == BlockType.CONTENT:
              print(f"\n--- Processing Block {i} ---")
-             result = await mf.process(
+             result = mf.process(
                  block_index=i,
                  mode=ProcessMode.COMPLETE,
                  variables=variables
@@ -526,9 +520,8 @@ Would you like to start with the basics?

  ```python
  from markdown_flow import MarkdownFlow, ProcessMode
- import asyncio

- async def stream_with_progress():
+ def stream_with_progress():
      document = """
  Generate a comprehensive Python tutorial for {{user_name}}
  focusing on {{topic}} with practical examples.
@@ -542,12 +535,12 @@ Include code samples, explanations, and practice exercises.
      content = ""
      chunk_count = 0

-     async for chunk in mf.process(
+     for chunk in mf.process(
          block_index=0,
          mode=ProcessMode.STREAM,
          variables={
              'user_name': 'developer',
-             'topic': 'async programming'
+             'topic': 'synchronous programming'
          }
      ):
          content += chunk.content
@@ -581,13 +574,13 @@ class InteractiveDocumentBuilder:
          self.user_responses = {}
          self.current_block = 0

-     async def start_interaction(self):
+     def start_interaction(self):
          blocks = self.mf.get_all_blocks()

          for i, block in enumerate(blocks):
              if block.block_type == BlockType.CONTENT:
                  # Process content block with current variables
-                 result = await self.mf.process(
+                 result = self.mf.process(
                      block_index=i,
                      mode=ProcessMode.COMPLETE,
                      variables=self.user_responses
@@ -596,14 +589,14 @@ class InteractiveDocumentBuilder:

          elif block.block_type == BlockType.INTERACTION:
              # Handle user interaction
-             response = await self.handle_interaction(block.content)
+             response = self.handle_interaction(block.content)
              if response:
                  self.user_responses.update(response)

-     async def handle_interaction(self, interaction_content: str):
-         from markdown_flow.utils import InteractionParser
+     def handle_interaction(self, interaction_content: str):
+         from markdown_flow.parser import InteractionParser

-         interaction = InteractionParser.parse(interaction_content)
+         interaction = InteractionParser().parse(interaction_content)
          print(f"\n{interaction_content}")

          if interaction.name == "BUTTONS_ONLY":
@@ -617,7 +610,7 @@ class InteractiveDocumentBuilder:
                  return {interaction.variable: selected}
              except (ValueError, IndexError):
                  print("Invalid choice")
-                 return await self.handle_interaction(interaction_content)
+                 return self.handle_interaction(interaction_content)

          elif interaction.name == "TEXT_ONLY":
              response = input(f"{interaction.question}: ")
@@ -639,7 +632,7 @@ Great choice, {{name}}! {{subject}} is an excellent field to study.
  """

  builder = InteractiveDocumentBuilder(template, your_llm_provider)
- await builder.start_interaction()
+ builder.start_interaction()
  ```

  ### Variable System Deep Dive
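One consequence of the signature change shown in both hunk sets above: `process()` now returns an `LLMResult` for `COMPLETE` but a generator for `STREAM`. A hypothetical wrapper that normalizes both to a plain string; `run_block` is our name for illustration, not a markdown-flow API:

```python
# Hypothetical helper around the new union return type
# (LLMResult | Generator[LLMResult, None, None]).
from markdown_flow import MarkdownFlow, ProcessMode


def run_block(mf: MarkdownFlow, index: int, mode=ProcessMode.COMPLETE, **kwargs) -> str:
    result = mf.process(index, mode=mode, **kwargs)
    if mode == ProcessMode.STREAM:
        # STREAM yields LLMResult chunks; drain the generator and join them.
        return "".join(chunk.content for chunk in result)
    return result.content  # COMPLETE returns a single LLMResult
```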

{markdown_flow-0.2.19 → markdown_flow-0.2.30}/markdown_flow/__init__.py
@@ -9,7 +9,7 @@ Core Features:
  - Extract variable placeholders ({{variable}} and %{{variable}} formats)
  - Build LLM-ready prompts and message formats
  - Handle user interaction validation and input processing
- - Support multiple processing modes: PROMPT_ONLY, COMPLETE, STREAM
+ - Support multiple processing modes: COMPLETE, STREAM

  Supported Interaction Types:
  - TEXT_ONLY: ?[%{{var}}...question] - Text input only
@@ -35,7 +35,6 @@ Basic Usage:
      result = mf.process(0, variables={'name': 'John'}, mode=ProcessMode.COMPLETE)

      # Different processing modes
-     prompt_result = mf.process(0, mode=ProcessMode.PROMPT_ONLY)
      complete_result = mf.process(0, mode=ProcessMode.COMPLETE)
      stream_result = mf.process(0, mode=ProcessMode.STREAM)

@@ -53,7 +52,7 @@ Import Guide:
      from .core import MarkdownFlow
      from .enums import BlockType, InputType
      from .llm import LLMProvider, LLMResult, ProcessMode
-     from .utils import (
+     from .parser import (
          InteractionParser,
          InteractionType,
          extract_interaction_question,
@@ -83,4 +82,5 @@ __all__ = [
      "replace_variables_in_text",
  ]

- __version__ = "0.2.19"
+ __version__ = "0.2.30"
+ # __version__ = "0.2.29-alpha-1"
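
The `__init__.py` hunks above also capture the parser relocation: `InteractionParser` now lives in `markdown_flow.parser` (backed by the new `parser/interaction.py` in the file list) and is called on an instance rather than as a static method. A hedged usage sketch — the `?[...]` literal follows the `TEXT_ONLY` syntax documented in the docstring hunk, and the `name`/`variable` attributes mirror the README excerpt; anything beyond those is an assumption:

```python
# Usage sketch for the relocated parser, based only on the import and
# call-site changes visible in this diff.
from markdown_flow.parser import InteractionParser

# 0.2.19: from markdown_flow.utils import InteractionParser
#         interaction = InteractionParser.parse(text)
interaction = InteractionParser().parse("?[%{{email}}...What is your email?]")
print(interaction.name)      # expected "TEXT_ONLY" for this syntax
print(interaction.variable)  # "email"
```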