markdown-flow 0.2.16__tar.gz → 0.2.18__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of markdown-flow might be problematic.

Files changed (19)
  1. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/PKG-INFO +36 -118
  2. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/README.md +35 -117
  3. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/markdown_flow/__init__.py +5 -5
  4. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/markdown_flow/constants.py +43 -18
  5. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/markdown_flow/core.py +40 -471
  6. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/markdown_flow/llm.py +6 -9
  7. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/markdown_flow/utils.py +6 -8
  8. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/markdown_flow.egg-info/PKG-INFO +36 -118
  9. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/markdown_flow.egg-info/SOURCES.txt +2 -1
  10. markdown_flow-0.2.18/tests/test_preserved_simple.py +170 -0
  11. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/LICENSE +0 -0
  12. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/markdown_flow/enums.py +0 -0
  13. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/markdown_flow/exceptions.py +0 -0
  14. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/markdown_flow/models.py +0 -0
  15. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/markdown_flow.egg-info/dependency_links.txt +0 -0
  16. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/markdown_flow.egg-info/top_level.txt +0 -0
  17. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/pyproject.toml +0 -0
  18. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/setup.cfg +0 -0
  19. {markdown_flow-0.2.16 → markdown_flow-0.2.18}/tests/test_dynamic_interaction.py +0 -0
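
The bulk of the README and PKG-INFO changes below document a sync-to-async migration: `MarkdownFlow.process()` becomes `async`, streaming switches from `for` to `async for`, and the `LLMProvider` methods become coroutines. As a minimal sketch of what that migration looks like at a call site, assuming only the signatures shown in the diffed README (the document text and `your_llm_provider` here are placeholders, not from the package):

```python
import asyncio

from markdown_flow import MarkdownFlow, ProcessMode

async def main(llm_provider):
    # Placeholder document; any MarkdownFlow source works here.
    mf = MarkdownFlow("Introduce Python to {{name}}.", llm_provider=llm_provider)

    # 0.2.16 style: result = mf.process(...)
    # 0.2.18 style: the call is awaited
    result = await mf.process(
        block_index=0,
        mode=ProcessMode.COMPLETE,
        variables={"name": "Alice"},
    )
    print(result.content)

    # Streaming likewise moves from `for` to `async for`
    async for chunk in mf.process(block_index=0, mode=ProcessMode.STREAM):
        print(chunk.content, end="")

# asyncio.run(main(your_llm_provider))  # your_llm_provider: any provider implementing the async interface
```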
{markdown_flow-0.2.16 → markdown_flow-0.2.18}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: markdown-flow
-Version: 0.2.16
+Version: 0.2.18
 Summary: An agent library designed to parse and process MarkdownFlow documents
 Project-URL: Homepage, https://github.com/ai-shifu/markdown-flow-agent-py
 Project-URL: Bug Tracker, https://github.com/ai-shifu/markdown-flow-agent-py/issues
@@ -73,7 +73,7 @@ llm_provider = YourLLMProvider(api_key="your-key")
 mf = MarkdownFlow(document, llm_provider=llm_provider)
 
 # Process with different modes
-result = mf.process(
+result = await mf.process(
     block_index=0,
     mode=ProcessMode.COMPLETE,
     variables={'name': 'Alice', 'level': 'Intermediate'}
@@ -84,7 +84,7 @@ result = mf.process(
 
 ```python
 # Stream processing for real-time responses
-for chunk in mf.process(
+async for chunk in mf.process(
     block_index=0,
     mode=ProcessMode.STREAM,
     variables={'name': 'Bob'}
@@ -92,36 +92,6 @@ for chunk in mf.process(
     print(chunk.content, end='')
 ```
 
-### Dynamic Interaction Generation ✨
-
-Transform natural language content into interactive elements automatically:
-
-```python
-from markdown_flow import MarkdownFlow, ProcessMode
-
-# Dynamic interaction generation works automatically
-mf = MarkdownFlow(
-    document="询问用户的菜品偏好,并记录到变量{{菜品选择}}",
-    llm_provider=llm_provider,
-    document_prompt="你是中餐厅服务员,提供川菜、粤菜、鲁菜等选项"
-)
-
-# Process with Function Calling
-result = mf.process(0, ProcessMode.COMPLETE)
-
-if result.transformed_to_interaction:
-    print(f"Generated interaction: {result.content}")
-    # Output: ?[%{{菜品选择}} 宫保鸡丁||麻婆豆腐||水煮鱼||...其他菜品]
-
-# Continue with user input
-user_result = mf.process(
-    block_index=0,
-    mode=ProcessMode.COMPLETE,
-    user_input={"菜品选择": ["宫保鸡丁", "麻婆豆腐"]},
-    dynamic_interaction_format=result.content
-)
-```
-
 ### Interactive Elements
 
 ```python
@@ -152,66 +122,13 @@ user_input = {
     'skills': ['Python', 'JavaScript', 'Go']  # Multi-selection
 }
 
-result = mf.process(
+result = await mf.process(
     block_index=1,  # Process skills interaction
     user_input=user_input,
     mode=ProcessMode.COMPLETE
 )
 ```
 
-## ✨ Key Features
-
-### 🏗️ Three-Layer Architecture
-
-- **Document Level**: Parse `---` separators and `?[]` interaction patterns
-- **Block Level**: Categorize as CONTENT, INTERACTION, or PRESERVED_CONTENT
-- **Interaction Level**: Handle 6 different interaction types with smart validation
-
-### 🔄 Dynamic Interaction Generation
-
-- **Natural Language Input**: Write content in plain language
-- **AI-Powered Conversion**: LLM automatically detects interaction needs using Function Calling
-- **Structured Data Generation**: LLM returns structured data, core builds MarkdownFlow format
-- **Language Agnostic**: Support for any language with proper document prompts
-- **Context Awareness**: Both original and resolved variable contexts provided to LLM
-
-### 🤖 Unified LLM Integration
-
-- **Single Interface**: One `complete()` method for both regular and Function Calling modes
-- **Automatic Detection**: Tools parameter determines processing mode automatically
-- **Consistent Returns**: Always returns `LLMResult` with structured metadata
-- **Error Handling**: Automatic fallback from Function Calling to regular completion
-- **Provider Agnostic**: Abstract interface supports any LLM service
-
-### 📝 Variable System
-
-- **Replaceable Variables**: `{{variable}}` for content personalization
-- **Preserved Variables**: `%{{variable}}` for LLM understanding in interactions
-- **Multi-Value Support**: Handle both single values and arrays
-- **Smart Extraction**: Automatic detection from document content
-
-### 🎯 Interaction Types
-
-- **Text Input**: `?[%{{var}}...question]` - Free text entry
-- **Single Select**: `?[%{{var}} A|B|C]` - Choose one option
-- **Multi Select**: `?[%{{var}} A||B||C]` - Choose multiple options
-- **Mixed Mode**: `?[%{{var}} A||B||...custom]` - Predefined + custom input
-- **Display Buttons**: `?[Continue|Cancel]` - Action buttons without assignment
-- **Value Separation**: `?[%{{var}} Display//value|...]` - Different display/stored values
-
-### 🔒 Content Preservation
-
-- **Multiline Format**: `!===content!===` blocks output exactly as written
-- **Inline Format**: `===content===` for single-line preserved content
-- **Variable Support**: Preserved content can contain variables for substitution
-
-### ⚡ Performance Optimized
-
-- **Pre-compiled Regex**: All patterns compiled once for maximum performance
-- **Synchronous Interface**: Clean synchronous operations with optional streaming
-- **Stream Processing**: Real-time streaming responses supported
-- **Memory Efficient**: Lazy evaluation and generator patterns
-
 ## 📖 API Reference
 
 ### Core Classes
@@ -231,7 +148,7 @@ class MarkdownFlow:
     def get_all_blocks(self) -> List[Block]: ...
     def extract_variables(self) -> Set[str]: ...
 
-    def process(
+    async def process(
         self,
         block_index: int,
         mode: ProcessMode = ProcessMode.COMPLETE,
@@ -276,15 +193,15 @@ class ProcessMode(Enum):
 
 ```python
 # Generate prompt only
-prompt_result = mf.process(0, ProcessMode.PROMPT_ONLY)
+prompt_result = await mf.process(0, ProcessMode.PROMPT_ONLY)
 print(prompt_result.content)  # Raw prompt text
 
 # Complete response
-complete_result = mf.process(0, ProcessMode.COMPLETE)
+complete_result = await mf.process(0, ProcessMode.COMPLETE)
 print(complete_result.content)  # Full LLM response
 
 # Streaming response
-for chunk in mf.process(0, ProcessMode.STREAM):
+async for chunk in mf.process(0, ProcessMode.STREAM):
     print(chunk.content, end='')
 ```
 
@@ -294,14 +211,14 @@ Abstract base class for implementing LLM providers.
 
 ```python
 from abc import ABC, abstractmethod
-from typing import Generator
+from typing import AsyncGenerator
 
 class LLMProvider(ABC):
     @abstractmethod
-    def complete(self, prompt: str) -> LLMResult: ...
+    async def complete(self, prompt: str) -> LLMResult: ...
 
     @abstractmethod
-    def stream(self, prompt: str) -> Generator[str, None, None]: ...
+    async def stream(self, prompt: str) -> AsyncGenerator[str, None]: ...
 ```
 
 **Custom Implementation:**
@@ -309,23 +226,23 @@ class LLMProvider(ABC):
 ```python
 class OpenAIProvider(LLMProvider):
     def __init__(self, api_key: str):
-        self.client = openai.OpenAI(api_key=api_key)
+        self.client = openai.AsyncOpenAI(api_key=api_key)
 
-    def complete(self, prompt: str) -> LLMResult:
-        response = self.client.completions.create(
+    async def complete(self, prompt: str) -> LLMResult:
+        response = await self.client.completions.create(
             model="gpt-3.5-turbo",
             prompt=prompt,
             max_tokens=500
         )
         return LLMResult(content=response.choices[0].text.strip())
 
-    def stream(self, prompt: str):
-        stream = self.client.completions.create(
+    async def stream(self, prompt: str):
+        stream = await self.client.completions.create(
            model="gpt-3.5-turbo",
            prompt=prompt,
            stream=True
        )
-        for chunk in stream:
+        async for chunk in stream:
            if chunk.choices[0].text:
                yield chunk.choices[0].text
 ```
@@ -485,7 +402,7 @@ The new version introduces multi-select interaction support with improvements to
 user_input = "Python"
 
 # Process interaction
-result = mf.process(
+result = await mf.process(
     block_index=1,
     user_input=user_input,
     mode=ProcessMode.COMPLETE
@@ -502,7 +419,7 @@ user_input = {
 }
 
 # Process interaction
-result = mf.process(
+result = await mf.process(
     block_index=1,
     user_input=user_input,
     mode=ProcessMode.COMPLETE
@@ -545,10 +462,10 @@ class CustomAPIProvider(LLMProvider):
     def __init__(self, base_url: str, api_key: str):
         self.base_url = base_url
         self.api_key = api_key
-        self.client = httpx.Client()
+        self.client = httpx.AsyncClient()
 
-    def complete(self, prompt: str) -> LLMResult:
-        response = self.client.post(
+    async def complete(self, prompt: str) -> LLMResult:
+        response = await self.client.post(
             f"{self.base_url}/complete",
             headers={"Authorization": f"Bearer {self.api_key}"},
             json={"prompt": prompt, "max_tokens": 1000}
@@ -556,14 +473,14 @@ class CustomAPIProvider(LLMProvider):
         data = response.json()
         return LLMResult(content=data["text"])
 
-    def stream(self, prompt: str):
-        with self.client.stream(
+    async def stream(self, prompt: str):
+        async with self.client.stream(
             "POST",
             f"{self.base_url}/stream",
             headers={"Authorization": f"Bearer {self.api_key}"},
             json={"prompt": prompt}
         ) as response:
-            for chunk in response.iter_text():
+            async for chunk in response.aiter_text():
                 if chunk.strip():
                     yield chunk
 
@@ -575,7 +492,7 @@ mf = MarkdownFlow(document, llm_provider=provider)
 ### Multi-Block Document Processing
 
 ```python
-def process_conversation():
+async def process_conversation():
     conversation = """
 # AI Assistant
 
@@ -612,7 +529,7 @@ Would you like to start with the basics?
     for i, block in enumerate(blocks):
         if block.block_type == BlockType.CONTENT:
             print(f"\n--- Processing Block {i} ---")
-            result = mf.process(
+            result = await mf.process(
                 block_index=i,
                 mode=ProcessMode.COMPLETE,
                 variables=variables
@@ -627,8 +544,9 @@ Would you like to start with the basics?
 
 ```python
 from markdown_flow import MarkdownFlow, ProcessMode
+import asyncio
 
-def stream_with_progress():
+async def stream_with_progress():
     document = """
 Generate a comprehensive Python tutorial for {{user_name}}
 focusing on {{topic}} with practical examples.
@@ -642,7 +560,7 @@ Include code samples, explanations, and practice exercises.
     content = ""
     chunk_count = 0
 
-    for chunk in mf.process(
+    async for chunk in mf.process(
         block_index=0,
         mode=ProcessMode.STREAM,
         variables={
@@ -681,13 +599,13 @@ class InteractiveDocumentBuilder:
         self.user_responses = {}
         self.current_block = 0
 
-    def start_interaction(self):
+    async def start_interaction(self):
         blocks = self.mf.get_all_blocks()
 
         for i, block in enumerate(blocks):
             if block.block_type == BlockType.CONTENT:
                 # Process content block with current variables
-                result = self.mf.process(
+                result = await self.mf.process(
                     block_index=i,
                     mode=ProcessMode.COMPLETE,
                     variables=self.user_responses
@@ -696,11 +614,11 @@ class InteractiveDocumentBuilder:
 
             elif block.block_type == BlockType.INTERACTION:
                 # Handle user interaction
-                response = self.handle_interaction(block.content)
+                response = await self.handle_interaction(block.content)
                 if response:
                     self.user_responses.update(response)
 
-    def handle_interaction(self, interaction_content: str):
+    async def handle_interaction(self, interaction_content: str):
         from markdown_flow.utils import InteractionParser
 
         interaction = InteractionParser.parse(interaction_content)
@@ -717,7 +635,7 @@ class InteractiveDocumentBuilder:
                 return {interaction.variable: selected}
             except (ValueError, IndexError):
                 print("Invalid choice")
-                return self.handle_interaction(interaction_content)
+                return await self.handle_interaction(interaction_content)
 
         elif interaction.name == "TEXT_ONLY":
             response = input(f"{interaction.question}: ")
@@ -739,7 +657,7 @@ Great choice, {{name}}! {{subject}} is an excellent field to study.
 """
 
 builder = InteractiveDocumentBuilder(template, your_llm_provider)
-builder.start_interaction()
+await builder.start_interaction()
 ```
 
 ### Variable System Deep Dive

{markdown_flow-0.2.16 → markdown_flow-0.2.18}/README.md

@@ -55,7 +55,7 @@ llm_provider = YourLLMProvider(api_key="your-key")
 mf = MarkdownFlow(document, llm_provider=llm_provider)
 
 # Process with different modes
-result = mf.process(
+result = await mf.process(
     block_index=0,
     mode=ProcessMode.COMPLETE,
     variables={'name': 'Alice', 'level': 'Intermediate'}
@@ -66,7 +66,7 @@ result = mf.process(
 
 ```python
 # Stream processing for real-time responses
-for chunk in mf.process(
+async for chunk in mf.process(
     block_index=0,
     mode=ProcessMode.STREAM,
     variables={'name': 'Bob'}
@@ -74,36 +74,6 @@ for chunk in mf.process(
     print(chunk.content, end='')
 ```
 
-### Dynamic Interaction Generation ✨
-
-Transform natural language content into interactive elements automatically:
-
-```python
-from markdown_flow import MarkdownFlow, ProcessMode
-
-# Dynamic interaction generation works automatically
-mf = MarkdownFlow(
-    document="询问用户的菜品偏好,并记录到变量{{菜品选择}}",
-    llm_provider=llm_provider,
-    document_prompt="你是中餐厅服务员,提供川菜、粤菜、鲁菜等选项"
-)
-
-# Process with Function Calling
-result = mf.process(0, ProcessMode.COMPLETE)
-
-if result.transformed_to_interaction:
-    print(f"Generated interaction: {result.content}")
-    # Output: ?[%{{菜品选择}} 宫保鸡丁||麻婆豆腐||水煮鱼||...其他菜品]
-
-# Continue with user input
-user_result = mf.process(
-    block_index=0,
-    mode=ProcessMode.COMPLETE,
-    user_input={"菜品选择": ["宫保鸡丁", "麻婆豆腐"]},
-    dynamic_interaction_format=result.content
-)
-```
-
 ### Interactive Elements
 
 ```python
@@ -134,66 +104,13 @@ user_input = {
     'skills': ['Python', 'JavaScript', 'Go']  # Multi-selection
 }
 
-result = mf.process(
+result = await mf.process(
     block_index=1,  # Process skills interaction
     user_input=user_input,
     mode=ProcessMode.COMPLETE
 )
 ```
 
-## ✨ Key Features
-
-### 🏗️ Three-Layer Architecture
-
-- **Document Level**: Parse `---` separators and `?[]` interaction patterns
-- **Block Level**: Categorize as CONTENT, INTERACTION, or PRESERVED_CONTENT
-- **Interaction Level**: Handle 6 different interaction types with smart validation
-
-### 🔄 Dynamic Interaction Generation
-
-- **Natural Language Input**: Write content in plain language
-- **AI-Powered Conversion**: LLM automatically detects interaction needs using Function Calling
-- **Structured Data Generation**: LLM returns structured data, core builds MarkdownFlow format
-- **Language Agnostic**: Support for any language with proper document prompts
-- **Context Awareness**: Both original and resolved variable contexts provided to LLM
-
-### 🤖 Unified LLM Integration
-
-- **Single Interface**: One `complete()` method for both regular and Function Calling modes
-- **Automatic Detection**: Tools parameter determines processing mode automatically
-- **Consistent Returns**: Always returns `LLMResult` with structured metadata
-- **Error Handling**: Automatic fallback from Function Calling to regular completion
-- **Provider Agnostic**: Abstract interface supports any LLM service
-
-### 📝 Variable System
-
-- **Replaceable Variables**: `{{variable}}` for content personalization
-- **Preserved Variables**: `%{{variable}}` for LLM understanding in interactions
-- **Multi-Value Support**: Handle both single values and arrays
-- **Smart Extraction**: Automatic detection from document content
-
-### 🎯 Interaction Types
-
-- **Text Input**: `?[%{{var}}...question]` - Free text entry
-- **Single Select**: `?[%{{var}} A|B|C]` - Choose one option
-- **Multi Select**: `?[%{{var}} A||B||C]` - Choose multiple options
-- **Mixed Mode**: `?[%{{var}} A||B||...custom]` - Predefined + custom input
-- **Display Buttons**: `?[Continue|Cancel]` - Action buttons without assignment
-- **Value Separation**: `?[%{{var}} Display//value|...]` - Different display/stored values
-
-### 🔒 Content Preservation
-
-- **Multiline Format**: `!===content!===` blocks output exactly as written
-- **Inline Format**: `===content===` for single-line preserved content
-- **Variable Support**: Preserved content can contain variables for substitution
-
-### ⚡ Performance Optimized
-
-- **Pre-compiled Regex**: All patterns compiled once for maximum performance
-- **Synchronous Interface**: Clean synchronous operations with optional streaming
-- **Stream Processing**: Real-time streaming responses supported
-- **Memory Efficient**: Lazy evaluation and generator patterns
-
 ## 📖 API Reference
 
 ### Core Classes
@@ -213,7 +130,7 @@ class MarkdownFlow:
     def get_all_blocks(self) -> List[Block]: ...
     def extract_variables(self) -> Set[str]: ...
 
-    def process(
+    async def process(
         self,
         block_index: int,
         mode: ProcessMode = ProcessMode.COMPLETE,
@@ -258,15 +175,15 @@ class ProcessMode(Enum):
 
 ```python
 # Generate prompt only
-prompt_result = mf.process(0, ProcessMode.PROMPT_ONLY)
+prompt_result = await mf.process(0, ProcessMode.PROMPT_ONLY)
 print(prompt_result.content)  # Raw prompt text
 
 # Complete response
-complete_result = mf.process(0, ProcessMode.COMPLETE)
+complete_result = await mf.process(0, ProcessMode.COMPLETE)
 print(complete_result.content)  # Full LLM response
 
 # Streaming response
-for chunk in mf.process(0, ProcessMode.STREAM):
+async for chunk in mf.process(0, ProcessMode.STREAM):
     print(chunk.content, end='')
 ```
 
@@ -276,14 +193,14 @@ Abstract base class for implementing LLM providers.
 
 ```python
 from abc import ABC, abstractmethod
-from typing import Generator
+from typing import AsyncGenerator
 
 class LLMProvider(ABC):
     @abstractmethod
-    def complete(self, prompt: str) -> LLMResult: ...
+    async def complete(self, prompt: str) -> LLMResult: ...
 
     @abstractmethod
-    def stream(self, prompt: str) -> Generator[str, None, None]: ...
+    async def stream(self, prompt: str) -> AsyncGenerator[str, None]: ...
 ```
 
 **Custom Implementation:**
@@ -291,23 +208,23 @@ class LLMProvider(ABC):
 ```python
 class OpenAIProvider(LLMProvider):
     def __init__(self, api_key: str):
-        self.client = openai.OpenAI(api_key=api_key)
+        self.client = openai.AsyncOpenAI(api_key=api_key)
 
-    def complete(self, prompt: str) -> LLMResult:
-        response = self.client.completions.create(
+    async def complete(self, prompt: str) -> LLMResult:
+        response = await self.client.completions.create(
             model="gpt-3.5-turbo",
             prompt=prompt,
             max_tokens=500
         )
         return LLMResult(content=response.choices[0].text.strip())
 
-    def stream(self, prompt: str):
-        stream = self.client.completions.create(
+    async def stream(self, prompt: str):
+        stream = await self.client.completions.create(
            model="gpt-3.5-turbo",
            prompt=prompt,
            stream=True
        )
-        for chunk in stream:
+        async for chunk in stream:
            if chunk.choices[0].text:
                yield chunk.choices[0].text
 ```
@@ -467,7 +384,7 @@ The new version introduces multi-select interaction support with improvements to
 user_input = "Python"
 
 # Process interaction
-result = mf.process(
+result = await mf.process(
     block_index=1,
     user_input=user_input,
     mode=ProcessMode.COMPLETE
@@ -484,7 +401,7 @@ user_input = {
 }
 
 # Process interaction
-result = mf.process(
+result = await mf.process(
     block_index=1,
     user_input=user_input,
     mode=ProcessMode.COMPLETE
@@ -527,10 +444,10 @@ class CustomAPIProvider(LLMProvider):
     def __init__(self, base_url: str, api_key: str):
         self.base_url = base_url
         self.api_key = api_key
-        self.client = httpx.Client()
+        self.client = httpx.AsyncClient()
 
-    def complete(self, prompt: str) -> LLMResult:
-        response = self.client.post(
+    async def complete(self, prompt: str) -> LLMResult:
+        response = await self.client.post(
             f"{self.base_url}/complete",
             headers={"Authorization": f"Bearer {self.api_key}"},
             json={"prompt": prompt, "max_tokens": 1000}
@@ -538,14 +455,14 @@ class CustomAPIProvider(LLMProvider):
         data = response.json()
         return LLMResult(content=data["text"])
 
-    def stream(self, prompt: str):
-        with self.client.stream(
+    async def stream(self, prompt: str):
+        async with self.client.stream(
             "POST",
             f"{self.base_url}/stream",
             headers={"Authorization": f"Bearer {self.api_key}"},
             json={"prompt": prompt}
         ) as response:
-            for chunk in response.iter_text():
+            async for chunk in response.aiter_text():
                 if chunk.strip():
                     yield chunk
 
@@ -557,7 +474,7 @@ mf = MarkdownFlow(document, llm_provider=provider)
 ### Multi-Block Document Processing
 
 ```python
-def process_conversation():
+async def process_conversation():
     conversation = """
 # AI Assistant
 
@@ -594,7 +511,7 @@ Would you like to start with the basics?
     for i, block in enumerate(blocks):
         if block.block_type == BlockType.CONTENT:
             print(f"\n--- Processing Block {i} ---")
-            result = mf.process(
+            result = await mf.process(
                 block_index=i,
                 mode=ProcessMode.COMPLETE,
                 variables=variables
@@ -609,8 +526,9 @@ Would you like to start with the basics?
 
 ```python
 from markdown_flow import MarkdownFlow, ProcessMode
+import asyncio
 
-def stream_with_progress():
+async def stream_with_progress():
     document = """
 Generate a comprehensive Python tutorial for {{user_name}}
 focusing on {{topic}} with practical examples.
@@ -624,7 +542,7 @@ Include code samples, explanations, and practice exercises.
     content = ""
     chunk_count = 0
 
-    for chunk in mf.process(
+    async for chunk in mf.process(
         block_index=0,
         mode=ProcessMode.STREAM,
         variables={
@@ -663,13 +581,13 @@ class InteractiveDocumentBuilder:
         self.user_responses = {}
         self.current_block = 0
 
-    def start_interaction(self):
+    async def start_interaction(self):
         blocks = self.mf.get_all_blocks()
 
         for i, block in enumerate(blocks):
             if block.block_type == BlockType.CONTENT:
                 # Process content block with current variables
-                result = self.mf.process(
+                result = await self.mf.process(
                     block_index=i,
                     mode=ProcessMode.COMPLETE,
                     variables=self.user_responses
@@ -678,11 +596,11 @@ class InteractiveDocumentBuilder:
 
             elif block.block_type == BlockType.INTERACTION:
                 # Handle user interaction
-                response = self.handle_interaction(block.content)
+                response = await self.handle_interaction(block.content)
                 if response:
                     self.user_responses.update(response)
 
-    def handle_interaction(self, interaction_content: str):
+    async def handle_interaction(self, interaction_content: str):
         from markdown_flow.utils import InteractionParser
 
         interaction = InteractionParser.parse(interaction_content)
@@ -699,7 +617,7 @@ class InteractiveDocumentBuilder:
                 return {interaction.variable: selected}
             except (ValueError, IndexError):
                 print("Invalid choice")
-                return self.handle_interaction(interaction_content)
+                return await self.handle_interaction(interaction_content)
 
         elif interaction.name == "TEXT_ONLY":
             response = input(f"{interaction.question}: ")
@@ -721,7 +639,7 @@ Great choice, {{name}}! {{subject}} is an excellent field to study.
 """
 
 builder = InteractiveDocumentBuilder(template, your_llm_provider)
-builder.start_interaction()
+await builder.start_interaction()
 ```
 
 ### Variable System Deep Dive

{markdown_flow-0.2.16 → markdown_flow-0.2.18}/markdown_flow/__init__.py

@@ -32,12 +32,12 @@ Basic Usage:
     blocks = mf.get_all_blocks()
 
     # Process blocks using unified interface
-    result = await mf.process(0, variables={'name': 'John'}, mode=ProcessMode.COMPLETE)
+    result = mf.process(0, variables={'name': 'John'}, mode=ProcessMode.COMPLETE)
 
     # Different processing modes
-    prompt_result = await mf.process(0, mode=ProcessMode.PROMPT_ONLY)
-    complete_result = await mf.process(0, mode=ProcessMode.COMPLETE)
-    stream_result = await mf.process(0, mode=ProcessMode.STREAM)
+    prompt_result = mf.process(0, mode=ProcessMode.PROMPT_ONLY)
+    complete_result = mf.process(0, mode=ProcessMode.COMPLETE)
+    stream_result = mf.process(0, mode=ProcessMode.STREAM)
 
 Variable System:
 - {{variable}} - Regular variables, replaced with actual values
@@ -83,4 +83,4 @@ __all__ = [
     "replace_variables_in_text",
 ]
 
-__version__ = "0.2.16"
+__version__ = "0.2.18"
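
Given the `LLMProvider` interface change shown in the README hunks above (both `complete()` and `stream()` are now coroutine methods), here is a minimal sketch of a conforming provider. The import path for `LLMProvider` and `LLMResult` is assumed from the diffed examples, and `EchoProvider` is a hypothetical stand-in, not part of the package:

```python
import asyncio
from typing import AsyncGenerator

from markdown_flow import LLMProvider, LLMResult  # import path assumed from the README examples

class EchoProvider(LLMProvider):
    """Hypothetical no-op provider that just echoes the prompt."""

    async def complete(self, prompt: str) -> LLMResult:
        # A real provider would await an LLM API call here.
        return LLMResult(content=f"echo: {prompt}")

    async def stream(self, prompt: str) -> AsyncGenerator[str, None]:
        # Yield the prompt word by word to simulate streaming chunks.
        for word in prompt.split():
            await asyncio.sleep(0)  # cooperatively yield control, as a network read would
            yield word + " "
```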