fastworkflow 2.15.5__py3-none-any.whl → 2.17.13__py3-none-any.whl
This diff represents the content of publicly available package versions released to a supported registry, and is provided for informational purposes only.
- fastworkflow/_workflows/command_metadata_extraction/_commands/ErrorCorrection/you_misunderstood.py +1 -1
- fastworkflow/_workflows/command_metadata_extraction/_commands/IntentDetection/what_can_i_do.py +16 -2
- fastworkflow/_workflows/command_metadata_extraction/_commands/wildcard.py +27 -570
- fastworkflow/_workflows/command_metadata_extraction/intent_detection.py +360 -0
- fastworkflow/_workflows/command_metadata_extraction/parameter_extraction.py +411 -0
- fastworkflow/chat_session.py +379 -206
- fastworkflow/cli.py +80 -165
- fastworkflow/command_context_model.py +73 -7
- fastworkflow/command_executor.py +14 -5
- fastworkflow/command_metadata_api.py +106 -6
- fastworkflow/examples/fastworkflow.env +2 -1
- fastworkflow/examples/fastworkflow.passwords.env +2 -1
- fastworkflow/examples/retail_workflow/_commands/exchange_delivered_order_items.py +32 -3
- fastworkflow/examples/retail_workflow/_commands/find_user_id_by_email.py +6 -5
- fastworkflow/examples/retail_workflow/_commands/modify_pending_order_items.py +32 -3
- fastworkflow/examples/retail_workflow/_commands/return_delivered_order_items.py +13 -2
- fastworkflow/examples/retail_workflow/_commands/transfer_to_human_agents.py +1 -1
- fastworkflow/intent_clarification_agent.py +131 -0
- fastworkflow/mcp_server.py +3 -3
- fastworkflow/run/__main__.py +33 -40
- fastworkflow/run_fastapi_mcp/README.md +373 -0
- fastworkflow/run_fastapi_mcp/__main__.py +1300 -0
- fastworkflow/run_fastapi_mcp/conversation_store.py +391 -0
- fastworkflow/run_fastapi_mcp/jwt_manager.py +341 -0
- fastworkflow/run_fastapi_mcp/mcp_specific.py +103 -0
- fastworkflow/run_fastapi_mcp/redoc_2_standalone_html.py +40 -0
- fastworkflow/run_fastapi_mcp/utils.py +517 -0
- fastworkflow/train/__main__.py +1 -1
- fastworkflow/utils/chat_adapter.py +99 -0
- fastworkflow/utils/python_utils.py +4 -4
- fastworkflow/utils/react.py +258 -0
- fastworkflow/utils/signatures.py +338 -139
- fastworkflow/workflow.py +1 -5
- fastworkflow/workflow_agent.py +185 -133
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/METADATA +16 -18
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/RECORD +40 -30
- fastworkflow/run_agent/__main__.py +0 -294
- fastworkflow/run_agent/agent_module.py +0 -194
- /fastworkflow/{run_agent → run_fastapi_mcp}/__init__.py +0 -0
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/LICENSE +0 -0
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/WHEEL +0 -0
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/entry_points.txt +0 -0
fastworkflow/run_fastapi_mcp/conversation_store.py (new file)

@@ -0,0 +1,391 @@

```python
"""
Conversation persistence layer for FastWorkflow.

Provides Rdict-backed storage for multi-turn conversations with
AI-generated topics and summaries.
"""

import json
import os
import time
from typing import Any, Optional

import dspy
from pydantic import BaseModel
from speedict import Rdict

from fastworkflow.utils.logging import logger
from fastworkflow.utils.dspy_utils import get_lm


def extract_turns_from_history(conversation_history: 'dspy.History') -> list[dict[str, Any]]:
    """
    Extract turns from dspy.History format to Rdict format.

    Both formats share the same per-turn shape:
    [
        {
            "conversation summary": "...",
            "conversation_traces": "...",
            "feedback": {...} or None
        },
        ...
    ]

    Note: dspy.History stores conversation summaries, detailed traces, and
    optional feedback. All fields are extracted and preserved for complete
    conversation persistence.
    """
    return [
        {
            "conversation summary": msg_dict.get("conversation summary"),
            "conversation_traces": msg_dict.get("conversation_traces"),
            "feedback": msg_dict.get("feedback"),  # Preserve existing feedback
        }
        for msg_dict in conversation_history.messages
    ]


def restore_history_from_turns(turns: list[dict[str, Any]]) -> 'dspy.History':
    """
    Restore dspy.History from Rdict turns.

    Converts back from Rdict format to dspy.History format, restoring the
    conversation summary, conversation_traces, and feedback for each turn.
    """
    messages = [
        {
            "conversation summary": turn.get("conversation summary"),
            "conversation_traces": turn.get("conversation_traces"),
            "feedback": turn.get("feedback"),  # Restore feedback if present
        }
        for turn in turns
    ]
    return dspy.History(messages=messages)


class ConversationSummary(BaseModel):
    """Summary of a conversation"""
    conversation_id: int
    topic: str
    summary: str
    created_at: int
    updated_at: int


class ConversationStore:
    """Rdict-backed conversation persistence per user"""

    def __init__(self, channel_id: str, base_folder: str):
        self.channel_id = channel_id
        self.db_path = os.path.join(base_folder, f"{channel_id}.rdb")
        os.makedirs(base_folder, exist_ok=True)

    def _get_db(self) -> Rdict:
        """Get an Rdict instance"""
        return Rdict(self.db_path)

    def get_last_conversation_id(self) -> Optional[int]:
        """Get the last conversation ID for this user"""
        db = self._get_db()
        try:
            meta = db.get("meta", {})
            return meta.get("last_conversation_id")
        finally:
            db.close()

    def _increment_conversation_id(self, db: Rdict) -> int:
        """Increment and return the new conversation ID"""
        meta = db.get("meta", {"last_conversation_id": 0})
        new_id = meta["last_conversation_id"] + 1
        meta["last_conversation_id"] = new_id
        db["meta"] = meta
        return new_id

    def reserve_next_conversation_id(self) -> int:
        """Reserve the next conversation ID by incrementing the counter without creating a conversation"""
        db = self._get_db()
        try:
            return self._increment_conversation_id(db)
        finally:
            db.close()

    def _ensure_unique_topic(self, db: Rdict, candidate_topic: str) -> str:
        """Ensure the topic is unique per user, using case/whitespace-insensitive comparison"""
        # Collect all existing topics, normalized for comparison
        meta = db.get("meta", {"last_conversation_id": 0})
        existing_topics = set()
        for i in range(1, meta.get("last_conversation_id", 0) + 1):
            conv_key = f"conv:{i}"
            if conv_key in db:
                existing_topics.add(db[conv_key].get("topic", "").lower().strip())

        # On collision, append an increasing numeric suffix
        collision_count = 0
        final_topic = candidate_topic
        while final_topic.lower().strip() in existing_topics:
            collision_count += 1
            final_topic = f"{candidate_topic} {collision_count}"

        return final_topic

    def save_conversation(
        self,
        topic: str,
        summary: str,
        turns: list[dict[str, Any]],
        conversation_id: Optional[int] = None
    ) -> int:
        """
        Save a conversation and return its ID.

        Args:
            topic: Conversation topic
            summary: Conversation summary
            turns: List of conversation turns
            conversation_id: Optional specific ID to use. If None, increments to get the next ID.

        Returns:
            The conversation ID used
        """
        db = self._get_db()
        try:
            if conversation_id is not None:
                # Use the specified ID (assumes it is valid and reserved)
                conv_id = conversation_id
            else:
                # Increment to get the next ID
                conv_id = self._increment_conversation_id(db)

            unique_topic = self._ensure_unique_topic(db, topic)

            conversation = {
                "topic": unique_topic,
                "summary": summary,
                "created_at": int(time.time() * 1000),
                "updated_at": int(time.time() * 1000),
                "turns": turns
            }
            db[f"conv:{conv_id}"] = conversation
            return conv_id
        finally:
            db.close()

    def get_conversation(self, conv_id: int) -> Optional[dict[str, Any]]:
        """Get a conversation by ID"""
        db = self._get_db()
        try:
            return db.get(f"conv:{conv_id}")
        finally:
            db.close()

    def get_conversation_by_topic(self, topic: str) -> Optional[tuple[int, dict[str, Any]]]:
        """Get conversation ID and data by topic (case/whitespace insensitive)"""
        db = self._get_db()
        try:
            meta = db.get("meta", {"last_conversation_id": 0})
            normalized_topic = topic.lower().strip()

            for i in range(1, meta.get("last_conversation_id", 0) + 1):
                conv_key = f"conv:{i}"
                if conv_key in db:
                    conv = db[conv_key]
                    if conv.get("topic", "").lower().strip() == normalized_topic:
                        return i, conv
            return None
        finally:
            db.close()

    def list_conversations(self, limit: int) -> list[ConversationSummary]:
        """List conversations ordered by updated_at descending, up to limit"""
        db = self._get_db()
        try:
            meta = db.get("meta", {"last_conversation_id": 0})
            conversations = []

            for i in range(1, meta.get("last_conversation_id", 0) + 1):
                conv_key = f"conv:{i}"
                if conv_key in db:
                    conv = db[conv_key]
                    conversations.append(
                        ConversationSummary(
                            conversation_id=i,
                            topic=conv.get("topic", ""),
                            summary=conv.get("summary", ""),
                            created_at=conv.get("created_at", 0),
                            updated_at=conv.get("updated_at", 0)
                        )
                    )

            # Sort by updated_at descending and apply the limit
            conversations.sort(key=lambda c: c.updated_at, reverse=True)
            return conversations[:limit]
        finally:
            db.close()

    def update_conversation(
        self,
        conv_id: int,
        topic: str,
        summary: str,
        turns: list[dict[str, Any]]
    ) -> None:
        """Update an existing conversation with a new topic, summary, and turns"""
        db = self._get_db()
        try:
            conv_key = f"conv:{conv_id}"
            if conv_key not in db:
                raise ValueError(f"Conversation {conv_id} not found")

            conv = db[conv_key]
            unique_topic = self._ensure_unique_topic(db, topic)

            # Preserve created_at; update the other fields
            conv["topic"] = unique_topic
            conv["summary"] = summary
            conv["updated_at"] = int(time.time() * 1000)
            conv["turns"] = turns

            db[conv_key] = conv
        finally:
            db.close()

    def update_conversation_topic_summary(
        self,
        conv_id: int,
        topic: str,
        summary: str
    ) -> None:
        """
        Update only the topic and summary of an existing conversation.
        Used when finalizing a conversation (turns already saved incrementally).
        """
        db = self._get_db()
        try:
            conv_key = f"conv:{conv_id}"
            if conv_key not in db:
                raise ValueError(f"Conversation {conv_id} not found")

            conv = db[conv_key]
            unique_topic = self._ensure_unique_topic(db, topic)

            # Only update topic, summary, and timestamp; preserve the turns
            conv["topic"] = unique_topic
            conv["summary"] = summary
            conv["updated_at"] = int(time.time() * 1000)

            db[conv_key] = conv
        finally:
            db.close()

    def save_conversation_turns(
        self,
        conversation_id: int,
        turns: list[dict[str, Any]]
    ) -> int:
        """
        Create a new conversation with a placeholder topic/summary, or update existing turns.
        Used for incremental saves without generating a topic/summary.

        Args:
            conversation_id: The conversation ID to use
            turns: List of conversation turns

        Returns:
            The conversation ID used
        """
        db = self._get_db()
        try:
            conv_key = f"conv:{conversation_id}"

            if conv_key in db:
                # Conversation exists; just update the turns
                conv = db[conv_key]
                conv["updated_at"] = int(time.time() * 1000)
                conv["turns"] = turns
                db[conv_key] = conv
            else:
                # Create a new conversation with a placeholder topic/summary
                conversation = {
                    "topic": "",  # Will be generated later
                    "summary": "",  # Will be generated later
                    "created_at": int(time.time() * 1000),
                    "updated_at": int(time.time() * 1000),
                    "turns": turns
                }
                db[conv_key] = conversation

            return conversation_id
        finally:
            db.close()

    # NOTE: update_turn_feedback() removed - feedback is now saved via
    # save_conversation_turns() in the incremental save flow, after modifying
    # conversation_history in memory.

    def get_all_conversations_for_dump(self) -> list[dict[str, Any]]:
        """Get all conversations for an admin dump"""
        db = self._get_db()
        try:
            meta = db.get("meta", {"last_conversation_id": 0})
            conversations = []

            for i in range(1, meta.get("last_conversation_id", 0) + 1):
                conv_key = f"conv:{i}"
                if conv_key in db:
                    conv = db[conv_key]
                    conversations.append({
                        "channel_id": self.channel_id,
                        "conversation_id": i,
                        **conv
                    })

            return conversations
        finally:
            db.close()


def generate_topic_and_summary(turns: list[dict[str, Any]]) -> tuple[str, str]:
    """
    Generate a topic and summary for a conversation using DSPy.

    Only passes conversation summaries (not verbose traces) to the AI model
    for better-quality topic/summary generation.
    """
    class TopicSummarySignature(dspy.Signature):
        """Generate a concise topic and summary for a conversation"""
        conversation_turns: str = dspy.InputField(desc="JSON representation of conversation turns")
        topic: str = dspy.OutputField(desc="Short topic (3-6 words)")
        summary: str = dspy.OutputField(desc="Brief summary paragraph")

    # Extract only the summaries for topic/summary generation (not the verbose traces)
    summaries_only = [
        {"conversation summary": turn.get("conversation summary", "")}
        for turn in turns
    ]
    turns_str = json.dumps(summaries_only, indent=2)

    # Configure DSPy with the conversation-store LM using a context manager
    lm = get_lm("LLM_CONVERSATION_STORE", "LITELLM_API_KEY_CONVERSATION_STORE")
    with dspy.context(lm=lm):
        generator = dspy.ChainOfThought(TopicSummarySignature)
        result = generator(conversation_turns=turns_str)
    return result.topic, result.summary
```
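For orientation, here is a minimal sketch of how this new module's API might be used end to end, based only on the code above. The channel ID, folder path, and turn content are made up for illustration; it assumes `speedict` and `dspy` are installed and that the `LLM_CONVERSATION_STORE` / `LITELLM_API_KEY_CONVERSATION_STORE` settings read by `get_lm` are configured, since `generate_topic_and_summary` makes an LLM call.

```python
# Illustrative sketch only: channel ID, paths, and turn content are hypothetical.
from fastworkflow.run_fastapi_mcp.conversation_store import (
    ConversationStore,
    generate_topic_and_summary,
    restore_history_from_turns,
)

store = ConversationStore(channel_id="user-123", base_folder="/tmp/conversations")

# Reserve an ID up front so turns can be saved incrementally under it.
conv_id = store.reserve_next_conversation_id()

turns = [{
    "conversation summary": "User asked about the status of an order.",
    "conversation_traces": "(detailed trace text)",
    "feedback": None,
}]
store.save_conversation_turns(conv_id, turns)  # placeholder topic/summary for now

# On conversation end, generate the topic/summary (LLM call) and finalize;
# update_conversation_topic_summary leaves the incrementally saved turns intact.
topic, summary = generate_topic_and_summary(turns)
store.update_conversation_topic_summary(conv_id, topic, summary)

# Browse recent conversations and rebuild a dspy.History for the next session.
for conv in store.list_conversations(limit=10):
    print(conv.conversation_id, conv.topic)
history = restore_history_from_turns(store.get_conversation(conv_id)["turns"])
```

Note the split between `save_conversation_turns` (cheap, incremental, no LLM) and the finalization step: topic/summary generation is deferred until the conversation ends, so each turn can be persisted without an AI round trip.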