fusesell-1.3.42-py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- fusesell-1.3.42.dist-info/METADATA +873 -0
- fusesell-1.3.42.dist-info/RECORD +35 -0
- fusesell-1.3.42.dist-info/WHEEL +5 -0
- fusesell-1.3.42.dist-info/entry_points.txt +2 -0
- fusesell-1.3.42.dist-info/licenses/LICENSE +21 -0
- fusesell-1.3.42.dist-info/top_level.txt +2 -0
- fusesell.py +20 -0
- fusesell_local/__init__.py +37 -0
- fusesell_local/api.py +343 -0
- fusesell_local/cli.py +1480 -0
- fusesell_local/config/__init__.py +11 -0
- fusesell_local/config/default_email_templates.json +34 -0
- fusesell_local/config/default_prompts.json +19 -0
- fusesell_local/config/default_scoring_criteria.json +154 -0
- fusesell_local/config/prompts.py +245 -0
- fusesell_local/config/settings.py +277 -0
- fusesell_local/pipeline.py +978 -0
- fusesell_local/stages/__init__.py +19 -0
- fusesell_local/stages/base_stage.py +603 -0
- fusesell_local/stages/data_acquisition.py +1820 -0
- fusesell_local/stages/data_preparation.py +1238 -0
- fusesell_local/stages/follow_up.py +1728 -0
- fusesell_local/stages/initial_outreach.py +2972 -0
- fusesell_local/stages/lead_scoring.py +1452 -0
- fusesell_local/utils/__init__.py +36 -0
- fusesell_local/utils/agent_context.py +552 -0
- fusesell_local/utils/auto_setup.py +361 -0
- fusesell_local/utils/birthday_email_manager.py +467 -0
- fusesell_local/utils/data_manager.py +4857 -0
- fusesell_local/utils/event_scheduler.py +959 -0
- fusesell_local/utils/llm_client.py +342 -0
- fusesell_local/utils/logger.py +203 -0
- fusesell_local/utils/output_helpers.py +2443 -0
- fusesell_local/utils/timezone_detector.py +914 -0
- fusesell_local/utils/validators.py +436 -0
fusesell_local/pipeline.py
@@ -0,0 +1,978 @@
"""
FuseSell Pipeline Orchestrator
Manages the execution of all pipeline stages in sequence.
"""

from typing import Dict, Any, List, Optional
import copy
import time
from datetime import datetime
import uuid

from .stages import (
    DataAcquisitionStage,
    DataPreparationStage,
    LeadScoringStage,
    InitialOutreachStage,
    FollowUpStage
)
from .utils.data_manager import LocalDataManager
from .utils.logger import get_logger, log_execution_start, log_execution_complete
from .utils.validators import InputValidator


class FuseSellPipeline:
    """
    Main pipeline orchestrator for FuseSell local execution.
    Manages stage execution, data flow, and error handling.
    """

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize pipeline with configuration.

        Args:
            config: Pipeline configuration dictionary
        """
        self.config = config
        self.execution_id = config.get('execution_id') or self._generate_execution_id()
        self.logger = get_logger("pipeline")

        # Initialize components
        self.data_manager = LocalDataManager(config.get('data_dir', './fusesell_data'))
        self.validator = InputValidator()

        # Initialize stages
        self.stages = self._initialize_stages()

        # Execution state
        self.stage_results = {}
        self.start_time = None
        self.end_time = None

    def _initialize_stages(self) -> List:
        """
        Initialize all pipeline stages.

        Returns:
            List of initialized stage instances
        """
        stages = []

        # Only initialize stages that are not skipped
        skip_stages = self.config.get('skip_stages', [])
        stop_after = self.config.get('stop_after')

        stage_classes = [
            ('data_acquisition', DataAcquisitionStage),
            ('data_preparation', DataPreparationStage),
            ('lead_scoring', LeadScoringStage),
            ('initial_outreach', InitialOutreachStage),
            ('follow_up', FollowUpStage)
        ]

        for stage_name, stage_class in stage_classes:
            if stage_name not in skip_stages:
                try:
                    # Pass shared data_manager instance to avoid multiple database initializations
                    stage = stage_class(self.config, self.data_manager)
                    stages.append(stage)
                    self.logger.debug(f"Initialized {stage_name} stage with shared data manager")
                except Exception as e:
                    self.logger.error(f"Failed to initialize {stage_name} stage: {str(e)}")
                    raise

            # Stop adding stages if we've reached the stop point
            if stop_after == stage_name:
                break

        return stages

    def execute(self) -> Dict[str, Any]:
        """
        Execute the complete pipeline or continue existing execution.

        Returns:
            Dictionary containing execution results
        """
        self.start_time = time.time()

        try:
            # Check if this is a continuation
            if self.config.get('continue_execution'):
                return self._continue_execution()

            # New execution flow
            # Validate configuration
            self._validate_configuration()

            # Log execution start
            log_execution_start(self.execution_id, self.config)

            # Save execution record
            self._save_execution_record()

            # Create execution context
            context = self._create_execution_context()

            # Execute stages sequentially
            runtime_index = 0
            for stage in self.stages:
                # Add runtime_index to context for operation tracking
                context['runtime_index'] = runtime_index

                stage_result = self._execute_stage(stage, context)

                # Update context with stage results
                context['stage_results'][stage.stage_name] = stage_result

                # Update task runtime index
                try:
                    self.data_manager.update_task_status(
                        task_id=self.execution_id,
                        status="running",
                        runtime_index=runtime_index
                    )
                except Exception as e:
                    self.logger.warning(f"Failed to update task runtime index: {str(e)}")

                # Check if pipeline should stop
                if stage.should_stop_pipeline(stage_result):
                    self.logger.warning(f"Pipeline stopped after {stage.stage_name} stage")
                    break

                runtime_index += 1

            # Compile final results
            results = self._compile_results(context)

            # Note: executions is now a view - status updated via llm_worker_task

            # Update task status (correct schema)
            try:
                self.data_manager.update_task_status(
                    task_id=self.execution_id,
                    status="completed",
                    runtime_index=runtime_index
                )
            except Exception as e:
                self.logger.warning(f"Failed to update final task status: {str(e)}")

            return results

        except Exception as e:
            self.logger.error(f"Pipeline execution failed: {str(e)}")

            # Update task status to failed
            try:
                self.data_manager.update_task_status(
                    task_id=self.execution_id,
                    status="failed",
                    runtime_index=0
                )
            except Exception as update_error:
                self.logger.warning(f"Failed to update task status to failed: {str(update_error)}")

            error_result = {
                'error': str(e),
                'error_type': type(e).__name__,
                'stage_results': self.stage_results
            }

            return error_result

        finally:
            self.end_time = time.time()
            duration = self.end_time - self.start_time if self.start_time else 0

            status = 'completed' if not hasattr(self, '_failed') else 'failed'
            log_execution_complete(self.execution_id, status, duration)

            # Generate performance analytics
            self._log_performance_analytics(duration)

    def _log_performance_analytics(self, total_duration: float) -> None:
        """
        Log detailed performance analytics for the pipeline execution.

        Args:
            total_duration: Total pipeline execution time in seconds
        """
        try:
            # Collect timing data from stage results
            stage_timings = []
            total_stage_time = 0.0

            for stage_name, result in self.stage_results.items():
                if isinstance(result, dict) and 'timing' in result:
                    timing = result['timing']
                    duration = timing.get('duration_seconds', 0.0)
                    stage_timings.append({
                        'stage': stage_name,
                        'duration': duration,
                        'percentage': (duration / total_duration * 100) if total_duration > 0 else 0
                    })
                    total_stage_time += duration

            # Log performance summary
            self.logger.info("=" * 60)
            self.logger.info(f"PERFORMANCE ANALYTICS - Execution {self.execution_id}")
            self.logger.info("=" * 60)
            self.logger.info(f"Total Pipeline Duration: {total_duration:.2f} seconds")
            self.logger.info(f"Total Stage Duration: {total_stage_time:.2f} seconds")

            if total_duration > 0:
                overhead = total_duration - total_stage_time
                overhead_pct = (overhead / total_duration * 100)
                self.logger.info(f"Pipeline Overhead: {overhead:.2f} seconds ({overhead_pct:.1f}%)")

            self.logger.info("-" * 40)
            self.logger.info("Stage Performance Breakdown:")

            for timing in sorted(stage_timings, key=lambda x: x['duration'], reverse=True):
                self.logger.info(f"  {timing['stage']:<20}: {timing['duration']:>6.2f}s ({timing['percentage']:>5.1f}%)")

            # Performance insights
            if stage_timings:
                slowest_stage = max(stage_timings, key=lambda x: x['duration'])
                fastest_stage = min(stage_timings, key=lambda x: x['duration'])

                self.logger.info("-" * 40)
                self.logger.info(f"Slowest Stage: {slowest_stage['stage']} ({slowest_stage['duration']:.2f}s)")
                self.logger.info(f"Fastest Stage: {fastest_stage['stage']} ({fastest_stage['duration']:.2f}s)")

                if slowest_stage['duration'] > 0 and fastest_stage['duration'] > 0:
                    ratio = slowest_stage['duration'] / fastest_stage['duration']
                    self.logger.info(f"Performance Ratio: {ratio:.1f}x difference")

            # Validation: Total time should roughly equal sum of stage durations
            if total_duration > 0:
                time_discrepancy = abs(total_duration - total_stage_time)
                discrepancy_percentage = (time_discrepancy / total_duration * 100)

                self.logger.info("-" * 40)
                self.logger.info("TIMING VALIDATION:")
                if discrepancy_percentage < 5.0:
                    self.logger.info(
                        f"[OK] Timing validation PASSED (discrepancy: {discrepancy_percentage:.1f}%)"
                    )
                else:
                    self.logger.warning(
                        f"[WARN] Timing validation WARNING (discrepancy: {discrepancy_percentage:.1f}%)"
                    )
                    self.logger.warning(
                        f"  Expected ~{total_stage_time:.2f}s, got {total_duration:.2f}s"
                    )

            self.logger.info("=" * 60)

        except Exception as e:
            self.logger.warning(f"Failed to generate performance analytics: {str(e)}")

    def _generate_performance_analytics(self, total_duration: float) -> Dict[str, Any]:
        """
        Generate performance analytics data for inclusion in results.

        Args:
            total_duration: Total pipeline execution time in seconds

        Returns:
            Performance analytics dictionary
        """
        try:
            # Collect timing data from stage results
            stage_timings = []
            total_stage_time = 0.0

            for stage_name, result in self.stage_results.items():
                if isinstance(result, dict) and 'timing' in result:
                    timing = result['timing']
                    duration = timing.get('duration_seconds', 0.0)
                    stage_timings.append({
                        'stage': stage_name,
                        'duration_seconds': duration,
                        'percentage_of_total': (duration / total_duration * 100) if total_duration > 0 else 0,
                        'start_time': timing.get('start_time'),
                        'end_time': timing.get('end_time')
                    })
                    total_stage_time += duration

            # Calculate overhead
            overhead = total_duration - total_stage_time
            overhead_percentage = (overhead / total_duration * 100) if total_duration > 0 else 0

            # Find performance insights
            insights = {}
            if stage_timings:
                slowest_stage = max(stage_timings, key=lambda x: x['duration_seconds'])
                fastest_stage = min(stage_timings, key=lambda x: x['duration_seconds'])

                insights = {
                    'slowest_stage': {
                        'name': slowest_stage['stage'],
                        'duration_seconds': slowest_stage['duration_seconds']
                    },
                    'fastest_stage': {
                        'name': fastest_stage['stage'],
                        'duration_seconds': fastest_stage['duration_seconds']
                    },
                    'performance_ratio': (slowest_stage['duration_seconds'] / fastest_stage['duration_seconds'])
                    if fastest_stage['duration_seconds'] > 0 else 0
                }

            return {
                'total_duration_seconds': total_duration,
                'total_stage_duration_seconds': total_stage_time,
                'pipeline_overhead_seconds': overhead,
                'pipeline_overhead_percentage': overhead_percentage,
                'stage_count': len(stage_timings),
                'average_stage_duration': total_stage_time / len(stage_timings) if stage_timings else 0,
                'stage_timings': stage_timings,
                'performance_insights': insights,
                'generated_at': datetime.now().isoformat()
            }

        except Exception as e:
            self.logger.warning(f"Failed to generate performance analytics data: {str(e)}")
            return {
                'error': str(e),
                'total_duration_seconds': total_duration,
                'generated_at': datetime.now().isoformat()
            }

    def _validate_configuration(self) -> None:
        """
        Validate pipeline configuration.

        Raises:
            ValueError: If configuration is invalid
        """
        errors = self.validator.validate_config(self.config)
        if errors:
            error_msg = "Configuration validation failed:\n" + "\n".join(f"- {error}" for error in errors)
            raise ValueError(error_msg)

    def _create_execution_context(self) -> Dict[str, Any]:
        """
        Create execution context for pipeline stages.

        Returns:
            Execution context dictionary
        """
        return {
            'execution_id': self.execution_id,
            'config': self.config,
            'input_data': self._extract_input_data(),
            'stage_results': {},
            'data_manager': self.data_manager,
            'start_time': self.start_time
        }

    def _extract_input_data(self) -> Dict[str, Any]:
        """
        Extract input data from configuration.

        Returns:
            Input data dictionary
        """
        return {
            'org_id': self.config['org_id'],
            'org_name': self.config['org_name'],
            'team_id': self.config.get('team_id'),
            'team_name': self.config.get('team_name'),
            'project_code': self.config.get('project_code'),
            'staff_name': self.config.get('staff_name', 'Sales Team'),
            'customer_name': self.config.get('customer_name', ''),
            'language': self.config.get('language', 'english'),
            # Data sources (matching executor schema)
            'input_website': self.config.get('input_website', ''),
            'input_description': self.config.get('input_description', ''),
            'input_business_card': self.config.get('input_business_card', ''),
            'input_linkedin_url': self.config.get('input_linkedin_url', ''),
            'input_facebook_url': self.config.get('input_facebook_url', ''),
            'input_freetext': self.config.get('input_freetext', ''),

            # Context fields
            'customer_id': self.config.get('customer_id', 'null'),
            'full_input': self.config.get('full_input'),

            # Action and continuation fields (for server executor compatibility)
            'action': self.config.get('action', 'draft_write'),
            'selected_draft_id': self.config.get('selected_draft_id', ''),
            'reason': self.config.get('reason', ''),
            'recipient_address': self.config.get('recipient_address', ''),
            'recipient_name': self.config.get('recipient_name', ''),
            'customer_email': self.config.get('customer_email', ''),
            'interaction_type': self.config.get('interaction_type', 'email'),
            'human_action_id': self.config.get('human_action_id', ''),

            # Scheduling preferences
            'send_immediately': self.config.get('send_immediately', False),
            'customer_timezone': self.config.get('customer_timezone', ''),
            'business_hours_start': self.config.get('business_hours_start', '08:00'),
            'business_hours_end': self.config.get('business_hours_end', '20:00'),
            'delay_hours': self.config.get('delay_hours', 2)
        }

    def _execute_stage(self, stage, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute a single pipeline stage with business logic validation.

        Args:
            stage: Stage instance to execute
            context: Execution context

        Returns:
            Stage execution result
        """
        stage_start_time = time.time()

        operation_id = None
        try:
            self.logger.info(f"Executing {stage.stage_name} stage")

            # Apply business logic pre-checks
            if not self._should_execute_stage(stage.stage_name, context):
                skip_result = {
                    'status': 'skipped',
                    'reason': 'Business logic condition not met',
                    'stage': stage.stage_name,
                    'timestamp': datetime.now().isoformat()
                }
                self.stage_results[stage.stage_name] = skip_result
                return skip_result

            # Prepare stage input
            stage_input = self._prepare_stage_input(stage.stage_name, context)
            validation_errors = self.validator.validate_stage_input(stage.stage_name, stage_input)

            if validation_errors:
                error_msg = f"Stage input validation failed: {'; '.join(validation_errors)}"
                raise ValueError(error_msg)

            # Create operation record (server-compatible tracking)
            try:
                runtime_index = context.get('runtime_index', 0)
                chain_index = len(self.stage_results)  # Current position in chain

                operation_id = self.data_manager.create_operation(
                    task_id=self.execution_id,
                    executor_name=stage.stage_name,
                    runtime_index=runtime_index,
                    chain_index=chain_index,
                    input_data=stage_input
                )

                # Add operation_id to context for stage use
                context['operation_id'] = operation_id

            except Exception as e:
                self.logger.warning(f"Failed to create operation record: {str(e)}")
                operation_id = None

            # Execute stage with timing
            result = stage.execute_with_timing(context)

            # Update operation with success result
            if operation_id:
                try:
                    self.data_manager.update_operation_status(
                        operation_id=operation_id,
                        execution_status='done',
                        output_data=result
                    )
                except Exception as e:
                    self.logger.warning(f"Failed to update operation status: {str(e)}")

            # Apply business logic post-checks
            if self._should_stop_after_stage(stage.stage_name, result, context):
                result['pipeline_stop'] = True
                result['stop_reason'] = self._get_stop_reason(stage.stage_name, result)

            # Save stage result if configured (backward compatibility)
            if self.config.get('save_intermediate', True):
                stage.save_stage_result(context, result)

            # Store result
            self.stage_results[stage.stage_name] = result

            return result

        except Exception as e:
            stage.log_stage_error(context, e)

            error_result = stage.create_error_result(e, context)
            self.stage_results[stage.stage_name] = error_result

            # Update operation with failure result
            if operation_id:
                try:
                    error_output = {
                        'error': str(e),
                        'error_type': type(e).__name__,
                        'stage': stage.stage_name,
                        'timestamp': datetime.now().isoformat()
                    }
                    self.data_manager.update_operation_status(
                        operation_id=operation_id,
                        execution_status='failed',
                        output_data=error_output
                    )
                except Exception as update_error:
                    self.logger.warning(f"Failed to update operation failure status: {str(update_error)}")

            # Save error result (backward compatibility)
            if self.config.get('save_intermediate', True):
                stage.save_stage_result(context, error_result)

            raise

        finally:
            stage_duration = time.time() - stage_start_time
            self.logger.debug(f"Stage {stage.stage_name} completed in {stage_duration:.2f} seconds")

    def _prepare_stage_input(self, stage_name: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Prepare input data for a specific stage.

        Args:
            stage_name: Name of the stage
            context: Execution context

        Returns:
            Stage-specific input data
        """
        base_input = context['input_data'].copy()
        stage_results = context['stage_results']

        # Add stage-specific data based on previous results
        if stage_name == 'data_preparation' and 'data_acquisition' in stage_results:
            acquisition_result = stage_results['data_acquisition']
            if acquisition_result.get('status') == 'success':
                base_input['raw_customer_data'] = acquisition_result.get('data', {})

        elif stage_name == 'lead_scoring' and 'data_preparation' in stage_results:
            prep_result = stage_results['data_preparation']
            if prep_result.get('status') == 'success':
                prep_data = prep_result.get('data', {})
                base_input.update(prep_data)

        elif stage_name == 'initial_outreach':
            # Combine data from previous stages
            customer_data = {}
            if 'data_preparation' in stage_results:
                prep_data = stage_results['data_preparation'].get('data', {})
                if isinstance(prep_data, dict):
                    customer_data = copy.deepcopy(prep_data)
            else:
                existing_customer_data = base_input.get('customer_data', {})
                if isinstance(existing_customer_data, dict):
                    customer_data = copy.deepcopy(existing_customer_data)

            if not isinstance(customer_data, dict):
                customer_data = {}

            primary_contact = dict(customer_data.get('primaryContact') or {})
            input_email = (
                base_input.get('recipient_address')
                or base_input.get('customer_email')
                or primary_contact.get('email')
                or customer_data.get('contact_email')
            )
            if input_email:
                primary_contact['email'] = input_email
                customer_data['contact_email'] = input_email

            input_name = (
                base_input.get('recipient_name')
                or base_input.get('customer_name')
                or primary_contact.get('name')
                or customer_data.get('contact_name')
            )
            if input_name:
                primary_contact['name'] = input_name
                customer_data['contact_name'] = input_name

            if primary_contact:
                customer_data['primaryContact'] = primary_contact

            base_input['customer_data'] = customer_data

            if 'lead_scoring' in stage_results:
                scoring_data = stage_results['lead_scoring'].get('data', {})
                base_input['lead_scores'] = scoring_data.get('scores', [])

        elif stage_name == 'follow_up':
            # Add interaction history from previous outreach
            if 'initial_outreach' in stage_results:
                outreach_data = stage_results['initial_outreach'].get('data', {})
                base_input['previous_interactions'] = [outreach_data]

        return base_input

    def _compile_results(self, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Compile final execution results.

        Args:
            context: Execution context

        Returns:
            Compiled results dictionary
        """
        duration = time.time() - self.start_time if self.start_time else 0

        # Determine overall status
        status = 'completed'
        for stage_result in self.stage_results.values():
            if stage_result.get('status') in ['error', 'fail']:
                status = 'failed'
                break

        # Extract key data from stage results
        customer_data = {}
        lead_scores = []
        email_drafts = []

        for stage_name, result in self.stage_results.items():
            if result.get('status') == 'success':
                data = result.get('data', {})

                if stage_name == 'data_preparation':
                    customer_data = data
                elif stage_name == 'lead_scoring':
                    lead_scores = data.get('scores', [])
                elif stage_name == 'initial_outreach':
                    drafts = data.get('drafts') or data.get('email_drafts') or []
                    email_drafts.extend(drafts)
                elif stage_name == 'follow_up':
                    drafts = data.get('drafts') or data.get('email_drafts') or []
                    email_drafts.extend(drafts)

        # Generate performance analytics
        performance_analytics = self._generate_performance_analytics(duration)

        return {
            'execution_id': self.execution_id,
            'status': status,
            'started_at': datetime.fromtimestamp(self.start_time).isoformat() if self.start_time else None,
            'completed_at': datetime.fromtimestamp(self.end_time).isoformat() if self.end_time else None,
            'duration_seconds': duration,
            'performance_analytics': performance_analytics,
            'config': {
                'org_id': self.config['org_id'],
                'org_name': self.config['org_name'],
                'input_website': self.config.get('input_website'),
                'language': self.config.get('language', 'english')
            },
            'stage_results': self.stage_results,
            'customer_data': customer_data,
            'lead_scores': lead_scores,
            'email_drafts': email_drafts,
            'stages_executed': list(self.stage_results.keys()),
            'stages_successful': [
                name for name, result in self.stage_results.items()
                if result.get('status') == 'success'
            ]
        }

    def _save_execution_record(self) -> None:
        """Save initial task record to database (server-compatible schema)."""
        try:
            # Create server-compatible request body
            request_body = {
                'org_id': self.config['org_id'],
                'org_name': self.config.get('org_name'),
                'team_id': self.config.get('team_id'),
                'team_name': self.config.get('team_name'),
                'project_code': self.config.get('project_code'),
                'staff_name': self.config.get('staff_name', 'Sales Team'),
                'language': self.config.get('language', 'english'),
                'customer_info': self.config.get('input_description', ''),
                'input_website': self.config.get('input_website', ''),
                'input_description': self.config.get('input_description', ''),
                'input_business_card': self.config.get('input_business_card', ''),
                'input_linkedin_url': self.config.get('input_linkedin_url', ''),
                'input_facebook_url': self.config.get('input_facebook_url', ''),
                'input_freetext': self.config.get('input_freetext', ''),
                'full_input': self.config.get('full_input', ''),
                'action': self.config.get('action', 'draft_write'),
                'execution_id': self.execution_id
            }

            # Save as task using server-compatible schema
            self.data_manager.create_task(
                task_id=self.execution_id,
                plan_id=self.config.get('plan_id', '569cdcbd-cf6d-4e33-b0b2-d2f6f15a0832'),
                org_id=self.config['org_id'],
                request_body=request_body,
                status="running"
            )

            # Note: executions is now a view that maps to llm_worker_task
            # No need for separate executions table save

        except Exception as e:
            self.logger.error(f"Failed to save task record: {str(e)}")
            raise

    def _update_execution_status(self, status: str, results: Dict[str, Any]) -> None:
        """
        Update execution status in database.

        Args:
            status: Execution status
            results: Execution results
        """
        try:
            self.data_manager.update_execution_status(
                execution_id=self.execution_id,
                status=status,
                results=results
            )
        except Exception as e:
            self.logger.warning(f"Failed to update execution status: {str(e)}")

    def _should_execute_stage(self, stage_name: str, context: Dict[str, Any]) -> bool:
        """
        Apply business logic to determine if a stage should execute.

        Args:
            stage_name: Name of the stage
            context: Execution context

        Returns:
            True if stage should execute, False otherwise
        """
        stage_results = context.get('stage_results', {})

        # Data Acquisition: Always execute if it's the first stage
        if stage_name == 'data_acquisition':
            return True

        # Data Preparation: Requires successful Data Acquisition
        if stage_name == 'data_preparation':
            acquisition_result = stage_results.get('data_acquisition', {})
            return acquisition_result.get('status') == 'success'

        # Lead Scoring: Requires successful Data Preparation
        if stage_name == 'lead_scoring':
            prep_result = stage_results.get('data_preparation', {})
            return prep_result.get('status') == 'success'

        # Initial Outreach: Requires successful Lead Scoring
        if stage_name == 'initial_outreach':
            scoring_result = stage_results.get('lead_scoring', {})
            return scoring_result.get('status') == 'success'

        # Follow Up: Requires successful Initial Outreach
        if stage_name == 'follow_up':
            outreach_result = stage_results.get('initial_outreach', {})
            return outreach_result.get('status') == 'success'

        return True

    def _should_stop_after_stage(self, stage_name: str, result: Dict[str, Any], context: Dict[str, Any]) -> bool:
        """
        Apply business logic to determine if pipeline should stop after a stage.

        Args:
            stage_name: Name of the completed stage
            result: Stage execution result
            context: Execution context

        Returns:
            True if pipeline should stop, False otherwise
        """
        # Critical stop condition: Data Acquisition website failure
        if stage_name == 'data_acquisition':
            if result.get('data', {}).get('status_info_website') == 'fail':
                self.logger.warning("Data Acquisition failed: website extraction failed")
                return True

        # Business rule: Stop after Initial Outreach draft generation (human-in-the-loop)
        if stage_name == 'initial_outreach':
            if result.get('status') == 'success':
                action = result.get('data', {}).get('action', 'draft_write')
                if action == 'draft_write':
                    self.logger.info("Stopping after Initial Outreach draft generation for human review")
                    return True

        # Stop on any error
        if result.get('status') in ['error', 'fail']:
            return True

        return False

    def _get_stop_reason(self, stage_name: str, result: Dict[str, Any]) -> str:
        """
        Get the reason for stopping the pipeline.

        Args:
            stage_name: Name of the stage that triggered the stop
            result: Stage execution result

        Returns:
            Human-readable stop reason
        """
        if stage_name == 'data_acquisition':
            if result.get('data', {}).get('status_info_website') == 'fail':
                return "Website extraction failed in Data Acquisition stage"

        if stage_name == 'initial_outreach':
            action = result.get('data', {}).get('action', 'draft_write')
            if action == 'draft_write':
                return "Draft generated in Initial Outreach - waiting for human review"

        if result.get('status') in ['error', 'fail']:
            return f"Stage {stage_name} failed with error: {result.get('error_message', 'Unknown error')}"

        return "Pipeline stopped due to business logic condition"

    def _continue_execution(self) -> Dict[str, Any]:
        """
        Continue an existing execution with a specific action.

        Returns:
            Dictionary containing execution results
        """
        continue_execution_id = self.config['continue_execution']
        action = self.config['action']

        self.logger.info(f"Continuing execution {continue_execution_id} with action: {action}")

        try:
            # Load existing execution data
            existing_execution = self.data_manager.get_execution(continue_execution_id)
            if not existing_execution:
                raise ValueError(f"Execution {continue_execution_id} not found")

            # Load previous stage results
            stage_results = self.data_manager.get_stage_results(continue_execution_id)

            # Create continuation context
            context = self._create_continuation_context(existing_execution, stage_results)

            # Determine which stage to execute based on action
            target_stage = self._get_target_stage_for_action(action)

            if not target_stage:
                raise ValueError(f"No suitable stage found for action: {action}")

            # Execute the specific action
            stage_result = self._execute_continuation_action(target_stage, context, action)

            # Update execution record
            self._update_execution_status('continued', {
                'action': action,
                'stage': target_stage.stage_name,
                'result': stage_result,
                'continued_from': continue_execution_id
            })

            return {
                'execution_id': self.execution_id,
                'continued_from': continue_execution_id,
                'action': action,
                'status': 'completed',
                'result': stage_result,
                'timestamp': datetime.now().isoformat()
            }

        except Exception as e:
            self.logger.error(f"Failed to continue execution: {str(e)}")
            error_result = {
                'execution_id': self.execution_id,
                'continued_from': continue_execution_id,
                'action': action,
                'status': 'failed',
                'error': str(e),
                'timestamp': datetime.now().isoformat()
            }
            self._update_execution_status('failed', error_result)
            return error_result

    def _create_continuation_context(self, existing_execution: Dict[str, Any], stage_results: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Create execution context for continuation.

        Args:
            existing_execution: Previous execution record
            stage_results: Previous stage results

        Returns:
            Continuation context
        """
        # Reconstruct stage results dictionary
        results_dict = {}
        for result in stage_results:
            stage_name = result['stage_name']
            results_dict[stage_name] = result['output_data']

        return {
            'execution_id': self.execution_id,
            'original_execution_id': existing_execution['execution_id'],
            'config': self.config,
            'original_config': existing_execution.get('config', {}),
            'stage_results': results_dict,
            'continuation_action': self.config['action'],
            'draft_id': self.config.get('draft_id'),
            'reason': self.config.get('reason', ''),
            'start_time': self.start_time
        }

    def _get_target_stage_for_action(self, action: str):
        """
        Get the appropriate stage for the given action.

        Args:
            action: Action to perform

        Returns:
            Stage instance or None
        """
        # Map actions to stages
        action_stage_map = {
            'draft_write': 'initial_outreach',
            'draft_rewrite': 'initial_outreach',
            'send': 'initial_outreach',
            'close': 'initial_outreach'
        }

        target_stage_name = action_stage_map.get(action)
        if not target_stage_name:
            return None

        # Find the stage instance
        for stage in self.stages:
            if stage.stage_name == target_stage_name:
                return stage

        return None

    def _execute_continuation_action(self, stage, context: Dict[str, Any], action: str) -> Dict[str, Any]:
        """
        Execute a specific continuation action.

        Args:
            stage: Stage instance to execute
            context: Continuation context
            action: Specific action to perform

        Returns:
            Action execution result
        """
        self.logger.info(f"Executing continuation action: {action}")

        # Add action-specific context
        context['action'] = action
        context['is_continuation'] = True

        # Execute the stage with continuation context and timing
        return stage.execute_with_timing(context)

    def _generate_execution_id(self) -> str:
        """Generate unique execution ID."""
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        unique_id = str(uuid.uuid4())[:8]
        return f"fusesell_{timestamp}_{unique_id}"