fusesell 1.3.42__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35) hide show
  1. fusesell-1.3.42.dist-info/METADATA +873 -0
  2. fusesell-1.3.42.dist-info/RECORD +35 -0
  3. fusesell-1.3.42.dist-info/WHEEL +5 -0
  4. fusesell-1.3.42.dist-info/entry_points.txt +2 -0
  5. fusesell-1.3.42.dist-info/licenses/LICENSE +21 -0
  6. fusesell-1.3.42.dist-info/top_level.txt +2 -0
  7. fusesell.py +20 -0
  8. fusesell_local/__init__.py +37 -0
  9. fusesell_local/api.py +343 -0
  10. fusesell_local/cli.py +1480 -0
  11. fusesell_local/config/__init__.py +11 -0
  12. fusesell_local/config/default_email_templates.json +34 -0
  13. fusesell_local/config/default_prompts.json +19 -0
  14. fusesell_local/config/default_scoring_criteria.json +154 -0
  15. fusesell_local/config/prompts.py +245 -0
  16. fusesell_local/config/settings.py +277 -0
  17. fusesell_local/pipeline.py +978 -0
  18. fusesell_local/stages/__init__.py +19 -0
  19. fusesell_local/stages/base_stage.py +603 -0
  20. fusesell_local/stages/data_acquisition.py +1820 -0
  21. fusesell_local/stages/data_preparation.py +1238 -0
  22. fusesell_local/stages/follow_up.py +1728 -0
  23. fusesell_local/stages/initial_outreach.py +2972 -0
  24. fusesell_local/stages/lead_scoring.py +1452 -0
  25. fusesell_local/utils/__init__.py +36 -0
  26. fusesell_local/utils/agent_context.py +552 -0
  27. fusesell_local/utils/auto_setup.py +361 -0
  28. fusesell_local/utils/birthday_email_manager.py +467 -0
  29. fusesell_local/utils/data_manager.py +4857 -0
  30. fusesell_local/utils/event_scheduler.py +959 -0
  31. fusesell_local/utils/llm_client.py +342 -0
  32. fusesell_local/utils/logger.py +203 -0
  33. fusesell_local/utils/output_helpers.py +2443 -0
  34. fusesell_local/utils/timezone_detector.py +914 -0
  35. fusesell_local/utils/validators.py +436 -0
@@ -0,0 +1,1452 @@
1
+ """
2
+ Lead Scoring Stage - Evaluate customer-product fit using weighted scoring
3
+ Converted from fusesell_lead_scoring.yml
4
+ """
5
+
6
+ import json
7
+ from typing import Dict, Any, List, Optional
8
+ from datetime import datetime
9
+ from .base_stage import BaseStage
10
+
11
+
12
+ class LeadScoringStage(BaseStage):
13
+ """
14
+ Lead Scoring stage for evaluating customer-product fit using weighted criteria.
15
+ Converts YAML workflow logic to Python implementation.
16
+ """
17
+
18
def execute(self, context: Dict[str, Any]) -> Dict[str, Any]:
    """
    Run the lead-scoring stage: score every candidate product against the
    customer profile produced by the data-preparation stage.

    Args:
        context: Pipeline execution context.

    Returns:
        Stage execution result (success payload, or an error result built
        by the base-stage error handler).
    """
    try:
        # Inputs: prepared customer data, scoring criteria, product catalog.
        prepared = self._get_preparation_data(context)
        criteria = self._get_scoring_criteria()
        candidate_products = self._get_products_for_evaluation()

        # Evaluate each candidate; drop evaluations that produced nothing.
        evaluations = []
        for candidate in candidate_products:
            evaluation = self._comprehensive_product_evaluation(prepared, candidate, criteria)
            if evaluation:
                evaluations.append(evaluation)

        # Aggregate/rank the scores, then attach a cross-product comparison.
        analysis = self._analyze_scoring_results(evaluations)
        analysis['product_comparison'] = self._compare_products(evaluations)

        # Persist the raw per-product scores.
        self._save_scoring_results(context, evaluations)

        payload = {
            'lead_scoring': evaluations,
            'analysis': analysis,
            'total_products_evaluated': len(candidate_products),
            'customer_id': context.get('execution_id'),
            'scoring_timestamp': datetime.now().isoformat()
        }

        # Persist the consolidated stage output as well.
        self.save_stage_result(context, payload)

        return self.create_success_result(payload, context)

    except Exception as e:
        self.log_stage_error(context, e)
        return self.handle_stage_error(e, context)
74
+
75
+ def _get_preparation_data(self, context: Dict[str, Any]) -> Dict[str, Any]:
76
+ """
77
+ Get data from the data preparation stage.
78
+
79
+ Args:
80
+ context: Execution context
81
+
82
+ Returns:
83
+ Data preparation results
84
+ """
85
+ # Try to get from stage results first
86
+ stage_results = context.get('stage_results', {})
87
+ if 'data_preparation' in stage_results:
88
+ return stage_results['data_preparation'].get('data', {})
89
+
90
+ # Fallback: create mock data for testing
91
+ return self._get_mock_preparation_data(context)
92
+
93
+ def _get_mock_preparation_data(self, context: Dict[str, Any]) -> Dict[str, Any]:
94
+ """
95
+ Get mock preparation data for testing.
96
+
97
+ Args:
98
+ context: Execution context
99
+
100
+ Returns:
101
+ Mock preparation data
102
+ """
103
+ input_data = context.get('input_data', {})
104
+ return {
105
+ 'companyInfo': {
106
+ 'name': input_data.get('customer_name', 'Test Company'),
107
+ 'industry': 'Technology',
108
+ 'size': 'Medium (50-200 employees)',
109
+ 'annualRevenue': '$2-5M',
110
+ 'address': input_data.get('customer_address', ''),
111
+ 'website': input_data.get('customer_website', '')
112
+ },
113
+ 'painPoints': [
114
+ {
115
+ 'category': 'Operational Efficiency',
116
+ 'description': 'Manual processes causing delays',
117
+ 'impact': 'High'
118
+ }
119
+ ],
120
+ 'primaryContact': {
121
+ 'name': input_data.get('contact_name', ''),
122
+ 'email': input_data.get('contact_email', ''),
123
+ 'phone': input_data.get('contact_phone', '')
124
+ }
125
+ }
126
+
127
def _get_scoring_criteria(self) -> List[Dict[str, Any]]:
    """
    Load scoring criteria for the configured organization.

    Lookup order: gs_company_criteria table (mirrors the original YAML
    workflow) -> local scoring-criteria config -> built-in defaults.
    Any load failure also falls back to the defaults.

    Returns:
        List of scoring criteria dicts.
    """
    try:
        org_id = self.config.get('org_id')

        if org_id:
            # Primary source: org-level criteria stored in the DB.
            db_criteria = self.data_manager.get_gs_company_criteria(org_id)
            if db_criteria:
                self.logger.info(f"Loaded {len(db_criteria)} scoring criteria from gs_company_criteria table for org: {org_id}")
                return db_criteria
            self.logger.warning(f"No scoring criteria found in gs_company_criteria for organization: {org_id}")

            # Secondary source: locally configured criteria for the org.
            local = self.data_manager.get_scoring_criteria(org_id)
            if local:
                self.logger.info(f"Loaded {len(local)} scoring criteria from local config for org: {org_id}")
                return local

        # No org configured, or neither source had criteria.
        self.logger.info("Using default scoring criteria")
        return self._get_default_scoring_criteria()

    except Exception as e:
        self.logger.warning(f"Failed to load scoring criteria: {str(e)}")
        return self._get_default_scoring_criteria()
160
+
161
+ def _get_default_scoring_criteria(self) -> List[Dict[str, Any]]:
162
+ """
163
+ Get default scoring criteria based on original YAML logic.
164
+
165
+ Returns:
166
+ Default scoring criteria
167
+ """
168
+ return [
169
+ {
170
+ 'name': 'industry_fit',
171
+ 'weight': 25,
172
+ 'definition': 'How well the customer\'s industry aligns with the product\'s target market',
173
+ 'guidelines': 'Score based on industry match and market penetration',
174
+ 'scoring_factors': [
175
+ 'Direct industry match: 80-100',
176
+ 'Related industry: 60-79',
177
+ 'Adjacent market: 40-59',
178
+ 'Different industry: 0-39'
179
+ ]
180
+ },
181
+ {
182
+ 'name': 'company_size',
183
+ 'weight': 20,
184
+ 'definition': 'How well the customer\'s company size fits the product\'s target segment',
185
+ 'guidelines': 'Score based on employee count and revenue alignment',
186
+ 'scoring_factors': [
187
+ 'Perfect size match: 80-100',
188
+ 'Good size match: 60-79',
189
+ 'Acceptable size: 40-59',
190
+ 'Size mismatch: 0-39'
191
+ ]
192
+ },
193
+ {
194
+ 'name': 'pain_points',
195
+ 'weight': 30,
196
+ 'definition': 'How well the product addresses the customer\'s identified pain points',
197
+ 'guidelines': 'Score based on pain point alignment and solution fit',
198
+ 'scoring_factors': [
199
+ 'Direct pain point solution: 80-100',
200
+ 'Partial pain point solution: 60-79',
201
+ 'Indirect solution: 40-59',
202
+ 'No pain point match: 0-39'
203
+ ]
204
+ },
205
+ {
206
+ 'name': 'product_fit',
207
+ 'weight': 15,
208
+ 'definition': 'Overall product-customer compatibility',
209
+ 'guidelines': 'Score based on feature alignment and use case match',
210
+ 'scoring_factors': [
211
+ 'Excellent feature match: 80-100',
212
+ 'Good feature match: 60-79',
213
+ 'Basic feature match: 40-59',
214
+ 'Poor feature match: 0-39'
215
+ ]
216
+ },
217
+ {
218
+ 'name': 'geographic_market_fit',
219
+ 'weight': 10,
220
+ 'definition': 'Geographic alignment between customer location and product availability',
221
+ 'guidelines': 'Score based on market presence and localization',
222
+ 'scoring_factors': [
223
+ 'Strong market presence: 80-100',
224
+ 'Moderate presence: 60-79',
225
+ 'Limited presence: 40-59',
226
+ 'No market presence: 0-39'
227
+ ]
228
+ }
229
+ ]
230
+
231
def _get_products_for_evaluation(self) -> List[Dict[str, Any]]:
    """
    Resolve the product catalog to score against the customer.

    Lookup order: team-level products (mirrors the original YAML
    workflow) -> organization-level products -> built-in mock products.
    Any load failure also falls back to the mocks.

    Returns:
        List of product dicts to evaluate.
    """
    try:
        team_id = self.config.get('team_id')
        org_id = self.config.get('org_id')

        if team_id:
            team_products = self.data_manager.get_products_by_team(team_id)
            if team_products:
                self.logger.info(f"Loaded {len(team_products)} products from team settings for team: {team_id}")
                return team_products
            self.logger.warning(f"No products found for team: {team_id}")

        if org_id:
            org_products = self.data_manager.get_products_by_org(org_id)
            if org_products:
                self.logger.info(f"Loaded {len(org_products)} products from organization: {org_id}")
                return org_products
            self.logger.warning(f"No products found for organization: {org_id}")

        # Neither team nor org produced a catalog.
        self.logger.warning("No team or org products found, using mock products")
        return self._get_mock_products()

    except Exception as e:
        self.logger.warning(f"Failed to load products: {str(e)}")
        return self._get_mock_products()
268
+
269
+ def _load_products_from_config(self) -> Optional[List[Dict[str, Any]]]:
270
+ """
271
+ Load products from configuration files.
272
+
273
+ Returns:
274
+ List of products or None if not available
275
+ """
276
+ try:
277
+ # This would load from local configuration files
278
+ # For now, return None to use mock data
279
+ return None
280
+
281
+ except Exception as e:
282
+ self.logger.error(f"Failed to load products from config: {str(e)}")
283
+ return None
284
+
285
+ def _get_mock_products(self) -> List[Dict[str, Any]]:
286
+ """
287
+ Get mock products for testing and dry run.
288
+
289
+ Returns:
290
+ List of mock products
291
+ """
292
+ return [
293
+ {
294
+ 'id': 'prod-12345678-1234-1234-1234-123456789012',
295
+ 'productName': 'FuseSell AI Pro',
296
+ 'shortDescription': 'AI-powered sales automation platform',
297
+ 'longDescription': 'Comprehensive sales automation solution with AI-driven lead scoring, email generation, and customer analysis',
298
+ 'painPointsSolved': [
299
+ 'Manual lead qualification processes',
300
+ 'Inconsistent email outreach',
301
+ 'Poor lead prioritization',
302
+ 'Time-consuming customer research'
303
+ ],
304
+ 'targetUsers': [
305
+ 'Sales teams',
306
+ 'Marketing professionals',
307
+ 'Business development managers'
308
+ ],
309
+ 'keyFeatures': [
310
+ 'AI lead scoring',
311
+ 'Automated email generation',
312
+ 'Customer data analysis',
313
+ 'Pipeline management'
314
+ ],
315
+ 'competitiveAdvantages': [
316
+ 'Advanced AI algorithms',
317
+ 'Local data processing',
318
+ 'Customizable workflows',
319
+ 'Integration capabilities'
320
+ ],
321
+ 'localization': [
322
+ 'North America',
323
+ 'Europe',
324
+ 'Asia-Pacific'
325
+ ],
326
+ 'marketInsights': {
327
+ 'targetIndustries': ['Technology', 'SaaS', 'Professional Services'],
328
+ 'idealCompanySize': '50-500 employees',
329
+ 'averageDealSize': '$10,000-$50,000'
330
+ }
331
+ },
332
+ {
333
+ 'id': 'prod-87654321-4321-4321-4321-210987654321',
334
+ 'productName': 'FuseSell Starter',
335
+ 'shortDescription': 'Entry-level sales automation tool',
336
+ 'longDescription': 'Basic sales automation features for small teams getting started with sales technology',
337
+ 'painPointsSolved': [
338
+ 'Manual contact management',
339
+ 'Basic email automation needs',
340
+ 'Simple lead tracking'
341
+ ],
342
+ 'targetUsers': [
343
+ 'Small sales teams',
344
+ 'Startups',
345
+ 'Solo entrepreneurs'
346
+ ],
347
+ 'keyFeatures': [
348
+ 'Contact management',
349
+ 'Email templates',
350
+ 'Basic reporting',
351
+ 'Lead tracking'
352
+ ],
353
+ 'competitiveAdvantages': [
354
+ 'Easy to use',
355
+ 'Affordable pricing',
356
+ 'Quick setup',
357
+ 'Essential features'
358
+ ],
359
+ 'localization': [
360
+ 'Global'
361
+ ],
362
+ 'marketInsights': {
363
+ 'targetIndustries': ['All industries'],
364
+ 'idealCompanySize': '1-50 employees',
365
+ 'averageDealSize': '$1,000-$5,000'
366
+ }
367
+ }
368
+ ]
369
+
370
def _comprehensive_product_evaluation(self, customer_data: Dict[str, Any], product: Dict[str, Any], criteria: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
    """
    Score one product against the customer and enrich the result with
    supplementary analyses.

    Args:
        customer_data: Customer information from data preparation.
        product: Product information.
        criteria: Scoring criteria.

    Returns:
        Enriched scoring result, or None when the base scoring produced
        nothing.
    """
    try:
        # Base LLM-driven score; nothing to enrich if it's empty.
        base = self._score_customer_product_fit(customer_data, product, criteria)
        if not base:
            return None

        # Layer the supplementary analyses onto the base score.
        base['product_analysis'] = self._analyze_product_specifics(customer_data, product)
        base['competitive_position'] = self._evaluate_competitive_position(customer_data, product)
        base['implementation_feasibility'] = self._assess_implementation_feasibility(customer_data, product)
        base['roi_analysis'] = self._estimate_roi_potential(customer_data, product)
        return base

    except Exception as e:
        self.logger.error(f"Comprehensive product evaluation failed: {str(e)}")
        # Degrade gracefully to the plain base score.
        return self._score_customer_product_fit(customer_data, product, criteria)
409
+
410
def _analyze_product_specifics(self, customer_data: Dict[str, Any], product: Dict[str, Any]) -> Dict[str, Any]:
    """
    Bundle the four product-specific sub-assessments for this customer.

    Args:
        customer_data: Customer information.
        product: Product information.

    Returns:
        Dict of feature/scalability/integration/customization assessments;
        every field reads 'Unable to assess' on failure.
    """
    try:
        company = customer_data.get('companyInfo', {})
        issues = customer_data.get('painPoints', [])

        return {
            'feature_alignment': self._assess_feature_alignment(issues, product),
            'scalability_match': self._assess_scalability_match(company, product),
            'integration_complexity': self._assess_integration_complexity(customer_data, product),
            'customization_needs': self._assess_customization_needs(customer_data, product)
        }

    except Exception as e:
        self.logger.error(f"Product specifics analysis failed: {str(e)}")
        return {
            key: 'Unable to assess'
            for key in ('feature_alignment', 'scalability_match',
                        'integration_complexity', 'customization_needs')
        }
442
+
443
+ def _assess_feature_alignment(self, pain_points: List[Dict[str, Any]], product: Dict[str, Any]) -> Dict[str, Any]:
444
+ """
445
+ Assess how well product features align with customer pain points.
446
+
447
+ Args:
448
+ pain_points: Customer pain points
449
+ product: Product information
450
+
451
+ Returns:
452
+ Feature alignment assessment
453
+ """
454
+ try:
455
+ product_features = product.get('keyFeatures', [])
456
+ pain_points_solved = product.get('painPointsSolved', [])
457
+
458
+ # Count pain point matches
459
+ matched_pain_points = 0
460
+ total_pain_points = len(pain_points)
461
+
462
+ for pain_point in pain_points:
463
+ pain_category = pain_point.get('category', '').lower()
464
+ pain_description = pain_point.get('description', '').lower()
465
+
466
+ # Check if any product features address this pain point
467
+ for solved_pain in pain_points_solved:
468
+ if (pain_category in solved_pain.lower() or
469
+ any(word in solved_pain.lower() for word in pain_description.split()[:3])):
470
+ matched_pain_points += 1
471
+ break
472
+
473
+ alignment_score = (matched_pain_points / total_pain_points * 100) if total_pain_points > 0 else 0
474
+
475
+ return {
476
+ 'alignment_score': round(alignment_score, 1),
477
+ 'matched_pain_points': matched_pain_points,
478
+ 'total_pain_points': total_pain_points,
479
+ 'key_feature_matches': product_features[:3] # Top 3 relevant features
480
+ }
481
+
482
+ except Exception as e:
483
+ self.logger.error(f"Feature alignment assessment failed: {str(e)}")
484
+ return {
485
+ 'alignment_score': 0,
486
+ 'matched_pain_points': 0,
487
+ 'total_pain_points': len(pain_points),
488
+ 'key_feature_matches': []
489
+ }
490
+
491
+ def _assess_scalability_match(self, company_info: Dict[str, Any], product: Dict[str, Any]) -> Dict[str, Any]:
492
+ """
493
+ Assess how well the product scales with the company.
494
+
495
+ Args:
496
+ company_info: Company information
497
+ product: Product information
498
+
499
+ Returns:
500
+ Scalability assessment
501
+ """
502
+ try:
503
+ company_size = company_info.get('size', '').lower()
504
+ market_insights = product.get('marketInsights', {})
505
+ ideal_company_size = market_insights.get('idealCompanySize', '').lower()
506
+
507
+ # Simple scalability assessment
508
+ scalability_rating = 'Good'
509
+ if 'small' in company_size and 'small' in ideal_company_size:
510
+ scalability_rating = 'Excellent'
511
+ elif 'large' in company_size and 'large' in ideal_company_size:
512
+ scalability_rating = 'Excellent'
513
+ elif ('medium' in company_size and 'medium' in ideal_company_size) or ('50' in ideal_company_size and 'medium' in company_size):
514
+ scalability_rating = 'Excellent'
515
+ elif 'startup' in company_size and ('small' in ideal_company_size or 'startup' in ideal_company_size):
516
+ scalability_rating = 'Excellent'
517
+
518
+ return {
519
+ 'scalability_rating': scalability_rating,
520
+ 'company_size': company_size,
521
+ 'ideal_size_match': ideal_company_size,
522
+ 'growth_potential': 'High' if scalability_rating == 'Excellent' else 'Medium'
523
+ }
524
+
525
+ except Exception as e:
526
+ self.logger.error(f"Scalability assessment failed: {str(e)}")
527
+ return {
528
+ 'scalability_rating': 'Unknown',
529
+ 'company_size': company_info.get('size', 'Unknown'),
530
+ 'ideal_size_match': 'Unknown',
531
+ 'growth_potential': 'Unknown'
532
+ }
533
+
534
def _assess_integration_complexity(self, customer_data: Dict[str, Any], product: Dict[str, Any]) -> Dict[str, Any]:
    """
    Gauge integration effort from the size of the customer's tech stack.

    Args:
        customer_data: Customer information.
        product: Product information.

    Returns:
        Integration complexity assessment with a time estimate.
    """
    try:
        stack = customer_data.get('currentTechStack', [])
        requirements = product.get('installationRequirements', '')

        # Heuristic: no stack means a greenfield (harder) rollout; a broad
        # stack suggests an experienced IT org (easier).
        if len(stack) == 0:
            level = 'High'
        elif len(stack) > 5:
            level = 'Low'
        else:
            level = 'Medium'

        return {
            'complexity_level': level,
            'current_tech_stack_size': len(stack),
            'integration_time_estimate': self._estimate_integration_time(level),
            'technical_requirements': requirements[:200] if requirements else 'Standard requirements'
        }

    except Exception as e:
        self.logger.error(f"Integration complexity assessment failed: {str(e)}")
        return {
            'complexity_level': 'Unknown',
            'current_tech_stack_size': 0,
            'integration_time_estimate': 'Unknown',
            'technical_requirements': 'Unknown'
        }
571
+
572
+ def _estimate_integration_time(self, complexity_level: str) -> str:
573
+ """
574
+ Estimate integration time based on complexity.
575
+
576
+ Args:
577
+ complexity_level: Complexity level (Low/Medium/High)
578
+
579
+ Returns:
580
+ Time estimate string
581
+ """
582
+ time_estimates = {
583
+ 'Low': '2-4 weeks',
584
+ 'Medium': '4-8 weeks',
585
+ 'High': '8-12 weeks',
586
+ 'Unknown': '4-8 weeks'
587
+ }
588
+ return time_estimates.get(complexity_level, '4-8 weeks')
589
+
590
def _assess_customization_needs(self, customer_data: Dict[str, Any], product: Dict[str, Any]) -> Dict[str, Any]:
    """
    Judge how much customization the customer's industry likely demands.

    Args:
        customer_data: Customer information.
        product: Product information.

    Returns:
        Customization needs assessment.
    """
    try:
        industry = customer_data.get('companyInfo', {}).get('industry', '').lower()

        # Regulated sectors demand heavy tailoring; some verticals need some.
        if any(term in industry for term in ('healthcare', 'finance', 'legal')):
            level = 'High'
        elif any(term in industry for term in ('manufacturing', 'retail')):
            level = 'Medium'
        else:
            level = 'Standard'

        return {
            'customization_level': level,
            'industry_specific_needs': industry,
            'estimated_customization_effort': self._estimate_customization_effort(level),
            'standard_features_coverage': '70-90%' if level == 'Standard' else '50-70%'
        }

    except Exception as e:
        self.logger.error(f"Customization needs assessment failed: {str(e)}")
        return {
            'customization_level': 'Unknown',
            'industry_specific_needs': 'Unknown',
            'estimated_customization_effort': 'Unknown',
            'standard_features_coverage': 'Unknown'
        }
627
+
628
+ def _estimate_customization_effort(self, customization_level: str) -> str:
629
+ """
630
+ Estimate customization effort based on level.
631
+
632
+ Args:
633
+ customization_level: Customization level
634
+
635
+ Returns:
636
+ Effort estimate string
637
+ """
638
+ effort_estimates = {
639
+ 'Standard': 'Minimal (configuration only)',
640
+ 'Medium': 'Moderate (some custom development)',
641
+ 'High': 'Significant (extensive customization)',
642
+ 'Unknown': 'To be determined'
643
+ }
644
+ return effort_estimates.get(customization_level, 'To be determined')
645
+
646
def _evaluate_competitive_position(self, customer_data: Dict[str, Any], product: Dict[str, Any]) -> Dict[str, Any]:
    """
    Summarize the product's competitive story for this customer.

    Args:
        customer_data: Customer information.
        product: Product information.

    Returns:
        Competitive position analysis.
    """
    try:
        advantages = product.get('competitiveAdvantages', [])
        # More than three listed advantages is read as a strong position.
        strength = 'Strong' if len(advantages) > 3 else 'Moderate'

        return {
            'key_differentiators': advantages[:3],
            'competitive_strength': strength,
            'market_position': self._assess_market_position(product),
            'customer_value_props': self._identify_customer_value_props(customer_data, advantages)
        }

    except Exception as e:
        self.logger.error(f"Competitive position evaluation failed: {str(e)}")
        return {
            'key_differentiators': [],
            'competitive_strength': 'Unknown',
            'market_position': 'Unknown',
            'customer_value_props': []
        }
675
+
676
+ def _assess_market_position(self, product: Dict[str, Any]) -> str:
677
+ """
678
+ Assess market position of the product.
679
+
680
+ Args:
681
+ product: Product information
682
+
683
+ Returns:
684
+ Market position assessment
685
+ """
686
+ market_insights = product.get('marketInsights', {})
687
+ if market_insights:
688
+ return 'Established'
689
+ else:
690
+ return 'Emerging'
691
+
692
+ def _identify_customer_value_props(self, customer_data: Dict[str, Any], competitive_advantages: List[str]) -> List[str]:
693
+ """
694
+ Identify customer-specific value propositions.
695
+
696
+ Args:
697
+ customer_data: Customer information
698
+ competitive_advantages: Product competitive advantages
699
+
700
+ Returns:
701
+ Customer-specific value propositions
702
+ """
703
+ try:
704
+ pain_points = customer_data.get('painPoints', [])
705
+ value_props = []
706
+
707
+ for advantage in competitive_advantages[:3]:
708
+ # Match advantages to pain points
709
+ for pain_point in pain_points:
710
+ if any(word in advantage.lower() for word in pain_point.get('description', '').lower().split()[:3]):
711
+ value_props.append(f"{advantage} - addresses {pain_point.get('category', 'business')} challenges")
712
+ break
713
+ else:
714
+ value_props.append(advantage)
715
+
716
+ return value_props[:3]
717
+
718
+ except Exception as e:
719
+ self.logger.error(f"Value proposition identification failed: {str(e)}")
720
+ return competitive_advantages[:3]
721
+
722
def _assess_implementation_feasibility(self, customer_data: Dict[str, Any], product: Dict[str, Any]) -> Dict[str, Any]:
    """
    Combine budget, timeline, and success estimates into one feasibility view.

    Args:
        customer_data: Customer information.
        product: Product information.

    Returns:
        Implementation feasibility assessment.
    """
    try:
        company = customer_data.get('companyInfo', {})
        financials = customer_data.get('financialInfo', {})
        deal_size = product.get('marketInsights', {}).get('averageDealSize', '')

        # Budget alignment feeds the headline feasibility rating.
        budget = self._assess_budget_alignment(financials, deal_size)

        return {
            'feasibility_rating': self._calculate_feasibility_rating(budget, company),
            'budget_alignment': budget,
            'implementation_timeline': self._estimate_implementation_timeline(company),
            'success_probability': self._estimate_success_probability(customer_data, product),
            'key_success_factors': self._identify_success_factors(customer_data, product)
        }

    except Exception as e:
        self.logger.error(f"Implementation feasibility assessment failed: {str(e)}")
        return {
            'feasibility_rating': 'Unknown',
            'budget_alignment': 'Unknown',
            'implementation_timeline': 'Unknown',
            'success_probability': 'Unknown',
            'key_success_factors': []
        }
760
+
761
+ def _assess_budget_alignment(self, financial_info: Dict[str, Any], average_deal_size: str) -> str:
762
+ """
763
+ Assess budget alignment with product pricing.
764
+
765
+ Args:
766
+ financial_info: Customer financial information
767
+ average_deal_size: Product average deal size
768
+
769
+ Returns:
770
+ Budget alignment assessment
771
+ """
772
+ try:
773
+ annual_revenue = financial_info.get('estimatedAnnualRevenue', '')
774
+
775
+ if not annual_revenue or not average_deal_size:
776
+ return 'Unknown'
777
+
778
+ # Simple budget assessment logic
779
+ if '$10,000' in average_deal_size and ('$2-5M' in annual_revenue or '$5-10M' in annual_revenue):
780
+ return 'Good'
781
+ elif '$1,000' in average_deal_size:
782
+ return 'Excellent'
783
+ else:
784
+ return 'Moderate'
785
+
786
+ except Exception as e:
787
+ self.logger.error(f"Budget alignment assessment failed: {str(e)}")
788
+ return 'Unknown'
789
+
790
+ def _calculate_feasibility_rating(self, budget_alignment: str, company_info: Dict[str, Any]) -> str:
791
+ """
792
+ Calculate overall feasibility rating.
793
+
794
+ Args:
795
+ budget_alignment: Budget alignment assessment
796
+ company_info: Company information
797
+
798
+ Returns:
799
+ Feasibility rating
800
+ """
801
+ if budget_alignment == 'Excellent':
802
+ return 'High'
803
+ elif budget_alignment == 'Good':
804
+ return 'Medium-High'
805
+ elif budget_alignment == 'Moderate':
806
+ return 'Medium'
807
+ else:
808
+ return 'Low-Medium'
809
+
810
+ def _estimate_implementation_timeline(self, company_info: Dict[str, Any]) -> str:
811
+ """
812
+ Estimate implementation timeline based on company size.
813
+
814
+ Args:
815
+ company_info: Company information
816
+
817
+ Returns:
818
+ Timeline estimate
819
+ """
820
+ company_size = company_info.get('size', '').lower()
821
+
822
+ if 'small' in company_size or 'startup' in company_size:
823
+ return '1-3 months'
824
+ elif 'large' in company_size:
825
+ return '6-12 months'
826
+ else:
827
+ return '3-6 months'
828
+
829
+ def _estimate_success_probability(self, customer_data: Dict[str, Any], product: Dict[str, Any]) -> str:
830
+ """
831
+ Estimate probability of successful implementation.
832
+
833
+ Args:
834
+ customer_data: Customer information
835
+ product: Product information
836
+
837
+ Returns:
838
+ Success probability estimate
839
+ """
840
+ # Simple success probability based on pain point alignment
841
+ pain_points = customer_data.get('painPoints', [])
842
+ pain_points_solved = product.get('painPointsSolved', [])
843
+
844
+ if len(pain_points_solved) >= len(pain_points):
845
+ return 'High (80-90%)'
846
+ elif len(pain_points_solved) >= len(pain_points) * 0.5:
847
+ return 'Medium-High (70-80%)'
848
+ else:
849
+ return 'Medium (60-70%)'
850
+
851
+ def _identify_success_factors(self, customer_data: Dict[str, Any], product: Dict[str, Any]) -> List[str]:
852
+ """
853
+ Identify key success factors for implementation.
854
+
855
+ Args:
856
+ customer_data: Customer information
857
+ product: Product information
858
+
859
+ Returns:
860
+ List of success factors
861
+ """
862
+ return [
863
+ 'Strong executive sponsorship',
864
+ 'Dedicated implementation team',
865
+ 'Clear success metrics definition',
866
+ 'Adequate training and change management',
867
+ 'Phased rollout approach'
868
+ ]
869
+
870
def _estimate_roi_potential(self, customer_data: Dict[str, Any], product: Dict[str, Any]) -> Dict[str, Any]:
    """
    Estimate ROI from the categories and impact of the customer's pain points.

    Args:
        customer_data: Customer information.
        product: Product information.

    Returns:
        ROI analysis (percentage, payback period, driving factors).
    """
    try:
        pain_points = customer_data.get('painPoints', [])
        company_info = customer_data.get('companyInfo', {})

        savings_pct = 0
        factors = []

        # Accumulate percentage savings per pain point; high-impact items
        # contribute more. Only operational/efficiency and technology
        # categories are recognized.
        for point in pain_points:
            category = point.get('category', '').lower()
            high_impact = 'high' in point.get('impact', '').lower()

            if 'operational' in category or 'efficiency' in category:
                if high_impact:
                    savings_pct += 15  # assumed 15% efficiency gain
                    factors.append('Operational efficiency improvements')
                else:
                    savings_pct += 8
                    factors.append('Process optimization')
            elif 'technology' in category:
                if high_impact:
                    savings_pct += 12
                    factors.append('Technology modernization benefits')
                else:
                    savings_pct += 6
                    factors.append('System integration improvements')

        return {
            'estimated_roi_percentage': f"{savings_pct}%",
            'payback_period': self._estimate_payback_period(savings_pct),
            'roi_factors': factors[:3],
            'confidence_level': 'Medium' if savings_pct > 10 else 'Low',
            'annual_benefit_estimate': self._estimate_annual_benefits(company_info, savings_pct)
        }

    except Exception as e:
        self.logger.error(f"ROI estimation failed: {str(e)}")
        return {
            'estimated_roi_percentage': 'Unknown',
            'payback_period': 'Unknown',
            'roi_factors': [],
            'confidence_level': 'Unknown',
            'annual_benefit_estimate': 'Unknown'
        }
929
+
930
+ def _estimate_payback_period(self, roi_percentage: float) -> str:
931
+ """
932
+ Estimate payback period based on ROI percentage.
933
+
934
+ Args:
935
+ roi_percentage: Estimated ROI percentage
936
+
937
+ Returns:
938
+ Payback period estimate
939
+ """
940
+ if roi_percentage >= 20:
941
+ return '6-12 months'
942
+ elif roi_percentage >= 10:
943
+ return '12-18 months'
944
+ elif roi_percentage >= 5:
945
+ return '18-24 months'
946
+ else:
947
+ return '24+ months'
948
+
949
+ def _estimate_annual_benefits(self, company_info: Dict[str, Any], roi_percentage: float) -> str:
950
+ """
951
+ Estimate annual benefits based on company size and ROI.
952
+
953
+ Args:
954
+ company_info: Company information
955
+ roi_percentage: Estimated ROI percentage
956
+
957
+ Returns:
958
+ Annual benefits estimate
959
+ """
960
+ try:
961
+ annual_revenue = company_info.get('annualRevenue', '').lower()
962
+
963
+ if '$10m' in annual_revenue or '$5-10m' in annual_revenue:
964
+ base_benefit = 500000 # $500K base for $5-10M revenue
965
+ elif '$2-5m' in annual_revenue:
966
+ base_benefit = 200000 # $200K base for $2-5M revenue
967
+ elif '$1-2m' in annual_revenue:
968
+ base_benefit = 100000 # $100K base for $1-2M revenue
969
+ else:
970
+ base_benefit = 50000 # $50K base for smaller companies
971
+
972
+ estimated_benefit = base_benefit * (roi_percentage / 100)
973
+
974
+ if estimated_benefit >= 100000:
975
+ return f"${estimated_benefit/1000:.0f}K+"
976
+ else:
977
+ return f"${estimated_benefit:.0f}+"
978
+
979
+ except Exception as e:
980
+ self.logger.error(f"Annual benefits estimation failed: {str(e)}")
981
+ return "To be determined"
982
+
983
def _compare_products(self, lead_scores: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Compare products and provide recommendations.

    Ranks the scored products by total weighted score, reports the
    leader and runner-up, and qualifies the recommendation confidence
    by the size of the score gap between them.

    Args:
        lead_scores: List of scoring results

    Returns:
        Product comparison analysis
    """
    try:
        if len(lead_scores) <= 1:
            return {
                'comparison_available': False,
                'reason': 'Only one product evaluated'
            }

        ranked = sorted(lead_scores, key=lambda item: item['total_weighted_score'], reverse=True)
        leader = ranked[0]
        runner_up = ranked[1] if len(ranked) > 1 else None

        result = {
            'comparison_available': True,
            'top_recommendation': {
                'product_name': leader['product_name'],
                'score': leader['total_weighted_score'],
                'key_strengths': self._extract_top_criteria(leader['scores'])
            },
            'alternative_option': None,
            'score_gap': 0,
            'recommendation_confidence': 'High'
        }

        if runner_up:
            gap = leader['total_weighted_score'] - runner_up['total_weighted_score']
            result['alternative_option'] = {
                'product_name': runner_up['product_name'],
                'score': runner_up['total_weighted_score'],
                'key_strengths': self._extract_top_criteria(runner_up['scores'])
            }
            result['score_gap'] = gap

            # A close race lowers confidence; a wide gap raises it.
            if gap < 10:
                result['recommendation_confidence'] = 'Medium'
                result['recommendation_note'] = 'Close scores - consider other factors'
            elif gap > 30:
                result['recommendation_confidence'] = 'Very High'
                result['recommendation_note'] = 'Clear winner identified'

        return result

    except Exception as e:
        self.logger.error(f"Product comparison failed: {str(e)}")
        return {
            'comparison_available': False,
            'reason': 'Comparison analysis failed'
        }
1042
+
1043
+ def _extract_top_criteria(self, scores: Dict[str, Dict[str, Any]]) -> List[str]:
1044
+ """
1045
+ Extract top scoring criteria for a product.
1046
+
1047
+ Args:
1048
+ scores: Criteria scores dictionary
1049
+
1050
+ Returns:
1051
+ List of top criteria names
1052
+ """
1053
+ try:
1054
+ # Sort criteria by score
1055
+ sorted_criteria = sorted(scores.items(), key=lambda x: x[1].get('score', 0), reverse=True)
1056
+
1057
+ # Return top 2 criteria names
1058
+ return [criterion[0].replace('_', ' ').title() for criterion in sorted_criteria[:2]]
1059
+
1060
+ except Exception as e:
1061
+ self.logger.error(f"Top criteria extraction failed: {str(e)}")
1062
+ return []
1063
+
1064
def _score_customer_product_fit(self, customer_data: Dict[str, Any], product: Dict[str, Any], criteria: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
    """
    Score customer-product fit using LLM analysis.

    Builds the scoring prompt, calls the LLM, parses and validates the
    JSON reply; any failure falls back to a zeroed scoring result.

    Args:
        customer_data: Customer information from data preparation
        product: Product information
        criteria: Scoring criteria

    Returns:
        Scoring result or None if failed
    """
    try:
        # Dry runs short-circuit with canned data instead of an LLM call.
        if self.is_dry_run():
            return self._get_mock_scoring_result(product)

        prompt = self._create_scoring_prompt(customer_data, product, criteria)

        # Low temperature (0.3) keeps the scoring output stable.
        raw_response = self.call_llm(prompt, temperature=0.3)
        parsed = self.parse_json_response(raw_response)

        # Normalize/repair the LLM output before handing it downstream.
        validated = self._validate_scoring_result(parsed, product)

        self.logger.info(f"Successfully scored product {product.get('productName', 'Unknown')}")
        return validated

    except Exception as e:
        self.logger.error(f"Product scoring failed for {product.get('productName', 'Unknown')}: {str(e)}")
        return self._get_fallback_scoring_result(product)
1098
+
1099
def _create_scoring_prompt(self, customer_data: Dict[str, Any], product: Dict[str, Any], criteria: List[Dict[str, Any]]) -> str:
    """
    Create the LLM scoring prompt based on original YAML instruction.

    The prompt instructs the model to score five fixed criteria
    (0-100 each), to default the geographic score to 50 when no
    customer address is available, and to reply with bare JSON
    (no markdown code fences).

    Args:
        customer_data: Customer information
        product: Product information
        criteria: Scoring criteria

    Returns:
        Formatted prompt string
    """
    # Extract customer address for geographic scoring; an empty string
    # triggers the prompt's "default score of 50" rule.
    company_info = customer_data.get('companyInfo', {})
    customer_address = company_info.get('address', '')

    # NOTE: literal braces are doubled ({{ }}) because this is an
    # f-string; the model receives single braces in the JSON template.
    prompt = f"""Role: You are a lead scoring expert.

Objective: Your task is to evaluate the potential fit between a customer and a specific product based on the provided criteria. Use the following data to perform lead scoring.

## Instructions
1. **Evaluate Criteria**:
- Assess the customer against the **5 criteria** provided.
- Assign a score (0–100) for each criterion based on the scoring guidelines in the criteria JSON.

2. **Special Handling for Geographic Market Fit**:
- Use the `localization` field from the product info and the customer's address
- If the customer address is **not provided**, assign a default score of 50.

3. **Output Structure**: Return the analysis as a JSON object with the following format:
{{
"product_name": "productName",
"product_id": "Get ID from 'id' of product info, ensure exactly 36 characters",
"scores": {{
"industry_fit": {{ "score": 0–100, "justification": "Brief explanation" }},
"company_size": {{ "score": 0–100, "justification": "Brief explanation" }},
"pain_points": {{ "score": 0–100, "justification": "Brief explanation" }},
"product_fit": {{ "score": 0–100, "justification": "Brief explanation" }},
"geographic_market_fit": {{ "score": 0–100, "justification": "Brief explanation" }}
}},
"total_weighted_score": 0–100
}}
4. Scoring Rules:
- If information is not provided, assign a score of 0 for that criterion.
- Ensure the product_id is exactly 36 characters long and matches the id field in the product info.
- Do not include any explanations or comments outside of the JSON structure.
- Do not use keyword ```json in response

The Input Data:
Product Information: {json.dumps(product, indent=2)}
Scoring Criteria: {json.dumps(criteria, indent=2)}
Customer Information: {json.dumps(customer_data, indent=2)}
Customer address: {customer_address}"""

    return prompt
1154
+
1155
+ def _get_mock_scoring_result(self, product: Dict[str, Any]) -> Dict[str, Any]:
1156
+ """
1157
+ Get mock scoring result for dry run mode.
1158
+
1159
+ Args:
1160
+ product: Product information
1161
+
1162
+ Returns:
1163
+ Mock scoring result
1164
+ """
1165
+ product_name = product.get('productName', 'Unknown Product')
1166
+ product_id = product.get('id', 'mock-product-id-1234-5678-9012-345678901234')
1167
+
1168
+ # Ensure product_id is exactly 36 characters
1169
+ if len(product_id) != 36:
1170
+ product_id = f"mock-{product_id[:31]}" if len(product_id) > 31 else f"mock-{product_id}-{'0' * (31 - len(product_id))}"
1171
+
1172
+ return {
1173
+ 'product_name': product_name,
1174
+ 'product_id': product_id,
1175
+ 'scores': {
1176
+ 'industry_fit': {
1177
+ 'score': 75,
1178
+ 'justification': 'Good industry alignment with technology sector'
1179
+ },
1180
+ 'company_size': {
1181
+ 'score': 80,
1182
+ 'justification': 'Company size fits well within target segment'
1183
+ },
1184
+ 'pain_points': {
1185
+ 'score': 85,
1186
+ 'justification': 'Product directly addresses identified operational efficiency challenges'
1187
+ },
1188
+ 'product_fit': {
1189
+ 'score': 70,
1190
+ 'justification': 'Strong feature alignment with customer needs'
1191
+ },
1192
+ 'geographic_market_fit': {
1193
+ 'score': 90,
1194
+ 'justification': 'Strong market presence in customer location'
1195
+ }
1196
+ },
1197
+ 'total_weighted_score': 78
1198
+ }
1199
+
1200
+ def _validate_scoring_result(self, scoring_result: Dict[str, Any], product: Dict[str, Any]) -> Dict[str, Any]:
1201
+ """
1202
+ Validate and clean the scoring result.
1203
+
1204
+ Args:
1205
+ scoring_result: Raw scoring result from LLM
1206
+ product: Product information
1207
+
1208
+ Returns:
1209
+ Validated scoring result
1210
+ """
1211
+ try:
1212
+ # Ensure required fields exist
1213
+ if 'product_name' not in scoring_result:
1214
+ scoring_result['product_name'] = product.get('productName', 'Unknown')
1215
+
1216
+ if 'product_id' not in scoring_result:
1217
+ scoring_result['product_id'] = product.get('id', 'unknown-product-id')
1218
+
1219
+ # Ensure product_id is exactly 36 characters
1220
+ product_id = scoring_result['product_id']
1221
+ if len(product_id) != 36:
1222
+ # Try to use the original product ID
1223
+ original_id = product.get('id', '')
1224
+ if len(original_id) == 36:
1225
+ scoring_result['product_id'] = original_id
1226
+ else:
1227
+ # Generate a valid 36-character ID
1228
+ scoring_result['product_id'] = f"prod-{product_id[:31]}" if len(product_id) > 31 else f"prod-{product_id}-{'0' * (31 - len(product_id))}"
1229
+
1230
+ # Validate scores
1231
+ scores = scoring_result.get('scores', {})
1232
+ required_criteria = ['industry_fit', 'company_size', 'pain_points', 'product_fit', 'geographic_market_fit']
1233
+
1234
+ for criterion in required_criteria:
1235
+ if criterion not in scores:
1236
+ scores[criterion] = {'score': 0, 'justification': 'No data available'}
1237
+ elif not isinstance(scores[criterion], dict):
1238
+ scores[criterion] = {'score': 0, 'justification': 'Invalid data format'}
1239
+ else:
1240
+ # Ensure score is within valid range
1241
+ score_value = scores[criterion].get('score', 0)
1242
+ if not isinstance(score_value, (int, float)) or score_value < 0 or score_value > 100:
1243
+ scores[criterion]['score'] = 0
1244
+
1245
+ # Ensure justification exists
1246
+ if 'justification' not in scores[criterion]:
1247
+ scores[criterion]['justification'] = 'No justification provided'
1248
+
1249
+ # Calculate total weighted score if not provided or invalid
1250
+ if 'total_weighted_score' not in scoring_result or not isinstance(scoring_result['total_weighted_score'], (int, float)):
1251
+ scoring_result['total_weighted_score'] = self._calculate_weighted_score(scores)
1252
+
1253
+ return scoring_result
1254
+
1255
+ except Exception as e:
1256
+ self.logger.error(f"Scoring result validation failed: {str(e)}")
1257
+ return self._get_fallback_scoring_result(product)
1258
+
1259
def _calculate_weighted_score(self, scores: Dict[str, Dict[str, Any]]) -> float:
    """
    Calculate weighted total score based on criteria weights.

    Args:
        scores: Individual criterion scores

    Returns:
        Weighted total score
    """
    try:
        weighted_sum = 0
        weight_seen = 0

        # Weights come from the stage's default criteria definitions.
        for criterion in self._get_default_scoring_criteria():
            name = criterion['name']
            weight = criterion['weight']
            if name not in scores:
                continue
            weighted_sum += scores[name].get('score', 0) * (weight / 100)
            weight_seen += weight

        # No overlap between criteria and scores yields zero.
        return round(weighted_sum, 1) if weight_seen > 0 else 0.0

    except Exception as e:
        self.logger.error(f"Weighted score calculation failed: {str(e)}")
        return 0.0
1291
+
1292
+ def _get_fallback_scoring_result(self, product: Dict[str, Any]) -> Dict[str, Any]:
1293
+ """
1294
+ Get fallback scoring result when LLM scoring fails.
1295
+
1296
+ Args:
1297
+ product: Product information
1298
+
1299
+ Returns:
1300
+ Fallback scoring result
1301
+ """
1302
+ product_name = product.get('productName', 'Unknown Product')
1303
+ product_id = product.get('id', 'fallback-product-id-1234-5678-9012-345678901234')
1304
+
1305
+ return {
1306
+ 'product_name': product_name,
1307
+ 'product_id': product_id,
1308
+ 'scores': {
1309
+ 'industry_fit': {'score': 0, 'justification': 'Unable to evaluate due to processing error'},
1310
+ 'company_size': {'score': 0, 'justification': 'Unable to evaluate due to processing error'},
1311
+ 'pain_points': {'score': 0, 'justification': 'Unable to evaluate due to processing error'},
1312
+ 'product_fit': {'score': 0, 'justification': 'Unable to evaluate due to processing error'},
1313
+ 'geographic_market_fit': {'score': 50, 'justification': 'Default score assigned due to processing error'}
1314
+ },
1315
+ 'total_weighted_score': 10
1316
+ }
1317
+
1318
+ def _analyze_scoring_results(self, lead_scores: List[Dict[str, Any]]) -> Dict[str, Any]:
1319
+ """
1320
+ Analyze scoring results and provide insights.
1321
+
1322
+ Args:
1323
+ lead_scores: List of scoring results
1324
+
1325
+ Returns:
1326
+ Analysis insights
1327
+ """
1328
+ try:
1329
+ if not lead_scores:
1330
+ return {
1331
+ 'highest_score': 0,
1332
+ 'lowest_score': 0,
1333
+ 'average_score': 0,
1334
+ 'recommended_product': None,
1335
+ 'insights': ['No products were evaluated']
1336
+ }
1337
+
1338
+ scores = [score['total_weighted_score'] for score in lead_scores]
1339
+ highest_score = max(scores)
1340
+ lowest_score = min(scores)
1341
+ average_score = sum(scores) / len(scores)
1342
+
1343
+ # Find recommended product (highest scoring)
1344
+ recommended_product = None
1345
+ for score in lead_scores:
1346
+ if score['total_weighted_score'] == highest_score:
1347
+ recommended_product = {
1348
+ 'product_name': score['product_name'],
1349
+ 'product_id': score['product_id'],
1350
+ 'score': score['total_weighted_score']
1351
+ }
1352
+ break
1353
+
1354
+ # Generate insights
1355
+ insights = []
1356
+ if highest_score >= 80:
1357
+ insights.append('Excellent product-customer fit identified')
1358
+ elif highest_score >= 60:
1359
+ insights.append('Good product-customer fit with some optimization opportunities')
1360
+ elif highest_score >= 40:
1361
+ insights.append('Moderate fit - consider addressing key gaps before outreach')
1362
+ else:
1363
+ insights.append('Low fit scores - may need different products or customer qualification')
1364
+
1365
+ if len(lead_scores) > 1:
1366
+ score_range = highest_score - lowest_score
1367
+ if score_range > 30:
1368
+ insights.append('Significant variation in product fit - focus on highest scoring options')
1369
+ else:
1370
+ insights.append('Similar fit scores across products - consider other factors for selection')
1371
+
1372
+ return {
1373
+ 'highest_score': highest_score,
1374
+ 'lowest_score': lowest_score,
1375
+ 'average_score': round(average_score, 1),
1376
+ 'recommended_product': recommended_product,
1377
+ 'insights': insights,
1378
+ 'total_products_evaluated': len(lead_scores)
1379
+ }
1380
+
1381
+ except Exception as e:
1382
+ self.logger.error(f"Scoring analysis failed: {str(e)}")
1383
+ return {
1384
+ 'highest_score': 0,
1385
+ 'lowest_score': 0,
1386
+ 'average_score': 0,
1387
+ 'recommended_product': None,
1388
+ 'insights': ['Analysis failed due to processing error'],
1389
+ 'total_products_evaluated': len(lead_scores) if lead_scores else 0
1390
+ }
1391
+
1392
def _save_scoring_results(self, context: Dict[str, Any], lead_scores: List[Dict[str, Any]]) -> None:
    """
    Save scoring results to local database.

    Failures are logged as warnings rather than raised so that a
    persistence problem does not abort the scoring stage.

    Args:
        context: Execution context
        lead_scores: List of scoring results
    """
    try:
        execution_id = context.get('execution_id')

        for score_result in lead_scores:
            # Persist one row per scored product. The execution id
            # doubles as the customer id. (The old code serialized the
            # criteria dict with json.dumps only to json.loads it back
            # immediately; the dict is now passed through directly.)
            self.data_manager.save_lead_score(
                execution_id=execution_id,
                customer_id=execution_id,
                product_id=score_result.get('product_id'),
                score=score_result.get('total_weighted_score', 0),
                criteria_breakdown=score_result.get('scores', {})
            )
            self.logger.info(f"Scoring data saved to database: {score_result.get('product_name')}")

    except Exception as e:
        self.logger.warning(f"Failed to save scoring results: {str(e)}")
1425
+
1426
def validate_input(self, context: Dict[str, Any]) -> bool:
    """
    Validate input data for lead scoring stage.

    Args:
        context: Execution context

    Returns:
        True if input is valid
    """
    # Preferred path: results handed off by the data preparation stage.
    if 'data_preparation' in context.get('stage_results', {}):
        return True

    # Fallback: raw customer identifiers in the original input.
    raw_input = context.get('input_data', {})
    return bool(raw_input.get('customer_name') or raw_input.get('customer_website'))
1444
+
1445
def get_required_fields(self) -> List[str]:
    """
    Get list of required input fields for this stage.

    Returns:
        List of required field names
    """
    # No direct requirements: this stage consumes data_preparation output.
    return []