fusesell 1.3.1-py3-none-any.whl → 1.3.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of fusesell has been flagged as potentially problematic.

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fusesell
- Version: 1.3.1
+ Version: 1.3.3
  Summary: Local implementation of FuseSell AI sales automation pipeline
  Author-email: FuseSell Team <team@fusesell.ai>
  License-Expression: MIT
@@ -1,7 +1,7 @@
  fusesell.py,sha256=t5PjkhWEJGINp4k517u0EX0ge7lzuHOUHHro-BE1mGk,596
- fusesell-1.3.1.dist-info/licenses/LICENSE,sha256=GDz1ZoC4lB0kwjERpzqc_OdA_awYVso2aBnUH-ErW_w,1070
- fusesell_local/__init__.py,sha256=4eRHhad_hjqLaoWSd_wZzPRBuX_V_Aqqf2_o93NeHao,967
- fusesell_local/api.py,sha256=AcPune5YJdgi7nsMeusCUqc49z5UiycsQb6n3yiV_No,10839
+ fusesell-1.3.3.dist-info/licenses/LICENSE,sha256=GDz1ZoC4lB0kwjERpzqc_OdA_awYVso2aBnUH-ErW_w,1070
+ fusesell_local/__init__.py,sha256=Q6pGa5zMldeiz9JEKEbO3rcIJphGt8qmZBl03G7wMMM,966
+ fusesell_local/api.py,sha256=SABxRr0TsB1Xfhl_OlL2IiMKYLG0jxbsaBKVNsMh-Qo,10972
  fusesell_local/cli.py,sha256=MYnVxuEf5KTR4VcO3sc-VtP9NkWlSixJsYfOWST2Ds0,65859
  fusesell_local/pipeline.py,sha256=RMF_kgwNEc1ka8-CDJyzIOTSo8PGtR_zPKAgRevhlNo,39913
  fusesell_local/config/__init__.py,sha256=0ErO7QiSDqKn-LHcjIRdLZzh5QaRTkRsIlwfgpkkDz8,209
@@ -12,24 +12,24 @@ fusesell_local/stages/base_stage.py,sha256=ldo5xuHZto7ceEg3i_3rxAx0xPccK4n2jaxEJ
  fusesell_local/stages/data_acquisition.py,sha256=Td3mwakJRoEYbi3od4v2ZzKOHLgLSgccZVxH3ezs1_4,71081
  fusesell_local/stages/data_preparation.py,sha256=XWLg9b1w2NrMxLcrWDqB95mRmLQmVIMXpKNaBNr98TQ,52751
  fusesell_local/stages/follow_up.py,sha256=H9Xek6EoIbHrerQvGTRswXDNFH6zq71DcRxxj0zpo2g,77747
- fusesell_local/stages/initial_outreach.py,sha256=8Ra-Nmq4njAG1iMNiUW7FQbYnX0h9p5F59OXlyGdFdU,135285
+ fusesell_local/stages/initial_outreach.py,sha256=98KMaGP_aFkCV4K8j8HgURmNEgbVTYZSvXfLOlXX3Mc,127216
  fusesell_local/stages/lead_scoring.py,sha256=ir3l849eMGrGLf0OYUcmA1F3FwyYhAplS4niU3R2GRY,60658
  fusesell_local/tests/conftest.py,sha256=TWUtlP6cNPVOYkTPz-j9BzS_KnXdPWy8D-ObPLHvXYs,366
- fusesell_local/tests/test_api.py,sha256=763rUVb5pAuAQOovug6Ka0T9eGK8-WVOC_J08M7TETo,1827
+ fusesell_local/tests/test_api.py,sha256=vXlNaIDxqTTIFHRHs5zdUAPrxEleyoNxVOeyGuNgoQo,2304
  fusesell_local/tests/test_cli.py,sha256=iNgU8nDlVrcQM5MpBUTIJ5q3oh2-jgX77hJeaqBxToM,1007
  fusesell_local/tests/test_data_manager_products.py,sha256=g8EUSxTqdg18VifzhuOtDDywiMYzwOWFADny5Vntc28,4691
  fusesell_local/tests/test_data_manager_sales_process.py,sha256=NbwxQ9oBKCZfrkRQYxzHHQ08F7epqPUsyeGz_vm3kf8,4447
  fusesell_local/tests/test_data_manager_teams.py,sha256=kjk4V4r9ja4EVREIiQMxkuZd470SSwRHJAvpHln9KO4,4578
- fusesell_local/utils/__init__.py,sha256=TVemlo0wpckhNUxP3a1Tky3ekswy8JdIHaXBlkKXKBQ,330
+ fusesell_local/utils/__init__.py,sha256=onCrMaFAr_RepjhilcATvo2VBsSwglbaDSS7M5UVNQ0,374
  fusesell_local/utils/birthday_email_manager.py,sha256=NKLoUyzPedyhewZPma21SOoU8p9wPquehloer7TRA9U,20478
  fusesell_local/utils/data_manager.py,sha256=FHW9nvLXDgf-HYNFwxZlegZp0OgB3altszW6INIgyLM,188910
  fusesell_local/utils/event_scheduler.py,sha256=TDk1v19cNgLhn2aJriQfpvZnwBcRpOWyHLDvkefW110,39834
- fusesell_local/utils/llm_client.py,sha256=FVc25UlGt6hro7h5Iw7PHSXY3E3_67Xc-SUbHuMSRs0,10437
+ fusesell_local/utils/llm_client.py,sha256=eNfbZBcyawBvZGLDPuyeectwBUVi5fjeD5GeLkEqWXI,12271
  fusesell_local/utils/logger.py,sha256=sWlV8Tjyz_Z8J4zXKOnNalh8_iD6ytfrwPZpD-wcEOs,6259
  fusesell_local/utils/timezone_detector.py,sha256=0cAE4c8ZXqCA8AvxRKm6PrFKmAmsbq3HOHR6w-mW3KQ,39997
  fusesell_local/utils/validators.py,sha256=Z1VzeoxFsnuzlIA_ZaMWoy-0Cgyqupd47kIdljlMDbs,15438
- fusesell-1.3.1.dist-info/METADATA,sha256=hYcoE3sJK-2TPuxHSUv5yxaoNYu1_B5gW1bfScica5w,35074
- fusesell-1.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- fusesell-1.3.1.dist-info/entry_points.txt,sha256=Vqek7tbiX7iF4rQkCRBZvT5WrB0HUduqKTsI2ZjhsXo,53
- fusesell-1.3.1.dist-info/top_level.txt,sha256=VP9y1K6DEq6gNq2UgLd7ChujxViF6OzeAVCK7IUBXPA,24
- fusesell-1.3.1.dist-info/RECORD,,
+ fusesell-1.3.3.dist-info/METADATA,sha256=KtGJ5IyUrdyBtlS2QwMsspFdzf61sJMSxzmZY-TO2t0,35074
+ fusesell-1.3.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ fusesell-1.3.3.dist-info/entry_points.txt,sha256=Vqek7tbiX7iF4rQkCRBZvT5WrB0HUduqKTsI2ZjhsXo,53
+ fusesell-1.3.3.dist-info/top_level.txt,sha256=VP9y1K6DEq6gNq2UgLd7ChujxViF6OzeAVCK7IUBXPA,24
+ fusesell-1.3.3.dist-info/RECORD,,
fusesell_local/__init__.py CHANGED
@@ -32,6 +32,6 @@ __all__ = [
  "validate_config",
  ]

- __version__ = "1.3.1"
+ __version__ = "1.3.3"
  __author__ = "FuseSell Team"
  __description__ = "Local implementation of FuseSell AI sales automation pipeline"
fusesell_local/api.py CHANGED
@@ -16,6 +16,7 @@ from typing import Any, Callable, Dict, Mapping, MutableMapping, Optional, Seque
  from .pipeline import FuseSellPipeline
  from .utils.logger import setup_logging as _setup_logging
  from .utils.validators import InputValidator
+ from .utils.llm_client import normalize_llm_base_url


  class ConfigValidationError(ValueError):
@@ -138,6 +139,8 @@ def build_config(options: OptionsType) -> Dict[str, Any]:
  "dry_run": _coerce_bool(_get("dry_run")),
  }

+ config["llm_base_url"] = normalize_llm_base_url(config.get("llm_base_url"))
+
  return config


@@ -338,4 +341,3 @@ def execute_pipeline(
  }

  return run_pipeline(config)
-
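
The build_config change above means an OpenAI-compatible base URL supplied in the options now comes back with the /v1 suffix already applied. A minimal sketch of the transformation, calling the helper directly (which is exactly what build_config does internally); the URL is the one used in the new tests:

    from fusesell_local.utils.llm_client import normalize_llm_base_url

    # Mirrors what build_config() now does to config["llm_base_url"].
    assert normalize_llm_base_url("https://custom-llm.example.com") == "https://custom-llm.example.com/v1"
    assert normalize_llm_base_url(None) is None  # unset values stay unset
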
fusesell_local/stages/initial_outreach.py CHANGED
@@ -710,136 +710,119 @@ class InitialOutreachStage(BaseStage):
  if self.is_dry_run():
  return self._get_mock_email_drafts(customer_data, recommended_product, context)

- try:
- input_data = context.get('input_data', {})
- rep_profile = rep_profile or {}
- recipient_identity = self._resolve_recipient_identity(customer_data, context)
- if recipient_identity.get('first_name') and not context.get('customer_first_name'):
- context['customer_first_name'] = recipient_identity['first_name']
- context.setdefault('_recipient_identity', recipient_identity)
- if rep_profile:
- primary_name = rep_profile.get('name')
- if primary_name:
- input_data['staff_name'] = primary_name
- self.config['staff_name'] = primary_name
- if rep_profile.get('email'):
- input_data.setdefault('staff_email', rep_profile.get('email'))
- if rep_profile.get('phone') or rep_profile.get('primary_phone'):
- input_data.setdefault('staff_phone', rep_profile.get('phone') or rep_profile.get('primary_phone'))
- if rep_profile.get('position'):
- input_data.setdefault('staff_title', rep_profile.get('position'))
- if rep_profile.get('website'):
- input_data.setdefault('staff_website', rep_profile.get('website'))
-
- company_info = customer_data.get('companyInfo', {})
- contact_info = customer_data.get('primaryContact', {})
- pain_points = customer_data.get('painPoints', [])
-
- prompt_drafts = self._generate_email_drafts_from_prompt(
- customer_data,
- recommended_product,
- scoring_data,
- context
- )
- if prompt_drafts:
- return prompt_drafts
-
- # Generate multiple draft variations with different approaches
- draft_approaches = [
- {
- 'name': 'professional_direct',
- 'tone': 'professional and direct',
- 'focus': 'business value and ROI',
- 'length': 'concise'
- },
- {
- 'name': 'consultative',
- 'tone': 'consultative and helpful',
- 'focus': 'solving specific pain points',
- 'length': 'medium'
- },
- {
- 'name': 'industry_expert',
- 'tone': 'industry expert and insightful',
- 'focus': 'industry trends and challenges',
- 'length': 'detailed'
- },
- {
- 'name': 'relationship_building',
- 'tone': 'warm and relationship-focused',
- 'focus': 'building connection and trust',
- 'length': 'personal'
- }
- ]
-
- generated_drafts = []
-
- for approach in draft_approaches:
- try:
- # Generate email content for this approach
- email_content = self._generate_single_email_draft(
- customer_data, recommended_product, scoring_data,
- approach, context
- )
- email_content = self._ensure_html_email(email_content, context)
-
- # Generate subject lines for this approach
- subject_lines = self._generate_subject_lines(
- customer_data, recommended_product, approach, context
- )
-
- draft_id = f"uuid:{str(uuid.uuid4())}"
- draft_approach = approach['name']
- draft_type = "initial"
-
- # Select the best subject line (first one, or most relevant)
- selected_subject = subject_lines[0] if subject_lines else f"Partnership opportunity for {company_info.get('name', 'your company')}"
-
- draft = {
- 'draft_id': draft_id,
- 'approach': approach['name'],
- 'tone': approach['tone'],
- 'focus': approach['focus'],
- 'subject': selected_subject, # Single subject instead of array
- 'subject_alternatives': subject_lines[1:4] if len(subject_lines) > 1 else [], # Store alternatives separately
- 'email_body': email_content,
- 'email_format': 'html',
- 'recipient_email': recipient_identity.get('email'),
- 'recipient_name': recipient_identity.get('full_name'),
- 'customer_first_name': recipient_identity.get('first_name'),
- 'call_to_action': self._extract_call_to_action(email_content),
- 'personalization_score': self._calculate_personalization_score(email_content, customer_data),
- 'generated_at': datetime.now().isoformat(),
- 'status': 'draft',
- 'metadata': {
- 'customer_company': company_info.get('name', 'Unknown'),
- 'contact_name': contact_info.get('name', 'Unknown'),
- 'recipient_email': recipient_identity.get('email'),
- 'recipient_name': recipient_identity.get('full_name'),
- 'email_format': 'html',
- 'recommended_product': recommended_product.get('product_name', 'Unknown'),
- 'pain_points_addressed': len([p for p in pain_points if p.get('severity') in ['high', 'medium']]),
- 'generation_method': 'llm_powered'
- }
- }
-
- generated_drafts.append(draft)
-
- except Exception as e:
- self.logger.warning(f"Failed to generate draft for approach {approach['name']}: {str(e)}")
- continue
-
- if not generated_drafts:
- # Fallback to simple template if all LLM generations fail
- self.logger.warning("All LLM draft generations failed, using fallback template")
- return self._generate_fallback_draft(customer_data, recommended_product, context)
-
- self.logger.info(f"Generated {len(generated_drafts)} email drafts successfully")
- return generated_drafts
-
- except Exception as e:
- self.logger.error(f"Email draft generation failed: {str(e)}")
- return self._generate_fallback_draft(customer_data, recommended_product, context)
+ input_data = context.get('input_data', {}) or {}
+ rep_profile = rep_profile or {}
+ recipient_identity = self._resolve_recipient_identity(customer_data, context)
+ if recipient_identity.get('first_name') and not context.get('customer_first_name'):
+ context['customer_first_name'] = recipient_identity['first_name']
+ context.setdefault('_recipient_identity', recipient_identity)
+ if rep_profile:
+ primary_name = rep_profile.get('name')
+ if primary_name:
+ input_data['staff_name'] = primary_name
+ self.config['staff_name'] = primary_name
+ if rep_profile.get('email'):
+ input_data.setdefault('staff_email', rep_profile.get('email'))
+ if rep_profile.get('phone') or rep_profile.get('primary_phone'):
+ input_data.setdefault('staff_phone', rep_profile.get('phone') or rep_profile.get('primary_phone'))
+ if rep_profile.get('position'):
+ input_data.setdefault('staff_title', rep_profile.get('position'))
+ if rep_profile.get('website'):
+ input_data.setdefault('staff_website', rep_profile.get('website'))
+
+ company_info = customer_data.get('companyInfo', {}) or {}
+ contact_info = customer_data.get('primaryContact', {}) or {}
+ pain_points = customer_data.get('painPoints', [])
+
+ prompt_drafts = self._generate_email_drafts_from_prompt(
+ customer_data,
+ recommended_product,
+ scoring_data,
+ context
+ )
+ if prompt_drafts:
+ return prompt_drafts
+
+ draft_approaches = [
+ {
+ 'name': 'professional_direct',
+ 'tone': 'professional and direct',
+ 'focus': 'business value and ROI',
+ 'length': 'concise'
+ },
+ {
+ 'name': 'consultative',
+ 'tone': 'consultative and helpful',
+ 'focus': 'solving specific pain points',
+ 'length': 'medium'
+ },
+ {
+ 'name': 'industry_expert',
+ 'tone': 'industry expert and insightful',
+ 'focus': 'industry trends and challenges',
+ 'length': 'detailed'
+ },
+ {
+ 'name': 'relationship_building',
+ 'tone': 'warm and relationship-focused',
+ 'focus': 'building connection and trust',
+ 'length': 'personal'
+ }
+ ]
+
+ generated_drafts: List[Dict[str, Any]] = []
+
+ for approach in draft_approaches:
+ email_body = self._generate_single_email_draft(
+ customer_data,
+ recommended_product,
+ scoring_data,
+ approach,
+ context
+ )
+
+ subject_lines = self._generate_subject_lines(
+ customer_data, recommended_product, approach, context
+ )
+
+ draft_id = f"uuid:{str(uuid.uuid4())}"
+ selected_subject = subject_lines[0] if subject_lines else f"Partnership opportunity for {company_info.get('name', 'your company')}"
+
+ draft = {
+ 'draft_id': draft_id,
+ 'approach': approach['name'],
+ 'tone': approach['tone'],
+ 'focus': approach['focus'],
+ 'subject': selected_subject,
+ 'subject_alternatives': subject_lines[1:4] if len(subject_lines) > 1 else [],
+ 'email_body': email_body,
+ 'email_format': 'html',
+ 'recipient_email': recipient_identity.get('email'),
+ 'recipient_name': recipient_identity.get('full_name'),
+ 'customer_first_name': recipient_identity.get('first_name'),
+ 'call_to_action': self._extract_call_to_action(email_body),
+ 'personalization_score': self._calculate_personalization_score(email_body, customer_data),
+ 'generated_at': datetime.now().isoformat(),
+ 'status': 'draft',
+ 'metadata': {
+ 'customer_company': company_info.get('name', 'Unknown'),
+ 'contact_name': contact_info.get('name', 'Unknown'),
+ 'recipient_email': recipient_identity.get('email'),
+ 'recipient_name': recipient_identity.get('full_name'),
+ 'email_format': 'html',
+ 'recommended_product': recommended_product.get('product_name', 'Unknown') if recommended_product else 'Unknown',
+ 'pain_points_addressed': len([p for p in pain_points if p.get('severity') in ['high', 'medium']]),
+ 'generation_method': 'llm_powered'
+ }
+ }
+
+ generated_drafts.append(draft)
+
+ if not generated_drafts:
+ raise RuntimeError("LLM returned no outreach drafts; initial outreach cannot proceed.")
+
+ self.logger.info("Generated %s email drafts successfully", len(generated_drafts))
+ return generated_drafts

  def _generate_email_drafts_from_prompt(self, customer_data: Dict[str, Any], recommended_product: Dict[str, Any], scoring_data: Dict[str, Any], context: Dict[str, Any]) -> List[Dict[str, Any]]:
  """Attempt to generate drafts using configured prompt template."""
@@ -1426,9 +1409,9 @@ class InitialOutreachStage(BaseStage):

  return cleaned_content

- except Exception as e:
- self.logger.error(f"Failed to generate single email draft: {str(e)}")
- return self._generate_template_email(customer_data, recommended_product, approach, context)
+ except Exception as e:
+ self.logger.error("LLM single draft generation failed for approach %s: %s", approach.get('name'), e)
+ raise RuntimeError(f"Failed to generate draft for approach {approach.get('name')}") from e

  def _create_email_generation_prompt(self, customer_context: Dict[str, Any], approach: Dict[str, Any]) -> str:
  """Create LLM prompt for email generation."""
@@ -1671,118 +1654,6 @@ Generate 4 subject lines, one per line, no numbering or bullets:"""

  return min(score, 100)

- def _generate_template_email(self, customer_data: Dict[str, Any], recommended_product: Dict[str, Any],
- approach: Dict[str, Any], context: Dict[str, Any]) -> str:
- """Generate a deterministic HTML email when LLM generation is unavailable."""
- input_data = context.get('input_data', {}) or {}
- company_info = customer_data.get('companyInfo', {}) or {}
- contact_info = customer_data.get('primaryContact', {}) or {}
- identity = context.get('_recipient_identity') or self._resolve_recipient_identity(customer_data, context)
-
- first_name = identity.get('first_name') or contact_info.get('name') or input_data.get('customer_name') or input_data.get('recipient_name') or 'there'
- first_name = self._extract_first_name(first_name) if isinstance(first_name, str) else 'there'
-
- staff_name = input_data.get('staff_name') or self.config.get('staff_name', 'Sales Team')
- org_name = input_data.get('org_name') or self.config.get('org_name', 'FuseSell')
- company_name = company_info.get('name', 'your company')
- industry = company_info.get('industry', 'your industry')
- approach_name = approach.get('name', 'professional_direct')
- approach_focus = approach.get('focus', 'business value')
- approach_tone = approach.get('tone', 'professional')
-
- benefits: List[str] = []
- if recommended_product:
- product_name = recommended_product.get('product_name')
- benefits = [b for b in (recommended_product.get('key_benefits') or []) if b]
- if not benefits and product_name:
- benefits = [
- f"{product_name} accelerates {company_name}'s {approach_focus} goals",
- f"Designed specifically for {industry} operators",
- "Rapid onboarding with dedicated local support"
- ]
- if not benefits:
- benefits = [
- f"Measurable improvements in {approach_focus}",
- f"Playbooks tailored for {industry} teams",
- "Guided adoption with FuseSell specialists"
- ]
-
- bullet_html = ''.join(f"<li>{benefit}</li>" for benefit in benefits)
-
- cta_map = {
- 'professional_direct': f"Would you have 20 minutes this week to explore how FuseSell can lighten {company_name}'s {approach_focus} workload?",
- 'consultative': f"Could we schedule a short working session to dig into your current {approach_focus} priorities?",
- 'industry_expert': f"Shall we review the latest {industry} benchmarks together and map them to your roadmap?",
- 'relationship_building': f"I'd love to hear how your team is approaching {approach_focus}; is a quick virtual coffee an option?"
- }
- cta_text = cta_map.get(approach_name, f"Would you be open to a brief call to discuss {approach_focus} priorities at {company_name}?")
-
- product_sentence = ""
- if recommended_product and recommended_product.get('product_name'):
- product_sentence = f"<p>We engineered <strong>{recommended_product['product_name']}</strong> specifically for teams tackling {approach_focus} in {industry}. It's a natural fit for {company_name}'s next phase.</p>"
-
- news = company_info.get('recentNews')
- intro_sentence = f"<p>I'm reaching out because leaders at {company_name} are raising the same questions we hear from other {industry} innovators: how to keep {approach_focus} moving without burning out the team.</p>"
- if news:
- intro_sentence = f"<p>I noticed the recent update about {news}. Many {industry} peers use FuseSell to capitalise on moments exactly like this.</p>"
-
- html = (
- "<html><body>"
- f"<p>Hi {first_name},</p>"
- f"{intro_sentence}"
- f"<p>From our {approach_tone.lower()} conversations with {industry} operators, three ideas could help {company_name} right away:</p>"
- f"<ul>{bullet_html}</ul>"
- f"{product_sentence}"
- f"<p>{cta_text}</p>"
- f"<p>Best regards,<br>{staff_name}<br>{org_name}</p>"
- "</body></html>"
- )
-
- return html
-
- def _generate_fallback_draft(self, customer_data: Dict[str, Any], recommended_product: Dict[str, Any], context: Dict[str, Any]) -> List[Dict[str, Any]]:
- """Generate fallback draft when LLM generation fails."""
- draft_id = f"uuid:{str(uuid.uuid4())}"
- recipient_identity = self._resolve_recipient_identity(customer_data, context)
- context.setdefault('_recipient_identity', recipient_identity)
- if recipient_identity.get('first_name') and not context.get('customer_first_name'):
- context['customer_first_name'] = recipient_identity['first_name']
-
- template_body = self._generate_template_email(
- customer_data,
- recommended_product,
- {'tone': 'professional'},
- context
- )
- email_body = self._clean_email_content(template_body, context)
-
- fallback_subjects = self._generate_fallback_subject_lines(customer_data, recommended_product)
-
- return [{
- 'draft_id': draft_id,
- 'approach': 'fallback_template',
- 'tone': 'professional',
- 'focus': 'general outreach',
- 'subject': fallback_subjects[0],
- 'subject_alternatives': fallback_subjects[1:],
- 'email_body': email_body,
- 'email_format': 'html',
- 'recipient_email': recipient_identity.get('email'),
- 'recipient_name': recipient_identity.get('full_name'),
- 'customer_first_name': recipient_identity.get('first_name'),
- 'call_to_action': 'Would you be interested in a brief call?',
- 'personalization_score': 50,
- 'generated_at': datetime.now().isoformat(),
- 'status': 'draft',
- 'metadata': {
- 'generation_method': 'template_fallback',
- 'note': 'Generated using template due to LLM failure',
- 'recipient_email': recipient_identity.get('email'),
- 'recipient_name': recipient_identity.get('full_name'),
- 'email_format': 'html'
- }
- }]
-
  def _get_mock_email_drafts(self, customer_data: Dict[str, Any], recommended_product: Dict[str, Any], context: Dict[str, Any]) -> List[Dict[str, Any]]:
  """Get mock email drafts for dry run."""
  input_data = context.get('input_data', {})
fusesell_local/tests/test_api.py CHANGED
@@ -24,20 +24,37 @@ def base_options(**overrides):
  return options


- def test_build_config_generates_defaults():
- config = build_config(base_options())
-
- assert config["execution_id"].startswith("fusesell_")
- assert config["output_format"] == "json"
- assert config["skip_stages"] == []
- assert config["send_immediately"] is False
-
-
- def test_validate_config_detects_missing_sources():
- config = build_config(
- base_options(
- input_description="",
- input_website="",
+ def test_build_config_generates_defaults():
+ config = build_config(base_options())
+
+ assert config["execution_id"].startswith("fusesell_")
+ assert config["output_format"] == "json"
+ assert config["skip_stages"] == []
+ assert config["send_immediately"] is False
+
+
+ def test_build_config_normalizes_llm_base_url():
+ config = build_config(
+ base_options(
+ llm_base_url="https://custom-llm.example.com",
+ )
+ )
+
+ assert config["llm_base_url"] == "https://custom-llm.example.com/v1"
+
+
+ def test_build_config_preserves_azure_base_url():
+ azure_url = "https://rtx-openai.openai.azure.com/openai/deployments/gpt4"
+ config = build_config(base_options(llm_base_url=azure_url))
+
+ assert config["llm_base_url"] == azure_url
+
+
+ def test_validate_config_detects_missing_sources():
+ config = build_config(
+ base_options(
+ input_description="",
+ input_website="",
  input_freetext="",
  )
  )
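
The added tests pin down both sides of the normalization contract: plain hosts gain /v1, while Azure deployment URLs pass through untouched. To run just these tests, an invocation along these lines should work (assuming pytest is installed; -k selects on the "base_url" substring in the new test names):

    import pytest

    # Select only the new base-URL tests from the test_api module.
    pytest.main(["fusesell_local/tests/test_api.py", "-k", "base_url"])
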
fusesell_local/utils/__init__.py CHANGED
@@ -2,14 +2,15 @@
  FuseSell Utilities - Common utilities and helper functions
  """

- from .data_manager import LocalDataManager
- from .llm_client import LLMClient
- from .validators import InputValidator
- from .logger import setup_logging
-
- __all__ = [
- 'LocalDataManager',
- 'LLMClient',
- 'InputValidator',
- 'setup_logging'
- ]
+ from .data_manager import LocalDataManager
+ from .llm_client import LLMClient, normalize_llm_base_url
+ from .validators import InputValidator
+ from .logger import setup_logging
+
+ __all__ = [
+ 'LocalDataManager',
+ 'LLMClient',
+ 'normalize_llm_base_url',
+ 'InputValidator',
+ 'setup_logging'
+ ]
fusesell_local/utils/llm_client.py CHANGED
@@ -2,17 +2,74 @@
  LLM Client for OpenAI API integration
  """

- try:
- import openai
- OPENAI_AVAILABLE = True
- except ImportError:
- OPENAI_AVAILABLE = False
- openai = None
-
- from typing import Dict, Any, List, Optional
- import logging
- import time
- import json
+ try:
+ import openai
+ OPENAI_AVAILABLE = True
+ except ImportError:
+ OPENAI_AVAILABLE = False
+ openai = None
+
+ from typing import Dict, Any, List, Optional
+ import logging
+ import time
+ import json
+ from urllib.parse import urlsplit, urlunsplit
+
+
+ def normalize_llm_base_url(base_url: Optional[str], provider: Optional[str] = None) -> Optional[str]:
+ """
+ Ensure LLM base URLs point to the OpenAI-compatible /v1 endpoint unless they already target
+ Azure deployment paths that do not expect the suffix.
+
+ Args:
+ base_url: User-provided base URL.
+ provider: Optional provider hint (e.g., 'azure-openai').
+
+ Returns:
+ Normalized base URL with `/v1` appended when needed, or ``None`` if input is empty.
+ """
+ if not base_url:
+ return None
+
+ normalized = base_url.strip()
+ if not normalized:
+ return None
+
+ provider_hint = (provider or "").lower()
+ if provider_hint.startswith("azure") or "openai.azure.com" in normalized.lower():
+ return normalized.rstrip("/")
+
+ try:
+ parsed = urlsplit(normalized)
+ except ValueError:
+ parsed = None
+
+ if parsed and parsed.scheme and parsed.netloc:
+ path = parsed.path.rstrip("/")
+ segments = [segment for segment in path.split("/") if segment]
+
+ if not segments:
+ new_path = "/v1"
+ elif segments[-1] in {"v1", "v1beta"} or "v1" in segments or "deployments" in segments:
+ new_path = "/" + "/".join(segments)
+ else:
+ new_path = f"{path}/v1" if path else "/v1"
+
+ rebuilt = urlunsplit(
+ (
+ parsed.scheme,
+ parsed.netloc,
+ new_path,
+ parsed.query,
+ parsed.fragment,
+ )
+ )
+ return rebuilt.rstrip("/")
+
+ stripped = normalized.rstrip("/")
+ if stripped.endswith("/v1") or "/v1/" in stripped:
+ return stripped
+ return f"{stripped}/v1"


  class LLMClient:
@@ -35,13 +92,15 @@ class LLMClient:

  self.api_key = api_key
  self.model = model
- self.logger = logging.getLogger("fusesell.llm_client")
-
- # Initialize OpenAI client
- if base_url:
- self.client = openai.OpenAI(api_key=api_key, base_url=base_url)
- else:
- self.client = openai.OpenAI(api_key=api_key)
+ self.logger = logging.getLogger("fusesell.llm_client")
+
+ normalized_base_url = normalize_llm_base_url(base_url)
+
+ # Initialize OpenAI client
+ if normalized_base_url:
+ self.client = openai.OpenAI(api_key=api_key, base_url=normalized_base_url)
+ else:
+ self.client = openai.OpenAI(api_key=api_key)

  def chat_completion(
  self,
@@ -280,4 +339,4 @@ Response:"""
  return len(response) > 0
  except Exception as e:
  self.logger.error(f"API key validation failed: {str(e)}")
- return False
+ return False
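
Taken together, the llm_client.py hunks mean any base_url handed to LLMClient is routed through normalize_llm_base_url before openai.OpenAI is constructed. A short sketch of the normalization rules as written above; the example.com and internal hostnames are illustrative, not part of the package:

    from fusesell_local.utils import normalize_llm_base_url  # re-exported in 1.3.3

    # URLs that already end in /v1 only lose the trailing slash.
    assert normalize_llm_base_url("https://api.example.com/v1/") == "https://api.example.com/v1"
    # Azure OpenAI deployment URLs keep their path; no /v1 is appended.
    azure = "https://rtx-openai.openai.azure.com/openai/deployments/gpt4"
    assert normalize_llm_base_url(azure) == azure
    # A provider hint forces the same pass-through for Azure-style gateways on other hosts.
    gateway = "https://llm-gateway.internal/openai/deployments/gpt4"
    assert normalize_llm_base_url(gateway, provider="azure-openai") == gateway
    # Empty input propagates as None, so LLMClient falls back to the default OpenAI endpoint.
    assert normalize_llm_base_url(None) is None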