@agent-blueprint/free-blueprints 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83) hide show
  1. package/README.md +72 -0
  2. package/dist/blueprints/employee-onboarding/files/agent-specifications.d.ts +1 -0
  3. package/dist/blueprints/employee-onboarding/files/agent-specifications.js +278 -0
  4. package/dist/blueprints/employee-onboarding/files/agent-specifications.js.map +1 -0
  5. package/dist/blueprints/employee-onboarding/files/agents.d.ts +1 -0
  6. package/dist/blueprints/employee-onboarding/files/agents.js +61 -0
  7. package/dist/blueprints/employee-onboarding/files/agents.js.map +1 -0
  8. package/dist/blueprints/employee-onboarding/files/architecture-decisions.d.ts +1 -0
  9. package/dist/blueprints/employee-onboarding/files/architecture-decisions.js +79 -0
  10. package/dist/blueprints/employee-onboarding/files/architecture-decisions.js.map +1 -0
  11. package/dist/blueprints/employee-onboarding/files/business-context.d.ts +1 -0
  12. package/dist/blueprints/employee-onboarding/files/business-context.js +107 -0
  13. package/dist/blueprints/employee-onboarding/files/business-context.js.map +1 -0
  14. package/dist/blueprints/employee-onboarding/files/evaluation-criteria.d.ts +1 -0
  15. package/dist/blueprints/employee-onboarding/files/evaluation-criteria.js +126 -0
  16. package/dist/blueprints/employee-onboarding/files/evaluation-criteria.js.map +1 -0
  17. package/dist/blueprints/employee-onboarding/files/getting-started.d.ts +1 -0
  18. package/dist/blueprints/employee-onboarding/files/getting-started.js +89 -0
  19. package/dist/blueprints/employee-onboarding/files/getting-started.js.map +1 -0
  20. package/dist/blueprints/employee-onboarding/files/guardrails-and-governance.d.ts +1 -0
  21. package/dist/blueprints/employee-onboarding/files/guardrails-and-governance.js +182 -0
  22. package/dist/blueprints/employee-onboarding/files/guardrails-and-governance.js.map +1 -0
  23. package/dist/blueprints/employee-onboarding/files/implementation-state.d.ts +1 -0
  24. package/dist/blueprints/employee-onboarding/files/implementation-state.js +91 -0
  25. package/dist/blueprints/employee-onboarding/files/implementation-state.js.map +1 -0
  26. package/dist/blueprints/employee-onboarding/files/platform-connectivity.d.ts +1 -0
  27. package/dist/blueprints/employee-onboarding/files/platform-connectivity.js +140 -0
  28. package/dist/blueprints/employee-onboarding/files/platform-connectivity.js.map +1 -0
  29. package/dist/blueprints/employee-onboarding/files/skill.d.ts +1 -0
  30. package/dist/blueprints/employee-onboarding/files/skill.js +126 -0
  31. package/dist/blueprints/employee-onboarding/files/skill.js.map +1 -0
  32. package/dist/blueprints/employee-onboarding/index.d.ts +2 -0
  33. package/dist/blueprints/employee-onboarding/index.js +37 -0
  34. package/dist/blueprints/employee-onboarding/index.js.map +1 -0
  35. package/dist/blueprints/index.d.ts +3 -0
  36. package/dist/blueprints/index.js +14 -0
  37. package/dist/blueprints/index.js.map +1 -0
  38. package/dist/blueprints/rfx-procurement/files/agent-specifications.d.ts +1 -0
  39. package/dist/blueprints/rfx-procurement/files/agent-specifications.js +313 -0
  40. package/dist/blueprints/rfx-procurement/files/agent-specifications.js.map +1 -0
  41. package/dist/blueprints/rfx-procurement/files/agents.d.ts +1 -0
  42. package/dist/blueprints/rfx-procurement/files/agents.js +66 -0
  43. package/dist/blueprints/rfx-procurement/files/agents.js.map +1 -0
  44. package/dist/blueprints/rfx-procurement/files/architecture-decisions.d.ts +1 -0
  45. package/dist/blueprints/rfx-procurement/files/architecture-decisions.js +95 -0
  46. package/dist/blueprints/rfx-procurement/files/architecture-decisions.js.map +1 -0
  47. package/dist/blueprints/rfx-procurement/files/business-context.d.ts +1 -0
  48. package/dist/blueprints/rfx-procurement/files/business-context.js +99 -0
  49. package/dist/blueprints/rfx-procurement/files/business-context.js.map +1 -0
  50. package/dist/blueprints/rfx-procurement/files/evaluation-criteria.d.ts +1 -0
  51. package/dist/blueprints/rfx-procurement/files/evaluation-criteria.js +165 -0
  52. package/dist/blueprints/rfx-procurement/files/evaluation-criteria.js.map +1 -0
  53. package/dist/blueprints/rfx-procurement/files/getting-started.d.ts +1 -0
  54. package/dist/blueprints/rfx-procurement/files/getting-started.js +97 -0
  55. package/dist/blueprints/rfx-procurement/files/getting-started.js.map +1 -0
  56. package/dist/blueprints/rfx-procurement/files/guardrails-and-governance.d.ts +1 -0
  57. package/dist/blueprints/rfx-procurement/files/guardrails-and-governance.js +159 -0
  58. package/dist/blueprints/rfx-procurement/files/guardrails-and-governance.js.map +1 -0
  59. package/dist/blueprints/rfx-procurement/files/implementation-state.d.ts +1 -0
  60. package/dist/blueprints/rfx-procurement/files/implementation-state.js +127 -0
  61. package/dist/blueprints/rfx-procurement/files/implementation-state.js.map +1 -0
  62. package/dist/blueprints/rfx-procurement/files/platform-connectivity.d.ts +1 -0
  63. package/dist/blueprints/rfx-procurement/files/platform-connectivity.js +153 -0
  64. package/dist/blueprints/rfx-procurement/files/platform-connectivity.js.map +1 -0
  65. package/dist/blueprints/rfx-procurement/files/skill.d.ts +1 -0
  66. package/dist/blueprints/rfx-procurement/files/skill.js +127 -0
  67. package/dist/blueprints/rfx-procurement/files/skill.js.map +1 -0
  68. package/dist/blueprints/rfx-procurement/index.d.ts +2 -0
  69. package/dist/blueprints/rfx-procurement/index.js +37 -0
  70. package/dist/blueprints/rfx-procurement/index.js.map +1 -0
  71. package/dist/server.d.ts +2 -0
  72. package/dist/server.js +55 -0
  73. package/dist/server.js.map +1 -0
  74. package/dist/transports/stdio.d.ts +2 -0
  75. package/dist/transports/stdio.js +7 -0
  76. package/dist/transports/stdio.js.map +1 -0
  77. package/dist/transports/worker.d.ts +4 -0
  78. package/dist/transports/worker.js +29 -0
  79. package/dist/transports/worker.js.map +1 -0
  80. package/dist/types.d.ts +34 -0
  81. package/dist/types.js +2 -0
  82. package/dist/types.js.map +1 -0
  83. package/package.json +42 -0
@@ -0,0 +1,313 @@
1
+ export const content = `# Agent Specifications
2
+
3
+ Detailed specifications for all 5 agents in the RFx Procurement Automation system. Each entry defines the agent's role, tools, data contracts, escalation conditions, and success metrics.
4
+
5
+ ---
6
+
7
+ ## 1. RFx Orchestrator
8
+
9
+ - **Role:** Manager
10
+ - **Type:** Orchestrator
11
+ - **Phase:** 1
12
+
13
+ ### Responsibilities
14
+
15
+ - Accept incoming RFx documents and classify them by type (RFP, RFQ, RFI).
16
+ - Create and maintain a tracking record for each RFx through its full lifecycle.
17
+ - Route tasks to the appropriate worker agents based on the current stage of processing.
18
+ - Enforce approval gates at configured checkpoints (e.g., before response submission).
19
+ - Monitor task completion, enforce timeouts, and handle retries and escalations.
20
+
21
+ ### Tools
22
+
23
+ #### assign_rfx_task
24
+
25
+ Assigns a specific task to a worker agent. Creates a task record linked to the RFx tracking record.
26
+
27
+ - **Inputs:** \`rfx_id\` (string), \`target_agent\` (string, one of: requirements_analyzer, content_library_matcher, response_composer, compliance_validator), \`task_type\` (string, e.g., "extract_requirements", "match_content", "compose_response", "validate_compliance"), \`payload\` (object, task-specific data including relevant document content or prior stage outputs).
28
+ - **Outputs:** \`task_id\` (string), \`status\` ("assigned"), \`assigned_at\` (ISO-8601 timestamp).
29
+
30
+ #### check_completion_status
31
+
32
+ Checks the current status of all tasks for a given RFx.
33
+
34
+ - **Inputs:** \`rfx_id\` (string).
35
+ - **Outputs:** \`rfx_id\` (string), \`overall_status\` (string: "intake", "requirements_extracted", "content_matched", "response_drafted", "compliance_validated", "pending_approval", "submitted", "failed"), \`tasks\` (array of objects with \`task_id\`, \`agent\`, \`status\`, \`completed_at\`, \`error\` if applicable).
36
+
37
+ #### enforce_approval_gate
38
+
39
+ Triggers a human approval checkpoint. Pauses processing until approval is received or the request is rejected.
40
+
41
+ - **Inputs:** \`rfx_id\` (string), \`gate_type\` (string: "phase1_exit", "response_review", "final_submission"), \`artifacts\` (array of objects describing what the approver should review, each with \`type\` and \`content_reference\`).
42
+ - **Outputs:** \`gate_id\` (string), \`status\` ("pending_approval"), \`approvers\` (array of strings, stakeholder identifiers), \`requested_at\` (ISO-8601 timestamp).
43
+
44
+ #### notify_stakeholders
45
+
46
+ Sends a notification to relevant stakeholders about RFx status changes, approaching deadlines, or required actions.
47
+
48
+ - **Inputs:** \`rfx_id\` (string), \`notification_type\` (string: "status_update", "deadline_warning", "approval_required", "escalation"), \`recipients\` (array of strings), \`message\` (string).
49
+ - **Outputs:** \`notification_id\` (string), \`sent_at\` (ISO-8601 timestamp), \`delivery_status\` (string).
50
+
51
+ ### Input Contract
52
+
53
+ The Orchestrator receives raw RFx documents (PDF, DOCX, or plain text) along with metadata: \`source_channel\` (where the document came from), \`received_at\` (timestamp), \`submitting_organization\` (if known), and \`priority\` (optional override).
54
+
55
+ ### Output Contract
56
+
57
+ The Orchestrator produces a complete RFx tracking record including: classification, extracted requirements (from Requirements Analyzer), matched content (from Content Library Matcher), draft responses (from Response Composer), compliance report (from Compliance Validator), approval status, and audit log of all actions taken.
58
+
59
+ ### Escalation Conditions
60
+
61
+ - Any worker fails twice on the same task.
62
+ - A deadline conflict is detected (submission deadline is within 48 hours and processing is incomplete).
63
+ - An approval gate is rejected by a reviewer.
64
+ - The RFx document cannot be classified (unknown format or type).
65
+
66
+ ### Success Metrics
67
+
68
+ - **Routing accuracy:** > 95% of tasks assigned to the correct worker on the first attempt.
69
+ - **End-to-end cycle time:** Total processing time from intake to draft-ready is measurable and decreasing over time.
70
+ - **Zero dropped RFx documents:** Every document that enters the system has a tracking record and reaches a terminal state.
71
+
72
+ ---
73
+
74
+ ## 2. Requirements Analyzer
75
+
76
+ - **Role:** Worker
77
+ - **Type:** Specialist
78
+ - **Phase:** 1
79
+
80
+ ### Responsibilities
81
+
82
+ - Parse RFx documents in multiple formats (PDF, DOCX, plain text) into structured content.
83
+ - Extract individual requirements from document text, including nested and conditional requirements.
84
+ - Categorize each requirement as mandatory, scored, or informational.
85
+ - Identify all deadlines, milestones, and time-bound constraints.
86
+ - Flag ambiguous or contradictory requirements for human review.
87
+
88
+ ### Tools
89
+
90
+ #### parse_rfx_document
91
+
92
+ Converts a raw RFx document into structured sections and paragraphs for downstream processing.
93
+
94
+ - **Inputs:** \`document_content\` (string or binary reference), \`document_format\` (string: "pdf", "docx", "txt", "html"), \`document_type\` (string: "rfp", "rfq", "rfi" if known, or "unknown").
95
+ - **Outputs:** \`parsed_document\` (object with \`sections\` array, each containing \`section_number\`, \`title\`, \`content\`, \`subsections\`), \`metadata\` (object with \`page_count\`, \`word_count\`, \`detected_type\`, \`issuing_organization\`), \`parse_confidence\` (number 0-1).
96
+
97
+ #### extract_requirements
98
+
99
+ Identifies and extracts individual requirements from parsed document sections.
100
+
101
+ - **Inputs:** \`parsed_sections\` (array of section objects from parse_rfx_document output), \`extraction_rules\` (optional object with custom keyword patterns or section hints).
102
+ - **Outputs:** \`requirements\` (array of objects, each with \`requirement_id\`, \`text\`, \`source_section\`, \`source_page\`, \`keywords\` array, \`confidence\` score 0-1).
103
+
104
+ #### categorize_requirements
105
+
106
+ Classifies extracted requirements by type and priority.
107
+
108
+ - **Inputs:** \`requirements\` (array of requirement objects from extract_requirements output).
109
+ - **Outputs:** \`categorized_requirements\` (array of objects extending each requirement with \`category\` (string: "mandatory", "scored", "informational"), \`priority\` (string: "critical", "high", "medium", "low"), \`evaluation_weight\` (number, if stated in the document), \`categorization_confidence\` (number 0-1)).
110
+
111
+ #### identify_deadlines
112
+
113
+ Extracts all time-bound constraints from the document.
114
+
115
+ - **Inputs:** \`parsed_document\` (object from parse_rfx_document), \`reference_date\` (ISO-8601, typically the document receipt date for calculating relative deadlines).
116
+ - **Outputs:** \`deadlines\` (array of objects, each with \`deadline_type\` (string: "submission", "questions_due", "site_visit", "notification", "other"), \`date\` (ISO-8601), \`description\` (string), \`source_section\` (string), \`is_mandatory\` (boolean)).
117
+
118
+ ### Input Contract
119
+
120
+ Receives a raw RFx document (content or reference) and its format. May also receive optional extraction hints from the Orchestrator based on the document type classification.
121
+
122
+ ### Output Contract
123
+
124
+ Returns structured data: parsed document sections, extracted and categorized requirements, and identified deadlines. All outputs include confidence scores. Requirements with confidence below 0.7 are flagged for human review.
125
+
126
+ ### Escalation Conditions
127
+
128
+ - Document parse confidence falls below 0.6 (heavily formatted, scanned image, or corrupted file).
129
+ - More than 20% of extracted requirements have categorization confidence below 0.7.
130
+ - Contradictory requirements detected (e.g., two mutually exclusive mandatory conditions).
131
+ - Document contains embedded files or references to external documents that cannot be resolved.
132
+
133
+ ### Success Metrics
134
+
135
+ - **Extraction recall:** > 95% of requirements identified compared to human baseline.
136
+ - **Categorization accuracy:** > 90% of requirements correctly categorized (mandatory/scored/informational).
137
+ - **Deadline identification:** 100% of submission deadlines correctly extracted.
138
+
139
+ ---
140
+
141
+ ## 3. Content Library Matcher
142
+
143
+ - **Role:** Worker
144
+ - **Type:** Specialist
145
+ - **Phase:** 2
146
+
147
+ ### Responsibilities
148
+
149
+ - Search organizational knowledge bases for content relevant to each extracted requirement.
150
+ - Score the relevance of matched content against specific requirement text.
151
+ - Identify content gaps where no existing material adequately addresses a requirement.
152
+ - Track content freshness and flag stale matches that may need SME review.
153
+
154
+ ### Tools
155
+
156
+ #### search_knowledge_base
157
+
158
+ Performs semantic and keyword search across configured content repositories.
159
+
160
+ - **Inputs:** \`query\` (string, the requirement text or a derived search query), \`filters\` (optional object with \`content_type\` array, \`max_age_days\` number, \`source_repository\` string), \`max_results\` (number, default 10).
161
+ - **Outputs:** \`results\` (array of objects, each with \`content_id\`, \`title\`, \`snippet\` (relevant excerpt), \`source\` (repository name and path), \`last_updated\` (ISO-8601), \`content_type\` (string: "previous_response", "policy_document", "technical_spec", "marketing_material")).
162
+
163
+ #### score_content_relevance
164
+
165
+ Evaluates how well a piece of content addresses a specific requirement.
166
+
167
+ - **Inputs:** \`requirement\` (object with \`requirement_id\`, \`text\`, \`category\`), \`content_candidates\` (array of content objects from search results).
168
+ - **Outputs:** \`scored_matches\` (array of objects, each with \`content_id\`, \`relevance_score\` (number 0-100), \`coverage_assessment\` (string: "full", "partial", "tangential"), \`reuse_recommendation\` (string: "use_as_is", "adapt", "reference_only"), \`staleness_flag\` (boolean, true if content is older than 12 months)).
169
+
170
+ #### identify_content_gaps
171
+
172
+ Analyzes the set of requirements against available content matches to find gaps.
173
+
174
+ - **Inputs:** \`requirements\` (array of categorized requirement objects), \`matches\` (array of scored match results, keyed by requirement_id).
175
+ - **Outputs:** \`gaps\` (array of objects, each with \`requirement_id\`, \`gap_type\` (string: "no_match", "low_relevance", "stale_only", "partial_coverage"), \`recommended_action\` (string: "generate_new", "request_sme_input", "adapt_partial_match"), \`priority\` (string based on requirement category and evaluation weight)).
176
+
177
+ ### Input Contract
178
+
179
+ Receives categorized requirements from the Requirements Analyzer (via the Orchestrator). Needs access to configured knowledge base endpoints.
180
+
181
+ ### Output Contract
182
+
183
+ Returns a mapping of requirements to content matches with relevance scores, plus a gap analysis identifying requirements that need original content or SME input.
184
+
185
+ ### Escalation Conditions
186
+
187
+ - Knowledge base connection fails or returns errors.
188
+ - More than 50% of mandatory requirements have no match above relevance score 60.
189
+ - Content freshness check reveals the majority of top matches are older than 18 months.
190
+
191
+ ### Success Metrics
192
+
193
+ - **Average relevance score:** > 80% for top match per requirement.
194
+ - **Gap identification accuracy:** > 90% agreement with human assessment of content coverage.
195
+ - **Search latency:** < 5 seconds per requirement query.
196
+
197
+ ---
198
+
199
+ ## 4. Response Composer
200
+
201
+ - **Role:** Worker
202
+ - **Type:** Specialist
203
+ - **Phase:** 2
204
+
205
+ ### Responsibilities
206
+
207
+ - Assemble draft responses from matched content, adapting tone and structure to match the RFx requirements.
208
+ - Generate new content for requirements where no adequate existing content was found.
209
+ - Maintain consistent formatting, voice, and structure across all response sections.
210
+ - Flag all generated (non-reused) content for mandatory SME review.
211
+
212
+ ### Tools
213
+
214
+ #### assemble_response_draft
215
+
216
+ Combines matched content into a structured response for a set of requirements.
217
+
218
+ - **Inputs:** \`requirements\` (array of requirement objects with their matched content), \`response_structure\` (object defining section order, numbering scheme, and formatting rules from the RFx), \`tone_guidelines\` (optional object with \`formality_level\`, \`voice\`, \`prohibited_terms\`).
219
+ - **Outputs:** \`draft_response\` (object with \`sections\` array, each containing \`section_id\`, \`requirement_ids\` addressed, \`content\` (assembled text), \`content_sources\` (array of content_ids used), \`is_generated\` (boolean, false for reused content), \`confidence\` (number 0-1)).
220
+
221
+ #### generate_gap_content
222
+
223
+ Creates original content for requirements where no suitable existing content was found.
224
+
225
+ - **Inputs:** \`requirement\` (object with full requirement details), \`context\` (object with \`organization_profile\` if available, \`related_content\` array of tangentially relevant matches, \`gap_type\` from Content Library Matcher).
226
+ - **Outputs:** \`generated_content\` (object with \`text\`, \`confidence\` (number 0-1), \`requires_sme_review\` (always true for generated content), \`review_notes\` (string describing what the SME should verify), \`sources_referenced\` (array of content_ids used as context)).
227
+
228
+ #### format_response_document
229
+
230
+ Applies formatting rules from the RFx to the assembled draft.
231
+
232
+ - **Inputs:** \`draft_sections\` (array of section objects from assemble_response_draft), \`formatting_rules\` (object with \`page_limit\`, \`font_requirements\`, \`margin_requirements\`, \`section_numbering_scheme\`, \`required_appendices\`).
233
+ - **Outputs:** \`formatted_document\` (object with \`sections\` array with formatting applied, \`total_pages\` (estimated), \`formatting_warnings\` (array of strings describing any rule violations, e.g., "Section 3 exceeds recommended length by 200 words")).
234
+
235
+ ### Input Contract
236
+
237
+ Receives requirement-to-content mappings and gap analysis from the Content Library Matcher (via Orchestrator). Also receives formatting rules extracted by the Requirements Analyzer.
238
+
239
+ ### Output Contract
240
+
241
+ Returns a complete draft response document with clear tracking of which content is reused vs. generated. All generated content is flagged for SME review. Formatting warnings are included for any rule violations.
242
+
243
+ ### Escalation Conditions
244
+
245
+ - More than 30% of the response consists of generated (non-reused) content. This indicates a significant knowledge base gap.
246
+ - Formatting rules cannot be satisfied (e.g., response exceeds page limit even after condensing).
247
+ - Contradictory formatting requirements are detected.
248
+
249
+ ### Success Metrics
250
+
251
+ - **Response completeness:** 100% of mandatory requirements have a corresponding response section.
252
+ - **Content reuse rate:** > 60% of response content sourced from existing library (higher is better, indicates library health).
253
+ - **Formatting compliance:** Zero formatting rule violations in final output.
254
+
255
+ ---
256
+
257
+ ## 5. Compliance Validator
258
+
259
+ - **Role:** Worker
260
+ - **Type:** Specialist
261
+ - **Phase:** 2
262
+
263
+ ### Responsibilities
264
+
265
+ - Cross-check every response section against the full set of mandatory requirements.
266
+ - Validate that formatting rules (page limits, font, margins, section structure) are met.
267
+ - Flag incomplete, non-responsive, or ambiguous response sections.
268
+ - Produce a compliance report summarizing pass/fail status for every requirement and formatting rule.
269
+
270
+ ### Tools
271
+
272
+ #### validate_mandatory_requirements
273
+
274
+ Checks that every mandatory requirement has a corresponding response that substantively addresses it.
275
+
276
+ - **Inputs:** \`mandatory_requirements\` (array of requirement objects where category is "mandatory"), \`response_sections\` (array of draft response sections with their addressed requirement_ids).
277
+ - **Outputs:** \`validation_results\` (array of objects, each with \`requirement_id\`, \`status\` (string: "addressed", "partially_addressed", "not_addressed", "non_responsive"), \`matched_section_id\` (string or null), \`assessment_notes\` (string describing any concerns), \`confidence\` (number 0-1)).
278
+
279
+ #### check_formatting_rules
280
+
281
+ Validates the formatted document against all formatting requirements specified in the RFx.
282
+
283
+ - **Inputs:** \`formatted_document\` (object from Response Composer), \`formatting_rules\` (object with all extracted formatting requirements).
284
+ - **Outputs:** \`formatting_compliance\` (object with \`overall_status\` (string: "compliant", "non_compliant", "warnings"), \`checks\` (array of objects, each with \`rule\` (string), \`status\` (string: "pass", "fail", "warning"), \`details\` (string))).
285
+
286
+ #### flag_incomplete_sections
287
+
288
+ Identifies response sections that are incomplete, vague, or non-responsive to their target requirements.
289
+
290
+ - **Inputs:** \`response_sections\` (array of draft response sections), \`requirements\` (array of all requirements with categories).
291
+ - **Outputs:** \`flags\` (array of objects, each with \`section_id\`, \`requirement_id\`, \`flag_type\` (string: "incomplete", "vague", "non_responsive", "placeholder_detected", "unsubstantiated_claim"), \`severity\` (string: "blocking", "warning"), \`description\` (string), \`recommended_action\` (string)).
292
+
293
+ ### Input Contract
294
+
295
+ Receives the formatted draft response from the Response Composer (via Orchestrator), the full set of categorized requirements from the Requirements Analyzer, and the formatting rules.
296
+
297
+ ### Output Contract
298
+
299
+ Returns a comprehensive compliance report with: per-requirement validation status, formatting compliance results, and flagged sections with severity and recommended actions. Any "blocking" flags prevent the response from proceeding to human approval without remediation.
300
+
301
+ ### Escalation Conditions
302
+
303
+ - Any mandatory requirement has status "not_addressed."
304
+ - Formatting compliance overall status is "non_compliant" and cannot be remediated automatically.
305
+ - More than 3 sections flagged as "non_responsive."
306
+
307
+ ### Success Metrics
308
+
309
+ - **Compliance check accuracy:** > 98% agreement with human compliance review.
310
+ - **False negative rate:** < 1% (missed compliance issues that a human reviewer catches).
311
+ - **Validation throughput:** Complete validation of a full RFx response in under 10 minutes.
312
+ `;
313
+ //# sourceMappingURL=agent-specifications.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"agent-specifications.js","sourceRoot":"","sources":["../../../../src/blueprints/rfx-procurement/files/agent-specifications.ts"],"names":[],"mappings":"AAAA,MAAM,CAAC,MAAM,OAAO,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAuTtB,CAAC"}
@@ -0,0 +1 @@
1
+ export declare const content = "# Agent Coordination Protocol\n\nUniversal rules governing how the 5 RFx Procurement agents communicate and coordinate.\n\n## Message Format\n\nAll inter-agent messages use a standard envelope:\n\n```\n{\n \"message_id\": \"unique-uuid\",\n \"from_agent\": \"agent-name\",\n \"to_agent\": \"agent-name\",\n \"rfx_id\": \"tracking-id-for-the-rfx\",\n \"task_type\": \"string describing the task\",\n \"payload\": { ... task-specific data ... },\n \"timestamp\": \"ISO-8601\",\n \"correlation_id\": \"original-request-uuid for tracing\"\n}\n```\n\nEvery message must include `rfx_id` and `correlation_id` for traceability.\n\n## Routing Rules\n\n- **All communication flows through the Orchestrator.** Workers never send messages directly to other workers. If the Requirements Analyzer produces output that the Content Library Matcher needs, the Orchestrator receives it first and routes it.\n- **Workers are stateless.** They process a task and return a result. They do not maintain memory of previous tasks or other workers' outputs unless explicitly passed in the payload.\n- **The Orchestrator owns all state.** Task status, dependencies, completion tracking, and approval status live in the Orchestrator's tracking records.\n\n## Retry Policy\n\n- Workers must return a result or an explicit error within 5 minutes per task. The Orchestrator enforces this timeout.\n- On worker failure, the Orchestrator retries the same task once with the same payload.\n- If the retry fails, the task is marked as `failed` and escalated to a human operator with the error details and original payload attached.\n- Workers must be idempotent. Receiving the same task twice with the same `message_id` must produce the same result without side effects.\n\n## Escalation to Manager\n\nWorkers escalate to the Orchestrator (not directly to humans) when:\n\n- Confidence in the result falls below the agent's configured threshold (see agent specifications).\n- A mandatory requirement cannot be fulfilled or categorized.\n- Input data is malformed, incomplete, or in an unsupported format.\n- A dependency on another agent's output is missing or inconsistent.\n\nThe Orchestrator decides whether to retry, re-route, or escalate to a human.\n\n## State Propagation\n\n- The Orchestrator updates the RFx tracking record after every worker task completes.\n- Workers receive only the data they need for their specific task. They do not receive the full RFx tracking record.\n- When a worker's output changes the state of requirements or responses, the Orchestrator is responsible for propagating those changes to downstream tasks.\n\n## Idempotency\n\n- Every task is identified by its `message_id`. Workers must check whether they have already processed a `message_id` before executing.\n- If a duplicate `message_id` is received, the worker returns the previously computed result without re-executing.\n- This applies to all workers without exception. The Orchestrator may re-send tasks during retry or recovery scenarios.\n\n## Ordering and Dependencies\n\n- The Orchestrator enforces task dependencies. For example, the Content Library Matcher cannot run until the Requirements Analyzer has completed.\n- Workers do not enforce ordering. They process whatever task they receive. The Orchestrator is solely responsible for sending tasks in the correct sequence.\n- Parallel execution is allowed when tasks have no dependencies. The Orchestrator may send independent tasks to multiple workers simultaneously.\n";
@@ -0,0 +1,66 @@
1
+ export const content = `# Agent Coordination Protocol
2
+
3
+ Universal rules governing how the 5 RFx Procurement agents communicate and coordinate.
4
+
5
+ ## Message Format
6
+
7
+ All inter-agent messages use a standard envelope:
8
+
9
+ \`\`\`
10
+ {
11
+ "message_id": "unique-uuid",
12
+ "from_agent": "agent-name",
13
+ "to_agent": "agent-name",
14
+ "rfx_id": "tracking-id-for-the-rfx",
15
+ "task_type": "string describing the task",
16
+ "payload": { ... task-specific data ... },
17
+ "timestamp": "ISO-8601",
18
+ "correlation_id": "original-request-uuid for tracing"
19
+ }
20
+ \`\`\`
21
+
22
+ Every message must include \`rfx_id\` and \`correlation_id\` for traceability.
23
+
24
+ ## Routing Rules
25
+
26
+ - **All communication flows through the Orchestrator.** Workers never send messages directly to other workers. If the Requirements Analyzer produces output that the Content Library Matcher needs, the Orchestrator receives it first and routes it.
27
+ - **Workers are stateless.** They process a task and return a result. They do not maintain memory of previous tasks or other workers' outputs unless explicitly passed in the payload.
28
+ - **The Orchestrator owns all state.** Task status, dependencies, completion tracking, and approval status live in the Orchestrator's tracking records.
29
+
30
+ ## Retry Policy
31
+
32
+ - Workers must return a result or an explicit error within 5 minutes per task. The Orchestrator enforces this timeout.
33
+ - On worker failure, the Orchestrator retries the same task once with the same payload.
34
+ - If the retry fails, the task is marked as \`failed\` and escalated to a human operator with the error details and original payload attached.
35
+ - Workers must be idempotent. Receiving the same task twice with the same \`message_id\` must produce the same result without side effects.
36
+
37
+ ## Escalation to Manager
38
+
39
+ Workers escalate to the Orchestrator (not directly to humans) when:
40
+
41
+ - Confidence in the result falls below the agent's configured threshold (see agent specifications).
42
+ - A mandatory requirement cannot be fulfilled or categorized.
43
+ - Input data is malformed, incomplete, or in an unsupported format.
44
+ - A dependency on another agent's output is missing or inconsistent.
45
+
46
+ The Orchestrator decides whether to retry, re-route, or escalate to a human.
47
+
48
+ ## State Propagation
49
+
50
+ - The Orchestrator updates the RFx tracking record after every worker task completes.
51
+ - Workers receive only the data they need for their specific task. They do not receive the full RFx tracking record.
52
+ - When a worker's output changes the state of requirements or responses, the Orchestrator is responsible for propagating those changes to downstream tasks.
53
+
54
+ ## Idempotency
55
+
56
+ - Every task is identified by its \`message_id\`. Workers must check whether they have already processed a \`message_id\` before executing.
57
+ - If a duplicate \`message_id\` is received, the worker returns the previously computed result without re-executing.
58
+ - This applies to all workers without exception. The Orchestrator may re-send tasks during retry or recovery scenarios.
59
+
60
+ ## Ordering and Dependencies
61
+
62
+ - The Orchestrator enforces task dependencies. For example, the Content Library Matcher cannot run until the Requirements Analyzer has completed.
63
+ - Workers do not enforce ordering. They process whatever task they receive. The Orchestrator is solely responsible for sending tasks in the correct sequence.
64
+ - Parallel execution is allowed when tasks have no dependencies. The Orchestrator may send independent tasks to multiple workers simultaneously.
65
+ `;
66
+ //# sourceMappingURL=agents.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"agents.js","sourceRoot":"","sources":["../../../../src/blueprints/rfx-procurement/files/agents.ts"],"names":[],"mappings":"AAAA,MAAM,CAAC,MAAM,OAAO,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAgEtB,CAAC"}
@@ -0,0 +1 @@
1
+ export declare const content = "# Architecture Decisions\n\nThis document records the key architectural decisions for the RFx Procurement Automation system and the reasoning behind each.\n\n---\n\n## 1. Why Manager-Workers (not Pipeline or Peer-to-Peer)\n\n**Decision:** Use a Manager-Workers pattern where the RFx Orchestrator (manager) coordinates all four worker agents.\n\n**Alternatives considered:**\n\n- **Pipeline pattern** (linear chain: Analyzer \u2192 Matcher \u2192 Composer \u2192 Validator). Simpler to implement, but too rigid. Real RFx processing is not strictly linear. The Compliance Validator may flag issues that require the Response Composer to revise, which may trigger additional Content Library Matcher searches. A pipeline cannot handle these feedback loops without becoming a tangled mess of backward references.\n- **Peer-to-peer** (agents communicate directly with each other). Maximum flexibility, but creates coordination complexity that scales quadratically with agent count. Debugging becomes difficult because there is no single point where you can observe the full state of an RFx being processed. Error handling and retry logic would need to be duplicated in every agent.\n\n**Why Manager-Workers wins:**\n\n- The Orchestrator provides a single observation point for the entire RFx lifecycle. You can inspect one record to see what has happened, what is pending, and what has failed.\n- Feedback loops are handled cleanly. The Orchestrator can re-route work to any agent at any stage without the agents needing to know about each other.\n- Adding or replacing a worker does not affect other workers. Each worker only knows about the Orchestrator's task contract.\n- The pattern maps naturally to enterprise automation platforms that support agent orchestration.\n\n---\n\n## 2. Why Phased Rollout\n\n**Decision:** Deploy in two phases. Phase 1 (4 weeks): Orchestrator + Requirements Analyzer only. 
Phase 2 (6 weeks): add remaining 3 workers.\n\n**Reasoning:**\n\n- The Requirements Analyzer is the foundation. If it cannot reliably extract and categorize requirements, none of the downstream agents will produce useful results. Testing this in isolation avoids wasting effort on content matching and response generation that would be built on unreliable inputs.\n- Phase 1 delivers standalone value. Even without automated response composition, structured requirements extraction with deadline tracking saves significant manual effort. Stakeholders see value immediately.\n- The Phase 1 decision gate (90% extraction accuracy, 40% time reduction, zero missed mandatory requirements) provides a clear go/no-go signal. If these metrics are not met, it is better to iterate on the analyzer than to build more agents on a shaky foundation.\n- Risk is contained. If the project is cancelled after Phase 1, the organization still has a working requirements extraction tool.\n\n---\n\n## 3. Orchestrator as Single Entry Point\n\n**Decision:** All RFx documents enter the system through the Orchestrator. No direct access to worker agents.\n\n**Reasoning:**\n\n- Every RFx gets a tracking record from the moment it enters the system. No documents can be processed without an audit trail.\n- The Orchestrator applies classification and routing logic before any work begins. This prevents workers from receiving documents they are not equipped to handle.\n- Access control is centralized. The Orchestrator enforces who can submit documents and who can view results, rather than duplicating this logic in every worker.\n- Rate limiting and prioritization happen at a single point. When multiple RFx documents arrive simultaneously, the Orchestrator decides processing order based on deadlines and priority.\n\n---\n\n## 4. 
Content Library as Shared Resource\n\n**Decision:** The knowledge base is a shared resource accessed only by the Content Library Matcher, not duplicated or directly accessed by other agents.\n\n**Reasoning:**\n\n- The Content Library Matcher owns the search and relevance scoring logic. If the Response Composer also searched the knowledge base directly, relevance scoring would diverge between the two agents, producing inconsistent results.\n- Centralizing knowledge base access makes it possible to track which content is being reused, how often, and with what relevance scores. This data drives knowledge base maintenance decisions (what to update, what to retire).\n- When the knowledge base technology changes (e.g., switching from keyword search to vector search, or migrating from one content repository to another), only the Content Library Matcher needs to be updated. The Response Composer's interface remains stable.\n- Content access patterns can be monitored and optimized in one place rather than across multiple agents.\n\n---\n\n## 5. Human-in-the-Loop Approval Gates\n\n**Decision:** Mandatory human approval checkpoints at three points: Phase 1 exit, response review, and final submission.\n\n**Where the gates are:**\n\n1. **Phase 1 exit gate.** Before proceeding to Phase 2, a human reviews the decision gate metrics and signs off. This prevents expanding automation before the foundation is proven.\n2. **Response review gate.** After the Response Composer produces a draft and before the Compliance Validator runs, a human SME reviews the content. This is where generated (non-reused) content gets verified.\n3. **Final submission gate.** After compliance validation, before the response is submitted to the issuing organization. This is the last-look checkpoint where a human confirms the response is ready.\n\n**Reasoning:**\n\n- RFx responses create legal and financial commitments. 
Fully automated submission without human review is an unacceptable risk for most organizations.\n- The approval gates are positioned to catch different types of issues: Phase 1 exit catches systemic problems, response review catches content accuracy issues, and final submission catches compliance and completeness issues.\n- Gates are designed to be lightweight. The human reviewer sees a structured summary of what changed since the last gate, not the entire document. The system highlights generated content, flagged sections, and compliance results.\n- Over time, as confidence in the system grows, organizations can choose to streamline gates (e.g., auto-approve when all content is reused and compliance is fully passing). But starting with explicit gates builds trust.\n\n---\n\n## 6. Stateless Workers with State in Orchestrator\n\n**Decision:** Worker agents do not maintain state between tasks. All state lives in the Orchestrator's tracking records.\n\n**Reasoning:**\n\n- Stateless workers are simpler to deploy, scale, and replace. If a worker instance fails, a new instance can pick up the next task without needing to recover state.\n- State in the Orchestrator means there is one place to inspect, debug, and audit. When something goes wrong, you look at the Orchestrator's tracking record, not at multiple worker internal states.\n- Idempotency is easier to enforce when workers do not accumulate state. Reprocessing a task produces the same result regardless of what the worker has done previously.\n- This pattern works equally well whether workers are deployed as separate processes, serverless functions, or agents on an enterprise platform. The deployment model does not affect the architecture.\n- The tradeoff is that workers receive their full context with every task (they cannot \"remember\" previous work on the same RFx). For this use case, the context required per task is small enough that this is not a meaningful performance concern.\n";
@@ -0,0 +1,95 @@
1
+ export const content = `# Architecture Decisions
2
+
3
+ This document records the key architectural decisions for the RFx Procurement Automation system and the reasoning behind each.
4
+
5
+ ---
6
+
7
+ ## 1. Why Manager-Workers (not Pipeline or Peer-to-Peer)
8
+
9
+ **Decision:** Use a Manager-Workers pattern where the RFx Orchestrator (manager) coordinates all four worker agents.
10
+
11
+ **Alternatives considered:**
12
+
13
+ - **Pipeline pattern** (linear chain: Analyzer → Matcher → Composer → Validator). Simpler to implement, but too rigid. Real RFx processing is not strictly linear. The Compliance Validator may flag issues that require the Response Composer to revise, which may trigger additional Content Library Matcher searches. A pipeline cannot handle these feedback loops without becoming a tangled mess of backward references.
14
+ - **Peer-to-peer** (agents communicate directly with each other). Maximum flexibility, but creates coordination complexity that scales quadratically with agent count. Debugging becomes difficult because there is no single point where you can observe the full state of an RFx being processed. Error handling and retry logic would need to be duplicated in every agent.
15
+
16
+ **Why Manager-Workers wins:**
17
+
18
+ - The Orchestrator provides a single observation point for the entire RFx lifecycle. You can inspect one record to see what has happened, what is pending, and what has failed.
19
+ - Feedback loops are handled cleanly. The Orchestrator can re-route work to any agent at any stage without the agents needing to know about each other.
20
+ - Adding or replacing a worker does not affect other workers. Each worker only knows about the Orchestrator's task contract.
21
+ - The pattern maps naturally to enterprise automation platforms that support agent orchestration.
22
+
23
+ ---
24
+
25
+ ## 2. Why Phased Rollout
26
+
27
+ **Decision:** Deploy in two phases. Phase 1 (4 weeks): Orchestrator + Requirements Analyzer only. Phase 2 (6 weeks): add remaining 3 workers.
28
+
29
+ **Reasoning:**
30
+
31
+ - The Requirements Analyzer is the foundation. If it cannot reliably extract and categorize requirements, none of the downstream agents will produce useful results. Testing this in isolation avoids wasting effort on content matching and response generation that would be built on unreliable inputs.
32
+ - Phase 1 delivers standalone value. Even without automated response composition, structured requirements extraction with deadline tracking saves significant manual effort. Stakeholders see value immediately.
33
+ - The Phase 1 decision gate (90% extraction accuracy, 40% time reduction, zero missed mandatory requirements) provides a clear go/no-go signal. If these metrics are not met, it is better to iterate on the analyzer than to build more agents on a shaky foundation.
34
+ - Risk is contained. If the project is cancelled after Phase 1, the organization still has a working requirements extraction tool.
35
+
36
+ ---
37
+
38
+ ## 3. Orchestrator as Single Entry Point
39
+
40
+ **Decision:** All RFx documents enter the system through the Orchestrator. No direct access to worker agents.
41
+
42
+ **Reasoning:**
43
+
44
+ - Every RFx gets a tracking record from the moment it enters the system. No documents can be processed without an audit trail.
45
+ - The Orchestrator applies classification and routing logic before any work begins. This prevents workers from receiving documents they are not equipped to handle.
46
+ - Access control is centralized. The Orchestrator enforces who can submit documents and who can view results, rather than duplicating this logic in every worker.
47
+ - Rate limiting and prioritization happen at a single point. When multiple RFx documents arrive simultaneously, the Orchestrator decides processing order based on deadlines and priority.
48
+
49
+ ---
50
+
51
+ ## 4. Content Library as Shared Resource
52
+
53
+ **Decision:** The knowledge base is a shared resource accessed only by the Content Library Matcher, not duplicated or directly accessed by other agents.
54
+
55
+ **Reasoning:**
56
+
57
+ - The Content Library Matcher owns the search and relevance scoring logic. If the Response Composer also searched the knowledge base directly, relevance scoring would diverge between the two agents, producing inconsistent results.
58
+ - Centralizing knowledge base access makes it possible to track which content is being reused, how often, and with what relevance scores. This data drives knowledge base maintenance decisions (what to update, what to retire).
59
+ - When the knowledge base technology changes (e.g., switching from keyword search to vector search, or migrating from one content repository to another), only the Content Library Matcher needs to be updated. The Response Composer's interface remains stable.
60
+ - Content access patterns can be monitored and optimized in one place rather than across multiple agents.
61
+
62
+ ---
63
+
64
+ ## 5. Human-in-the-Loop Approval Gates
65
+
66
+ **Decision:** Mandatory human approval checkpoints at three points: Phase 1 exit, response review, and final submission.
67
+
68
+ **Where the gates are:**
69
+
70
+ 1. **Phase 1 exit gate.** Before proceeding to Phase 2, a human reviews the decision gate metrics and signs off. This prevents expanding automation before the foundation is proven.
71
+ 2. **Response review gate.** After the Response Composer produces a draft and before the Compliance Validator runs, a human SME reviews the content. This is where generated (non-reused) content gets verified.
72
+ 3. **Final submission gate.** After compliance validation, before the response is submitted to the issuing organization. This is the last-look checkpoint where a human confirms the response is ready.
73
+
74
+ **Reasoning:**
75
+
76
+ - RFx responses create legal and financial commitments. Fully automated submission without human review is an unacceptable risk for most organizations.
77
+ - The approval gates are positioned to catch different types of issues: Phase 1 exit catches systemic problems, response review catches content accuracy issues, and final submission catches compliance and completeness issues.
78
+ - Gates are designed to be lightweight. The human reviewer sees a structured summary of what changed since the last gate, not the entire document. The system highlights generated content, flagged sections, and compliance results.
79
+ - Over time, as confidence in the system grows, organizations can choose to streamline gates (e.g., auto-approve when all content is reused and compliance is fully passing). But starting with explicit gates builds trust.
80
+
81
+ ---
82
+
83
+ ## 6. Stateless Workers with State in Orchestrator
84
+
85
+ **Decision:** Worker agents do not maintain state between tasks. All state lives in the Orchestrator's tracking records.
86
+
87
+ **Reasoning:**
88
+
89
+ - Stateless workers are simpler to deploy, scale, and replace. If a worker instance fails, a new instance can pick up the next task without needing to recover state.
90
+ - State in the Orchestrator means there is one place to inspect, debug, and audit. When something goes wrong, you look at the Orchestrator's tracking record, not at multiple worker internal states.
91
+ - Idempotency is easier to enforce when workers do not accumulate state. Reprocessing a task produces the same result regardless of what the worker has done previously.
92
+ - This pattern works equally well whether workers are deployed as separate processes, serverless functions, or agents on an enterprise platform. The deployment model does not affect the architecture.
93
+ - The tradeoff is that workers receive their full context with every task (they cannot "remember" previous work on the same RFx). For this use case, the context required per task is small enough that this is not a meaningful performance concern.
94
+ `;
95
+ //# sourceMappingURL=architecture-decisions.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"architecture-decisions.js","sourceRoot":"","sources":["../../../../src/blueprints/rfx-procurement/files/architecture-decisions.ts"],"names":[],"mappings":"AAAA,MAAM,CAAC,MAAM,OAAO,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA6FtB,CAAC"}
@@ -0,0 +1 @@
1
+ export declare const content = "# Business Context\n\nThe procurement challenge, current state pain points, and target outcomes that motivate the RFx Procurement Automation system.\n\n---\n\n## The Procurement Challenge\n\nOrganizations that sell products or services to other organizations respond to RFx documents (Requests for Proposal, Requests for Quotation, Requests for Information) as a core part of their sales process. For many B2B and B2G companies, RFx responses account for 30-60% of new revenue.\n\nThe challenge is one of volume, complexity, and time pressure:\n\n- **Volume.** Large organizations may receive dozens of RFx invitations per month. Each requires a go/no-go decision and, if pursued, a complete response.\n- **Complexity.** A single RFP can run 50-200 pages with hundreds of individual requirements spanning technical capabilities, pricing, compliance, staffing, and past performance. Requirements are embedded in dense legal and procedural language, often with cross-references to external standards.\n- **Time pressure.** Response deadlines are typically 2-4 weeks from receipt. For complex RFPs, this is barely enough time for a thorough manual response. Late submissions are almost always disqualified.\n\nThese three forces combine to create a situation where proposal teams are perpetually overloaded, shortcuts are taken, and quality suffers.\n\n---\n\n## Current State Pain Points\n\n### Manual Response Assembly\n\nThe typical RFx response process involves a proposal manager reading the entire document, creating a response outline, manually searching for and copying content from previous responses, distributing sections to SMEs for drafting or review, and then assembling everything into a final document. 
Each of these steps is largely manual, repetitive, and error-prone.\n\nProposal teams report spending 60-80% of their time on logistics (finding content, formatting, chasing SMEs for input) rather than on the value-adding work of crafting compelling, tailored responses.\n\n### Missed Deadlines and Disqualification\n\nComplex RFx documents contain multiple embedded deadlines: question submission periods, intent-to-bid dates, mandatory pre-proposal conferences, and final submission deadlines. Missing any mandatory deadline can result in disqualification regardless of response quality.\n\nManual deadline tracking across multiple concurrent RFx efforts is unreliable. Teams discover deadline conflicts too late to resolve them, or miss secondary deadlines (like mandatory pre-bid conferences) buried in the document text.\n\n### Inconsistent Quality\n\nWhen multiple SMEs contribute to a single response, quality varies by section. Different writing styles, levels of detail, and levels of effort produce an uneven document. Evaluators notice this inconsistency, and it undermines the credibility of the overall response.\n\nOrganizations also struggle with version control. The \"final\" response document may include outdated content from a previous draft, or content that was revised in one section but not updated in related sections.\n\n### Compliance Gaps\n\nThe most damaging failure mode is a compliance gap: a mandatory requirement that was not addressed in the response. Many RFx evaluation frameworks start with a compliance check that eliminates non-compliant responses before any qualitative evaluation begins.\n\nCompliance gaps occur because requirements are difficult to extract from dense documents, mandatory vs. 
optional requirements are not always clearly distinguished, and manual cross-checking of hundreds of requirements against response sections is tedious and error-prone.\n\n### Knowledge Fragmentation\n\nOver years of responding to RFx documents, organizations accumulate substantial response content. But this content lives in disconnected locations: shared drives organized by project, individual team members' file systems, email attachments, and various document management systems.\n\nWhen a new RFx arrives, the proposal team knows that relevant content exists somewhere, but finding it takes longer than writing new content from scratch. The result is duplicated effort and inconsistent messaging across responses.\n\n---\n\n## Impact of Poor RFx Responses\n\nThe costs of an inefficient RFx response process are both direct and indirect:\n\n- **Lost revenue.** A non-compliant or low-quality response that would have been competitive with proper preparation represents directly lost revenue. For organizations where RFx-driven contracts are a primary revenue source, this impact is significant.\n- **Wasted effort.** Time spent on responses that fail due to compliance gaps or missed deadlines is entirely wasted. This includes not only the proposal team's time but also SME time diverted from other work.\n- **Opportunity cost.** Teams that are overloaded with manual RFx processing cannot pursue as many opportunities. The go/no-go decision becomes driven by capacity constraints rather than strategic fit.\n- **Talent burnout.** Experienced proposal professionals leave organizations where the process is consistently chaotic and last-minute. This further degrades response quality and institutional knowledge.\n\n---\n\n## Industry Benchmarks\n\nThese benchmarks provide context for setting improvement targets. 
Actual numbers vary by industry, RFx complexity, and organization size.\n\n| Metric | Typical Current State | Top Performers |\n|--------|----------------------|----------------|\n| Time from RFx receipt to draft-ready | 10-15 business days | 5-7 business days |\n| Proposal team time spent on logistics vs. content | 60-80% logistics | 30-40% logistics |\n| Win rate on pursued RFx | 15-25% | 35-50% |\n| Mandatory requirement compliance rate | 85-95% | 99-100% |\n| Content reuse from previous responses | 20-40% | 60-80% |\n| RFx abandoned due to capacity constraints | 20-30% of invitations | < 10% of invitations |\n\n---\n\n## Target Outcomes\n\nThe RFx Procurement Automation system targets the following improvements:\n\n1. **Reduce requirements extraction time by > 80%.** Automated parsing and categorization replaces manual document reading and requirement identification. Target: hours reduced to minutes per document.\n\n2. **Reduce overall response cycle time by > 50%.** End-to-end processing from intake to draft-ready is cut in half through automated content matching, response assembly, and compliance validation.\n\n3. **Achieve 100% mandatory requirement coverage.** Automated extraction and compliance validation ensures no mandatory requirement is missed. This is a pass/fail target, not a percentage improvement.\n\n4. **Increase content reuse rate to > 60%.** Automated knowledge base search and relevance scoring surfaces existing content that manual search misses. Reduces duplicated drafting effort.\n\n5. **Improve response consistency.** Automated formatting and tone management produces uniform quality across all response sections, regardless of how many contributors are involved.\n\n6. **Free proposal teams for high-value work.** By automating the logistics of response assembly, proposal professionals spend their time on strategy, tailoring, and relationship-building rather than on copying, formatting, and chasing.\n\n7. 
**Enable data-driven process improvement.** Systematic tracking of processing times, content reuse rates, compliance scores, and win rates provides the data needed to continuously improve both the automation and the underlying content library.\n";
@@ -0,0 +1,99 @@
1
+ export const content = `# Business Context
2
+
3
+ The procurement challenge, current state pain points, and target outcomes that motivate the RFx Procurement Automation system.
4
+
5
+ ---
6
+
7
+ ## The Procurement Challenge
8
+
9
+ Organizations that sell products or services to other organizations respond to RFx documents (Requests for Proposal, Requests for Quotation, Requests for Information) as a core part of their sales process. For many B2B and B2G companies, RFx responses account for 30-60% of new revenue.
10
+
11
+ The challenge is one of volume, complexity, and time pressure:
12
+
13
+ - **Volume.** Large organizations may receive dozens of RFx invitations per month. Each requires a go/no-go decision and, if pursued, a complete response.
14
+ - **Complexity.** A single RFP can run 50-200 pages with hundreds of individual requirements spanning technical capabilities, pricing, compliance, staffing, and past performance. Requirements are embedded in dense legal and procedural language, often with cross-references to external standards.
15
+ - **Time pressure.** Response deadlines are typically 2-4 weeks from receipt. For complex RFPs, this is barely enough time for a thorough manual response. Late submissions are almost always disqualified.
16
+
17
+ These three forces combine to create a situation where proposal teams are perpetually overloaded, shortcuts are taken, and quality suffers.
18
+
19
+ ---
20
+
21
+ ## Current State Pain Points
22
+
23
+ ### Manual Response Assembly
24
+
25
+ The typical RFx response process involves a proposal manager reading the entire document, creating a response outline, manually searching for and copying content from previous responses, distributing sections to SMEs for drafting or review, and then assembling everything into a final document. Each of these steps is largely manual, repetitive, and error-prone.
26
+
27
+ Proposal teams report spending 60-80% of their time on logistics (finding content, formatting, chasing SMEs for input) rather than on the value-adding work of crafting compelling, tailored responses.
28
+
29
+ ### Missed Deadlines and Disqualification
30
+
31
+ Complex RFx documents contain multiple embedded deadlines: question submission periods, intent-to-bid dates, mandatory pre-proposal conferences, and final submission deadlines. Missing any mandatory deadline can result in disqualification regardless of response quality.
32
+
33
+ Manual deadline tracking across multiple concurrent RFx efforts is unreliable. Teams discover deadline conflicts too late to resolve them, or miss secondary deadlines (like mandatory pre-bid conferences) buried in the document text.
34
+
35
+ ### Inconsistent Quality
36
+
37
+ When multiple SMEs contribute to a single response, quality varies by section. Different writing styles, levels of detail, and levels of effort produce an uneven document. Evaluators notice this inconsistency, and it undermines the credibility of the overall response.
38
+
39
+ Organizations also struggle with version control. The "final" response document may include outdated content from a previous draft, or content that was revised in one section but not updated in related sections.
40
+
41
+ ### Compliance Gaps
42
+
43
+ The most damaging failure mode is a compliance gap: a mandatory requirement that was not addressed in the response. Many RFx evaluation frameworks start with a compliance check that eliminates non-compliant responses before any qualitative evaluation begins.
44
+
45
+ Compliance gaps occur because requirements are difficult to extract from dense documents, mandatory vs. optional requirements are not always clearly distinguished, and manual cross-checking of hundreds of requirements against response sections is tedious and error-prone.
46
+
47
+ ### Knowledge Fragmentation
48
+
49
+ Over years of responding to RFx documents, organizations accumulate substantial response content. But this content lives in disconnected locations: shared drives organized by project, individual team members' file systems, email attachments, and various document management systems.
50
+
51
+ When a new RFx arrives, the proposal team knows that relevant content exists somewhere, but finding it takes longer than writing new content from scratch. The result is duplicated effort and inconsistent messaging across responses.
52
+
53
+ ---
54
+
55
+ ## Impact of Poor RFx Responses
56
+
57
+ The costs of an inefficient RFx response process are both direct and indirect:
58
+
59
+ - **Lost revenue.** A non-compliant or low-quality response that would have been competitive with proper preparation represents directly lost revenue. For organizations where RFx-driven contracts are a primary revenue source, this impact is significant.
60
+ - **Wasted effort.** Time spent on responses that fail due to compliance gaps or missed deadlines is entirely wasted. This includes not only the proposal team's time but also SME time diverted from other work.
61
+ - **Opportunity cost.** Teams that are overloaded with manual RFx processing cannot pursue as many opportunities. The go/no-go decision becomes driven by capacity constraints rather than strategic fit.
62
+ - **Talent burnout.** Experienced proposal professionals leave organizations where the process is consistently chaotic and last-minute. This further degrades response quality and institutional knowledge.
63
+
64
+ ---
65
+
66
+ ## Industry Benchmarks
67
+
68
+ These benchmarks provide context for setting improvement targets. Actual numbers vary by industry, RFx complexity, and organization size.
69
+
70
+ | Metric | Typical Current State | Top Performers |
71
+ |--------|----------------------|----------------|
72
+ | Time from RFx receipt to draft-ready | 10-15 business days | 5-7 business days |
73
+ | Proposal team time spent on logistics vs. content | 60-80% logistics | 30-40% logistics |
74
+ | Win rate on pursued RFx | 15-25% | 35-50% |
75
+ | Mandatory requirement compliance rate | 85-95% | 99-100% |
76
+ | Content reuse from previous responses | 20-40% | 60-80% |
77
+ | RFx abandoned due to capacity constraints | 20-30% of invitations | < 10% of invitations |
78
+
79
+ ---
80
+
81
+ ## Target Outcomes
82
+
83
+ The RFx Procurement Automation system targets the following improvements:
84
+
85
+ 1. **Reduce requirements extraction time by > 80%.** Automated parsing and categorization replaces manual document reading and requirement identification. Target: hours reduced to minutes per document.
86
+
87
+ 2. **Reduce overall response cycle time by > 50%.** End-to-end processing from intake to draft-ready is cut in half through automated content matching, response assembly, and compliance validation.
88
+
89
+ 3. **Achieve 100% mandatory requirement coverage.** Automated extraction and compliance validation ensures no mandatory requirement is missed. This is a pass/fail target, not a percentage improvement.
90
+
91
+ 4. **Increase content reuse rate to > 60%.** Automated knowledge base search and relevance scoring surfaces existing content that manual search misses. Reduces duplicated drafting effort.
92
+
93
+ 5. **Improve response consistency.** Automated formatting and tone management produces uniform quality across all response sections, regardless of how many contributors are involved.
94
+
95
+ 6. **Free proposal teams for high-value work.** By automating the logistics of response assembly, proposal professionals spend their time on strategy, tailoring, and relationship-building rather than on copying, formatting, and chasing.
96
+
97
+ 7. **Enable data-driven process improvement.** Systematic tracking of processing times, content reuse rates, compliance scores, and win rates provides the data needed to continuously improve both the automation and the underlying content library.
98
+ `;
99
+ //# sourceMappingURL=business-context.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"business-context.js","sourceRoot":"","sources":["../../../../src/blueprints/rfx-procurement/files/business-context.ts"],"names":[],"mappings":"AAAA,MAAM,CAAC,MAAM,OAAO,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAiGtB,CAAC"}