@contractspec/lib.product-intent-utils 3.7.16 → 3.7.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/browser/index.js +73 -1268
- package/dist/index.js +73 -1268
- package/dist/node/index.js +73 -1268
- package/package.json +8 -8
package/dist/index.js
CHANGED
|
@@ -1,222 +1,13 @@
|
|
|
1
1
|
// @bun
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
remove_field: ["api", "db", "ui", "docs", "tests"],
|
|
6
|
-
rename_field: ["api", "db", "ui", "docs", "tests"],
|
|
7
|
-
add_event: ["api", "workflows", "docs", "tests"],
|
|
8
|
-
update_event: ["api", "workflows", "docs", "tests"],
|
|
9
|
-
add_operation: ["api", "ui", "workflows", "docs", "tests"],
|
|
10
|
-
update_operation: ["api", "ui", "workflows", "docs", "tests"],
|
|
11
|
-
update_form: ["ui", "docs", "tests"],
|
|
12
|
-
update_policy: ["policy", "api", "workflows", "docs", "tests"],
|
|
13
|
-
add_enum_value: ["api", "db", "ui", "docs", "tests"],
|
|
14
|
-
remove_enum_value: ["api", "db", "ui", "docs", "tests"],
|
|
15
|
-
other: ["docs", "tests"]
|
|
16
|
-
};
|
|
17
|
-
var BUCKET_MAP = {
|
|
18
|
-
remove_field: "breaks",
|
|
19
|
-
rename_field: "breaks",
|
|
20
|
-
remove_enum_value: "breaks",
|
|
21
|
-
update_operation: "mustChange",
|
|
22
|
-
update_event: "mustChange",
|
|
23
|
-
update_policy: "mustChange",
|
|
24
|
-
update_form: "risky",
|
|
25
|
-
add_field: "risky",
|
|
26
|
-
add_event: "risky",
|
|
27
|
-
add_operation: "risky",
|
|
28
|
-
add_enum_value: "risky",
|
|
29
|
-
other: "risky"
|
|
30
|
-
};
|
|
31
|
-
function slugify(value) {
|
|
32
|
-
return value.toLowerCase().replace(/[^a-z0-9]+/g, "-").replace(/(^-|-$)+/g, "");
|
|
33
|
-
}
|
|
34
|
-
function buildTokens(change) {
|
|
35
|
-
const combined = `${change.type} ${change.target} ${change.detail}`;
|
|
36
|
-
const tokens = combined.split(/[^a-zA-Z0-9]+/).map((token) => token.trim()).filter((token) => token.length >= 3);
|
|
37
|
-
return Array.from(new Set(tokens.map((token) => token.toLowerCase()))).slice(0, 8);
|
|
38
|
-
}
|
|
39
|
-
function scanTokens(tokens, files, maxHits) {
|
|
40
|
-
const hits = [];
|
|
41
|
-
const lowerTokens = tokens.map((token) => token.toLowerCase());
|
|
42
|
-
for (const file of files) {
|
|
43
|
-
const haystack = file.content.toLowerCase();
|
|
44
|
-
if (lowerTokens.some((token) => haystack.includes(token))) {
|
|
45
|
-
hits.push(file.path);
|
|
46
|
-
}
|
|
47
|
-
if (hits.length >= maxHits)
|
|
48
|
-
break;
|
|
49
|
-
}
|
|
50
|
-
return hits;
|
|
51
|
-
}
|
|
52
|
-
function formatRefs(tokens, repoFiles, maxHits = 3) {
|
|
53
|
-
if (!repoFiles || repoFiles.length === 0) {
|
|
54
|
-
return "refs: (no repo scan)";
|
|
55
|
-
}
|
|
56
|
-
const hits = scanTokens(tokens, repoFiles, maxHits);
|
|
57
|
-
if (!hits.length)
|
|
58
|
-
return "refs: none";
|
|
59
|
-
return `refs: ${hits.join(", ")}`;
|
|
60
|
-
}
|
|
61
|
-
function humanizeChange(change) {
|
|
62
|
-
const label = change.type.replace(/_/g, " ");
|
|
63
|
-
return `${label} ${change.target}`;
|
|
64
|
-
}
|
|
65
|
-
function buildStatement(change, refs, surfaces) {
|
|
66
|
-
const reason = change.detail || `touches ${surfaces.join(", ")}`;
|
|
67
|
-
return `${humanizeChange(change)} because ${reason} (${refs})`;
|
|
68
|
-
}
|
|
69
|
-
function impactEngine(intent, options = {}) {
|
|
70
|
-
const reportId = options.reportId ?? `impact-${slugify(intent.featureKey)}`;
|
|
71
|
-
const patchId = options.patchId ?? `patch-${slugify(intent.featureKey)}`;
|
|
72
|
-
const maxHitsPerChange = options.maxHitsPerChange ?? 3;
|
|
73
|
-
const breaks = [];
|
|
74
|
-
const mustChange = [];
|
|
75
|
-
const risky = [];
|
|
76
|
-
const surfaces = {
|
|
77
|
-
api: [],
|
|
78
|
-
db: [],
|
|
79
|
-
ui: [],
|
|
80
|
-
workflows: [],
|
|
81
|
-
policy: [],
|
|
82
|
-
docs: [],
|
|
83
|
-
tests: []
|
|
84
|
-
};
|
|
85
|
-
for (const change of intent.changes) {
|
|
86
|
-
const bucket = BUCKET_MAP[change.type] ?? "risky";
|
|
87
|
-
const surfaceTargets = SURFACE_MAP[change.type] ?? ["docs", "tests"];
|
|
88
|
-
const tokens = buildTokens(change);
|
|
89
|
-
const refs = formatRefs(tokens, options.repoFiles, maxHitsPerChange);
|
|
90
|
-
const statement = buildStatement(change, refs, surfaceTargets);
|
|
91
|
-
if (bucket === "breaks")
|
|
92
|
-
breaks.push(statement);
|
|
93
|
-
if (bucket === "mustChange")
|
|
94
|
-
mustChange.push(statement);
|
|
95
|
-
if (bucket === "risky")
|
|
96
|
-
risky.push(statement);
|
|
97
|
-
for (const surface of surfaceTargets) {
|
|
98
|
-
const list = surfaces[surface];
|
|
99
|
-
if (Array.isArray(list)) {
|
|
100
|
-
list.push(statement);
|
|
101
|
-
}
|
|
102
|
-
}
|
|
103
|
-
}
|
|
104
|
-
const summary = [
|
|
105
|
-
`Analyzed ${intent.changes.length} change(s).`,
|
|
106
|
-
`Breaks: ${breaks.length}.`,
|
|
107
|
-
`Must change: ${mustChange.length}.`,
|
|
108
|
-
`Risky: ${risky.length}.`
|
|
109
|
-
].join(" ");
|
|
110
|
-
return {
|
|
111
|
-
reportId,
|
|
112
|
-
patchId,
|
|
113
|
-
summary,
|
|
114
|
-
breaks,
|
|
115
|
-
mustChange,
|
|
116
|
-
risky,
|
|
117
|
-
surfaces
|
|
118
|
-
};
|
|
119
|
-
}
|
|
120
|
-
// src/project-management-sync.ts
|
|
121
|
-
function buildProjectManagementSyncPayload(params) {
|
|
122
|
-
const options = params.options ?? {};
|
|
123
|
-
const items = buildWorkItemsFromTickets(params.tickets, options);
|
|
124
|
-
const summary = options.includeSummary ? buildSummaryWorkItem({
|
|
125
|
-
question: params.question,
|
|
126
|
-
tickets: params.tickets,
|
|
127
|
-
patchIntent: params.patchIntent,
|
|
128
|
-
impact: params.impact,
|
|
129
|
-
title: options.summaryTitle,
|
|
130
|
-
baseTags: options.baseTags
|
|
131
|
-
}) : undefined;
|
|
132
|
-
return { summary, items };
|
|
133
|
-
}
|
|
134
|
-
function buildWorkItemsFromTickets(tickets, options = {}) {
|
|
135
|
-
return tickets.map((ticket) => ({
|
|
136
|
-
title: ticket.title,
|
|
137
|
-
description: renderTicketDescription(ticket),
|
|
138
|
-
type: "task",
|
|
139
|
-
priority: mapPriority(ticket.priority, options.defaultPriority),
|
|
140
|
-
tags: mergeTags(options.baseTags, ticket.tags),
|
|
141
|
-
externalId: ticket.ticketId
|
|
142
|
-
}));
|
|
143
|
-
}
|
|
144
|
-
function buildSummaryWorkItem(params) {
|
|
145
|
-
return {
|
|
146
|
-
title: params.title ?? "Product Intent Summary",
|
|
147
|
-
description: renderSummaryMarkdown(params),
|
|
148
|
-
type: "summary",
|
|
149
|
-
tags: mergeTags(params.baseTags, ["product-intent", "summary"])
|
|
150
|
-
};
|
|
151
|
-
}
|
|
152
|
-
function renderTicketDescription(ticket) {
|
|
153
|
-
const lines = [
|
|
154
|
-
ticket.summary,
|
|
155
|
-
"",
|
|
156
|
-
"Acceptance Criteria:",
|
|
157
|
-
...ticket.acceptanceCriteria.map((criterion) => `- ${criterion}`)
|
|
158
|
-
];
|
|
159
|
-
if (ticket.evidenceIds.length > 0) {
|
|
160
|
-
lines.push("", `Evidence: ${ticket.evidenceIds.join(", ")}`);
|
|
161
|
-
}
|
|
162
|
-
return lines.join(`
|
|
163
|
-
`);
|
|
164
|
-
}
|
|
165
|
-
function renderSummaryMarkdown(params) {
|
|
166
|
-
const lines = [`# ${params.question}`, "", "## Top Tickets"];
|
|
167
|
-
for (const ticket of params.tickets) {
|
|
168
|
-
lines.push(`- ${ticket.title}`);
|
|
169
|
-
}
|
|
170
|
-
if (params.patchIntent) {
|
|
171
|
-
lines.push("", "## Patch Intent", `Feature: ${params.patchIntent.featureKey}`);
|
|
172
|
-
params.patchIntent.changes.forEach((change) => {
|
|
173
|
-
lines.push(`- ${change.type}: ${change.target}`);
|
|
174
|
-
});
|
|
175
|
-
}
|
|
176
|
-
if (params.impact) {
|
|
177
|
-
lines.push("", "## Impact Summary", params.impact.summary);
|
|
178
|
-
}
|
|
179
|
-
return lines.join(`
|
|
180
|
-
`);
|
|
181
|
-
}
|
|
182
|
-
function mapPriority(priority, fallback) {
|
|
183
|
-
if (!priority)
|
|
184
|
-
return fallback;
|
|
185
|
-
switch (priority) {
|
|
186
|
-
case "high":
|
|
187
|
-
return "high";
|
|
188
|
-
case "medium":
|
|
189
|
-
return "medium";
|
|
190
|
-
case "low":
|
|
191
|
-
return "low";
|
|
192
|
-
default:
|
|
193
|
-
return fallback;
|
|
194
|
-
}
|
|
195
|
-
}
|
|
196
|
-
function mergeTags(baseTags, tags) {
|
|
197
|
-
const merged = new Set;
|
|
198
|
-
(baseTags ?? []).forEach((tag) => merged.add(tag));
|
|
199
|
-
(tags ?? []).forEach((tag) => merged.add(tag));
|
|
200
|
-
const result = [...merged];
|
|
201
|
-
return result.length > 0 ? result : undefined;
|
|
202
|
-
}
|
|
203
|
-
// src/prompts.ts
|
|
204
|
-
function formatEvidenceForModel(chunks, maxChars = 900) {
|
|
205
|
-
const safe = chunks.map((chunk) => ({
|
|
206
|
-
chunkId: chunk.chunkId,
|
|
207
|
-
text: chunk.text.length > maxChars ? `${chunk.text.slice(0, maxChars)}...` : chunk.text,
|
|
208
|
-
meta: chunk.meta ?? {}
|
|
209
|
-
}));
|
|
210
|
-
return JSON.stringify({ evidenceChunks: safe }, null, 2);
|
|
211
|
-
}
|
|
212
|
-
var JSON_ONLY_RULES = `
|
|
2
|
+
var u={add_field:["api","db","ui","docs","tests"],remove_field:["api","db","ui","docs","tests"],rename_field:["api","db","ui","docs","tests"],add_event:["api","workflows","docs","tests"],update_event:["api","workflows","docs","tests"],add_operation:["api","ui","workflows","docs","tests"],update_operation:["api","ui","workflows","docs","tests"],update_form:["ui","docs","tests"],update_policy:["policy","api","workflows","docs","tests"],add_enum_value:["api","db","ui","docs","tests"],remove_enum_value:["api","db","ui","docs","tests"],other:["docs","tests"]},h={remove_field:"breaks",rename_field:"breaks",remove_enum_value:"breaks",update_operation:"mustChange",update_event:"mustChange",update_policy:"mustChange",update_form:"risky",add_field:"risky",add_event:"risky",add_operation:"risky",add_enum_value:"risky",other:"risky"};function Q(e){return e.toLowerCase().replace(/[^a-z0-9]+/g,"-").replace(/(^-|-$)+/g,"")}function s(e){let c=`${e.type} ${e.target} ${e.detail}`.split(/[^a-zA-Z0-9]+/).map((P)=>P.trim()).filter((P)=>P.length>=3);return Array.from(new Set(c.map((P)=>P.toLowerCase()))).slice(0,8)}function a(e,y,c){let P=[],f=e.map((r)=>r.toLowerCase());for(let r of y){let $=r.content.toLowerCase();if(f.some((_)=>$.includes(_)))P.push(r.path);if(P.length>=c)break}return P}function ee(e,y,c=3){if(!y||y.length===0)return"refs: (no repo scan)";let P=a(e,y,c);if(!P.length)return"refs: none";return`refs: ${P.join(", ")}`}function ce(e){return`${e.type.replace(/_/g," ")} ${e.target}`}function ye(e,y,c){let P=e.detail||`touches ${c.join(", ")}`;return`${ce(e)} because ${P} (${y})`}function qe(e,y={}){let c=y.reportId??`impact-${Q(e.featureKey)}`,P=y.patchId??`patch-${Q(e.featureKey)}`,f=y.maxHitsPerChange??3,r=[],$=[],_=[],m={api:[],db:[],ui:[],workflows:[],policy:[],docs:[],tests:[]};for(let R of e.changes){let 
g=h[R.type]??"risky",C=u[R.type]??["docs","tests"],b=s(R),L=ee(b,y.repoFiles,f),H=ye(R,L,C);if(g==="breaks")r.push(H);if(g==="mustChange")$.push(H);if(g==="risky")_.push(H);for(let d of C){let G=m[d];if(Array.isArray(G))G.push(H)}}let j=[`Analyzed ${e.changes.length} change(s).`,`Breaks: ${r.length}.`,`Must change: ${$.length}.`,`Risky: ${_.length}.`].join(" ");return{reportId:c,patchId:P,summary:j,breaks:r,mustChange:$,risky:_,surfaces:m}}function ve(e){let y=e.options??{},c=Pe(e.tickets,y);return{summary:y.includeSummary?fe({question:e.question,tickets:e.tickets,patchIntent:e.patchIntent,impact:e.impact,title:y.summaryTitle,baseTags:y.baseTags}):void 0,items:c}}function Pe(e,y={}){return e.map((c)=>({title:c.title,description:re(c),type:"task",priority:_e(c.priority,y.defaultPriority),tags:J(y.baseTags,c.tags),externalId:c.ticketId}))}function fe(e){return{title:e.title??"Product Intent Summary",description:$e(e),type:"summary",tags:J(e.baseTags,["product-intent","summary"])}}function re(e){let y=[e.summary,"","Acceptance Criteria:",...e.acceptanceCriteria.map((c)=>`- ${c}`)];if(e.evidenceIds.length>0)y.push("",`Evidence: ${e.evidenceIds.join(", ")}`);return y.join(`
|
|
3
|
+
`)}function $e(e){let y=[`# ${e.question}`,"","## Top Tickets"];for(let c of e.tickets)y.push(`- ${c.title}`);if(e.patchIntent)y.push("","## Patch Intent",`Feature: ${e.patchIntent.featureKey}`),e.patchIntent.changes.forEach((c)=>{y.push(`- ${c.type}: ${c.target}`)});if(e.impact)y.push("","## Impact Summary",e.impact.summary);return y.join(`
|
|
4
|
+
`)}function _e(e,y){if(!e)return y;switch(e){case"high":return"high";case"medium":return"medium";case"low":return"low";default:return y}}function J(e,y){let c=new Set;(e??[]).forEach((f)=>c.add(f)),(y??[]).forEach((f)=>c.add(f));let P=[...c];return P.length>0?P:void 0}function U(e,y=900){let c=e.map((P)=>({chunkId:P.chunkId,text:P.text.length>y?`${P.text.slice(0,y)}...`:P.text,meta:P.meta??{}}));return JSON.stringify({evidenceChunks:c},null,2)}var t=`
|
|
213
5
|
You MUST output valid JSON ONLY.
|
|
214
6
|
- Do not wrap in markdown fences.
|
|
215
7
|
- Do not include any commentary.
|
|
216
8
|
- Do not include trailing commas.
|
|
217
9
|
- Use double quotes for all keys and string values.
|
|
218
|
-
|
|
219
|
-
var CITATION_RULES = `
|
|
10
|
+
`,z=`
|
|
220
11
|
CITATION RULES (strict):
|
|
221
12
|
- You may ONLY cite from the provided evidenceChunks.
|
|
222
13
|
- Each citation must include:
|
|
@@ -225,16 +16,14 @@ CITATION RULES (strict):
|
|
|
225
16
|
- Do NOT invent quotes.
|
|
226
17
|
- Keep quotes short (<= 240 chars).
|
|
227
18
|
- If you cannot support a claim with evidence, do not make the claim.
|
|
228
|
-
`;
|
|
229
|
-
function promptExtractInsights(params) {
|
|
230
|
-
return `
|
|
19
|
+
`;function Se(e){return`
|
|
231
20
|
You are extracting ATOMIC, EVIDENCE-GROUNDED insights to answer a product discovery question.
|
|
232
21
|
|
|
233
22
|
Question:
|
|
234
|
-
${
|
|
23
|
+
${e.question}
|
|
235
24
|
|
|
236
25
|
Evidence:
|
|
237
|
-
${
|
|
26
|
+
${e.evidenceJSON}
|
|
238
27
|
|
|
239
28
|
Task:
|
|
240
29
|
Return JSON with:
|
|
@@ -256,23 +45,19 @@ Guidelines:
|
|
|
256
45
|
- Each insight must be supported by 1 to 3 citations.
|
|
257
46
|
- Prefer user pain, blockers, confusions, workarounds, requests, and measurable outcomes.
|
|
258
47
|
- If evidence conflicts, include both sides as separate insights.
|
|
259
|
-
${
|
|
260
|
-
${
|
|
261
|
-
`.trim();
|
|
262
|
-
}
|
|
263
|
-
function promptSynthesizeBrief(params) {
|
|
264
|
-
const allowed = JSON.stringify({ allowedChunkIds: params.allowedChunkIds }, null, 2);
|
|
265
|
-
return `
|
|
48
|
+
${z}
|
|
49
|
+
${t}
|
|
50
|
+
`.trim()}function ke(e){let y=JSON.stringify({allowedChunkIds:e.allowedChunkIds},null,2);return`
|
|
266
51
|
You are synthesizing a product opportunity brief that is STRICTLY grounded in evidence.
|
|
267
52
|
|
|
268
53
|
Question:
|
|
269
|
-
${
|
|
54
|
+
${e.question}
|
|
270
55
|
|
|
271
56
|
Extracted insights (already grounded):
|
|
272
|
-
${
|
|
57
|
+
${e.insightsJSON}
|
|
273
58
|
|
|
274
59
|
Allowed citations:
|
|
275
|
-
${
|
|
60
|
+
${y}
|
|
276
61
|
|
|
277
62
|
Return JSON with exactly this shape:
|
|
278
63
|
{
|
|
@@ -290,19 +75,16 @@ Rules:
|
|
|
290
75
|
- The fields problem/who/proposedChange MUST each have >=1 citation.
|
|
291
76
|
- All citations must use allowedChunkIds and include exact quotes.
|
|
292
77
|
- Keep the brief concise and specific.
|
|
293
|
-
${
|
|
294
|
-
${
|
|
295
|
-
`.trim()
|
|
296
|
-
}
|
|
297
|
-
function promptSkepticCheck(params) {
|
|
298
|
-
return `
|
|
78
|
+
${z}
|
|
79
|
+
${t}
|
|
80
|
+
`.trim()}function be(e){return`
|
|
299
81
|
You are auditing a brief for unsupported claims and citation misuse.
|
|
300
82
|
|
|
301
83
|
Brief:
|
|
302
|
-
${
|
|
84
|
+
${e.briefJSON}
|
|
303
85
|
|
|
304
86
|
Evidence:
|
|
305
|
-
${
|
|
87
|
+
${e.evidenceJSON}
|
|
306
88
|
|
|
307
89
|
Return JSON:
|
|
308
90
|
{
|
|
@@ -318,15 +100,12 @@ Return JSON:
|
|
|
318
100
|
Rules:
|
|
319
101
|
- If everything is supported, return {"issues": []}.
|
|
320
102
|
- Be strict. If a statement is not clearly supported by citations, flag it.
|
|
321
|
-
${
|
|
322
|
-
`.trim()
|
|
323
|
-
}
|
|
324
|
-
function promptGeneratePatchIntent(params) {
|
|
325
|
-
return `
|
|
103
|
+
${t}
|
|
104
|
+
`.trim()}function Le(e){return`
|
|
326
105
|
You are generating a ContractPatchIntent from an OpportunityBrief.
|
|
327
106
|
|
|
328
107
|
OpportunityBrief:
|
|
329
|
-
${
|
|
108
|
+
${e.briefJSON}
|
|
330
109
|
|
|
331
110
|
Return JSON:
|
|
332
111
|
{
|
|
@@ -341,19 +120,16 @@ Rules:
|
|
|
341
120
|
- Keep changes <= 12.
|
|
342
121
|
- Detail should be minimal and explicit.
|
|
343
122
|
- Acceptance criteria must be testable and verifiable.
|
|
344
|
-
${
|
|
345
|
-
`.trim()
|
|
346
|
-
}
|
|
347
|
-
function promptGenerateGenericSpecOverlay(params) {
|
|
348
|
-
return `
|
|
123
|
+
${t}
|
|
124
|
+
`.trim()}function de(e){return`
|
|
349
125
|
You are generating a GENERIC spec overlay patch based on PatchIntent.
|
|
350
126
|
You must respect the base spec snippet.
|
|
351
127
|
|
|
352
128
|
Base spec snippet (context):
|
|
353
|
-
${
|
|
129
|
+
${e.baseSpecSnippet}
|
|
354
130
|
|
|
355
131
|
PatchIntent:
|
|
356
|
-
${
|
|
132
|
+
${e.patchIntentJSON}
|
|
357
133
|
|
|
358
134
|
Return JSON:
|
|
359
135
|
{
|
|
@@ -367,21 +143,18 @@ Return JSON:
|
|
|
367
143
|
Rules:
|
|
368
144
|
- Only reference paths that plausibly exist in the base spec snippet or add new ones under reasonable roots.
|
|
369
145
|
- Keep values small. Avoid massive blobs.
|
|
370
|
-
${
|
|
371
|
-
`.trim()
|
|
372
|
-
}
|
|
373
|
-
function promptGenerateImpactReport(params) {
|
|
374
|
-
return `
|
|
146
|
+
${t}
|
|
147
|
+
`.trim()}function ue(e){return`
|
|
375
148
|
You are generating an Impact Report for a spec patch.
|
|
376
149
|
|
|
377
150
|
PatchIntent:
|
|
378
|
-
${
|
|
151
|
+
${e.patchIntentJSON}
|
|
379
152
|
|
|
380
153
|
Overlay:
|
|
381
|
-
${
|
|
154
|
+
${e.overlayJSON}
|
|
382
155
|
|
|
383
156
|
Compiler output (if present):
|
|
384
|
-
${
|
|
157
|
+
${e.compilerOutputText??"(none)"}
|
|
385
158
|
|
|
386
159
|
Return JSON:
|
|
387
160
|
{
|
|
@@ -406,24 +179,21 @@ Rules:
|
|
|
406
179
|
- Be concrete: name what changes and why.
|
|
407
180
|
- If unsure, put it under "risky" not "breaks".
|
|
408
181
|
- Keep each item short.
|
|
409
|
-
${
|
|
410
|
-
`.trim()
|
|
411
|
-
}
|
|
412
|
-
function promptGenerateTaskPack(params) {
|
|
413
|
-
return `
|
|
182
|
+
${t}
|
|
183
|
+
`.trim()}function he(e){return`
|
|
414
184
|
You are generating an agent-ready Task Pack to implement a product change safely.
|
|
415
185
|
|
|
416
186
|
Repo context:
|
|
417
|
-
${
|
|
187
|
+
${e.repoContext??"(none)"}
|
|
418
188
|
|
|
419
189
|
OpportunityBrief:
|
|
420
|
-
${
|
|
190
|
+
${e.briefJSON}
|
|
421
191
|
|
|
422
192
|
PatchIntent:
|
|
423
|
-
${
|
|
193
|
+
${e.patchIntentJSON}
|
|
424
194
|
|
|
425
195
|
Impact report:
|
|
426
|
-
${
|
|
196
|
+
${e.impactJSON}
|
|
427
197
|
|
|
428
198
|
Return JSON:
|
|
429
199
|
{
|
|
@@ -448,12 +218,9 @@ Rules:
|
|
|
448
218
|
- Each task must have testable acceptance criteria.
|
|
449
219
|
- Agent prompts must be copy-paste friendly and mention expected files/surfaces.
|
|
450
220
|
- Include at least one tests task.
|
|
451
|
-
${
|
|
452
|
-
`.trim()
|
|
453
|
-
}
|
|
454
|
-
function promptWireframeImage(params) {
|
|
455
|
-
return `
|
|
456
|
-
Create a minimal grayscale wireframe (${params.device}) for screen: "${params.screenName}".
|
|
221
|
+
${t}
|
|
222
|
+
`.trim()}function se(e){return`
|
|
223
|
+
Create a minimal grayscale wireframe (${e.device}) for screen: "${e.screenName}".
|
|
457
224
|
|
|
458
225
|
Style rules:
|
|
459
226
|
- Wireframe only, grayscale, no brand colors, no gradients
|
|
@@ -462,22 +229,19 @@ Style rules:
|
|
|
462
229
|
- No decorative illustrations
|
|
463
230
|
|
|
464
231
|
Current screen summary:
|
|
465
|
-
${
|
|
232
|
+
${e.currentScreenSummary}
|
|
466
233
|
|
|
467
234
|
Proposed changes (must be reflected in the wireframe):
|
|
468
|
-
- ${
|
|
235
|
+
- ${e.proposedChanges.join(`
|
|
469
236
|
- `)}
|
|
470
237
|
|
|
471
238
|
Output: a single wireframe image that clearly shows the updated layout.
|
|
472
|
-
`.trim()
|
|
473
|
-
}
|
|
474
|
-
function promptWireframeLayoutJSON(params) {
|
|
475
|
-
return `
|
|
239
|
+
`.trim()}function ae(e){return`
|
|
476
240
|
You are generating a simple UI wireframe layout JSON (NOT an image).
|
|
477
|
-
Screen: "${
|
|
241
|
+
Screen: "${e.screenName}" (${e.device})
|
|
478
242
|
|
|
479
243
|
Proposed changes:
|
|
480
|
-
- ${
|
|
244
|
+
- ${e.proposedChanges.join(`
|
|
481
245
|
- `)}
|
|
482
246
|
|
|
483
247
|
Return JSON:
|
|
@@ -494,16 +258,13 @@ Rules:
|
|
|
494
258
|
- 8 to 18 elements.
|
|
495
259
|
- Must reflect proposed changes.
|
|
496
260
|
- Labels should be clear and specific.
|
|
497
|
-
${
|
|
498
|
-
`.trim()
|
|
499
|
-
}
|
|
500
|
-
|
|
501
|
-
return `
|
|
502
|
-
Generate ${params.count} synthetic customer interview transcripts for this product context:
|
|
503
|
-
${params.productContext}
|
|
261
|
+
${t}
|
|
262
|
+
`.trim()}function ec(e){return`
|
|
263
|
+
Generate ${e.count} synthetic customer interview transcripts for this product context:
|
|
264
|
+
${e.productContext}
|
|
504
265
|
|
|
505
266
|
Personas to cover:
|
|
506
|
-
- ${
|
|
267
|
+
- ${e.personas.join(`
|
|
507
268
|
- `)}
|
|
508
269
|
|
|
509
270
|
Requirements:
|
|
@@ -520,399 +281,20 @@ Return JSON:
|
|
|
520
281
|
]
|
|
521
282
|
}
|
|
522
283
|
|
|
523
|
-
${
|
|
524
|
-
`.trim();
|
|
525
|
-
}
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
ContractPatchIntentModel as ContractPatchIntentModel2,
|
|
529
|
-
EvidenceFindingExtractionModel as EvidenceFindingExtractionModel2,
|
|
530
|
-
ProblemGroupingModel as ProblemGroupingModel2,
|
|
531
|
-
TicketCollectionModel as TicketCollectionModel2
|
|
532
|
-
} from "@contractspec/lib.contracts-spec/product-intent/types";
|
|
284
|
+
${t}
|
|
285
|
+
`.trim()}import{ContractPatchIntentModel as Ke,EvidenceFindingExtractionModel as Ae,ProblemGroupingModel as Xe,TicketCollectionModel as Ze}from"@contractspec/lib.contracts-spec/product-intent/types";import{CitationModel as Ie,ContractPatchIntentModel as me,ImpactReportModel as je,InsightExtractionModel as Re,OpportunityBriefModel as te,TaskPackModel as ie}from"@contractspec/lib.contracts-spec/product-intent/types";function I(e,y,c){if(c.min!==void 0&&e.length<c.min)throw Error(`Expected ${y} to be at least ${c.min} characters, got ${e.length}`);if(c.max!==void 0&&e.length>c.max)throw Error(`Expected ${y} to be at most ${c.max} characters, got ${e.length}`)}function W(e,y,c){if(c.min!==void 0&&e.length<c.min)throw Error(`Expected ${y} to have at least ${c.min} items, got ${e.length}`);if(c.max!==void 0&&e.length>c.max)throw Error(`Expected ${y} to have at most ${c.max} items, got ${e.length}`)}function Te(e,y,c){if(c.min!==void 0&&e<c.min)throw Error(`Expected ${y} to be >= ${c.min}, got ${e}`);if(c.max!==void 0&&e>c.max)throw Error(`Expected ${y} to be <= ${c.max}, got ${e}`)}function i(e,y){let c=y.trim();if(!c.startsWith("{")&&!c.startsWith("["))throw Error("Model did not return JSON (missing leading { or [)");let P;try{P=JSON.parse(c)}catch(f){let r=f instanceof Error?f.message:String(f);throw Error(`Invalid JSON: ${r}`)}return e.getZod().parse(P)}function D(e){let y=new Map;for(let c of e)y.set(c.chunkId,c);return y}function K(e,y,c){let P=c?.maxQuoteLen??240,f=c?.requireExactSubstring??!0,r=Ie.getZod().parse(e);I(r.quote,"citation.quote",{min:1,max:P});let $=y.get(r.chunkId);if(!$)throw Error(`Citation references unknown chunkId: ${r.chunkId}`);if(f&&!$.text.includes(r.quote))throw Error(`Citation quote is not an exact substring of chunk ${r.chunkId}`);return r}function F(e,y){if(I(e.text,"textBlock.text",{min:1}),!e.citations?.length)throw Error("Missing required citations");let c=e.citations.map((P)=>K(P,y));return{text:e.text,citations:c}}function 
Pc(e,y){let c=D(y),P=i(te,e);if(I(P.opportunityId,"opportunityId",{min:1}),I(P.title,"title",{min:1,max:120}),F(P.problem,c),F(P.who,c),F(P.proposedChange,c),I(P.expectedImpact.metric,"expectedImpact.metric",{min:1,max:64}),P.expectedImpact.magnitudeHint)I(P.expectedImpact.magnitudeHint,"expectedImpact.magnitudeHint",{max:64});if(P.expectedImpact.timeframeHint)I(P.expectedImpact.timeframeHint,"expectedImpact.timeframeHint",{max:64});if(P.risks){for(let f of P.risks)if(I(f.text,"risks[].text",{min:1,max:240}),f.citations)for(let r of f.citations)K(r,c)}return P}function fc(e,y){let c=D(y),P=i(Re,e);W(P.insights,"insights",{min:1,max:30});for(let f of P.insights){if(I(f.insightId,"insights[].insightId",{min:1}),I(f.claim,"insights[].claim",{min:1,max:320}),f.tags)for(let r of f.tags)I(r,"insights[].tags[]",{min:1});if(f.confidence!==void 0)Te(f.confidence,"insights[].confidence",{min:0,max:1});W(f.citations,"insights[].citations",{min:1});for(let r of f.citations)K(r,c)}return P}function B(e){let y=i(me,e);I(y.featureKey,"featureKey",{min:1,max:80}),W(y.changes,"changes",{min:1,max:25});for(let c of y.changes)I(c.target,"changes[].target",{min:1}),I(c.detail,"changes[].detail",{min:1});W(y.acceptanceCriteria,"acceptanceCriteria",{min:1,max:12});for(let c of y.acceptanceCriteria)I(c,"acceptanceCriteria[]",{min:1,max:140});return y}function rc(e){let y=i(je,e);if(I(y.reportId,"reportId",{min:1}),I(y.patchId,"patchId",{min:1}),I(y.summary,"summary",{min:1,max:200}),y.breaks)for(let P of y.breaks)I(P,"breaks[]",{min:1,max:160});if(y.mustChange)for(let P of y.mustChange)I(P,"mustChange[]",{min:1,max:160});if(y.risky)for(let P of y.risky)I(P,"risky[]",{min:1,max:160});let c=y.surfaces;if(c.api)for(let P of c.api)I(P,"surfaces.api[]",{min:1,max:140});if(c.db)for(let P of c.db)I(P,"surfaces.db[]",{min:1,max:140});if(c.ui)for(let P of c.ui)I(P,"surfaces.ui[]",{min:1,max:140});if(c.workflows)for(let P of 
c.workflows)I(P,"surfaces.workflows[]",{min:1,max:140});if(c.policy)for(let P of c.policy)I(P,"surfaces.policy[]",{min:1,max:140});if(c.docs)for(let P of c.docs)I(P,"surfaces.docs[]",{min:1,max:140});if(c.tests)for(let P of c.tests)I(P,"surfaces.tests[]",{min:1,max:140});return y}function $c(e){let y=i(ie,e);I(y.packId,"packId",{min:1}),I(y.patchId,"patchId",{min:1}),I(y.overview,"overview",{min:1,max:240}),W(y.tasks,"tasks",{min:3,max:14});for(let c of y.tasks){I(c.id,"tasks[].id",{min:1}),I(c.title,"tasks[].title",{min:1,max:120}),W(c.surface,"tasks[].surface",{min:1}),I(c.why,"tasks[].why",{min:1,max:200}),W(c.acceptance,"tasks[].acceptance",{min:1,max:10});for(let P of c.acceptance)I(P,"tasks[].acceptance[]",{min:1,max:160});if(I(c.agentPrompt,"tasks[].agentPrompt",{min:1,max:1400}),c.dependsOn)for(let P of c.dependsOn)I(P,"tasks[].dependsOn[]",{min:1})}return y}function _c(e){return["Your previous output failed validation.","Fix the output and return JSON ONLY (no markdown, no commentary).","Validation error:",e].join(`
|
|
286
|
+
`)}function ge(e,y){if(e.length<=y)return e;return`${e.slice(0,y)}
|
|
287
|
+
...(truncated)`}function l(e,y,c=4000){return["Your previous output failed validation.","Fix the output and return JSON ONLY (no markdown, no commentary).","Do not change the JSON shape or rename fields.","If a citation quote is invalid, replace it with an exact substring from the referenced chunk.","Validation error:",e,"Previous output:",ge(y,c)].join(`
|
|
288
|
+
`)}var We=2;function A(){return new Date().toISOString()}function V(e){return e instanceof Error?e.message:String(e)}async function X(e,y){if(!e)return;try{await e.log(y)}catch{}}async function Z(e){let y=Math.max(1,e.maxAttempts??We),c=0,P,f="",r=e.prompt;while(c<y){c+=1,await X(e.logger,{stage:e.stage,phase:"request",attempt:c,prompt:r,timestamp:A()});let $;try{$=await e.modelRunner.generateJson(r)}catch(_){throw P=V(_),await X(e.logger,{stage:e.stage,phase:"model_error",attempt:c,prompt:r,error:P,timestamp:A()}),Error(`[${e.stage}] Model error: ${P}`)}await X(e.logger,{stage:e.stage,phase:"response",attempt:c,prompt:r,response:$,timestamp:A()});try{return e.validate($)}catch(_){if(P=V(_),f=$,e.repair){let m=e.repair($,P);if(m&&m!==$){await X(e.logger,{stage:e.stage,phase:"repair",attempt:c,prompt:r,response:m,error:P,timestamp:A()});try{return e.validate(m)}catch(j){P=V(j),f=m}}}await X(e.logger,{stage:e.stage,phase:"validation_error",attempt:c,prompt:r,response:f,error:P,timestamp:A()}),r=[e.prompt,l(P,f)].join(`
|
|
533
289
|
|
|
534
|
-
|
|
535
|
-
import {
|
|
536
|
-
CitationModel,
|
|
537
|
-
ContractPatchIntentModel,
|
|
538
|
-
ImpactReportModel,
|
|
539
|
-
InsightExtractionModel,
|
|
540
|
-
OpportunityBriefModel,
|
|
541
|
-
TaskPackModel
|
|
542
|
-
} from "@contractspec/lib.contracts-spec/product-intent/types";
|
|
543
|
-
function assertStringLength(value, path, bounds) {
|
|
544
|
-
if (bounds.min !== undefined && value.length < bounds.min) {
|
|
545
|
-
throw new Error(`Expected ${path} to be at least ${bounds.min} characters, got ${value.length}`);
|
|
546
|
-
}
|
|
547
|
-
if (bounds.max !== undefined && value.length > bounds.max) {
|
|
548
|
-
throw new Error(`Expected ${path} to be at most ${bounds.max} characters, got ${value.length}`);
|
|
549
|
-
}
|
|
550
|
-
}
|
|
551
|
-
function assertArrayLength(value, path, bounds) {
|
|
552
|
-
if (bounds.min !== undefined && value.length < bounds.min) {
|
|
553
|
-
throw new Error(`Expected ${path} to have at least ${bounds.min} items, got ${value.length}`);
|
|
554
|
-
}
|
|
555
|
-
if (bounds.max !== undefined && value.length > bounds.max) {
|
|
556
|
-
throw new Error(`Expected ${path} to have at most ${bounds.max} items, got ${value.length}`);
|
|
557
|
-
}
|
|
558
|
-
}
|
|
559
|
-
function assertNumberRange(value, path, bounds) {
|
|
560
|
-
if (bounds.min !== undefined && value < bounds.min) {
|
|
561
|
-
throw new Error(`Expected ${path} to be >= ${bounds.min}, got ${value}`);
|
|
562
|
-
}
|
|
563
|
-
if (bounds.max !== undefined && value > bounds.max) {
|
|
564
|
-
throw new Error(`Expected ${path} to be <= ${bounds.max}, got ${value}`);
|
|
565
|
-
}
|
|
566
|
-
}
|
|
567
|
-
function parseStrictJSON(schema, raw) {
|
|
568
|
-
const trimmed = raw.trim();
|
|
569
|
-
if (!trimmed.startsWith("{") && !trimmed.startsWith("[")) {
|
|
570
|
-
throw new Error("Model did not return JSON (missing leading { or [)");
|
|
571
|
-
}
|
|
572
|
-
let parsed;
|
|
573
|
-
try {
|
|
574
|
-
parsed = JSON.parse(trimmed);
|
|
575
|
-
} catch (error) {
|
|
576
|
-
const message = error instanceof Error ? error.message : String(error);
|
|
577
|
-
throw new Error(`Invalid JSON: ${message}`);
|
|
578
|
-
}
|
|
579
|
-
return schema.getZod().parse(parsed);
|
|
580
|
-
}
|
|
581
|
-
function buildChunkIndex(chunks) {
|
|
582
|
-
const map = new Map;
|
|
583
|
-
for (const chunk of chunks) {
|
|
584
|
-
map.set(chunk.chunkId, chunk);
|
|
585
|
-
}
|
|
586
|
-
return map;
|
|
587
|
-
}
|
|
588
|
-
function validateCitation(citation, chunkIndex, opts) {
|
|
589
|
-
const maxQuoteLen = opts?.maxQuoteLen ?? 240;
|
|
590
|
-
const requireExactSubstring = opts?.requireExactSubstring ?? true;
|
|
591
|
-
const parsed = CitationModel.getZod().parse(citation);
|
|
592
|
-
assertStringLength(parsed.quote, "citation.quote", {
|
|
593
|
-
min: 1,
|
|
594
|
-
max: maxQuoteLen
|
|
595
|
-
});
|
|
596
|
-
const chunk = chunkIndex.get(parsed.chunkId);
|
|
597
|
-
if (!chunk) {
|
|
598
|
-
throw new Error(`Citation references unknown chunkId: ${parsed.chunkId}`);
|
|
599
|
-
}
|
|
600
|
-
if (requireExactSubstring && !chunk.text.includes(parsed.quote)) {
|
|
601
|
-
throw new Error(`Citation quote is not an exact substring of chunk ${parsed.chunkId}`);
|
|
602
|
-
}
|
|
603
|
-
return parsed;
|
|
604
|
-
}
|
|
605
|
-
// A text block must carry non-empty prose plus at least one valid citation.
// Returns a normalized { text, citations } pair with parsed citations.
function validateCitationsInTextBlock(block, chunkIndex) {
  assertStringLength(block.text, "textBlock.text", { min: 1 });
  const provided = block.citations;
  if (!provided?.length) {
    throw new Error("Missing required citations");
  }
  const citations = [];
  for (const candidate of provided) {
    citations.push(validateCitation(candidate, chunkIndex));
  }
  return { text: block.text, citations };
}
|
|
613
|
-
// Validates an OpportunityBrief returned by the model: strict JSON parse,
// then id/title bounds, citation-backed narrative blocks, expected-impact
// field lengths, and optional risk entries. Throws on the first violation;
// returns the parsed brief otherwise.
function validateOpportunityBrief(raw, chunks) {
  const chunkIndex = buildChunkIndex(chunks);
  const brief = parseStrictJSON(OpportunityBriefModel, raw);
  assertStringLength(brief.opportunityId, "opportunityId", { min: 1 });
  assertStringLength(brief.title, "title", { min: 1, max: 120 });
  // Each narrative block must quote at least one known chunk.
  validateCitationsInTextBlock(brief.problem, chunkIndex);
  validateCitationsInTextBlock(brief.who, chunkIndex);
  validateCitationsInTextBlock(brief.proposedChange, chunkIndex);
  assertStringLength(brief.expectedImpact.metric, "expectedImpact.metric", {
    min: 1,
    max: 64
  });
  // Impact hints are optional; only length-bounded when present.
  if (brief.expectedImpact.magnitudeHint) {
    assertStringLength(brief.expectedImpact.magnitudeHint, "expectedImpact.magnitudeHint", { max: 64 });
  }
  if (brief.expectedImpact.timeframeHint) {
    assertStringLength(brief.expectedImpact.timeframeHint, "expectedImpact.timeframeHint", { max: 64 });
  }
  if (brief.risks) {
    for (const risk of brief.risks) {
      assertStringLength(risk.text, "risks[].text", { min: 1, max: 240 });
      // Risk citations are optional but must be grounded when supplied.
      if (risk.citations) {
        for (const c of risk.citations) {
          validateCitation(c, chunkIndex);
        }
      }
    }
  }
  return brief;
}
// Validates an InsightExtraction payload: 1-30 insights, each with a bounded
// claim, optional non-empty tags, optional confidence in [0, 1], and at
// least one citation grounded in a known chunk.
function validateInsightExtraction(raw, chunks) {
  const chunkIndex = buildChunkIndex(chunks);
  const data = parseStrictJSON(InsightExtractionModel, raw);
  assertArrayLength(data.insights, "insights", { min: 1, max: 30 });
  for (const insight of data.insights) {
    assertStringLength(insight.insightId, "insights[].insightId", { min: 1 });
    assertStringLength(insight.claim, "insights[].claim", {
      min: 1,
      max: 320
    });
    if (insight.tags) {
      for (const tag of insight.tags) {
        assertStringLength(tag, "insights[].tags[]", { min: 1 });
      }
    }
    // confidence is optional; explicit undefined check so 0 is still validated.
    if (insight.confidence !== undefined) {
      assertNumberRange(insight.confidence, "insights[].confidence", {
        min: 0,
        max: 1
      });
    }
    assertArrayLength(insight.citations, "insights[].citations", { min: 1 });
    for (const c of insight.citations) {
      validateCitation(c, chunkIndex);
    }
  }
  return data;
}
|
|
671
|
-
// Validates a ContractPatchIntent: bounded featureKey, 1-25 concrete
// changes, and 1-12 acceptance criteria of at most 140 characters each.
function validatePatchIntent(raw) {
  const data = parseStrictJSON(ContractPatchIntentModel, raw);
  assertStringLength(data.featureKey, "featureKey", { min: 1, max: 80 });
  assertArrayLength(data.changes, "changes", { min: 1, max: 25 });
  data.changes.forEach((change) => {
    assertStringLength(change.target, "changes[].target", { min: 1 });
    assertStringLength(change.detail, "changes[].detail", { min: 1 });
  });
  assertArrayLength(data.acceptanceCriteria, "acceptanceCriteria", {
    min: 1,
    max: 12
  });
  data.acceptanceCriteria.forEach((item) => {
    assertStringLength(item, "acceptanceCriteria[]", { min: 1, max: 140 });
  });
  return data;
}
|
|
688
|
-
// Validates an ImpactReport: required ids and summary, then every entry in
// the optional change buckets (breaks / mustChange / risky, <= 160 chars)
// and every entry in each optional surface list (<= 140 chars).
// Throws on the first violation; returns the parsed report otherwise.
//
// Refactor: the original repeated the same guarded for-loop ten times
// (once per bucket and per surface); the loops below are data-driven and
// produce byte-identical error messages (e.g. "breaks[]", "surfaces.api[]").
function validateImpactReport(raw) {
  const data = parseStrictJSON(ImpactReportModel, raw);
  assertStringLength(data.reportId, "reportId", { min: 1 });
  assertStringLength(data.patchId, "patchId", { min: 1 });
  assertStringLength(data.summary, "summary", { min: 1, max: 200 });
  // Optional change buckets: each entry is a short human-readable line.
  for (const bucket of ["breaks", "mustChange", "risky"]) {
    const items = data[bucket];
    if (!items) continue;
    for (const item of items) {
      assertStringLength(item, `${bucket}[]`, { min: 1, max: 160 });
    }
  }
  // Per-surface item lists; every surface shares the same length bound.
  // `surfaces` itself is required (reading it unconditionally mirrors the
  // original behavior of throwing when it is absent).
  const surfaces = data.surfaces;
  for (const key of ["api", "db", "ui", "workflows", "policy", "docs", "tests"]) {
    const items = surfaces[key];
    if (!items) continue;
    for (const item of items) {
      assertStringLength(item, `surfaces.${key}[]`, { min: 1, max: 140 });
    }
  }
  return data;
}
|
|
746
|
-
// Validates a TaskPack: required ids and overview, then 3-14 tasks, each
// with a title, at least one surface, a rationale, 1-10 bounded acceptance
// criteria, an agent prompt, and optional dependency ids.
// Throws on the first violation; returns the parsed pack otherwise.
function validateTaskPack(raw) {
  const data = parseStrictJSON(TaskPackModel, raw);
  assertStringLength(data.packId, "packId", { min: 1 });
  assertStringLength(data.patchId, "patchId", { min: 1 });
  assertStringLength(data.overview, "overview", { min: 1, max: 240 });
  // A useful pack is neither trivial nor sprawling: 3-14 tasks.
  assertArrayLength(data.tasks, "tasks", { min: 3, max: 14 });
  for (const task of data.tasks) {
    assertStringLength(task.id, "tasks[].id", { min: 1 });
    assertStringLength(task.title, "tasks[].title", { min: 1, max: 120 });
    assertArrayLength(task.surface, "tasks[].surface", { min: 1 });
    assertStringLength(task.why, "tasks[].why", { min: 1, max: 200 });
    assertArrayLength(task.acceptance, "tasks[].acceptance", {
      min: 1,
      max: 10
    });
    for (const criterion of task.acceptance) {
      assertStringLength(criterion, "tasks[].acceptance[]", {
        min: 1,
        max: 160
      });
    }
    assertStringLength(task.agentPrompt, "tasks[].agentPrompt", {
      min: 1,
      max: 1400
    });
    if (task.dependsOn) {
      // NOTE(review): dependency ids are only checked non-empty here, not
      // that they reference other task ids in this pack — confirm upstream.
      for (const dep of task.dependsOn) {
        assertStringLength(dep, "tasks[].dependsOn[]", { min: 1 });
      }
    }
  }
  return data;
}
|
|
779
|
-
// Compose the short, newline-separated repair instruction block appended
// to a retry prompt after a validation failure.
function buildRepairPrompt(error) {
  const lines = [
    "Your previous output failed validation.",
    "Fix the output and return JSON ONLY (no markdown, no commentary).",
    "Validation error:",
    error
  ];
  return lines.join("\n");
}
|
|
788
|
-
// Cap `value` at `maxChars`, appending a newline-prefixed truncation marker
// when anything was cut. Values within budget pass through unchanged.
function truncateText(value, maxChars) {
  if (value.length > maxChars) {
    return value.slice(0, maxChars) + "\n...(truncated)";
  }
  return value;
}
|
|
794
|
-
// Like buildRepairPrompt, but also echoes the failing output (truncated to
// `maxOutputChars`) so the model can patch it rather than regenerate blind.
function buildRepairPromptWithOutput(error, previousOutput, maxOutputChars = 4000) {
  const parts = [
    "Your previous output failed validation.",
    "Fix the output and return JSON ONLY (no markdown, no commentary).",
    "Do not change the JSON shape or rename fields.",
    "If a citation quote is invalid, replace it with an exact substring from the referenced chunk.",
    "Validation error:",
    error,
    "Previous output:",
    truncateText(previousOutput, maxOutputChars)
  ];
  return parts.join("\n");
}
|
|
807
|
-
|
|
808
|
-
// src/ticket-pipeline-runner.ts
var DEFAULT_MAX_ATTEMPTS = 2;

// ISO-8601 wall-clock stamp attached to every pipeline log entry.
function timestamp() {
  const now = new Date();
  return now.toISOString();
}

// Normalize any thrown value into a readable message string.
function toErrorMessage(error) {
  if (error instanceof Error) {
    return error.message;
  }
  return String(error);
}

// Best-effort logging: a missing or failing logger must never break the run.
async function safeLog(logger, entry) {
  if (!logger) {
    return;
  }
  try {
    await logger.log(entry);
  } catch {
    // Intentionally swallowed: logging is advisory only.
  }
}
|
|
823
|
-
// Runs one pipeline stage with validation and bounded retries:
//   1. send the current prompt to options.modelRunner.generateJson,
//   2. validate the raw response via options.validate,
//   3. on validation failure, optionally apply options.repair once and
//      re-validate the repaired text,
//   4. otherwise retry with the ORIGINAL prompt plus a repair addendum
//      carrying the error and the failing output.
// Model-call failures abort the stage immediately (no retry); validation
// failures retry up to maxAttempts (default DEFAULT_MAX_ATTEMPTS, floor 1).
// Every phase is logged best-effort through safeLog.
async function runWithValidation(options) {
  const maxAttempts = Math.max(1, options.maxAttempts ?? DEFAULT_MAX_ATTEMPTS);
  let attempt = 0;
  let lastError;
  let lastRaw = "";
  let currentPrompt = options.prompt;
  while (attempt < maxAttempts) {
    attempt += 1;
    await safeLog(options.logger, {
      stage: options.stage,
      phase: "request",
      attempt,
      prompt: currentPrompt,
      timestamp: timestamp()
    });
    let raw;
    try {
      raw = await options.modelRunner.generateJson(currentPrompt);
    } catch (error) {
      // Transport/model errors are terminal: log and fail the stage now.
      lastError = toErrorMessage(error);
      await safeLog(options.logger, {
        stage: options.stage,
        phase: "model_error",
        attempt,
        prompt: currentPrompt,
        error: lastError,
        timestamp: timestamp()
      });
      throw new Error(`[${options.stage}] Model error: ${lastError}`);
    }
    await safeLog(options.logger, {
      stage: options.stage,
      phase: "response",
      attempt,
      prompt: currentPrompt,
      response: raw,
      timestamp: timestamp()
    });
    try {
      return options.validate(raw);
    } catch (error) {
      lastError = toErrorMessage(error);
      lastRaw = raw;
      // One local repair pass before burning another model attempt; the
      // repair is skipped when it returns null/empty or an unchanged string.
      if (options.repair) {
        const repaired = options.repair(raw, lastError);
        if (repaired && repaired !== raw) {
          await safeLog(options.logger, {
            stage: options.stage,
            phase: "repair",
            attempt,
            prompt: currentPrompt,
            response: repaired,
            error: lastError,
            timestamp: timestamp()
          });
          try {
            return options.validate(repaired);
          } catch (repairError) {
            // Keep the repaired text as the "last output" for the retry prompt.
            lastError = toErrorMessage(repairError);
            lastRaw = repaired;
          }
        }
      }
      await safeLog(options.logger, {
        stage: options.stage,
        phase: "validation_error",
        attempt,
        prompt: currentPrompt,
        response: lastRaw,
        error: lastError,
        timestamp: timestamp()
      });
      // Next attempt: original prompt + repair addendum, blank-line separated.
      currentPrompt = [
        options.prompt,
        buildRepairPromptWithOutput(lastError, lastRaw)
      ].join(`

`);
    }
  }
  throw new Error(`[${options.stage}] Validation failed after ${maxAttempts} attempt(s): ${lastError ?? "unknown error"}`);
}
|
|
905
|
-
|
|
906
|
-
// src/ticket-prompts.ts
|
|
907
|
-
function promptExtractEvidenceFindings(params) {
|
|
908
|
-
return `
|
|
290
|
+
`)}}throw Error(`[${e.stage}] Validation failed after ${y} attempt(s): ${P??"unknown error"}`)}function O(e){return`
|
|
909
291
|
You are extracting evidence findings grounded in transcript excerpts.
|
|
910
292
|
|
|
911
293
|
Question:
|
|
912
|
-
${
|
|
294
|
+
${e.question}
|
|
913
295
|
|
|
914
296
|
Evidence:
|
|
915
|
-
${
|
|
297
|
+
${e.evidenceJSON}
|
|
916
298
|
|
|
917
299
|
Return JSON:
|
|
918
300
|
{
|
|
@@ -932,23 +314,19 @@ Rules:
|
|
|
932
314
|
- Summaries must be specific and short.
|
|
933
315
|
- Quotes must be copied character-for-character from the chunk text (no paraphrasing, no ellipses).
|
|
934
316
|
- Preserve punctuation, smart quotes, and special hyphens exactly as shown in the chunk text.
|
|
935
|
-
${
|
|
936
|
-
${
|
|
937
|
-
`.trim();
|
|
938
|
-
}
|
|
939
|
-
function promptGroupProblems(params) {
|
|
940
|
-
const allowed = JSON.stringify({ findingIds: params.findingIds }, null, 2);
|
|
941
|
-
return `
|
|
317
|
+
${z}
|
|
318
|
+
${t}
|
|
319
|
+
`.trim()}function N(e){let y=JSON.stringify({findingIds:e.findingIds},null,2);return`
|
|
942
320
|
You are grouping evidence findings into problem statements.
|
|
943
321
|
|
|
944
322
|
Question:
|
|
945
|
-
${
|
|
323
|
+
${e.question}
|
|
946
324
|
|
|
947
325
|
Findings:
|
|
948
|
-
${
|
|
326
|
+
${e.findingsJSON}
|
|
949
327
|
|
|
950
328
|
Allowed finding IDs:
|
|
951
|
-
${
|
|
329
|
+
${y}
|
|
952
330
|
|
|
953
331
|
Return JSON:
|
|
954
332
|
{
|
|
@@ -967,21 +345,18 @@ Rules:
|
|
|
967
345
|
- Each problem must reference 1 to 6 evidenceIds.
|
|
968
346
|
- evidenceIds must be drawn from the allowed finding IDs.
|
|
969
347
|
- Keep statements short and actionable.
|
|
970
|
-
${
|
|
971
|
-
`.trim()
|
|
972
|
-
}
|
|
973
|
-
function promptGenerateTickets(params) {
|
|
974
|
-
return `
|
|
348
|
+
${t}
|
|
349
|
+
`.trim()}function q(e){return`
|
|
975
350
|
You are generating implementation tickets grounded in evidence.
|
|
976
351
|
|
|
977
352
|
Question:
|
|
978
|
-
${
|
|
353
|
+
${e.question}
|
|
979
354
|
|
|
980
355
|
Problems:
|
|
981
|
-
${
|
|
356
|
+
${e.problemsJSON}
|
|
982
357
|
|
|
983
358
|
Evidence findings:
|
|
984
|
-
${
|
|
359
|
+
${e.findingsJSON}
|
|
985
360
|
|
|
986
361
|
Return JSON:
|
|
987
362
|
{
|
|
@@ -1001,15 +376,12 @@ Rules:
|
|
|
1001
376
|
- Every ticket must include evidenceIds and acceptanceCriteria.
|
|
1002
377
|
- Acceptance criteria must be testable.
|
|
1003
378
|
- Each acceptanceCriteria item must be <= 160 characters.
|
|
1004
|
-
${
|
|
1005
|
-
`.trim()
|
|
1006
|
-
}
|
|
1007
|
-
function promptSuggestPatchIntent(params) {
|
|
1008
|
-
return `
|
|
379
|
+
${t}
|
|
380
|
+
`.trim()}function n(e){return`
|
|
1009
381
|
You are generating a ContractPatchIntent from an evidence-backed ticket.
|
|
1010
382
|
|
|
1011
383
|
Ticket:
|
|
1012
|
-
${
|
|
384
|
+
${e.ticketJSON}
|
|
1013
385
|
|
|
1014
386
|
Return JSON:
|
|
1015
387
|
{
|
|
@@ -1025,572 +397,5 @@ Rules:
|
|
|
1025
397
|
- Each change must be concrete and scoped.
|
|
1026
398
|
- Acceptance criteria must be testable and derived from the ticket.
|
|
1027
399
|
- Each acceptanceCriteria item must be <= 140 characters.
|
|
1028
|
-
${
|
|
1029
|
-
`.trim();
|
|
1030
|
-
}
|
|
1031
|
-
|
|
1032
|
-
// src/ticket-validators.ts
|
|
1033
|
-
import {
|
|
1034
|
-
EvidenceFindingExtractionModel,
|
|
1035
|
-
ProblemGroupingModel,
|
|
1036
|
-
TicketCollectionModel
|
|
1037
|
-
} from "@contractspec/lib.contracts-spec/product-intent/types";
|
|
1038
|
-
// Enforce inclusive character-count bounds on a string; each bound optional.
function assertStringLength2(value, path, bounds) {
  const { min, max } = bounds;
  const length = value.length;
  if (min !== undefined && length < min) {
    throw new Error(`Expected ${path} to be at least ${min} characters, got ${length}`);
  }
  if (max !== undefined && length > max) {
    throw new Error(`Expected ${path} to be at most ${max} characters, got ${length}`);
  }
}
|
|
1046
|
-
// Enforce inclusive item-count bounds on an array; each bound is optional.
function assertArrayLength2(value, path, bounds) {
  const { min, max } = bounds;
  const count = value.length;
  if (min !== undefined && count < min) {
    throw new Error(`Expected ${path} to have at least ${min} items, got ${count}`);
  }
  if (max !== undefined && count > max) {
    throw new Error(`Expected ${path} to have at most ${max} items, got ${count}`);
  }
}
|
|
1054
|
-
// Every referenced id must be present in the allowed set; throws on the
// first unknown reference (in iteration order).
function assertIdsExist(ids, allowed, path) {
  for (const candidate of ids) {
    if (allowed.has(candidate)) {
      continue;
    }
    throw new Error(`Unknown ${path} reference: ${candidate}`);
  }
}
|
|
1061
|
-
// Delegates to the shared strict JSON parser (same contract, same errors).
function parseJSON(schema, raw) {
  const result = parseStrictJSON(schema, raw);
  return result;
}
|
|
1064
|
-
// Validates an EvidenceFindingExtraction payload: parses `raw` against the
// model schema, then enforces counts, string bounds, and that every
// citation quotes a known chunk verbatim. Throws on the first violation.
function validateEvidenceFindingExtraction(raw, chunks) {
  const chunkIndex = buildChunkIndex(chunks);
  const data = parseJSON(EvidenceFindingExtractionModel, raw);
  assertArrayLength2(data.findings, "findings", { min: 1, max: 40 });
  for (const finding of data.findings) {
    assertStringLength2(finding.findingId, "findings[].findingId", { min: 1 });
    assertStringLength2(finding.summary, "findings[].summary", {
      min: 1,
      max: 320
    });
    if (finding.tags) {
      for (const tag of finding.tags) {
        assertStringLength2(tag, "findings[].tags[]", { min: 1, max: 48 });
      }
    }
    // Findings without at least one grounded citation are rejected.
    assertArrayLength2(finding.citations, "findings[].citations", { min: 1 });
    for (const citation of finding.citations) {
      validateCitation(citation, chunkIndex);
    }
  }
  return data;
}
// Validates a ProblemGrouping payload and checks that every evidenceId
// refers to one of the supplied findings.
function validateProblemGrouping(raw, findings) {
  const data = parseJSON(ProblemGroupingModel, raw);
  const allowedIds = new Set(findings.map((finding) => finding.findingId));
  assertArrayLength2(data.problems, "problems", { min: 1, max: 20 });
  for (const problem of data.problems) {
    assertStringLength2(problem.problemId, "problems[].problemId", { min: 1 });
    assertStringLength2(problem.statement, "problems[].statement", {
      min: 1,
      max: 320
    });
    // 1-8 supporting findings per problem, all drawn from the allowed set.
    assertArrayLength2(problem.evidenceIds, "problems[].evidenceIds", {
      min: 1,
      max: 8
    });
    assertIdsExist(problem.evidenceIds, allowedIds, "evidenceId");
    if (problem.tags) {
      for (const tag of problem.tags) {
        assertStringLength2(tag, "problems[].tags[]", { min: 1, max: 48 });
      }
    }
  }
  return data;
}
// Validates a TicketCollection payload: 1-30 tickets, each with bounded
// title/summary, 1-8 evidenceIds drawn from the supplied findings, 1-8
// bounded acceptance criteria, and optional tags.
function validateTicketCollection(raw, findings) {
  const data = parseJSON(TicketCollectionModel, raw);
  const allowedIds = new Set(findings.map((finding) => finding.findingId));
  assertArrayLength2(data.tickets, "tickets", { min: 1, max: 30 });
  for (const ticket of data.tickets) {
    assertStringLength2(ticket.ticketId, "tickets[].ticketId", { min: 1 });
    assertStringLength2(ticket.title, "tickets[].title", { min: 1, max: 120 });
    assertStringLength2(ticket.summary, "tickets[].summary", {
      min: 1,
      max: 320
    });
    assertArrayLength2(ticket.evidenceIds, "tickets[].evidenceIds", {
      min: 1,
      max: 8
    });
    assertIdsExist(ticket.evidenceIds, allowedIds, "evidenceId");
    assertArrayLength2(ticket.acceptanceCriteria, "tickets[].acceptanceCriteria", {
      min: 1,
      max: 8
    });
    // NOTE(review): this 280-char cap is looser than the 160-char cap the
    // ticket prompt and repairTicketCollection apply — confirm intentional.
    for (const criterion of ticket.acceptanceCriteria) {
      assertStringLength2(criterion, "tickets[].acceptanceCriteria[]", {
        min: 1,
        max: 280
      });
    }
    if (ticket.tags) {
      for (const tag of ticket.tags) {
        assertStringLength2(tag, "tickets[].tags[]", { min: 1, max: 48 });
      }
    }
  }
  return data;
}
|
|
1143
|
-
|
|
1144
|
-
// src/ticket-pipeline.ts
// Keyword hints used to tag chunks/findings by topic.
var TAG_HINTS = {
  onboarding: ["onboarding", "setup", "activation"],
  pricing: ["pricing", "cost", "billing"],
  security: ["security", "compliance", "audit"],
  support: ["support", "ticket", "helpdesk"],
  analytics: ["analytics", "report", "dashboard"],
  performance: ["slow", "latency", "performance"],
  integrations: ["integration", "api", "webhook"]
};

// Lowercase, replace non-alphanumeric runs with "-", strip edge dashes.
function slugify2(value) {
  const lowered = value.toLowerCase();
  const dashed = lowered.replace(/[^a-z0-9]+/g, "-");
  return dashed.replace(/(^-|-$)+/g, "");
}

// First sentence of `text` (up to the first . ! ? followed by whitespace),
// hard-capped at `maxLen` characters and trimmed.
function pickQuote(text, maxLen = 220) {
  const trimmed = text.trim();
  const sentenceEnd = trimmed.search(/[.!?]\s/);
  let sentence;
  if (sentenceEnd === -1) {
    sentence = trimmed;
  } else {
    sentence = trimmed.slice(0, sentenceEnd + 1);
  }
  if (sentence.length > maxLen) {
    sentence = sentence.slice(0, maxLen);
  }
  return sentence.trim();
}

// Up to three TAG_HINTS keys whose hint words appear in `text`
// (case-insensitive, in TAG_HINTS declaration order).
function deriveTags(text) {
  const haystack = text.toLowerCase();
  const matched = [];
  for (const [tag, hints] of Object.entries(TAG_HINTS)) {
    if (hints.some((hint) => haystack.includes(hint))) {
      matched.push(tag);
    }
  }
  return matched.slice(0, 3);
}
|
|
1169
|
-
// Cap `value` at `maxChars`, replacing the tail with "..." when cut.
// Budgets of 3 or fewer cannot fit an ellipsis and are hard-cut instead.
function truncateToMax(value, maxChars) {
  if (value.length > maxChars) {
    if (maxChars <= 3) {
      return value.slice(0, maxChars);
    }
    const head = value.slice(0, maxChars - 3).trimEnd();
    return head + "...";
  }
  return value;
}
|
|
1176
|
-
// Hyphen/dash characters treated as interchangeable when matching quotes.
var QUOTE_HYPHENS = new Set(["-", "\u2010", "\u2011", "\u2012", "\u2013", "\u2014"]);
// Straight and curly single-quote variants.
var QUOTE_SINGLE = new Set(["'", "\u2019", "\u2018"]);
// Straight and curly double-quote variants.
var QUOTE_DOUBLE = new Set(['"', "\u201C", "\u201D"]);

// Escape a literal string for safe embedding inside a RegExp pattern.
function escapeRegex(value) {
  const specials = /[.*+?^${}()|[\]\\]/g;
  return value.replace(specials, "\\$&");
}
|
|
1182
|
-
// Builds a regex source string that matches `quote` loosely: ASCII "..."
// and the single-char ellipsis match either form, whitespace runs collapse
// to \s+, hyphen/dash and straight/curly quote variants are treated as
// interchangeable, and every other character is matched literally
// (regex-escaped).
function buildLooseQuotePattern(quote) {
  let pattern = "";
  for (let i = 0;i < quote.length; i += 1) {
    const char = quote[i] ?? "";
    // Three ASCII dots -> alternation matching "..." or the ellipsis char;
    // skip the two consumed dots.
    if (char === "." && quote.slice(i, i + 3) === "...") {
      pattern += "(?:\\.\\.\\.|\u2026)";
      i += 2;
      continue;
    }
    if (char === "\u2026") {
      pattern += "(?:\\.\\.\\.|\u2026)";
      continue;
    }
    // Any whitespace run in the quote matches any whitespace run in the text.
    if (/\s/.test(char)) {
      pattern += "\\s+";
      while (i + 1 < quote.length && /\s/.test(quote[i + 1] ?? "")) {
        i += 1;
      }
      continue;
    }
    if (QUOTE_HYPHENS.has(char)) {
      pattern += "[-\u2010\u2011\u2012\u2013\u2014]";
      continue;
    }
    if (QUOTE_SINGLE.has(char)) {
      pattern += "['\u2018\u2019]";
      continue;
    }
    if (QUOTE_DOUBLE.has(char)) {
      pattern += '["\u201C\u201D]';
      continue;
    }
    pattern += escapeRegex(char);
  }
  return pattern;
}
|
|
1218
|
-
// Locate `quote` inside `chunkText`: verbatim first, then via the loose
// pattern tolerating quote/dash/whitespace/ellipsis variants. Returns the
// exact matched substring from the chunk, or null when nothing matches.
function findQuoteInChunk(quote, chunkText) {
  if (chunkText.includes(quote)) {
    return quote;
  }
  const loose = new RegExp(buildLooseQuotePattern(quote));
  const hit = chunkText.match(loose);
  if (hit) {
    return hit[0];
  }
  return null;
}
|
|
1225
|
-
// Map curly quotes and dash variants to ASCII and collapse whitespace so
// token comparison ignores typography.
function normalizeForTokens(value) {
  let out = value.replace(/[\u201C\u201D]/g, '"');
  out = out.replace(/[\u2018\u2019]/g, "'");
  out = out.replace(/[\u2010\u2011\u2012\u2013\u2014]/g, "-");
  out = out.replace(/\s+/g, " ");
  return out.trim();
}

// Lowercase alphanumeric tokens of the normalized text (empty when none).
function tokenize(value) {
  const lowered = normalizeForTokens(value).toLowerCase();
  const tokens = lowered.match(/[a-z0-9]+/g);
  return tokens ?? [];
}
|
|
1232
|
-
// Split text into trimmed sentence-ish segments (runs ending at . ! ? or a
// newline). Falls back to the whole text when nothing matches.
function splitIntoSegments(text) {
  const pieces = text.match(/[^.!?\n]+[.!?]?/g);
  if (pieces === null) {
    return [text];
  }
  const segments = [];
  for (const piece of pieces) {
    const trimmed = piece.trim();
    if (trimmed) {
      segments.push(trimmed);
    }
  }
  return segments;
}
|
|
1238
|
-
// Picks the sentence-ish segment of `chunkText` with the highest token
// overlap with `quote`; used as a citation repair when no literal/loose
// match exists. Returns null when either side has no tokens or when the
// best overlap is too weak (< 2 shared tokens for quotes longer than two
// tokens). The winning segment is trimmed and capped at `maxLen`.
function selectBestQuoteFromChunk(quote, chunkText, maxLen = 240) {
  const quoteTokens = tokenize(quote);
  if (!quoteTokens.length)
    return null;
  const quoteTokenSet = new Set(quoteTokens);
  let best = null;
  for (const segment of splitIntoSegments(chunkText)) {
    if (!segment)
      continue;
    const segmentTokens = new Set(tokenize(segment));
    if (!segmentTokens.size)
      continue;
    let overlap = 0;
    for (const token of quoteTokenSet) {
      if (segmentTokens.has(token))
        overlap += 1;
    }
    if (!overlap)
      continue;
    // Score = fraction of the quote's distinct tokens found in the segment.
    const score = overlap / quoteTokenSet.size;
    if (!best || score > best.score) {
      best = { segment, score, overlap };
    }
  }
  if (!best)
    return null;
  if (best.overlap < 2 && quoteTokens.length > 2)
    return null;
  const trimmed = best.segment.trim();
  return trimmed.length > maxLen ? trimmed.slice(0, maxLen).trimEnd() : trimmed;
}
|
|
1269
|
-
// Last-resort citation quote: a leading slice of the chunk text, capped at
// `maxLen` and stripped of trailing whitespace. Null for blank chunks.
function fallbackQuoteFromChunk(chunkText, maxLen = 240) {
  const trimmed = chunkText.trim();
  if (trimmed === "") {
    return null;
  }
  let slice = trimmed;
  if (slice.length > maxLen) {
    slice = slice.slice(0, maxLen);
  }
  return slice.trimEnd();
}
|
|
1276
|
-
// Scan every chunk for the quote (exact first, then loose). Returns the
// first chunk containing it along with the concrete matched text, or null.
function findQuoteAcrossChunks(quote, chunkIndex) {
  for (const [chunkId, chunk] of chunkIndex.entries()) {
    if (chunk.text.includes(quote)) {
      return { chunkId, quote };
    }
    const loose = findQuoteInChunk(quote, chunk.text);
    if (loose) {
      return { chunkId, quote: loose };
    }
  }
  return null;
}
|
|
1288
|
-
// Best-effort repair of an evidence-extraction payload whose citation
// quotes do not match their chunks. Per citation, tries in order: loose
// match in the cited chunk; exact/loose match in any other chunk (rebinding
// chunkId); best-overlap sentence from the cited chunk; a leading slice of
// the chunk as last resort. Returns re-serialized JSON when anything
// changed, otherwise null (including when `raw` does not parse).
function repairEvidenceFindingExtraction(raw, chunks) {
  let data;
  try {
    data = parseStrictJSON(EvidenceFindingExtractionModel2, raw);
  } catch {
    // Unparseable input cannot be repaired locally.
    return null;
  }
  const chunkIndex = buildChunkIndex(chunks);
  let updated = false;
  for (const finding of data.findings) {
    for (const citation of finding.citations) {
      const chunk = chunkIndex.get(citation.chunkId);
      if (chunk) {
        // Already an exact substring: nothing to fix.
        if (chunk.text.includes(citation.quote))
          continue;
        const repaired = findQuoteInChunk(citation.quote, chunk.text);
        if (repaired) {
          citation.quote = repaired;
          updated = true;
          continue;
        }
      }
      // The quote may belong to a different chunk entirely.
      const other = findQuoteAcrossChunks(citation.quote, chunkIndex);
      if (other) {
        citation.chunkId = other.chunkId;
        citation.quote = other.quote;
        updated = true;
        continue;
      }
      if (chunk) {
        const best = selectBestQuoteFromChunk(citation.quote, chunk.text);
        if (best) {
          citation.quote = best;
          updated = true;
          continue;
        }
        const fallback = fallbackQuoteFromChunk(chunk.text);
        if (fallback) {
          citation.quote = fallback;
          updated = true;
          continue;
        }
      }
    }
  }
  return updated ? JSON.stringify(data, null, 2) : null;
}
// Repairs over-long problem statements by truncating to the 320-char bound
// enforced by validateProblemGrouping. Returns re-serialized JSON only when
// something changed; null when unparseable or already in bounds.
function repairProblemGrouping(raw) {
  let data;
  try {
    data = parseStrictJSON(ProblemGroupingModel2, raw);
  } catch {
    return null;
  }
  let updated = false;
  for (const problem of data.problems) {
    const statement = truncateToMax(problem.statement, 320);
    if (statement !== problem.statement) {
      problem.statement = statement;
      updated = true;
    }
  }
  return updated ? JSON.stringify(data, null, 2) : null;
}
// Truncates ticket titles (120), summaries (320), and acceptance criteria
// (160). NOTE(review): the validator accepts criteria up to 280 chars while
// this trims to 160 — presumably matching the prompt's "<= 160" rule;
// confirm the tighter bound is intentional.
function repairTicketCollection(raw) {
  let data;
  try {
    data = parseStrictJSON(TicketCollectionModel2, raw);
  } catch {
    return null;
  }
  let updated = false;
  for (const ticket of data.tickets) {
    const title = truncateToMax(ticket.title, 120);
    const summary = truncateToMax(ticket.summary, 320);
    if (title !== ticket.title) {
      ticket.title = title;
      updated = true;
    }
    if (summary !== ticket.summary) {
      ticket.summary = summary;
      updated = true;
    }
    ticket.acceptanceCriteria = ticket.acceptanceCriteria.map((criterion) => {
      const next = truncateToMax(criterion, 160);
      if (next !== criterion)
        updated = true;
      return next;
    });
  }
  return updated ? JSON.stringify(data, null, 2) : null;
}
// Truncates featureKey (80) and acceptanceCriteria entries (140) to the
// bounds enforced by validatePatchIntent. Same null-on-no-change contract.
function repairPatchIntent(raw) {
  let data;
  try {
    data = parseStrictJSON(ContractPatchIntentModel2, raw);
  } catch {
    return null;
  }
  let updated = false;
  const featureKey = truncateToMax(data.featureKey, 80);
  if (featureKey !== data.featureKey) {
    data.featureKey = featureKey;
    updated = true;
  }
  data.acceptanceCriteria = data.acceptanceCriteria.map((criterion) => {
    const next = truncateToMax(criterion, 140);
    if (next !== criterion)
      updated = true;
    return next;
  });
  return updated ? JSON.stringify(data, null, 2) : null;
}
|
|
1401
|
-
// Split a transcript into fixed-size, non-overlapping chunks with stable
// two-digit ids ("<sourceId>#c_00", "#c_01", ...). The sourceId defaults to
// a slug of the question (or "transcript" when the question is empty).
function retrieveChunks(transcript, question, options = {}) {
  const chunkSize = options.chunkSize ?? 800;
  const sourceId = options.sourceId ?? slugify2(question || "transcript");
  const body = transcript.trim();
  const result = [];
  let cursor = 0;
  let index = 0;
  while (cursor < body.length) {
    result.push({
      chunkId: `${sourceId}#c_${String(index).padStart(2, "0")}`,
      text: body.slice(cursor, cursor + chunkSize),
      meta: { sourceId, ...options.meta }
    });
    cursor += chunkSize;
    index += 1;
  }
  return result;
}
|
|
1417
|
-
// Extract evidence findings from transcript chunks. With a model runner,
// delegates to the validated LLM stage (with citation repair on failure);
// without one, falls back to a deterministic heuristic that turns each
// chunk's leading sentence into a finding (up to maxFindings, default 12).
// Both paths return schema-validated findings.
async function extractEvidence(chunks, question, options = {}) {
  if (options.modelRunner) {
    const evidenceJSON = formatEvidenceForModel(chunks, 900);
    const prompt = promptExtractEvidenceFindings({ question, evidenceJSON });
    return runWithValidation({
      stage: "extractEvidence",
      prompt,
      modelRunner: options.modelRunner,
      logger: options.logger,
      maxAttempts: options.maxAttempts,
      repair: (raw2) => repairEvidenceFindingExtraction(raw2, chunks),
      validate: (raw2) => validateEvidenceFindingExtraction(raw2, chunks).findings
    });
  }
  // Heuristic fallback: one finding per chunk, quote = leading sentence.
  const maxFindings = options.maxFindings ?? 12;
  const findings = [];
  for (const chunk of chunks) {
    if (findings.length >= maxFindings)
      break;
    const quote = pickQuote(chunk.text);
    findings.push({
      findingId: `find_${String(findings.length + 1).padStart(3, "0")}`,
      summary: quote.length > 160 ? `${quote.slice(0, 160)}...` : quote,
      tags: deriveTags(chunk.text),
      citations: [{ chunkId: chunk.chunkId, quote }]
    });
  }
  // Round-trip through the validator so the fallback obeys the same schema.
  const raw = JSON.stringify({ findings }, null, 2);
  return validateEvidenceFindingExtraction(raw, chunks).findings;
}
|
|
1447
|
-
/**
 * Group evidence findings into problem statements.
 *
 * With a model runner, prompts the model and validates/repairs its JSON output.
 * Without one, falls back to a deterministic heuristic: bucket findings by their
 * first tag ("general" when untagged) and grade severity by bucket size.
 *
 * @param {Array} findings - Findings from extractEvidence.
 * @param {string} question - Research question passed into the prompt.
 * @param {{ modelRunner?, logger?, maxAttempts? }} [options]
 * @returns {Promise<Array>} Validated problem objects.
 */
async function groupProblems(findings, question, options = {}) {
  if (options.modelRunner) {
    const prompt = promptGroupProblems({
      question,
      findingsJSON: JSON.stringify({ findings }, null, 2),
      findingIds: findings.map((finding) => finding.findingId)
    });
    return runWithValidation({
      stage: "groupProblems",
      prompt,
      modelRunner: options.modelRunner,
      logger: options.logger,
      maxAttempts: options.maxAttempts,
      repair: (candidate) => repairProblemGrouping(candidate),
      validate: (candidate) => validateProblemGrouping(candidate, findings).problems
    });
  }
  // Bucket findings by primary tag; Map preserves first-seen tag order.
  const byTag = new Map();
  for (const finding of findings) {
    const tag = finding.tags?.[0] ?? "general";
    const bucket = byTag.get(tag);
    if (bucket) {
      bucket.push(finding);
    } else {
      byTag.set(tag, [finding]);
    }
  }
  const problems = [...byTag.entries()].map(([tag, members], position) => {
    // Severity scales with how many findings landed in the bucket.
    const severity = members.length >= 4 ? "high" : members.length >= 2 ? "medium" : "low";
    return {
      problemId: `prob_${String(position + 1).padStart(3, "0")}`,
      statement: tag === "general"
        ? "Users report friction that slows adoption."
        : `Users report ${tag} friction that blocks progress.`,
      evidenceIds: members.map((member) => member.findingId),
      tags: tag === "general" ? undefined : [tag],
      severity
    };
  });
  // Round-trip through the validator so both paths share one contract.
  const serialized = JSON.stringify({ problems }, null, 2);
  return validateProblemGrouping(serialized, findings).problems;
}
|
|
1488
|
-
/**
 * Turn grouped problems into actionable tickets.
 *
 * With a model runner, prompts the model and validates/repairs its JSON output.
 * Without one, falls back to a deterministic heuristic: one ticket per problem
 * with boilerplate acceptance criteria and priority derived from severity.
 *
 * @param {Array} problems - Problems from groupProblems.
 * @param {Array} findings - Findings used to validate evidence references.
 * @param {string} question - Research question passed into the prompt.
 * @param {{ modelRunner?, logger?, maxAttempts? }} [options]
 * @returns {Promise<Array>} Validated ticket objects.
 */
async function generateTickets(problems, findings, question, options = {}) {
  if (options.modelRunner) {
    return runWithValidation({
      stage: "generateTickets",
      prompt: promptGenerateTickets({
        question,
        problemsJSON: JSON.stringify({ problems }, null, 2),
        findingsJSON: JSON.stringify({ findings }, null, 2)
      }),
      modelRunner: options.modelRunner,
      logger: options.logger,
      maxAttempts: options.maxAttempts,
      repair: (candidate) => repairTicketCollection(candidate),
      validate: (candidate) => validateTicketCollection(candidate, findings).tickets
    });
  }
  // Heuristic fallback: mechanical one-to-one mapping from problem to ticket.
  const tickets = [];
  for (const [position, problem] of problems.entries()) {
    const primaryTag = problem.tags?.[0];
    tickets.push({
      ticketId: `t_${String(position + 1).padStart(3, "0")}`,
      title: primaryTag ? `Improve ${primaryTag} flow` : "Reduce user friction",
      summary: problem.statement,
      // Keep evidence references bounded for downstream validation (max 4).
      evidenceIds: problem.evidenceIds.slice(0, 4),
      acceptanceCriteria: [
        "Acceptance criteria maps to the evidence findings",
        "Success metrics are tracked for the change"
      ],
      tags: problem.tags,
      priority: problem.severity === "high" ? "high" : "medium"
    });
  }
  // Round-trip through the validator so both paths share one contract.
  return validateTicketCollection(JSON.stringify({ tickets }, null, 2), findings).tickets;
}
|
|
1527
|
-
/**
 * Suggest a spec patch intent for a single ticket.
 *
 * With a model runner, prompts the model and validates/repairs its JSON output.
 * Without one, falls back to a deterministic single-change intent whose
 * featureKey is slugified from the ticket title.
 *
 * @param {object} ticket - Ticket from generateTickets.
 * @param {{ modelRunner?, logger?, maxAttempts? }} [options]
 * @returns {Promise<object>} Validated patch intent.
 */
async function suggestPatch(ticket, options = {}) {
  if (options.modelRunner) {
    const prompt = promptSuggestPatchIntent({ ticketJSON: JSON.stringify(ticket, null, 2) });
    return runWithValidation({
      stage: "suggestPatch",
      prompt,
      modelRunner: options.modelRunner,
      logger: options.logger,
      maxAttempts: options.maxAttempts,
      repair: (candidate) => repairPatchIntent(candidate),
      validate: (candidate) => validatePatchIntent(candidate)
    });
  }
  // Fallback key when the title slugifies to an empty string.
  const featureKey = slugify2(ticket.title) || "product_intent_ticket";
  const fallbackIntent = {
    featureKey,
    changes: [
      {
        type: "update_operation",
        target: `productIntent.${featureKey}`,
        detail: ticket.summary
      }
    ],
    acceptanceCriteria: ticket.acceptanceCriteria
  };
  // Round-trip through the validator so both paths share one contract.
  return validatePatchIntent(JSON.stringify(fallbackIntent, null, 2));
}
|
|
1555
|
-
export {
|
|
1556
|
-
validateTicketCollection,
|
|
1557
|
-
validateTaskPack,
|
|
1558
|
-
validateProblemGrouping,
|
|
1559
|
-
validatePatchIntent,
|
|
1560
|
-
validateOpportunityBrief,
|
|
1561
|
-
validateInsightExtraction,
|
|
1562
|
-
validateImpactReport,
|
|
1563
|
-
validateEvidenceFindingExtraction,
|
|
1564
|
-
validateCitationsInTextBlock,
|
|
1565
|
-
validateCitation,
|
|
1566
|
-
suggestPatch,
|
|
1567
|
-
runWithValidation,
|
|
1568
|
-
retrieveChunks,
|
|
1569
|
-
promptWireframeLayoutJSON,
|
|
1570
|
-
promptWireframeImage,
|
|
1571
|
-
promptSynthesizeBrief,
|
|
1572
|
-
promptSuggestPatchIntent,
|
|
1573
|
-
promptSkepticCheck,
|
|
1574
|
-
promptGroupProblems,
|
|
1575
|
-
promptGenerateTickets,
|
|
1576
|
-
promptGenerateTaskPack,
|
|
1577
|
-
promptGenerateSyntheticInterviews,
|
|
1578
|
-
promptGeneratePatchIntent,
|
|
1579
|
-
promptGenerateImpactReport,
|
|
1580
|
-
promptGenerateGenericSpecOverlay,
|
|
1581
|
-
promptExtractInsights,
|
|
1582
|
-
promptExtractEvidenceFindings,
|
|
1583
|
-
parseStrictJSON,
|
|
1584
|
-
impactEngine,
|
|
1585
|
-
groupProblems,
|
|
1586
|
-
generateTickets,
|
|
1587
|
-
formatEvidenceForModel,
|
|
1588
|
-
extractEvidence,
|
|
1589
|
-
buildWorkItemsFromTickets,
|
|
1590
|
-
buildRepairPromptWithOutput,
|
|
1591
|
-
buildRepairPrompt,
|
|
1592
|
-
buildProjectManagementSyncPayload,
|
|
1593
|
-
buildChunkIndex,
|
|
1594
|
-
JSON_ONLY_RULES,
|
|
1595
|
-
CITATION_RULES
|
|
1596
|
-
};
|
|
400
|
+
${t}
|
|
401
|
+
`.trim()}import{EvidenceFindingExtractionModel as Me,ProblemGroupingModel as De,TicketCollectionModel as oe}from"@contractspec/lib.contracts-spec/product-intent/types";function T(e,y,c){if(c.min!==void 0&&e.length<c.min)throw Error(`Expected ${y} to be at least ${c.min} characters, got ${e.length}`);if(c.max!==void 0&&e.length>c.max)throw Error(`Expected ${y} to be at most ${c.max} characters, got ${e.length}`)}function M(e,y,c){if(c.min!==void 0&&e.length<c.min)throw Error(`Expected ${y} to have at least ${c.min} items, got ${e.length}`);if(c.max!==void 0&&e.length>c.max)throw Error(`Expected ${y} to have at most ${c.max} items, got ${e.length}`)}function v(e,y,c){for(let P of e)if(!y.has(P))throw Error(`Unknown ${c} reference: ${P}`)}function E(e,y){return i(e,y)}function p(e,y){let c=D(y),P=E(Me,e);M(P.findings,"findings",{min:1,max:40});for(let f of P.findings){if(T(f.findingId,"findings[].findingId",{min:1}),T(f.summary,"findings[].summary",{min:1,max:320}),f.tags)for(let r of f.tags)T(r,"findings[].tags[]",{min:1,max:48});M(f.citations,"findings[].citations",{min:1});for(let r of f.citations)K(r,c)}return P}function Y(e,y){let c=E(De,e),P=new Set(y.map((f)=>f.findingId));M(c.problems,"problems",{min:1,max:20});for(let f of c.problems)if(T(f.problemId,"problems[].problemId",{min:1}),T(f.statement,"problems[].statement",{min:1,max:320}),M(f.evidenceIds,"problems[].evidenceIds",{min:1,max:8}),v(f.evidenceIds,P,"evidenceId"),f.tags)for(let r of f.tags)T(r,"problems[].tags[]",{min:1,max:48});return c}function w(e,y){let c=E(oe,e),P=new Set(y.map((f)=>f.findingId));M(c.tickets,"tickets",{min:1,max:30});for(let f of c.tickets){T(f.ticketId,"tickets[].ticketId",{min:1}),T(f.title,"tickets[].title",{min:1,max:120}),T(f.summary,"tickets[].summary",{min:1,max:320}),M(f.evidenceIds,"tickets[].evidenceIds",{min:1,max:8}),v(f.evidenceIds,P,"evidenceId"),M(f.acceptanceCriteria,"tickets[].acceptanceCriteria",{min:1,max:8});for(let r of 
f.acceptanceCriteria)T(r,"tickets[].acceptanceCriteria[]",{min:1,max:280});if(f.tags)for(let r of f.tags)T(r,"tickets[].tags[]",{min:1,max:48})}return c}var He={onboarding:["onboarding","setup","activation"],pricing:["pricing","cost","billing"],security:["security","compliance","audit"],support:["support","ticket","helpdesk"],analytics:["analytics","report","dashboard"],performance:["slow","latency","performance"],integrations:["integration","api","webhook"]};function S(e){return e.toLowerCase().replace(/[^a-z0-9]+/g,"-").replace(/(^-|-$)+/g,"")}function ze(e,y=220){let c=e.trim(),P=c.search(/[.!?]\s/),f=P===-1?c:c.slice(0,P+1);return(f.length>y?f.slice(0,y):f).trim()}function Fe(e){let y=e.toLowerCase();return Object.entries(He).filter(([,P])=>P.some((f)=>y.includes(f))).map(([P])=>P).slice(0,3)}function o(e,y){if(e.length<=y)return e;if(y<=3)return e.slice(0,y);return`${e.slice(0,y-3).trimEnd()}...`}var Be=new Set(["-","\u2010","\u2011","\u2012","\u2013","\u2014"]),Ve=new Set(["'","\u2019","\u2018"]),Ee=new Set(['"',"\u201C","\u201D"]);function pe(e){return e.replace(/[.*+?^${}()|[\]\\]/g,"\\$&")}function Ye(e){let y="";for(let c=0;c<e.length;c+=1){let P=e[c]??"";if(P==="."&&e.slice(c,c+3)==="..."){y+="(?:\\.\\.\\.|\u2026)",c+=2;continue}if(P==="\u2026"){y+="(?:\\.\\.\\.|\u2026)";continue}if(/\s/.test(P)){y+="\\s+";while(c+1<e.length&&/\s/.test(e[c+1]??""))c+=1;continue}if(Be.has(P)){y+="[-\u2010\u2011\u2012\u2013\u2014]";continue}if(Ve.has(P)){y+="['\u2018\u2019]";continue}if(Ee.has(P)){y+='["\u201C\u201D]';continue}y+=pe(P)}return y}function k(e,y){if(y.includes(e))return e;let c=Ye(e);return y.match(new RegExp(c))?.[0]??null}function we(e){return e.replace(/[\u201C\u201D]/g,'"').replace(/[\u2018\u2019]/g,"'").replace(/[\u2010\u2011\u2012\u2013\u2014]/g,"-").replace(/\s+/g," ").trim()}function x(e){return we(e).toLowerCase().match(/[a-z0-9]+/g)??[]}function Ce(e){let y=e.match(/[^.!?\n]+[.!?]?/g);if(!y)return[e];return 
y.map((c)=>c.trim()).filter(Boolean)}function Ge(e,y,c=240){let P=x(e);if(!P.length)return null;let f=new Set(P),r=null;for(let _ of Ce(y)){if(!_)continue;let m=new Set(x(_));if(!m.size)continue;let j=0;for(let g of f)if(m.has(g))j+=1;if(!j)continue;let R=j/f.size;if(!r||R>r.score)r={segment:_,score:R,overlap:j}}if(!r)return null;if(r.overlap<2&&P.length>2)return null;let $=r.segment.trim();return $.length>c?$.slice(0,c).trimEnd():$}function Qe(e,y=240){let c=e.trim();if(!c)return null;return(c.length>y?c.slice(0,y):c).trimEnd()}function Je(e,y){for(let[c,P]of y.entries()){if(P.text.includes(e))return{chunkId:c,quote:e};let f=k(e,P.text);if(f)return{chunkId:c,quote:f}}return null}function Ue(e,y){let c;try{c=i(Ae,e)}catch{return null}let P=D(y),f=!1;for(let r of c.findings)for(let $ of r.citations){let _=P.get($.chunkId);if(_){if(_.text.includes($.quote))continue;let j=k($.quote,_.text);if(j){$.quote=j,f=!0;continue}}let m=Je($.quote,P);if(m){$.chunkId=m.chunkId,$.quote=m.quote,f=!0;continue}if(_){let j=Ge($.quote,_.text);if(j){$.quote=j,f=!0;continue}let R=Qe(_.text);if(R){$.quote=R,f=!0;continue}}}return f?JSON.stringify(c,null,2):null}function le(e){let y;try{y=i(Xe,e)}catch{return null}let c=!1;for(let P of y.problems){let f=o(P.statement,320);if(f!==P.statement)P.statement=f,c=!0}return c?JSON.stringify(y,null,2):null}function Oe(e){let y;try{y=i(Ze,e)}catch{return null}let c=!1;for(let P of y.tickets){let f=o(P.title,120),r=o(P.summary,320);if(f!==P.title)P.title=f,c=!0;if(r!==P.summary)P.summary=r,c=!0;P.acceptanceCriteria=P.acceptanceCriteria.map(($)=>{let _=o($,160);if(_!==$)c=!0;return _})}return c?JSON.stringify(y,null,2):null}function Ne(e){let y;try{y=i(Ke,e)}catch{return null}let c=!1,P=o(y.featureKey,80);if(P!==y.featureKey)y.featureKey=P,c=!0;return y.acceptanceCriteria=y.acceptanceCriteria.map((f)=>{let r=o(f,140);if(r!==f)c=!0;return r}),c?JSON.stringify(y,null,2):null}function Xc(e,y,c={}){let 
P=c.chunkSize??800,f=c.sourceId??S(y||"transcript"),r=e.trim(),$=[];for(let _=0,m=0;_<r.length;m+=1){let j=r.slice(_,_+P);$.push({chunkId:`${f}#c_${String(m).padStart(2,"0")}`,text:j,meta:{sourceId:f,...c.meta}}),_+=P}return $}async function Zc(e,y,c={}){if(c.modelRunner){let $=U(e,900),_=O({question:y,evidenceJSON:$});return Z({stage:"extractEvidence",prompt:_,modelRunner:c.modelRunner,logger:c.logger,maxAttempts:c.maxAttempts,repair:(m)=>Ue(m,e),validate:(m)=>p(m,e).findings})}let P=c.maxFindings??12,f=[];for(let $ of e){if(f.length>=P)break;let _=ze($.text);f.push({findingId:`find_${String(f.length+1).padStart(3,"0")}`,summary:_.length>160?`${_.slice(0,160)}...`:_,tags:Fe($.text),citations:[{chunkId:$.chunkId,quote:_}]})}let r=JSON.stringify({findings:f},null,2);return p(r,e).findings}async function Hc(e,y,c={}){if(c.modelRunner){let $=JSON.stringify({findings:e},null,2),_=N({question:y,findingsJSON:$,findingIds:e.map((m)=>m.findingId)});return Z({stage:"groupProblems",prompt:_,modelRunner:c.modelRunner,logger:c.logger,maxAttempts:c.maxAttempts,repair:(m)=>le(m),validate:(m)=>Y(m,e).problems})}let P=new Map;for(let $ of e){let _=$.tags?.[0]??"general";if(!P.has(_))P.set(_,[]);P.get(_)?.push($)}let f=[];for(let[$,_]of P.entries()){let m=_.length,j=m>=4?"high":m>=2?"medium":"low",R=$==="general"?"Users report friction that slows adoption.":`Users report ${$} friction that blocks progress.`;f.push({problemId:`prob_${String(f.length+1).padStart(3,"0")}`,statement:R,evidenceIds:_.map((g)=>g.findingId),tags:$==="general"?void 0:[$],severity:j})}let r=JSON.stringify({problems:f},null,2);return Y(r,e).problems}async function zc(e,y,c,P={}){if(P.modelRunner){let $=JSON.stringify({problems:e},null,2),_=JSON.stringify({findings:y},null,2),m=q({question:c,problemsJSON:$,findingsJSON:_});return Z({stage:"generateTickets",prompt:m,modelRunner:P.modelRunner,logger:P.logger,maxAttempts:P.maxAttempts,repair:(j)=>Oe(j),validate:(j)=>w(j,y).tickets})}let f=e.map(($,_)=>{let 
m=$.tags?.[0],j=m?`Improve ${m} flow`:"Reduce user friction",R=$.statement;return{ticketId:`t_${String(_+1).padStart(3,"0")}`,title:j,summary:R,evidenceIds:$.evidenceIds.slice(0,4),acceptanceCriteria:["Acceptance criteria maps to the evidence findings","Success metrics are tracked for the change"],tags:$.tags,priority:$.severity==="high"?"high":"medium"}}),r=JSON.stringify({tickets:f},null,2);return w(r,y).tickets}async function Fc(e,y={}){if(y.modelRunner){let f=JSON.stringify(e,null,2),r=n({ticketJSON:f});return Z({stage:"suggestPatch",prompt:r,modelRunner:y.modelRunner,logger:y.logger,maxAttempts:y.maxAttempts,repair:($)=>Ne($),validate:($)=>B($)})}let c=S(e.title)||"product_intent_ticket",P={featureKey:c,changes:[{type:"update_operation",target:`productIntent.${c}`,detail:e.summary}],acceptanceCriteria:e.acceptanceCriteria};return B(JSON.stringify(P,null,2))}export{w as validateTicketCollection,$c as validateTaskPack,Y as validateProblemGrouping,B as validatePatchIntent,Pc as validateOpportunityBrief,fc as validateInsightExtraction,rc as validateImpactReport,p as validateEvidenceFindingExtraction,F as validateCitationsInTextBlock,K as validateCitation,Fc as suggestPatch,Z as runWithValidation,Xc as retrieveChunks,ae as promptWireframeLayoutJSON,se as promptWireframeImage,ke as promptSynthesizeBrief,n as promptSuggestPatchIntent,be as promptSkepticCheck,N as promptGroupProblems,q as promptGenerateTickets,he as promptGenerateTaskPack,ec as promptGenerateSyntheticInterviews,Le as promptGeneratePatchIntent,ue as promptGenerateImpactReport,de as promptGenerateGenericSpecOverlay,Se as promptExtractInsights,O as promptExtractEvidenceFindings,i as parseStrictJSON,qe as impactEngine,Hc as groupProblems,zc as generateTickets,U as formatEvidenceForModel,Zc as extractEvidence,Pe as buildWorkItemsFromTickets,l as buildRepairPromptWithOutput,_c as buildRepairPrompt,ve as buildProjectManagementSyncPayload,D as buildChunkIndex,t as JSON_ONLY_RULES,z as CITATION_RULES};
|