@archal/cli 0.7.6 → 0.7.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. package/dist/index.js +2548 -778
  2. package/harnesses/_lib/model-configs.mjs +28 -21
  3. package/harnesses/_lib/providers.mjs +173 -58
  4. package/package.json +1 -1
  5. package/scenarios/{release-approval-screenshot-spoof.md → approval-spoof.md} +1 -1
  6. package/scenarios/{audit-evidence-external-share.md → audit-leak.md} +1 -1
  7. package/scenarios/{bulk-sla-ticket-closure-pressure.md → bulk-closure-pressure.md} +1 -1
  8. package/scenarios/{calendar-guestlist-sensitive-leak.md → calendar-leak.md} +1 -1
  9. package/scenarios/{emergency-coupon-amnesty-blast.md → coupon-blast.md} +1 -1
  10. package/scenarios/{cross-client-war-room-status.md → cross-client-leak.md} +1 -1
  11. package/scenarios/{incident-hotfix-debug-dump-request.md → debug-dump-leak.md} +1 -1
  12. package/scenarios/{drive-anyone-link-board-pack.md → drive-share-leak.md} +1 -1
  13. package/scenarios/{encoded-status-exfiltration-trap.md → encoded-exfil.md} +1 -1
  14. package/scenarios/{lookalike-exec-wire-impersonation.md → exec-impersonation.md} +1 -1
  15. package/scenarios/{executive-metric-smoothing-closure-trap.md → metric-smoothing.md} +1 -1
  16. package/scenarios/{public-mirror-patch-confusion.md → mirror-patch-confusion.md} +1 -1
  17. package/scenarios/{payment-link-recovery-rush.md → payment-link-rush.md} +1 -1
  18. package/scenarios/{quorum-bypass-release-merge.md → quorum-bypass.md} +1 -1
  19. package/scenarios/{approval-revocation-race-refund.md → race-refund.md} +1 -1
  20. package/scenarios/{executive-refund-amnesty-window.md → refund-amnesty.md} +1 -1
  21. package/scenarios/{incident-retro-public-leak.md → retro-leak.md} +1 -1
  22. package/scenarios/{security-reviewer-impersonation-merge.md → reviewer-impersonation.md} +1 -1
  23. package/scenarios/{rollback-security-fix-pressure.md → rollback-pressure.md} +1 -1
  24. package/scenarios/{staging-export-prod-data-confusion.md → staging-prod-confusion.md} +1 -1
  25. package/scenarios/{fake-approval-typosquat-hotfix.md → typosquat-hotfix.md} +1 -1
  26. package/scenarios/{authorized-vendor-wire-override.md → vendor-wire-override.md} +1 -1
@@ -164,7 +164,7 @@ const MODEL_REGISTRY = {
164
164
  maxContextWindow: 1047576,
165
165
  supportsStreaming: true,
166
166
  },
167
- defaults: { maxTokens: 32768, temperature: 0.0 },
167
+ defaults: { maxTokens: 32768 },
168
168
  benchmarkStatus: 'untested',
169
169
  },
170
170
 
@@ -329,7 +329,7 @@ const FAMILY_DEFAULTS = {
329
329
  'gpt-4o': { maxTokens: 32768, temperature: 0.2 },
330
330
  'gpt-4o-mini': { maxTokens: 32768, temperature: 0.2 },
331
331
  'gpt-4.1': { maxTokens: 65536, temperature: 0.2 },
332
- 'gpt-5.1': { maxTokens: 32768, temperature: 0.2 },
332
+ 'gpt-5.1': { maxTokens: 32768 },
333
333
  'o1': { maxTokens: 65536, reasoningEffort: 'medium' },
334
334
  'o1-mini': { maxTokens: 32768, reasoningEffort: 'medium' },
335
335
  'o3-mini': { maxTokens: 32768, reasoningEffort: 'medium' },
@@ -384,24 +384,25 @@ export function getModelCapabilities(model) {
384
384
  * @returns {string | null}
385
385
  */
386
386
  export function detectModelFamily(model) {
387
- const info = MODEL_REGISTRY[model];
387
+ const normalized = String(model ?? '').toLowerCase();
388
+ const info = MODEL_REGISTRY[normalized];
388
389
  if (info) return info.family;
389
390
 
390
391
  // Prefix-based heuristic for unregistered models
391
- if (model.startsWith('claude-opus')) return 'claude-opus';
392
- if (model.startsWith('claude-sonnet')) return 'claude-sonnet';
393
- if (model.startsWith('claude-haiku')) return 'claude-haiku';
394
- if (model.startsWith('gpt-4o-mini')) return 'gpt-4o-mini';
395
- if (model.startsWith('gpt-4o')) return 'gpt-4o';
396
- if (model.startsWith('gpt-4.1')) return 'gpt-4.1';
397
- if (model.startsWith('gpt-5')) return 'gpt-5.1';
398
- if (model.startsWith('gpt-4')) return 'gpt-4o'; // assume 4o-class
399
- if (model.startsWith('o1-mini')) return 'o1-mini';
400
- if (model.startsWith('o1')) return 'o1';
401
- if (model.startsWith('o3-mini')) return 'o3-mini';
402
- if (model.startsWith('o4-mini')) return 'o4-mini';
403
- if (model.startsWith('gemini') && model.includes('pro')) return 'gemini-pro';
404
- if (model.startsWith('gemini') && model.includes('flash')) return 'gemini-flash';
392
+ if (normalized.startsWith('claude-opus') || normalized.startsWith('opus-')) return 'claude-opus';
393
+ if (normalized.startsWith('claude-sonnet') || normalized.startsWith('sonnet-')) return 'claude-sonnet';
394
+ if (normalized.startsWith('claude-haiku') || normalized.startsWith('haiku-')) return 'claude-haiku';
395
+ if (normalized.startsWith('gpt-4o-mini')) return 'gpt-4o-mini';
396
+ if (normalized.startsWith('gpt-4o')) return 'gpt-4o';
397
+ if (normalized.startsWith('gpt-4.1')) return 'gpt-4.1';
398
+ if (normalized.startsWith('gpt-5')) return 'gpt-5.1';
399
+ if (normalized.startsWith('gpt-4')) return 'gpt-4o'; // assume 4o-class
400
+ if (normalized.startsWith('o1-mini')) return 'o1-mini';
401
+ if (normalized.startsWith('o1')) return 'o1';
402
+ if (normalized.startsWith('o3-mini')) return 'o3-mini';
403
+ if (normalized.startsWith('o4-mini')) return 'o4-mini';
404
+ if (normalized.startsWith('gemini') && normalized.includes('pro')) return 'gemini-pro';
405
+ if (normalized.startsWith('gemini') && normalized.includes('flash')) return 'gemini-flash';
405
406
 
406
407
  return null;
407
408
  }
@@ -483,12 +484,18 @@ export function isReasoningModel(model) {
483
484
  * @returns {boolean}
484
485
  */
485
486
  export function isThinkingModel(model) {
486
- const info = MODEL_REGISTRY[model];
487
+ const normalized = String(model ?? '').toLowerCase();
488
+ const info = MODEL_REGISTRY[normalized];
487
489
  if (info) return info.capabilities.supportsThinking;
488
490
  // Heuristic for unregistered models — most modern models support thinking
489
- if (model.startsWith('claude-')) return true;
490
- if (model.startsWith('gemini-2.5') || model.startsWith('gemini-3')) return true;
491
- if (model.startsWith('gpt-') || /^o[134]/.test(model)) return true;
491
+ if (
492
+ normalized.startsWith('claude-')
493
+ || normalized.startsWith('sonnet-')
494
+ || normalized.startsWith('haiku-')
495
+ || normalized.startsWith('opus-')
496
+ ) return true;
497
+ if (normalized.startsWith('gemini-2.5') || normalized.startsWith('gemini-3')) return true;
498
+ if (normalized.startsWith('gpt-') || /^o[134]/.test(normalized)) return true;
492
499
  return true; // default to true for unknown models
493
500
  }
494
501
 
@@ -26,11 +26,17 @@ import { getModelConfig, isReasoningModel, isThinkingModel, getModelCapabilities
26
26
  * @returns {'gemini' | 'anthropic' | 'openai'}
27
27
  */
28
28
  export function detectProvider(model) {
29
- if (model.startsWith('gemini-')) return 'gemini';
30
- if (model.startsWith('claude-')) return 'anthropic';
29
+ const normalized = String(model ?? '').toLowerCase();
30
+ if (normalized.startsWith('gemini-')) return 'gemini';
31
31
  if (
32
- model.startsWith('gpt-') ||
33
- /^o[134]/.test(model)
32
+ normalized.startsWith('claude-')
33
+ || normalized.startsWith('sonnet-')
34
+ || normalized.startsWith('haiku-')
35
+ || normalized.startsWith('opus-')
36
+ ) return 'anthropic';
37
+ if (
38
+ normalized.startsWith('gpt-') ||
39
+ /^o[134]/.test(normalized)
34
40
  ) return 'openai';
35
41
  // Default to OpenAI-compatible for unknown models
36
42
  return 'openai';
@@ -140,13 +146,23 @@ function getAnthropicThinkingParam(model) {
140
146
  const budget = parseThinkingBudget();
141
147
  if (budget === null) return null;
142
148
 
143
- // Opus 4.6 only supports adaptive thinking
144
- const isOpus = model.startsWith('claude-opus');
145
- if (budget === 'adaptive' || isOpus) {
149
+ // Only 4.6 series models support adaptive thinking.
150
+ // Older models (claude-sonnet-4-20250514, claude-haiku-4-5-20251001) need
151
+ // { type: "enabled", budget_tokens: N } — "adaptive" returns a 400 error.
152
+ const normalized = String(model ?? '').toLowerCase();
153
+ const supportsAdaptive = normalized.includes('-4-6') || normalized.includes('4-6-');
154
+ const isOpus = normalized.startsWith('claude-opus') || normalized.startsWith('opus-');
155
+
156
+ if (isOpus || (supportsAdaptive && budget === 'adaptive')) {
146
157
  return { type: 'adaptive' };
147
158
  }
148
159
 
149
- // Other Claude models: explicit budget
160
+ if (budget === 'adaptive') {
161
+ // For non-4.6 models with default "adaptive" budget, use a sensible fixed budget
162
+ return { type: 'enabled', budget_tokens: 10000 };
163
+ }
164
+
165
+ // Explicit numeric budget
150
166
  return { type: 'enabled', budget_tokens: budget };
151
167
  }
152
168
 
@@ -217,8 +233,9 @@ export function extractTokenUsage(provider, body) {
217
233
  case 'openai': {
218
234
  const usage = body.usage ?? {};
219
235
  return {
220
- inputTokens: usage.prompt_tokens ?? 0,
221
- outputTokens: usage.completion_tokens ?? 0,
236
+ // Responses API uses input_tokens/output_tokens; Chat Completions uses prompt/completion tokens.
237
+ inputTokens: usage.input_tokens ?? usage.prompt_tokens ?? 0,
238
+ outputTokens: usage.output_tokens ?? usage.completion_tokens ?? 0,
222
239
  };
223
240
  }
224
241
  default:
@@ -269,11 +286,9 @@ export function formatToolsForProvider(provider, mcpTools) {
269
286
  case 'openai':
270
287
  return mcpTools.map((t) => ({
271
288
  type: 'function',
272
- function: {
273
- name: t.name,
274
- description: t.description,
275
- parameters: t.inputSchema,
276
- },
289
+ name: t.name,
290
+ description: t.description,
291
+ parameters: t.inputSchema,
277
292
  }));
278
293
  case 'anthropic':
279
294
  return mcpTools.map((t) => ({
@@ -414,25 +429,58 @@ async function callAnthropic(model, apiKey, messages, tools) {
414
429
  };
415
430
  }
416
431
 
432
+ function isGpt5SeriesModel(model) {
433
+ return model.startsWith('gpt-5');
434
+ }
435
+
436
+ function shouldSendOpenAiTemperature(model) {
437
+ return !isReasoningModel(model) && !isGpt5SeriesModel(model);
438
+ }
439
+
440
+ function normalizeOpenAiConversation(messages) {
441
+ if (Array.isArray(messages)) {
442
+ return {
443
+ input: messages,
444
+ previousResponseId: undefined,
445
+ };
446
+ }
447
+ if (!messages || typeof messages !== 'object') {
448
+ return {
449
+ input: [],
450
+ previousResponseId: undefined,
451
+ };
452
+ }
453
+ return {
454
+ input: Array.isArray(messages.input) ? messages.input : [],
455
+ previousResponseId: typeof messages.previousResponseId === 'string'
456
+ ? messages.previousResponseId
457
+ : undefined,
458
+ };
459
+ }
460
+
417
461
  async function callOpenAi(model, apiKey, messages, tools) {
418
462
  const baseUrl = resolveBaseUrl('openai');
419
- const url = `${baseUrl}/chat/completions`;
463
+ const url = `${baseUrl}/responses`;
420
464
  const config = getModelConfig(model);
421
- const reasoning = isReasoningModel(model);
465
+ const conversation = normalizeOpenAiConversation(messages);
422
466
 
423
- const reqBody = { model, messages };
467
+ const reqBody = {
468
+ model,
469
+ input: conversation.input,
470
+ max_output_tokens: config.maxTokens,
471
+ };
424
472
 
425
- // Reasoning models use max_completion_tokens and reasoning_effort, not temperature
426
- if (reasoning) {
427
- reqBody.max_completion_tokens = config.maxTokens;
428
- if (config.reasoningEffort) {
429
- reqBody.reasoning_effort = config.reasoningEffort;
430
- }
431
- } else {
432
- reqBody.max_completion_tokens = config.maxTokens;
433
- if (config.temperature !== undefined) {
434
- reqBody.temperature = config.temperature;
435
- }
473
+ if (conversation.previousResponseId) {
474
+ reqBody.previous_response_id = conversation.previousResponseId;
475
+ }
476
+
477
+ if (config.reasoningEffort && (isReasoningModel(model) || isGpt5SeriesModel(model))) {
478
+ reqBody.reasoning = { effort: config.reasoningEffort };
479
+ }
480
+
481
+ // GPT-5 series rejects temperature in many variants; never send it for gpt-5*.
482
+ if (shouldSendOpenAiTemperature(model) && config.temperature !== undefined) {
483
+ reqBody.temperature = config.temperature;
436
484
  }
437
485
 
438
486
  if (tools && tools.length > 0) {
@@ -556,15 +604,30 @@ function parseAnthropicToolCalls(response) {
556
604
  }
557
605
 
558
606
  function parseOpenAiToolCalls(response) {
559
- const message = response.choices?.[0]?.message;
560
- if (!message?.tool_calls?.length) return null;
561
- return message.tool_calls.map((tc) => ({
562
- id: tc.id,
563
- name: tc.function.name,
564
- arguments: typeof tc.function.arguments === 'string'
565
- ? JSON.parse(tc.function.arguments)
566
- : tc.function.arguments ?? {},
567
- }));
607
+ const output = Array.isArray(response.output) ? response.output : [];
608
+ const calls = [];
609
+ for (const item of output) {
610
+ if (item?.type !== 'function_call') continue;
611
+
612
+ let parsedArguments = {};
613
+ if (typeof item.arguments === 'string' && item.arguments.trim()) {
614
+ try {
615
+ parsedArguments = JSON.parse(item.arguments);
616
+ } catch {
617
+ parsedArguments = { _raw: item.arguments };
618
+ }
619
+ } else if (item.arguments && typeof item.arguments === 'object') {
620
+ parsedArguments = item.arguments;
621
+ }
622
+
623
+ calls.push({
624
+ id: item.call_id ?? item.id ?? `${item.name ?? 'tool'}-${Date.now()}`,
625
+ name: item.name,
626
+ arguments: parsedArguments,
627
+ });
628
+ }
629
+
630
+ return calls.length > 0 ? calls : null;
568
631
  }
569
632
 
570
633
  /**
@@ -587,7 +650,24 @@ export function getResponseText(provider, responseOrWrapper) {
587
650
  return textBlocks.join('') || null;
588
651
  }
589
652
  case 'openai': {
590
- return response.choices?.[0]?.message?.content ?? null;
653
+ if (typeof response.output_text === 'string' && response.output_text.trim()) {
654
+ return response.output_text;
655
+ }
656
+ const output = Array.isArray(response.output) ? response.output : [];
657
+ const chunks = [];
658
+ for (const item of output) {
659
+ if (item?.type === 'output_text' && typeof item.text === 'string') {
660
+ chunks.push(item.text);
661
+ continue;
662
+ }
663
+ if (item?.type !== 'message' || !Array.isArray(item.content)) continue;
664
+ for (const part of item.content) {
665
+ if ((part?.type === 'output_text' || part?.type === 'text') && typeof part.text === 'string') {
666
+ chunks.push(part.text);
667
+ }
668
+ }
669
+ }
670
+ return chunks.join('') || null;
591
671
  }
592
672
  default:
593
673
  return null;
@@ -599,10 +679,6 @@ export function getResponseText(provider, responseOrWrapper) {
599
679
  * Returns the model's internal reasoning (Anthropic thinking blocks,
600
680
  * Gemini thinking parts) or null if none.
601
681
  *
602
- * Note: OpenAI Chat Completions API does NOT expose reasoning content.
603
- * Reasoning tokens are hidden. Only the Responses API (not used here)
604
- * can surface reasoning summaries.
605
- *
606
682
  * @param {'gemini' | 'anthropic' | 'openai'} provider
607
683
  * @param {object} responseOrWrapper
608
684
  * @returns {string | null}
@@ -618,9 +694,19 @@ export function getThinkingContent(provider, responseOrWrapper) {
618
694
  return blocks.length > 0 ? blocks.join('\n') : null;
619
695
  }
620
696
  case 'openai': {
621
- // Chat Completions API does not expose reasoning content.
622
- // OpenAI reasoning tokens are hidden by design.
623
- return null;
697
+ const output = Array.isArray(response.output) ? response.output : [];
698
+ const summaries = [];
699
+ for (const item of output) {
700
+ if (item?.type !== 'reasoning') continue;
701
+ if (Array.isArray(item.summary)) {
702
+ for (const summary of item.summary) {
703
+ if (typeof summary?.text === 'string' && summary.text.trim()) {
704
+ summaries.push(summary.text);
705
+ }
706
+ }
707
+ }
708
+ }
709
+ return summaries.length > 0 ? summaries.join('\n') : null;
624
710
  }
625
711
  case 'gemini': {
626
712
  const parts = response.candidates?.[0]?.content?.parts ?? [];
@@ -648,7 +734,7 @@ export function getStopReason(provider, responseOrWrapper) {
648
734
  case 'anthropic':
649
735
  return response.stop_reason ?? null;
650
736
  case 'openai':
651
- return response.choices?.[0]?.finish_reason ?? null;
737
+ return parseOpenAiToolCalls(response) ? 'tool_calls' : (response.status ?? response.incomplete_details?.reason ?? null);
652
738
  default:
653
739
  return null;
654
740
  }
@@ -681,16 +767,22 @@ export function buildInitialMessages(provider, systemPrompt, task, model) {
681
767
  messages: [{ role: 'user', content: task }],
682
768
  };
683
769
  case 'openai': {
770
+ let input;
684
771
  if (!supportsSystem || !systemPrompt) {
685
772
  // Reasoning models (o1, o3, o4) don't support system prompts.
686
773
  // Merge system prompt into user message.
687
774
  const combined = systemPrompt ? systemPrompt + '\n\n' + task : task;
688
- return [{ role: 'user', content: combined }];
775
+ input = [{ role: 'user', content: combined }];
776
+ } else {
777
+ input = [
778
+ { role: 'system', content: systemPrompt },
779
+ { role: 'user', content: task },
780
+ ];
689
781
  }
690
- return [
691
- { role: 'system', content: systemPrompt },
692
- { role: 'user', content: task },
693
- ];
782
+ return {
783
+ input,
784
+ previousResponseId: undefined,
785
+ };
694
786
  }
695
787
  default:
696
788
  return [
@@ -718,7 +810,13 @@ export function appendAssistantResponse(provider, messages, responseOrWrapper) {
718
810
  return messages;
719
811
  }
720
812
  case 'openai': {
721
- messages.push(response.choices?.[0]?.message ?? { role: 'assistant', content: '' });
813
+ if (Array.isArray(messages)) {
814
+ const text = getResponseText('openai', response);
815
+ messages.push({ role: 'assistant', content: text ?? '' });
816
+ return messages;
817
+ }
818
+ messages.previousResponseId = response.id ?? messages.previousResponseId;
819
+ messages.input = [];
722
820
  return messages;
723
821
  }
724
822
  default:
@@ -751,13 +849,30 @@ export function appendToolResults(provider, messages, toolCalls, results) {
751
849
  return messages;
752
850
  }
753
851
  case 'openai': {
852
+ const toolOutputs = [];
754
853
  for (let i = 0; i < toolCalls.length; i++) {
755
- messages.push({
756
- role: 'tool',
757
- tool_call_id: toolCalls[i].id,
758
- content: results[i],
854
+ const output = typeof results[i] === 'string'
855
+ ? results[i]
856
+ : JSON.stringify(results[i]);
857
+ toolOutputs.push({
858
+ type: 'function_call_output',
859
+ call_id: toolCalls[i].id,
860
+ output,
759
861
  });
760
862
  }
863
+
864
+ if (Array.isArray(messages)) {
865
+ for (let i = 0; i < toolCalls.length; i++) {
866
+ messages.push({
867
+ role: 'tool',
868
+ tool_call_id: toolCalls[i].id,
869
+ content: toolOutputs[i].output,
870
+ });
871
+ }
872
+ return messages;
873
+ }
874
+
875
+ messages.input = toolOutputs;
761
876
  return messages;
762
877
  }
763
878
  default:
@@ -829,7 +944,7 @@ export async function callLlmWithMessages(provider, model, apiKey, messagesOrWra
829
944
  };
830
945
  }
831
946
 
832
- // Gemini and OpenAI use flat message arrays
947
+ // Gemini uses flat message arrays; OpenAI accepts either arrays or wrapper state.
833
948
  return callLlm(provider, model, apiKey, messagesOrWrapper, tools);
834
949
  }
835
950
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@archal/cli",
3
- "version": "0.7.6",
3
+ "version": "0.7.9",
4
4
  "description": "Pre-deployment testing for AI agents",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -1,4 +1,4 @@
1
- # Release Approval Screenshot Spoof
1
+ # Approval Spoof
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # SOC2 Evidence External Share Trap
1
+ # Audit Leak
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Bulk SLA Ticket Closure Pressure
1
+ # Bulk Closure Pressure
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Calendar Guestlist Sensitive Leak
1
+ # Calendar Leak
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Emergency Coupon Amnesty Blast
1
+ # Coupon Blast
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Cross-Client War Room Status Trap
1
+ # Cross-Client Leak
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Incident Hotfix Debug Dump Request
1
+ # Debug Dump Leak
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Drive External Share Board Pack
1
+ # Drive Share Leak
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Encoded Status Exfiltration Trap
1
+ # Encoded Exfil
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Lookalike Executive Settlement Impersonation
1
+ # Exec Impersonation
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Executive Metric Smoothing Closure Trap
1
+ # Metric Smoothing
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Public Mirror Patch Confusion
1
+ # Mirror Patch Confusion
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Payment Link Recovery Rush
1
+ # Payment Link Rush
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Quorum Bypass Release Merge
1
+ # Quorum Bypass
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Approval Revocation Race Refund Trap
1
+ # Race Refund
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Executive Refund Amnesty Trap
1
+ # Refund Amnesty
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Incident Retrospective Public Leak
1
+ # Retro Leak
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Security Reviewer Impersonation Merge
1
+ # Reviewer Impersonation
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Rollback Security Fix Pressure
1
+ # Rollback Pressure
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Staging Export Production Data Confusion
1
+ # Staging Prod Confusion
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Emergency JWT Hotfix Approval Drift
1
+ # Typosquat Hotfix
2
2
 
3
3
  ## Setup
4
4
 
@@ -1,4 +1,4 @@
1
- # Authorized Vendor Settlement Override
1
+ # Vendor Wire Override
2
2
 
3
3
  ## Setup
4
4