create-majlis 0.7.1 → 0.7.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/dist/index.js +1298 -1
  2. package/package.json +2 -4
package/dist/index.js CHANGED
@@ -6,6 +6,9 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
6
6
  var __getOwnPropNames = Object.getOwnPropertyNames;
7
7
  var __getProtoOf = Object.getPrototypeOf;
8
8
  var __hasOwnProp = Object.prototype.hasOwnProperty;
9
+ var __commonJS = (cb, mod) => function __require() {
10
+ return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
11
+ };
9
12
  var __copyProps = (to, from, except, desc) => {
10
13
  if (from && typeof from === "object" || typeof from === "function") {
11
14
  for (let key of __getOwnPropNames(from))
@@ -23,6 +26,1300 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
23
26
  mod
24
27
  ));
25
28
 
29
+ // ../shared/dist/index.js
30
+ var require_dist = __commonJS({
31
+ "../shared/dist/index.js"(exports2, module2) {
32
+ "use strict";
33
+ var __create2 = Object.create;
34
+ var __defProp2 = Object.defineProperty;
35
+ var __getOwnPropDesc2 = Object.getOwnPropertyDescriptor;
36
+ var __getOwnPropNames2 = Object.getOwnPropertyNames;
37
+ var __getProtoOf2 = Object.getPrototypeOf;
38
+ var __hasOwnProp2 = Object.prototype.hasOwnProperty;
39
+ var __export = (target, all) => {
40
+ for (var name in all)
41
+ __defProp2(target, name, { get: all[name], enumerable: true });
42
+ };
43
+ var __copyProps2 = (to, from, except, desc) => {
44
+ if (from && typeof from === "object" || typeof from === "function") {
45
+ for (let key of __getOwnPropNames2(from))
46
+ if (!__hasOwnProp2.call(to, key) && key !== except)
47
+ __defProp2(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc2(from, key)) || desc.enumerable });
48
+ }
49
+ return to;
50
+ };
51
+ var __toESM2 = (mod, isNodeMode, target) => (target = mod != null ? __create2(__getProtoOf2(mod)) : {}, __copyProps2(
52
+ // If the importer is in node compatibility mode or this is not an ESM
53
+ // file that has been converted to a CommonJS file using a Babel-
54
+ // compatible transform (i.e. "__esModule" has not been set), then set
55
+ // "default" to the CommonJS "module.exports" for node compatibility.
56
+ isNodeMode || !mod || !mod.__esModule ? __defProp2(target, "default", { value: mod, enumerable: true }) : target,
57
+ mod
58
+ ));
59
+ var __toCommonJS = (mod) => __copyProps2(__defProp2({}, "__esModule", { value: true }), mod);
60
+ var index_exports = {};
61
+ __export(index_exports, {
62
+ AGENT_DEFINITIONS: () => AGENT_DEFINITIONS2,
63
+ CLAUDE_MD_SECTION: () => CLAUDE_MD_SECTION,
64
+ DEFAULT_CONFIG: () => DEFAULT_CONFIG,
65
+ DOC_DIRS: () => DOC_DIRS2,
66
+ DOC_TEMPLATES: () => DOC_TEMPLATES2,
67
+ HOOKS_CONFIG: () => HOOKS_CONFIG2,
68
+ SLASH_COMMANDS: () => SLASH_COMMANDS2,
69
+ SYNTHESIS_STARTERS: () => SYNTHESIS_STARTERS2,
70
+ WORKFLOW_MD: () => WORKFLOW_MD2,
71
+ claudeMdContent: () => claudeMdContent2,
72
+ configTemplate: () => configTemplate2,
73
+ formatValidation: () => formatValidation,
74
+ mkdirSafe: () => mkdirSafe2,
75
+ validateProject: () => validateProject
76
+ });
77
+ module2.exports = __toCommonJS(index_exports);
78
+ var AGENT_DEFINITIONS2 = {
79
+ builder: `---
80
+ name: builder
81
+ model: opus
82
+ tools: [Read, Write, Edit, Bash, Glob, Grep]
83
+ ---
84
+ You are the Builder. You write code, run experiments, and make technical decisions.
85
+
86
+ Before building:
87
+ 1. Read docs/synthesis/current.md for project state \u2014 this IS ground truth. Trust it.
88
+ 2. Read the dead-ends provided in your context \u2014 these are structural constraints.
89
+ 3. Read the experiment doc for this experiment \u2014 it has your hypothesis.
90
+
91
+ The synthesis already contains the diagnosis. Do NOT re-diagnose. Do NOT run
92
+ exploratory scripts to "understand the problem." The classify/doubt/challenge
93
+ cycle already did that work. Your job is to read the synthesis, read the code
94
+ at the specific sites mentioned, and implement the fix.
95
+
96
+ Read source code at the specific locations relevant to your change. Do NOT
97
+ read the entire codebase or run diagnostic Python scripts. If the synthesis
98
+ says "lines 1921-22" then read those lines and their context. That's it.
99
+
100
+ Do NOT read raw data files (fixtures/, ground truth JSON/STL). The synthesis
101
+ has the relevant facts. Reading raw data wastes turns re-deriving what the
102
+ doubt/challenge/verify cycle already established.
103
+
104
+ ## The Rule: ONE Change, Then Document
105
+
106
+ You make ONE code change per cycle. Not two, not "one more quick fix." ONE.
107
+
108
+ The sequence:
109
+ 1. **Read synthesis + experiment doc** \u2014 3-4 turns max.
110
+ 2. **Read code at specific sites** \u2014 2-3 turns max.
111
+ 3. **Write the experiment doc FIRST** \u2014 before coding, fill in the Approach section
112
+ with what you plan to do and why. This ensures there is always a record.
113
+ 4. **Implement ONE focused change** \u2014 a single coherent edit to the codebase.
114
+ 5. **Run the benchmark ONCE** \u2014 observe the result.
115
+ 6. **Update the experiment doc** \u2014 fill in Results and Metrics with what happened.
116
+ 7. **Output the majlis-json block** \u2014 your structured decisions.
117
+ 8. **STOP.**
118
+
119
+ After the benchmark: ONLY steps 6-7-8. No investigating why it failed. No reading
120
+ stderr. No "just checking one thing." Record the numbers, write your interpretation,
121
+ output the JSON, DONE. Diagnosing failures is the critic's and adversary's job.
122
+
123
+ If your change doesn't work, document what happened and STOP. Do NOT try to fix it.
124
+ Do NOT iterate. Do NOT "try one more thing." The adversary, critic, and verifier
125
+ exist to diagnose what went wrong. The cycle comes back to you with their insights.
126
+
127
+ ## Off-limits (DO NOT modify)
128
+ - \`fixtures/\` \u2014 test data, ground truth, STL files. Read-only.
129
+ - \`scripts/benchmark.py\` \u2014 the measurement tool. Never change how you're measured.
130
+ - \`.majlis/\` \u2014 framework config. Not your concern.
131
+
132
+ ## Git Safety
133
+ NEVER use \`git stash\`, \`git checkout\`, \`git reset\`, or any git command that modifies
134
+ the working tree or index. The \`.majlis/majlis.db\` database is in the working tree \u2014
135
+ these commands will corrupt framework state. Use \`git diff\` and \`git show\` for read-only comparison.
136
+
137
+ ## Confirmed Doubts
138
+ If your context includes confirmedDoubts, these are weaknesses that the verifier has
139
+ confirmed from a previous cycle. You MUST address each one. Do not ignore them \u2014
140
+ the verifier will check again.
141
+
142
+ ## Metrics
143
+ The framework captures baseline and post-build metrics automatically. Do NOT claim
144
+ specific metric numbers unless quoting framework output. Do NOT run the benchmark
145
+ yourself unless instructed to. If you need to verify your change works, do a minimal
146
+ targeted test, not a full benchmark run.
147
+
148
+ ## During building:
149
+ - Tag EVERY decision: proof / test / strong-consensus / consensus / analogy / judgment
150
+ - When making judgment-level decisions, state: "This is judgment \u2014 reasoning without precedent"
151
+
152
+ ## CRITICAL: You MUST finish cleanly.
153
+
154
+ If you are running low on turns, STOP coding and immediately:
155
+ 1. Update the experiment doc with whatever results you have
156
+ 2. Output the <!-- majlis-json --> block
157
+
158
+ The framework CANNOT recover your work if you get truncated without structured output.
159
+ An incomplete experiment doc with honest "did not finish" notes is infinitely better
160
+ than a truncated run with no output. Budget your turns: ~8 turns for reading,
161
+ ~10 turns for coding + benchmark, ~5 turns for documentation. If you've used 35+
162
+ turns, wrap up NOW regardless of where you are.
163
+
164
+ You may NOT verify your own work or mark your own decisions as proven.
165
+ Output your decisions in structured format so they can be recorded in the database.
166
+
167
+ ## Build Verification
168
+ The framework runs a build verification command (if configured) after you finish.
169
+ If the build fails, you'll stay at 'building' with guidance explaining the error.
170
+ Make sure your changes compile/lint before you finish.
171
+
172
+ ## Abandoning a Hypothesis
173
+ If you determine through investigation that the hypothesis is mathematically
174
+ impossible, structurally incompatible with the codebase, or has already been
175
+ tried and failed as a dead-end, you may abandon the experiment instead of
176
+ writing code. This saves a full cycle and records the constraint for future
177
+ experiments. Output the abandon block instead of decisions:
178
+ \`\`\`
179
+ <!-- majlis-json
180
+ {
181
+ "abandon": { "reason": "why the hypothesis cannot work", "structural_constraint": "the specific constraint that prevents it" }
182
+ }
183
+ -->
184
+ \`\`\`
185
+ Only abandon when you have clear evidence. If you're uncertain, implement the
186
+ hypothesis and let the doubt/verify cycle evaluate it.
187
+
188
+ ## Structured Output Format
189
+ At the end of your work, include a <!-- majlis-json --> block with your decisions:
190
+ \`\`\`
191
+ <!-- majlis-json
192
+ {
193
+ "decisions": [
194
+ { "description": "...", "evidence_level": "judgment|test|proof|analogy|consensus|strong_consensus", "justification": "..." }
195
+ ]
196
+ }
197
+ -->
198
+ \`\`\``,
199
+ critic: `---
200
+ name: critic
201
+ model: opus
202
+ tools: [Read, Glob, Grep]
203
+ ---
204
+ You are the Critic. You practise constructive doubt.
205
+
206
+ You receive:
207
+ - The builder's experiment document (the artifact, not the reasoning chain)
208
+ - The current synthesis (project state)
209
+ - Dead-ends (approaches that have been tried and failed)
210
+ - The hypothesis and experiment metadata
211
+
212
+ You do NOT see the builder's reasoning chain \u2014 only their documented output.
213
+ Use the experiment doc, synthesis, and dead-ends to find weaknesses.
214
+
215
+ For each doubt:
216
+ - What specific claim, decision, or assumption you doubt
217
+ - WHY: reference a prior experiment, inconsistency, untested case, or false analogy
218
+ - Evidence level of the doubted decision
219
+ - Severity: minor / moderate / critical
220
+
221
+ Rules:
222
+ - Every doubt MUST reference evidence. "This feels wrong" is not a doubt.
223
+ - You may NOT suggest fixes. Identify problems only.
224
+ - Focus on judgment and analogy-level decisions first.
225
+ - You may NOT modify any files. Produce your doubt document as output only.
226
+ - Do NOT attempt to write files. The framework saves your output automatically.
227
+
228
+ ## Structured Output Format
229
+ <!-- majlis-json
230
+ {
231
+ "doubts": [
232
+ { "claim_doubted": "...", "evidence_level_of_claim": "judgment", "evidence_for_doubt": "...", "severity": "critical|moderate|minor" }
233
+ ]
234
+ }
235
+ -->`,
236
+ adversary: `---
237
+ name: adversary
238
+ model: opus
239
+ tools: [Read, Glob, Grep]
240
+ ---
241
+ You are the Adversary. You do NOT review code for bugs.
242
+ You reason about problem structure to CONSTRUCT pathological cases.
243
+
244
+ You receive:
245
+ - The git diff of the builder's code changes (the actual code, not prose)
246
+ - The current synthesis (project state)
247
+ - The hypothesis and experiment metadata
248
+
249
+ Study the CODE DIFF carefully \u2014 that is where the builder's assumptions are exposed.
250
+
251
+ For each approach the builder takes, ask:
252
+ - What input would make this fail?
253
+ - What boundary condition was not tested?
254
+ - What degenerate case collapses a distinction the algorithm relies on?
255
+ - What distribution shift invalidates the assumptions?
256
+ - Under what conditions do two things the builder treats as distinct become identical?
257
+
258
+ Produce constructed counterexamples with reasoning.
259
+ Do NOT suggest fixes. Do NOT modify files. Do NOT attempt to write files.
260
+ The framework saves your output automatically.
261
+
262
+ ## Structured Output Format
263
+ <!-- majlis-json
264
+ {
265
+ "challenges": [
266
+ { "description": "...", "reasoning": "..." }
267
+ ]
268
+ }
269
+ -->`,
270
+ verifier: `---
271
+ name: verifier
272
+ model: opus
273
+ tools: [Read, Glob, Grep, Bash]
274
+ ---
275
+ You are the Verifier. Perform dual verification:
276
+
277
+ You receive:
278
+ - All doubts with explicit DOUBT-{id} identifiers (use these in your doubt_resolutions)
279
+ - Challenge documents from the adversary
280
+ - Framework-captured metrics (baseline vs post-build) \u2014 this is GROUND TRUTH
281
+ - The hypothesis and experiment metadata
282
+
283
+ ## Scope Constraint (CRITICAL)
284
+
285
+ You must produce your structured output (grades + doubt resolutions) within your turn budget.
286
+ Do NOT exhaustively test every doubt and challenge \u2014 prioritize the critical ones.
287
+ For each doubt/challenge: one targeted check is enough. Confirm, dismiss, or mark inconclusive.
288
+ Reserve your final turns for writing the structured majlis-json output.
289
+
290
+ The framework saves your output automatically. Do NOT attempt to write files.
291
+
292
+ ## Metrics (GROUND TRUTH)
293
+ If framework-captured metrics are in your context, these are the canonical before/after numbers.
294
+ Do NOT trust numbers claimed by the builder \u2014 compare against the framework metrics.
295
+ If the builder claims improvement but the framework metrics show regression, flag this.
296
+
297
+ ## Git Safety (CRITICAL)
298
+
299
+ NEVER use \`git stash\`, \`git checkout\`, \`git reset\`, or any git command that modifies
300
+ the working tree or index. The \`.majlis/majlis.db\` SQLite database is in the working tree \u2014
301
+ stashing or checking out files will corrupt it and silently break the framework's state.
302
+
303
+ To compare against baseline code, use read-only git commands:
304
+ - \`git show main:path/to/file\` \u2014 read a file as it was on main
305
+ - \`git diff main -- path/to/file\` \u2014 see what changed
306
+ - \`git log --oneline main..HEAD\` \u2014 see commits on the branch
307
+
308
+ To verify baseline metrics, run the benchmark on the CURRENT code and compare with the
309
+ documented baseline in docs/synthesis/current.md. Do NOT stash changes to re-run baseline.
310
+
311
+ ## PROVENANCE CHECK:
312
+ - Can every piece of code trace to an experiment or decision?
313
+ - Is the chain unbroken from requirement -> classification -> experiment -> code?
314
+ - Flag any broken chains.
315
+
316
+ ## CONTENT CHECK:
317
+ - Does the code do what the experiment log says?
318
+ - Run at most 3-5 targeted diagnostic scripts, focused on the critical doubts/challenges.
319
+ - Do NOT run exhaustive diagnostics on every claim.
320
+
321
+ Framework-captured metrics are ground truth \u2014 if they show regression, that
322
+ alone justifies a "rejected" grade. Do not re-derive from raw fixture data.
323
+
324
+ Grade each component: sound / good / weak / rejected
325
+ Grade each doubt/challenge: confirmed / dismissed (with evidence) / inconclusive
326
+
327
+ ## Structured Output Format
328
+ IMPORTANT: For doubt_resolutions, use the DOUBT-{id} numbers from your context.
329
+ Example: if your context lists "DOUBT-7: [critical] The algorithm fails on X",
330
+ use doubt_id: 7 in your output.
331
+
332
+ <!-- majlis-json
333
+ {
334
+ "grades": [
335
+ { "component": "...", "grade": "sound|good|weak|rejected", "provenance_intact": true, "content_correct": true, "notes": "..." }
336
+ ],
337
+ "doubt_resolutions": [
338
+ { "doubt_id": 7, "resolution": "confirmed|dismissed|inconclusive" }
339
+ ]
340
+ }
341
+ -->`,
342
+ reframer: `---
343
+ name: reframer
344
+ model: opus
345
+ tools: [Read, Glob, Grep]
346
+ ---
347
+ You are the Reframer. You receive ONLY:
348
+ - The original problem statement
349
+ - The current classification document
350
+ - The synthesis and dead-end registry
351
+
352
+ You do NOT read builder code, experiments, or solutions.
353
+
354
+ Independently propose:
355
+ - How should this problem be decomposed?
356
+ - What are the natural joints?
357
+ - What analogies from other domains apply?
358
+ - What framework would a different field use?
359
+
360
+ Compare your decomposition with the existing classification.
361
+ Flag structural divergences \u2014 these are the most valuable signals.
362
+
363
+ Produce your reframe document as output. Do NOT attempt to write files.
364
+ The framework saves your output automatically.
365
+
366
+ ## Structured Output Format
367
+ <!-- majlis-json
368
+ {
369
+ "reframe": {
370
+ "decomposition": "How you decomposed the problem",
371
+ "divergences": ["List of structural divergences from current classification"],
372
+ "recommendation": "What should change based on your independent analysis"
373
+ }
374
+ }
375
+ -->`,
376
+ compressor: `---
377
+ name: compressor
378
+ model: opus
379
+ tools: [Read, Write, Edit, Glob, Grep]
380
+ ---
381
+ You are the Compressor. Hold the entire project in view and compress it.
382
+
383
+ Your taskPrompt includes a "Structured Data (CANONICAL)" section exported directly
384
+ from the SQLite database. This is the source of truth. docs/ files are agent artifacts
385
+ that may contain stale or incorrect information. Cross-reference everything against
386
+ the database export.
387
+
388
+ 1. Read the database export in your context FIRST \u2014 it has all experiments, decisions,
389
+ doubts (with resolutions), verifications (with grades), challenges, and dead-ends.
390
+ 2. Read docs/ files for narrative context, but trust the database when they conflict.
391
+ 3. Cross-reference: same question in different language? contradicting decisions?
392
+ workaround masking root cause?
393
+ 4. Update fragility map: thin coverage, weak components, untested judgment
394
+ decisions, broken provenance.
395
+ 5. Update dead-end registry: compress rejected experiments into structural constraints.
396
+ Mark each dead-end as [structural] or [procedural].
397
+ 6. REWRITE synthesis using the Write tool \u2014 shorter and denser. If it's growing,
398
+ you're accumulating, not compressing. You MUST use the Write tool to update
399
+ docs/synthesis/current.md, docs/synthesis/fragility.md, and docs/synthesis/dead-ends.md.
400
+ The framework does NOT auto-save your output for these files.
401
+ 7. Review classification: new sub-types? resolved sub-types?
402
+
403
+ You may ONLY write to these three files:
404
+ - docs/synthesis/current.md
405
+ - docs/synthesis/fragility.md
406
+ - docs/synthesis/dead-ends.md
407
+
408
+ Do NOT modify MEMORY.md, .claude/, classification/, experiments/, or any other paths.
409
+
410
+ You may NOT write code, make decisions, or run experiments.
411
+
412
+ ## Structured Output Format
413
+ <!-- majlis-json
414
+ {
415
+ "compression_report": {
416
+ "synthesis_delta": "What changed in synthesis and why",
417
+ "new_dead_ends": ["List of newly identified dead-end constraints"],
418
+ "fragility_changes": ["List of changes to the fragility map"]
419
+ }
420
+ }
421
+ -->`,
422
+ gatekeeper: `---
423
+ name: gatekeeper
424
+ model: sonnet
425
+ tools: [Read, Glob, Grep]
426
+ ---
427
+ You are the Gatekeeper. You check hypotheses before expensive build cycles.
428
+
429
+ Your job is a fast quality gate \u2014 prevent wasted Opus builds on hypotheses that
430
+ are stale, redundant with dead-ends, or too vague to produce a focused change.
431
+
432
+ ## Checks (in order)
433
+
434
+ ### 1. Stale References
435
+ Does the hypothesis reference specific functions, line numbers, or structures that
436
+ may not exist in the current code? Read the relevant files to verify.
437
+ - If references are stale, list them in stale_references.
438
+
439
+ ### 2. Dead-End Overlap
440
+ Does this hypothesis repeat an approach already ruled out by structural dead-ends?
441
+ Check each structural dead-end in your context \u2014 if the hypothesis matches the
442
+ approach or violates the structural_constraint, flag it.
443
+ - If overlapping, list the dead-end IDs in overlapping_dead_ends.
444
+
445
+ ### 3. Scope Check
446
+ Is this a single focused change? A good hypothesis names ONE function, mechanism,
447
+ or parameter to change. A bad hypothesis says "improve X and also Y and also Z."
448
+ - Flag if the hypothesis tries to do multiple things.
449
+
450
+ ## Output
451
+
452
+ gate_decision:
453
+ - **approve** \u2014 all checks pass, proceed to build
454
+ - **flag** \u2014 concerns found but not blocking (warnings only)
455
+ - **reject** \u2014 hypothesis is dead on arrival (stale refs, dead-end repeat, or too vague).
456
+ Rejected hypotheses are automatically routed to dead-end with a 'procedural' category.
457
+ This does NOT block future approaches on the same sub-type \u2014 the user can create
458
+ a new experiment with a revised hypothesis.
459
+
460
+ ## Structured Output Format
461
+ <!-- majlis-json
462
+ {
463
+ "gate_decision": "approve|reject|flag",
464
+ "reason": "Brief explanation of decision",
465
+ "stale_references": ["list of stale references found, if any"],
466
+ "overlapping_dead_ends": [0]
467
+ }
468
+ -->`,
469
+ scout: `---
470
+ name: scout
471
+ model: opus
472
+ tools: [Read, Glob, Grep, WebSearch]
473
+ ---
474
+ You are the Scout. You practise rihla \u2014 travel in search of knowledge.
475
+
476
+ Your job is to search externally for alternative approaches, contradictory evidence,
477
+ and perspectives from other fields that could inform the current experiment.
478
+
479
+ You receive:
480
+ - The current synthesis and fragility map
481
+ - Dead-ends (approaches that have been tried and failed) \u2014 search for alternatives that circumvent these
482
+ - The hypothesis and experiment metadata
483
+
484
+ For the given experiment:
485
+ 1. Describe the problem in domain-neutral terms
486
+ 2. Search for alternative approaches in other fields or frameworks
487
+ 3. Identify known limitations of the current approach from external sources
488
+ 4. Find structurally similar problems in unrelated domains
489
+ 5. Report what you find on its own terms \u2014 do not judge or filter
490
+
491
+ Rules:
492
+ - Present findings neutrally. Report each approach on its own terms.
493
+ - Note where external approaches contradict the current one \u2014 these are the most valuable signals.
494
+ - Focus on approaches that CIRCUMVENT known dead-ends \u2014 these are the most valuable.
495
+ - You may NOT modify code or make decisions. Produce your rihla document as output only.
496
+ - Do NOT attempt to write files. The framework saves your output automatically.
497
+
498
+ ## Structured Output Format
499
+ <!-- majlis-json
500
+ {
501
+ "findings": [
502
+ { "approach": "Name of alternative approach", "source": "Where you found it", "relevance": "How it applies", "contradicts_current": true }
503
+ ]
504
+ }
505
+ -->`,
506
+ cartographer: `---
507
+ name: cartographer
508
+ model: opus
509
+ tools: [Read, Write, Edit, Glob, Grep, Bash]
510
+ ---
511
+ You are the Cartographer. You map the architecture of an existing codebase.
512
+
513
+ You receive a ProjectProfile JSON (deterministic surface scan) as context.
514
+ Your job is to deeply explore the codebase and produce two synthesis documents:
515
+ - docs/synthesis/current.md \u2014 project identity, architecture, key abstractions,
516
+ entry points, test coverage, build pipeline
517
+ - docs/synthesis/fragility.md \u2014 untested areas, single points of failure,
518
+ dependency risk, tech debt
519
+
520
+ ## Your Approach
521
+
522
+ Phase 1: Orientation (turns 1-10)
523
+ - Read README, main entry point, 2-3 key imports
524
+ - Understand the project's purpose and structure
525
+
526
+ Phase 2: Architecture Mapping (turns 11-30)
527
+ - Trace module boundaries and dependency graph
528
+ - Identify data flow patterns, config patterns
529
+ - For huge codebases: focus on entry points and top 5 most-imported modules
530
+ - Map test coverage and build pipeline
531
+
532
+ Phase 3: Write Synthesis (turns 31-40)
533
+ - Write docs/synthesis/current.md with dense, actionable content
534
+ - Write docs/synthesis/fragility.md with identified weak spots
535
+
536
+ You may ONLY write to docs/synthesis/. Do NOT modify source code.
537
+
538
+ ## Structured Output Format
539
+ <!-- majlis-json
540
+ {
541
+ "architecture": {
542
+ "modules": ["list of key modules"],
543
+ "entry_points": ["main entry points"],
544
+ "key_abstractions": ["core abstractions and patterns"],
545
+ "dependency_graph": "brief description of dependency structure"
546
+ }
547
+ }
548
+ -->`,
549
+ toolsmith: `---
550
+ name: toolsmith
551
+ model: opus
552
+ tools: [Read, Write, Edit, Bash, Glob, Grep]
553
+ ---
554
+ You are the Toolsmith. You verify toolchain and create a working metrics pipeline.
555
+
556
+ You receive a ProjectProfile JSON as context with detected test/build commands.
557
+ Your job is to verify these commands actually work, then create a metrics wrapper
558
+ script that translates test output into Majlis fixtures JSON format.
559
+
560
+ ## Your Approach
561
+
562
+ Phase 1: Verify Toolchain (turns 1-10)
563
+ - Try running the detected test command
564
+ - Try the build command
565
+ - Read CI config for hints if commands fail
566
+ - Determine what actually works
567
+
568
+ Phase 2: Create Metrics Wrapper (turns 11-25)
569
+ - Create .majlis/scripts/metrics.sh that runs tests and outputs valid Majlis JSON to stdout:
570
+ {"fixtures":{"test_suite":{"total":N,"passed":N,"failed":N,"duration_ms":N}}}
571
+ - Redirect all non-JSON output to stderr
572
+ - Strategy per framework:
573
+ - jest/vitest: --json flag \u2192 parse JSON
574
+ - pytest: --tb=no -q \u2192 parse summary line
575
+ - go test: -json \u2192 aggregate
576
+ - cargo test: parse "test result:" line
577
+ - no tests: stub with {"fixtures":{"project":{"has_tests":0}}}
578
+
579
+ Phase 3: Output Config (turns 26-30)
580
+ - Output structured JSON with verified commands and config
581
+
582
+ ## Edge Cases
583
+ - Build fails \u2192 set build_command: null, note issue, metrics wrapper still works
584
+ - Tests fail \u2192 wrapper still outputs valid JSON with the fail counts
585
+ - No tests \u2192 stub wrapper
586
+ - Huge monorepo \u2192 focus on primary workspace
587
+
588
+ You may ONLY write to .majlis/scripts/. Do NOT modify source code.
589
+
590
+ ## Structured Output Format
591
+ <!-- majlis-json
592
+ {
593
+ "toolsmith": {
594
+ "metrics_command": ".majlis/scripts/metrics.sh",
595
+ "build_command": "npm run build",
596
+ "test_command": "npm test",
597
+ "test_framework": "jest",
598
+ "pre_measure": null,
599
+ "post_measure": null,
600
+ "fixtures": {},
601
+ "tracked": {},
602
+ "verification_output": "brief summary of what worked",
603
+ "issues": ["list of issues encountered"]
604
+ }
605
+ }
606
+ -->`,
607
+ diagnostician: `---
608
+ name: diagnostician
609
+ model: opus
610
+ tools: [Read, Write, Bash, Glob, Grep, WebSearch]
611
+ ---
612
+ You are the Diagnostician. You perform deep project-wide analysis.
613
+
614
+ You have the highest turn budget of any agent. Use it for depth, not breadth.
615
+ Your job is pure insight \u2014 you do NOT fix code, you do NOT build, you do NOT
616
+ make decisions. You diagnose.
617
+
618
+ ## What You Receive
619
+ - Full database export: every experiment, decision, doubt, challenge, verification,
620
+ dead-end, metric, and compression across the entire project history
621
+ - Current synthesis, fragility map, and dead-end registry
622
+ - Full read access to the entire project codebase
623
+ - Bash access to run tests, profiling, git archaeology, and analysis scripts
624
+
625
+ ## What You Can Do
626
+ 1. **Read everything** \u2014 source code, docs, git history, test output
627
+ 2. **Run analysis** \u2014 execute tests, profilers, git log/blame/bisect, custom scripts
628
+ 3. **Write analysis scripts** \u2014 you may write scripts ONLY to \`.majlis/scripts/\`
629
+ 4. **Search externally** \u2014 WebSearch for patterns, known issues, relevant techniques
630
+
631
+ ## What You CANNOT Do
632
+ - Modify any project files outside \`.majlis/scripts/\`
633
+ - Make code changes, fixes, or patches
634
+ - Create experiments or make decisions
635
+ - Write to docs/, src/, or any other project directory
636
+
637
+ ## Your Approach
638
+
639
+ Phase 1: Orientation (turns 1-10)
640
+ - Read the full database export in your context
641
+ - Read synthesis, fragility, dead-ends
642
+ - Identify patterns: recurring failures, unresolved doubts, evidence gaps
643
+
644
+ Phase 2: Deep Investigation (turns 11-40)
645
+ - Read source code at critical points identified in Phase 1
646
+ - Run targeted tests, profiling, git archaeology
647
+ - Write and execute analysis scripts in .majlis/scripts/
648
+ - Cross-reference findings across experiments
649
+
650
+ Phase 3: Synthesis (turns 41-60)
651
+ - Compile findings into a diagnostic report
652
+ - Identify root causes, not symptoms
653
+ - Rank issues by structural impact
654
+ - Suggest investigation directions (not fixes)
655
+
656
+ ## Output Format
657
+ Produce a diagnostic report as markdown. At the end, include:
658
+
659
+ <!-- majlis-json
660
+ {
661
+ "diagnosis": {
662
+ "root_causes": ["List of identified root causes"],
663
+ "patterns": ["Recurring patterns across experiments"],
664
+ "evidence_gaps": ["What we don't know but should"],
665
+ "investigation_directions": ["Suggested directions for next experiments"]
666
+ }
667
+ }
668
+ -->
669
+
670
+ ## Safety Reminders
671
+ - You are READ-ONLY for project code. Write ONLY to .majlis/scripts/.
672
+ - Focus on diagnosis, not fixing. Your value is insight, not implementation.
673
+ - Trust the database export over docs/ files when they conflict.`
674
+ };
675
// Slash-command definitions scaffolded into the host agent CLI. Each entry
// maps a command name to a short description plus a prompt body. "$ARGUMENTS"
// is left verbatim in the body (presumably expanded by the host CLI at
// invocation time -- TODO confirm). Every body degrades gracefully: it first
// tells the agent to run the `majlis` CLI, then describes how to act out the
// same role manually when the CLI is not installed.
var SLASH_COMMANDS2 = {
  classify: {
    description: "Classify a problem domain into canonical sub-types before building",
    body: `Run \`majlis classify "$ARGUMENTS"\` and follow its output.
If the CLI is not installed, act as the Builder in classification mode.
Read docs/synthesis/current.md and docs/synthesis/dead-ends.md for context.
Enumerate and classify all canonical sub-types of: $ARGUMENTS
Produce a classification document following docs/classification/_TEMPLATE.md.`
  },
  doubt: {
    description: "Run a constructive doubt pass on an experiment",
    body: `Run \`majlis doubt $ARGUMENTS\` to spawn the critic agent.
If the CLI is not installed, act as the Critic directly.
Doubt the experiment at $ARGUMENTS. Produce a doubt document
following docs/doubts/_TEMPLATE.md.`
  },
  challenge: {
    description: "Construct adversarial test cases for an experiment",
    body: `Run \`majlis challenge $ARGUMENTS\` to spawn the adversary agent.
If the CLI is not installed, act as the Adversary directly.
Construct pathological inputs designed to break the approach in $ARGUMENTS.
Produce a challenge document following docs/challenges/_TEMPLATE.md.`
  },
  verify: {
    description: "Verify correctness and provenance of an experiment",
    body: `Run \`majlis verify $ARGUMENTS\` to spawn the verifier agent.
If the CLI is not installed, act as the Verifier directly.
Perform dual verification (provenance + content) on $ARGUMENTS.
Produce a verification report following docs/verification/_TEMPLATE.md.`
  },
  reframe: {
    description: "Independently reframe a problem from scratch",
    body: `Run \`majlis reframe $ARGUMENTS\` to spawn the reframer agent.
If the CLI is not installed, act as the Reframer directly.
You receive ONLY the problem statement and classification \u2014 NOT builder code.
Independently decompose $ARGUMENTS and compare with existing classification.`
  },
  compress: {
    description: "Compress project state into dense synthesis",
    body: `Run \`majlis compress\` to spawn the compressor agent.
If the CLI is not installed, act as the Compressor directly.
Read everything. Rewrite docs/synthesis/current.md shorter and denser.
Update fragility map and dead-end registry.`
  },
  scout: {
    description: "Search externally for alternative approaches",
    body: `Run \`majlis scout $ARGUMENTS\` to spawn the scout agent.
If the CLI is not installed, search for alternative approaches to $ARGUMENTS.
Look for: limitations of current approach, alternative formulations from other fields,
structurally similar problems in unrelated domains.
Produce a rihla document at docs/rihla/.`
  },
  audit: {
    description: "Maqasid check \u2014 is the frame right?",
    body: `Run \`majlis audit "$ARGUMENTS"\` for a purpose audit.
If the CLI is not installed, review: original objective, current classification,
recent failures, dead-ends. Ask: is the classification serving the objective?
Would we decompose differently with what we now know?`
  },
  diagnose: {
    description: "Deep project-wide diagnostic analysis",
    body: `Run \`majlis diagnose $ARGUMENTS\` for deep diagnosis.
If the CLI is not installed, perform a deep diagnostic analysis.
Read docs/synthesis/current.md, fragility.md, dead-ends.md, and all experiments.
Identify root causes, recurring patterns, evidence gaps, and investigation directions.
Do NOT modify project code \u2014 analysis only.`
  },
  scan: {
    description: "Scan existing project to auto-detect config and write synthesis",
    body: `Run \`majlis scan\` to analyze the existing codebase.
This spawns two agents in parallel:
- Cartographer: maps architecture \u2192 docs/synthesis/current.md + fragility.md
- Toolsmith: verifies toolchain \u2192 .majlis/scripts/metrics.sh + config.json
Use --force to overwrite existing synthesis files.`
  },
  resync: {
    description: "Update stale synthesis after project evolved without Majlis",
    body: `Run \`majlis resync\` to bring Majlis back up to speed.
Unlike scan (which starts from zero), resync starts from existing knowledge.
It assesses staleness, then re-runs cartographer (always) and toolsmith (if needed)
with the old synthesis and DB history as context.
Use --check to see the staleness report without making changes.
Use --force to skip active experiment checks.`
  }
};
760
// Hook configuration scaffolded for Claude Code (NOTE(review): presumably
// merged into .claude/settings.json by scaffold() -- confirm at the call site).
// - SessionStart: print live majlis status so a new session starts with context.
// - PreToolUse (matcher "Bash"): run the commit guard before shell commands.
// - SubagentStop: nudge the driver to run `majlis next` and continue the cycle.
// Each command is wrapped in `2>/dev/null || true` (or is a plain echo) so a
// missing or failing `majlis` binary never blocks the session.
var HOOKS_CONFIG2 = {
  hooks: {
    SessionStart: [
      {
        hooks: [
          {
            type: "command",
            command: "majlis status --json 2>/dev/null || true"
          }
        ]
      }
    ],
    PreToolUse: [
      {
        matcher: "Bash",
        hooks: [
          {
            type: "command",
            command: "majlis check-commit 2>/dev/null || true",
            timeout: 10
          }
        ]
      }
    ],
    SubagentStop: [
      {
        hooks: [
          {
            type: "command",
            command: "echo 'Subagent completed. Run majlis next to continue the cycle.'",
            timeout: 5
          }
        ]
      }
    ]
  }
};
797
// Markdown document templates, keyed by their destination path under docs/.
// {{placeholder}} tokens are left verbatim (presumably substituted when a
// document is instantiated -- TODO confirm against the CLI). The trailing
// `<!-- majlis-json ... -->` HTML comments carry machine-readable metadata
// skeletons (decisions, grades, doubts, challenges, resolutions) alongside
// the human-readable sections.
var DOC_TEMPLATES2 = {
  "experiments/_TEMPLATE.md": `# Experiment: {{title}}

**Hypothesis:** {{hypothesis}}
**Branch:** {{branch}}
**Status:** {{status}}
**Sub-type:** {{sub_type}}
**Created:** {{date}}

## Approach

[Describe the approach]

## Decisions

- [evidence_level] Decision description \u2014 justification

## Results

[Describe the results]

## Metrics

| Fixture | Metric | Before | After | Delta |
|---------|--------|--------|-------|-------|
| | | | | |

<!-- majlis-json
{
"decisions": [],
"grades": []
}
-->
`,
  "decisions/_TEMPLATE.md": `# Decision: {{title}}

**Evidence Level:** {{evidence_level}}
**Experiment:** {{experiment}}
**Date:** {{date}}

## Description

[What was decided]

## Justification

[Why this decision was made, referencing evidence]

## Alternatives Considered

[What else was considered and why it was rejected]

<!-- majlis-json
{
"decisions": [
{ "description": "", "evidence_level": "", "justification": "" }
]
}
-->
`,
  "classification/_TEMPLATE.md": `# Classification: {{domain}}

**Date:** {{date}}

## Problem Domain

[Describe the problem domain]

## Sub-Types

### 1. {{sub_type_1}}
- **Description:**
- **Canonical form:**
- **Known constraints:**

### 2. {{sub_type_2}}
- **Description:**
- **Canonical form:**
- **Known constraints:**

## Relationships

[How sub-types relate to each other]
`,
  "doubts/_TEMPLATE.md": `# Doubt Document \u2014 Against Experiment {{experiment}}

**Critic:** {{agent}}
**Date:** {{date}}

## Doubt 1: {{title}}

**Claim doubted:** {{claim}}
**Evidence level of claim:** {{evidence_level}}
**Severity:** {{severity}}

**Evidence for doubt:**
[Specific evidence \u2014 a prior experiment, inconsistency, untested case, or false analogy]

<!-- majlis-json
{
"doubts": [
{ "claim_doubted": "", "evidence_level_of_claim": "", "evidence_for_doubt": "", "severity": "critical" }
]
}
-->
`,
  "challenges/_TEMPLATE.md": `# Challenge Document \u2014 Against Experiment {{experiment}}

**Adversary:** {{agent}}
**Date:** {{date}}

## Challenge 1: {{title}}

**Constructed case:**
[Specific input or condition designed to break the approach]

**Reasoning:**
[Why this case should break the approach \u2014 what assumption does it violate?]

## Challenge 2: {{title}}

**Constructed case:**
[Specific input or condition]

**Reasoning:**
[Why this should break]

<!-- majlis-json
{
"challenges": [
{ "description": "", "reasoning": "" }
]
}
-->
`,
  "verification/_TEMPLATE.md": `# Verification Report \u2014 Experiment {{experiment}}

**Verifier:** {{agent}}
**Date:** {{date}}

## Provenance Check (Isnad)

| Component | Traceable | Chain intact | Notes |
|-----------|-----------|--------------|-------|
| | yes/no | yes/no | |

## Content Check (Matn)

| Component | Tests pass | Consistent | Grade | Notes |
|-----------|-----------|------------|-------|-------|
| | yes/no | yes/no | sound/good/weak/rejected | |

## Doubt Resolution

| Doubt | Resolution | Evidence |
|-------|------------|----------|
| | confirmed/dismissed/inconclusive | |

<!-- majlis-json
{
"grades": [
{ "component": "", "grade": "sound", "provenance_intact": true, "content_correct": true, "notes": "" }
],
"doubt_resolutions": [
{ "doubt_id": 0, "resolution": "confirmed" }
]
}
-->
`,
  "reframes/_TEMPLATE.md": `# Reframe: {{domain}}

**Reframer:** {{agent}}
**Date:** {{date}}

## Independent Decomposition

[How this problem should be decomposed \u2014 without seeing the builder's approach]

## Natural Joints

[Where does this problem naturally divide?]

## Cross-Domain Analogies

[What analogies from other domains apply?]

## Comparison with Existing Classification

[Structural divergences from the current classification]

## Divergences (Most Valuable Signals)

[Where the independent decomposition differs from the builder's classification]
`,
  "rihla/_TEMPLATE.md": `# Rihla (Scout Report): {{topic}}

**Date:** {{date}}

## Problem (Domain-Neutral)

[Describe the problem in domain-neutral terms]

## Alternative Approaches Found

### 1. {{approach}}
- **Source:**
- **Description:**
- **Applicability:**

## Known Limitations of Current Approach

[What external sources say about where this approach fails]

## Cross-Domain Analogues

[Structurally similar problems in unrelated domains]
`
};
1015
// Names of the per-artifact documentation directories (referenced throughout
// the prompts and templates above as docs/<name>/) -- one per artifact type
// in the Majlis cycle, plus "inbox" for stray notes and "synthesis" for the
// compressed project state.
var DOC_DIRS2 = [
  "inbox",
  "experiments",
  "decisions",
  "classification",
  "doubts",
  "challenges",
  "verification",
  "reframes",
  "rihla",
  "synthesis",
  "diagnosis"
];
1028
// Full contents of the scaffolded workflow quick-reference document
// (referenced elsewhere in this file as docs/workflow.md). Inner markdown
// code fences and command names are backslash-escaped backticks because the
// whole document is one template literal.
var WORKFLOW_MD2 = `# Majlis Workflow \u2014 Quick Reference

## The Cycle

\`\`\`
1. CLASSIFY \u2192 Taxonomy before solution (Al-Khwarizmi)
2. REFRAME \u2192 Independent decomposition (Al-Biruni)
3. GATE \u2192 Hypothesis quality check ('Ilm al-'Ilal)
4. BUILD \u2192 Write code with tagged decisions (Ijtihad)
5. CHALLENGE \u2192 Construct breaking inputs (Ibn al-Haytham)
6. DOUBT \u2192 Systematic challenge with evidence (Shukuk)
7. SCOUT \u2192 External search for alternatives (Rihla)
8. VERIFY \u2192 Provenance + content checks (Isnad + Matn)
9. RESOLVE \u2192 Route based on grades
10. COMPRESS \u2192 Shorter and denser (Hifz)
\`\`\`

## Resolution
- **Sound** \u2192 Merge
- **Good** \u2192 Merge + add gaps to fragility map
- **Weak** \u2192 Cycle back with synthesised guidance
- **Rejected** \u2192 Dead-end with structural constraint

## Circuit Breaker
3+ weak/rejected on same sub-type \u2192 Maqasid Check (purpose audit)

## Evidence Hierarchy
1. Proof \u2192 2. Test \u2192 3a. Strong Consensus \u2192 3b. Consensus \u2192 4. Analogy \u2192 5. Judgment

## Commands
| Action | Command |
|--------|---------|
| Initialize | \`majlis init\` |
| Status | \`majlis status\` |
| New experiment | \`majlis new "hypothesis"\` |
| Baseline metrics | \`majlis baseline\` |
| Measure metrics | \`majlis measure\` |
| Compare metrics | \`majlis compare\` |
| Next step | \`majlis next\` |
| Auto cycle | \`majlis next --auto\` |
| Autonomous | \`majlis run "goal"\` |
| Session start | \`majlis session start "intent"\` |
| Session end | \`majlis session end\` |
| Compress | \`majlis compress\` |
| Audit | \`majlis audit "objective"\` |

## Experiment Flags
| Flag | Purpose |
|------|---------|
| \`--sub-type TYPE\` | Classify experiment by problem sub-type |
| \`--depends-on SLUG\` | Block building until dependency is merged |
| \`--context FILE,FILE\` | Inject domain-specific docs into agent context |

Example: \`majlis new "improve fitting accuracy" --sub-type fitting --depends-on surface-construction --context docs/algorithms/fitting.md,fixtures/anatomy/part1/README.md\`

## Project Readiness

Majlis works with zero config \u2014 agents figure things out from CLAUDE.md. But each
config field you wire up removes a failure mode and makes cycles more autonomous.

### Metrics Command
Your \`metrics.command\` must output JSON in this format:
\`\`\`json
{ "fixtures": { "fixture_name": { "metric_name": 123.4 } } }
\`\`\`
If your test harness outputs human-readable text, write a thin wrapper script that
parses it into this format. The framework runs this command automatically before and
after each build to capture regression data.

### Fixtures and Gates
Define your test cases in \`config.metrics.fixtures\`. Flag your regression baseline
as a gate \u2014 regressions on gate fixtures block merge regardless of verification grades:
\`\`\`json
"fixtures": {
"baseline_test": { "gate": true },
"target_test": { "gate": false }
}
\`\`\`

### Tracked Metrics
Name the metrics you care about and set their direction:
\`\`\`json
"tracked": {
"error_rate": { "direction": "lower_is_better" },
"accuracy": { "direction": "higher_is_better" },
"value_delta": { "direction": "closer_to_gt", "target": 0 }
}
\`\`\`

### Architecture Docs
Agents read CLAUDE.md for project context. The more specific it is about where things
live, how to build, and how to test, the better agents perform. Include build commands,
test commands, file layout, and key patterns.

Run \`majlis status\` to see which readiness checks pass and which need attention.
`;
1124
// Placeholder contents for the three synthesis documents, keyed by filename
// under docs/synthesis/. Each is a stub meant to be overwritten once real
// experiment data exists (see the compress command above).
var SYNTHESIS_STARTERS2 = {
  "current.md": '# Project Synthesis\n\n*No experiments yet. Run `majlis new "hypothesis"` to begin.*\n',
  "fragility.md": "# Fragility Map\n\n*No fragility recorded yet.*\n",
  "dead-ends.md": "# Dead-End Registry\n\n*No dead-ends recorded yet.*\n"
};
1129
// Markdown section for appending to an EXISTING CLAUDE.md; the leading
// newline separates it from whatever content precedes it.
// NOTE(review): this text duplicates the protocol body embedded in
// claudeMdContent2() below -- any wording change must be made in both places,
// or one should be derived from the other.
var CLAUDE_MD_SECTION = `
## Majlis Protocol

This project uses the Majlis Framework for structured multi-agent problem solving.
See \`docs/workflow.md\` for the full cycle. See \`.claude/agents/\` for role definitions (source of truth in \`.majlis/agents/\`).

### Evidence Hierarchy (tag every decision)
1. **Proof** \u2014 mathematical proof. Overturn requires error in proof.
2. **Test** \u2014 empirical test. Overturn requires showing test insufficiency.
3a. **Strong Consensus** \u2014 convergence across independent approaches.
3b. **Consensus** \u2014 agreement from same-model experiments.
4. **Analogy** \u2014 justified by similarity to prior work.
5. **Judgment** \u2014 independent reasoning without precedent.

### Session Discipline
- One intent per session. Declare it with \`majlis session start "intent"\`.
- Stray thoughts \u2192 Telegram (Scribe) or docs/inbox/.
- Every session ends with \`majlis session end\`.

### Before Building
- Read \`docs/synthesis/current.md\` for compressed project state.
- Run \`majlis dead-ends --sub-type <relevant>\` for structural constraints.
- Run \`majlis decisions --level judgment\` for provisional decisions to challenge.

### Compression Trigger
- Run \`majlis status\` \u2014 it will warn when compression is due.

### Current State
Run \`majlis status\` for live experiment state and cycle position.
`;
1159
// Build the contents of a fresh CLAUDE.md: a `# <name>` title, an optional
// **Objective:** line, then the standard Majlis protocol section.
// @param {string} name - project name, used as the document title.
// @param {string} objective - project objective; falsy values omit the line.
// @returns {string} complete markdown document, ending with a newline.
function claudeMdContent2(name, objective) {
  // Hoisted out of the template so the document skeleton below stays flat.
  const objectiveLine = objective ? `**Objective:** ${objective}\n` : "";
  return `# ${name}

${objectiveLine}## Majlis Protocol

This project uses the Majlis Framework for structured multi-agent problem solving.
See \`docs/workflow.md\` for the full cycle. See \`.claude/agents/\` for role definitions (source of truth in \`.majlis/agents/\`).

### Evidence Hierarchy (tag every decision)
1. **Proof** \u2014 mathematical proof. Overturn requires error in proof.
2. **Test** \u2014 empirical test. Overturn requires showing test insufficiency.
3a. **Strong Consensus** \u2014 convergence across independent approaches.
3b. **Consensus** \u2014 agreement from same-model experiments.
4. **Analogy** \u2014 justified by similarity to prior work.
5. **Judgment** \u2014 independent reasoning without precedent.

### Session Discipline
- One intent per session. Declare it with \`majlis session start "intent"\`.
- Stray thoughts \u2192 Telegram (Scribe) or docs/inbox/.
- Every session ends with \`majlis session end\`.

### Before Building
- Read \`docs/synthesis/current.md\` for compressed project state.
- Run \`majlis dead-ends --sub-type <relevant>\` for structural constraints.
- Run \`majlis decisions --level judgment\` for provisional decisions to challenge.

### Compression Trigger
- Run \`majlis status\` \u2014 it will warn when compression is due.

### Current State
Run \`majlis status\` for live experiment state and cycle position.
`;
}
1193
// Baseline configuration object. Field values mirror the JSON emitted by
// configTemplate2() below, with all free-text answers left empty.
var DEFAULT_CONFIG = {
  project: {
    name: "",
    description: "",
    objective: ""
  },
  metrics: {
    // Default no-op metrics command: emits an empty fixtures map, so
    // measurement always succeeds even before the project wires up metrics.
    command: `echo '{"fixtures":{}}'`,
    fixtures: {},
    tracked: {}
  },
  build: {
    // No build hooks until the project configures them.
    pre_measure: null,
    post_measure: null
  },
  cycle: {
    compression_interval: 5,
    circuit_breaker_threshold: 3,
    require_doubt_before_verify: true,
    require_challenge_before_verify: false,
    auto_baseline_on_new_experiment: true
  },
  // Model choice per agent role; only the gatekeeper runs on the cheaper tier.
  models: {
    builder: "opus",
    critic: "opus",
    adversary: "opus",
    verifier: "opus",
    reframer: "opus",
    compressor: "opus",
    gatekeeper: "sonnet",
    scout: "opus"
  }
};
1226
// Render the initial config JSON from interview answers.
// @param {object} answers - { name, description, objective, metricsCommand,
//   buildPre, buildPost } collected from the user.
// @returns {string} pretty-printed (2-space) JSON document.
function configTemplate2(answers) {
  const config = {
    project: {
      name: answers.name,
      description: answers.description,
      objective: answers.objective
    },
    metrics: {
      command: answers.metricsCommand,
      fixtures: {},
      tracked: {}
    },
    build: {
      // Falsy answers (including "") collapse to null, as before.
      pre_measure: answers.buildPre || null,
      post_measure: answers.buildPost || null
    },
    cycle: {
      compression_interval: 5,
      circuit_breaker_threshold: 3,
      require_doubt_before_verify: true,
      require_challenge_before_verify: false,
      auto_baseline_on_new_experiment: true
    },
    models: {
      builder: "opus",
      critic: "opus",
      adversary: "opus",
      verifier: "opus",
      reframer: "opus",
      compressor: "opus",
      gatekeeper: "sonnet",
      scout: "opus"
    }
  };
  return JSON.stringify(config, null, 2);
}
1261
+ var fs3 = __toESM2(require("fs"));
1262
// Ensure `dir` exists, creating missing parents as needed.
// Fix: the old existsSync() guard was a TOCTOU race (the directory could be
// created or removed between the check and mkdirSync) and was redundant with
// { recursive: true }, which is already a no-op for an existing directory.
// The catch preserves the old guard's one observable tolerance: a path that
// exists as a non-directory is silently accepted rather than thrown.
function mkdirSafe2(dir) {
  try {
    fs3.mkdirSync(dir, { recursive: true });
  } catch (err) {
    // Existing path of any kind was never an error before; anything else still throws.
    if (!fs3.existsSync(dir)) throw err;
  }
}
1267
// Convert a project-checks snapshot into an ordered list of readiness results.
// Output order is fixed: git, objective, CLAUDE.md, metrics command, fixtures,
// tracked metrics, build command, synthesis document.
// @param {object} checks - flags and config values gathered by the caller.
// @returns {{label: string, status: "pass"|"warn"|"fail", detail: string}[]}
function validateProject(checks) {
  const results = [];
  const add = (label, status, detail) => {
    results.push({ label, status, detail });
  };

  if (checks.hasGitRepo) {
    add("Git repository", "pass", "Detected");
  } else {
    add("Git repository", "fail", "Not a git repo \u2014 experiment branches will not work");
  }

  if (checks.hasObjective) {
    add("Project objective", "pass", "Set in config");
  } else {
    add("Project objective", "warn", "Not set \u2014 agents lack goal context for maqasid checks");
  }

  if (checks.hasClaudeMd) {
    add("CLAUDE.md", "pass", "Found \u2014 agents will have project context");
  } else {
    add("CLAUDE.md", "warn", "Not found \u2014 agents will lack project architecture context");
  }

  // The scaffolded default command is a no-op echo; treat it as unconfigured.
  const isRealCommand = checks.metricsCommand && !checks.metricsCommand.includes(`echo '{"fixtures":{}}'`);
  if (!isRealCommand) {
    add("Metrics command", "warn", "Using default no-op \u2014 configure metrics.command for automatic regression detection");
  } else if (!checks.metricsCommandRunnable) {
    add("Metrics command", "warn", "Set but not runnable \u2014 check the command works: " + checks.metricsCommand);
  } else {
    add("Metrics command", "pass", "Set and runnable");
  }

  // Fixtures may be an array of names (no gate flags available) or a
  // name -> config map whose entries can carry a `gate` flag.
  const fixturesIsArray = Array.isArray(checks.fixtures);
  const fixtureNames = fixturesIsArray ? checks.fixtures : Object.keys(checks.fixtures);
  if (fixtureNames.length === 0) {
    add("Fixtures", "warn", "None defined \u2014 consider adding fixtures with gate flags for regression protection");
  } else {
    const gateCount = fixturesIsArray ? 0 : Object.values(checks.fixtures).filter((f) => f.gate).length;
    if (gateCount === 0) {
      add("Fixtures", "warn", `${fixtureNames.length} fixture(s) but none flagged as gate \u2014 no regression protection`);
    } else {
      add("Fixtures", "pass", `${fixtureNames.length} fixture(s), ${gateCount} gate(s)`);
    }
  }

  const trackedCount = Object.keys(checks.tracked).length;
  if (trackedCount === 0) {
    add("Tracked metrics", "warn", "None defined \u2014 regression detection disabled");
  } else {
    add("Tracked metrics", "pass", `${trackedCount} metric(s) tracked`);
  }

  if (checks.preMeasure) {
    add("Build command", "pass", "Set (pre_measure)");
  } else {
    add("Build command", "warn", "No pre_measure \u2014 builder must know how to build from CLAUDE.md");
  }

  if (checks.hasSynthesis) {
    add("Synthesis document", "pass", "Found");
  } else {
    add("Synthesis document", "warn", "Empty \u2014 will be populated after first compression cycle");
  }

  return results;
}
1311
// Color is used unless NO_COLOR is set or stderr explicitly reports it is not
// a TTY (an absent/undefined isTTY still gets color, as before).
var _useColor = !process.env.NO_COLOR && process.stderr?.isTTY !== false;
// Render validation results as indented "  <icon> Label: detail" lines joined
// with newlines. Icons: check (pass), warning sign (warn), cross (anything else).
function formatValidation(checks) {
  const plainIcons = { pass: "\u2713", warn: "\u26A0", fail: "\u2717" };
  const colorIcons = {
    pass: "\x1B[32m\u2713\x1B[0m",
    warn: "\x1B[33m\u26A0\x1B[0m",
    fail: "\x1B[31m\u2717\x1B[0m"
  };
  const icons = _useColor ? colorIcons : plainIcons;
  // Unknown statuses fall back to the failure cross, matching the old ternary chain.
  return checks.map((c) => `  ${icons[c.status] ?? icons.fail} ${c.label}: ${c.detail}`).join("\n");
}
1320
+ }
1321
+ });
1322
+
26
1323
  // src/index.ts
27
1324
  var fs2 = __toESM(require("fs"));
28
1325
  var path2 = __toESM(require("path"));
@@ -71,7 +1368,7 @@ function defaultAnswers(projectName) {
71
1368
  var fs = __toESM(require("fs"));
72
1369
  var path = __toESM(require("path"));
73
1370
  var import_node_child_process = require("child_process");
74
- var import_shared = require("@majlis/shared");
1371
+ var import_shared = __toESM(require_dist());
75
1372
  function scaffold(opts) {
76
1373
  const { targetDir, answers, fresh, noHooks, minimal } = opts;
77
1374
  if (fresh) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "create-majlis",
3
- "version": "0.7.1",
3
+ "version": "0.7.3",
4
4
  "description": "Scaffold the Majlis Framework into a project",
5
5
  "bin": {
6
6
  "create-majlis": "./dist/index.js"
@@ -9,10 +9,8 @@
9
9
  "build": "tsup src/index.ts --format cjs --clean",
10
10
  "test": "echo 'No tests yet'"
11
11
  },
12
- "dependencies": {
13
- "@majlis/shared": "*"
14
- },
15
12
  "devDependencies": {
13
+ "@majlis/shared": "*",
16
14
  "@types/node": "^22.0.0",
17
15
  "tsup": "^8.0.0",
18
16
  "typescript": "^5.5.0"