amalfa 1.0.1 → 1.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. package/README.md +226 -263
  2. package/package.json +6 -3
  3. package/polyvis.settings.json.bak +38 -0
  4. package/src/cli.ts +103 -21
  5. package/src/config/defaults.ts +52 -12
  6. package/src/core/VectorEngine.ts +18 -9
  7. package/src/mcp/index.ts +62 -7
  8. package/src/resonance/DatabaseFactory.ts +3 -4
  9. package/src/resonance/db.ts +4 -4
  10. package/src/resonance/services/vector-daemon.ts +151 -0
  11. package/src/utils/DaemonManager.ts +147 -0
  12. package/src/utils/ZombieDefense.ts +5 -1
  13. package/:memory: +0 -0
  14. package/:memory:-shm +0 -0
  15. package/:memory:-wal +0 -0
  16. package/CHANGELOG.md.old +0 -43
  17. package/README.old.md +0 -112
  18. package/ROADMAP.md +0 -316
  19. package/TEST_PLAN.md +0 -561
  20. package/agents.config.json +0 -11
  21. package/docs/AGENT_PROTOCOLS.md +0 -28
  22. package/docs/ARCHITECTURAL_OVERVIEW.md +0 -123
  23. package/docs/BENTO_BOXING_DEPRECATION.md +0 -281
  24. package/docs/Bun-SQLite.html +0 -464
  25. package/docs/COMMIT_GUIDELINES.md +0 -367
  26. package/docs/DEVELOPER_ONBOARDING.md +0 -36
  27. package/docs/Graph and Vector Database Best Practices.md +0 -214
  28. package/docs/PERFORMANCE_BASELINES.md +0 -88
  29. package/docs/REPOSITORY_CLEANUP_SUMMARY.md +0 -261
  30. package/docs/edge-generation-methods.md +0 -57
  31. package/docs/elevator-pitch.md +0 -118
  32. package/docs/graph-and-vector-database-playbook.html +0 -480
  33. package/docs/hardened-sqlite.md +0 -85
  34. package/docs/headless-knowledge-management.md +0 -79
  35. package/docs/john-kaye-flux-prompt.md +0 -46
  36. package/docs/keyboard-shortcuts.md +0 -80
  37. package/docs/opinion-proceed-pattern.md +0 -29
  38. package/docs/polyvis-nodes-edges-schema.md +0 -77
  39. package/docs/protocols/lab-protocol.md +0 -30
  40. package/docs/reaction-iquest-loop-coder.md +0 -46
  41. package/docs/services.md +0 -60
  42. package/docs/sqlite-wal-readonly-trap.md +0 -228
  43. package/docs/strategy/css-architecture.md +0 -40
  44. package/docs/test-document-cycle.md +0 -83
  45. package/docs/test_lifecycle_E2E.md +0 -4
  46. package/docs/the-bicameral-graph.md +0 -83
  47. package/docs/user-guide.md +0 -70
  48. package/docs/vision-helper.md +0 -53
  49. package/drizzle/0000_minor_iron_fist.sql +0 -19
  50. package/drizzle/meta/0000_snapshot.json +0 -139
  51. package/drizzle/meta/_journal.json +0 -13
  52. package/example_usage.ts +0 -39
  53. package/experiment.sh +0 -35
  54. package/hello +0 -2
  55. package/index.html +0 -52
  56. package/knowledge/excalibur.md +0 -12
  57. package/plans/experience-graph-integration.md +0 -60
  58. package/prompts/gemini-king-mode-prompt.md +0 -46
  59. package/public/docs/MCP_TOOLS.md +0 -372
  60. package/schemas/README.md +0 -20
  61. package/schemas/cda.schema.json +0 -84
  62. package/schemas/conceptual-lexicon.schema.json +0 -75
  63. package/scratchpads/dummy-debrief-boxed.md +0 -39
  64. package/scratchpads/dummy-debrief.md +0 -27
  65. package/scratchpads/scratchpad-design.md +0 -50
  66. package/scratchpads/scratchpad-scrolling.md +0 -20
  67. package/scratchpads/scratchpad-toc-disappearance.md +0 -23
  68. package/scratchpads/scratchpad-toc.md +0 -28
  69. package/scratchpads/test_gardener.md +0 -7
  70. package/src/core/LLMClient.ts +0 -93
  71. package/src/core/TagEngine.ts +0 -56
  72. package/src/db/schema.ts +0 -46
  73. package/src/gardeners/AutoTagger.ts +0 -116
  74. package/src/pipeline/HarvesterPipeline.ts +0 -101
  75. package/src/pipeline/Ingestor.ts +0 -555
  76. package/src/resonance/cli/ingest.ts +0 -41
  77. package/src/resonance/cli/migrate.ts +0 -54
  78. package/src/resonance/config.ts +0 -40
  79. package/src/resonance/daemon.ts +0 -236
  80. package/src/resonance/pipeline/extract.ts +0 -89
  81. package/src/resonance/pipeline/transform_docs.ts +0 -60
  82. package/src/resonance/services/tokenizer.ts +0 -159
  83. package/src/resonance/transform/cda.ts +0 -393
  84. package/src/utils/EnvironmentVerifier.ts +0 -67
  85. package/substack/substack-playbook-1.md +0 -95
  86. package/substack/substack-playbook-2.md +0 -78
  87. package/tasks/ui-investigation.md +0 -26
  88. package/test-db +0 -0
  89. package/test-db-shm +0 -0
  90. package/test-db-wal +0 -0
  91. package/tests/canary/verify_pinch_check.ts +0 -44
  92. package/tests/fixtures/ingest_test.md +0 -12
  93. package/tests/fixtures/ingest_test_boxed.md +0 -13
  94. package/tests/fixtures/safety_test.md +0 -45
  95. package/tests/fixtures/safety_test_boxed.md +0 -49
  96. package/tests/fixtures/tagged_output.md +0 -49
  97. package/tests/fixtures/tagged_test.md +0 -49
  98. package/tests/mcp-server-settings.json +0 -8
  99. package/verify-embedder.ts +0 -54
package/src/resonance/transform/cda.ts DELETED
@@ -1,393 +0,0 @@
- #!/usr/bin/env bun
-
- /**
-  * CDA/CL Transformation Pipeline
-  *
-  * Transforms raw CDA and Lexicon into enriched intermediate structure
-  * with keyword extraction and candidate relationship generation.
-  */
-
- import { join } from "node:path";
- import { SemanticMatcher } from "@src/core/SemanticMatcher";
- import type {
-   CandidateRelationship,
-   EnrichedCdaDocument,
-   EnrichedCdaEntry,
-   EnrichedLexiconConcept,
-   EnrichedLexiconDocument,
- } from "@src/resonance/types/enriched-cda";
- import settings from "@/polyvis.settings.json";
-
- // Simple keyword extraction (can be enhanced later)
- function extractKeywords(text: string): string[] {
-   if (!text) return [];
-
-   // Remove common words, extract significant terms
-   const stopWords = new Set([
-     "the",
-     "a",
-     "an",
-     "and",
-     "or",
-     "but",
-     "in",
-     "on",
-     "at",
-     "to",
-     "for",
-     "of",
-     "with",
-     "by",
-     "from",
-     "as",
-     "is",
-     "are",
-     "was",
-     "were",
-     "be",
-     "been",
-     "being",
-     "have",
-     "has",
-     "had",
-     "do",
-     "does",
-     "did",
-     "will",
-     "would",
-     "should",
-     "could",
-     "may",
-     "might",
-     "must",
-     "can",
-     "this",
-     "that",
-     "these",
-     "those",
-     "it",
-     "its",
-   ]);
-
-   const words = text
-     .toLowerCase()
-     .replace(/[^a-z0-9\s-]/g, " ")
-     .split(/\s+/)
-     .filter((w) => w.length > 3 && !stopWords.has(w));
-
-   // Return unique keywords
-   return [...new Set(words)];
- }
-
- // Match keywords to lexicon concepts
- function matchKeywordsToConcepts(
-   keywords: string[],
-   concepts: EnrichedLexiconConcept[],
- ): CandidateRelationship[] {
-   const relationships: CandidateRelationship[] = [];
-
-   for (const keyword of keywords) {
-     for (const concept of concepts) {
-       // Check title match
-       const titleMatch = concept.title.toLowerCase().includes(keyword);
-       const aliasMatch = concept.aliases.some((alias) =>
-         alias.toLowerCase().includes(keyword),
-       );
-       const keywordMatch = concept.extracted_keywords.includes(keyword);
-
-       if (titleMatch || aliasMatch || keywordMatch) {
-         // Calculate confidence based on match type
-         let confidence = 0.5; // Base confidence for keyword match
-         if (titleMatch) confidence = 0.85;
-         if (aliasMatch) confidence = 0.75;
-
-         relationships.push({
-           type: "MENTIONS",
-           target: concept.id,
-           confidence,
-           source: "keyword_match",
-         });
-       }
-     }
-   }
-
-   // Deduplicate and keep highest confidence
-   const deduped = new Map<string, CandidateRelationship>();
-   for (const rel of relationships) {
-     const existing = deduped.get(rel.target);
-     if (!existing || rel.confidence > existing.confidence) {
-       deduped.set(rel.target, rel);
-     }
-   }
-
-   return Array.from(deduped.values());
- }
-
- // Parse explicit tags from CDA entries
- function parseExplicitTags(
-   tags: string[],
-   concepts: EnrichedLexiconConcept[],
- ): CandidateRelationship[] {
-   const relationships: CandidateRelationship[] = [];
-
-   for (const tag of tags) {
-     // Match pattern: [TYPE: Target]
-     const match = tag.match(/\[([A-Z_]+):\s*([^\]]+)\]/);
-     if (!match) continue;
-
-     const typeStr = match[1];
-     const targetStr = match[2];
-     if (!typeStr || !targetStr) continue;
-
-     const target = targetStr.trim().toLowerCase().replace(/\s+/g, "-");
-
-     // Map tag types to relationship types
-     const typeMap: Record<string, CandidateRelationship["type"]> = {
-       SUBSTRATE_ISSUE: "ADDRESSES",
-       IMPLEMENTS: "IMPLEMENTS",
-       GUIDED_BY: "GUIDED_BY",
-       RELATED_TO: "RELATED_TO",
-       REQUIRES: "REQUIRES",
-       ENABLES: "ENABLES",
-     };
-
-     const relType = typeMap[typeStr] || "RELATED_TO";
-
-     // Try to find matching concept
-     const concept = concepts.find(
-       (c) =>
-         c.id.includes(target) ||
-         c.title.toLowerCase().replace(/\s+/g, "-") === target ||
-         c.aliases.some((a) => a.toLowerCase().replace(/\s+/g, "-") === target),
-     );
-
-     if (concept) {
-       relationships.push({
-         type: relType,
-         target: concept.id,
-         confidence: 1.0, // Explicit tags have high confidence
-         source: "explicit_tag",
-       });
-     } else {
-       console.warn(
-         `⚠️ Tag target not found in lexicon: ${targetStr} (from tag: ${tag})`,
-       );
-     }
-   }
-
-   return relationships;
- }
-
- async function main() {
-   console.log("🔄 CDA/CL Transformation Pipeline");
-   console.log("═".repeat(60));
-
-   const root = process.cwd();
-
-   // Load source files
-   console.log("\n📂 Loading source files...");
-   const lexiconPath = join(root, settings.paths.sources.persona.lexicon);
-   const cdaPath = join(root, settings.paths.sources.persona.cda);
-
-   const lexiconData = await Bun.file(lexiconPath).json();
-   const cdaData = await Bun.file(cdaPath).json();
-
-   // Transform Lexicon
-   console.log("\n🧠 Transforming Lexicon...");
-   const lexiconConcepts = (
-     Array.isArray(lexiconData) ? lexiconData : lexiconData.concepts
-   ) as EnrichedLexiconConcept[];
-
-   const enrichedConcepts: EnrichedLexiconConcept[] = lexiconConcepts.map(
-     (c) => ({
-       id: c.id,
-       type: "concept",
-       title: c.title,
-       description: c.description || c.title,
-       category: c.category || "uncategorized",
-       extracted_keywords: extractKeywords(`${c.title} ${c.description || ""}`),
-       aliases: c.aliases || [],
-       meta: {
-         type: c.type,
-       },
-     }),
-   );
-
-   const enrichedLexicon: EnrichedLexiconDocument = {
-     version: "1.0.0",
-     generated_at: new Date().toISOString(),
-     source_file: lexiconPath,
-     concepts: enrichedConcepts,
-     stats: {
-       total_concepts: enrichedConcepts.length,
-       total_keywords_extracted: enrichedConcepts.reduce(
-         (sum, c) => sum + c.extracted_keywords.length,
-         0,
-       ),
-     },
-   };
-
-   console.log(` ✅ ${enrichedConcepts.length} concepts enriched`);
-   console.log(
-     ` ✅ ${enrichedLexicon.stats.total_keywords_extracted} keywords extracted`,
-   );
-
-   // Transform CDA
-   console.log("\n📋 Transforming CDA...");
-   const cdaEntries: EnrichedCdaEntry[] = [];
-
-   // Initialize Semantic Matcher (mgrep wrapper)
-   const semanticMatcher = new SemanticMatcher();
-   console.log(" 🤖 Initialized Semantic Matcher");
-   let totalSemanticRels = 0;
-
-   for (const section of cdaData.directives) {
-     for (const entry of section.entries) {
-       const keywords = extractKeywords(entry.definition || "");
-       const explicitRels = parseExplicitTags(
-         entry.tags || [],
-         enrichedConcepts,
-       );
-       const keywordRels = matchKeywordsToConcepts(keywords, enrichedConcepts);
-
-       // Semantic Search Soft Links
-       const semanticRels: CandidateRelationship[] = [];
-
-       // Only run if we have a meaty definition to search with
-       if (entry.definition && entry.definition.length > 15) {
-         try {
-           // Search known documentation for semantic references
-           const docsPath = join(process.cwd(), settings.paths.docs.public);
-
-           const matches = await semanticMatcher.findCandidates(
-             entry.definition,
-             docsPath,
-           );
-
-           for (const match of matches) {
-             // Logic: If mgrep returns a match in the lexicon file,
-             // we need to identify WHICH concept that line belongs to.
-             // Naive approach: Basic text proximity or line number mapping.
-             // Better approach for MVP: Check if the matched content *contains* a concept title.
-
-             const relatedConcept = enrichedConcepts.find((c) =>
-               match.content.toLowerCase().includes(c.title.toLowerCase()),
-             );
-
-             if (relatedConcept) {
-               // Avoid dupes from keywords
-               if (!keywordRels.some((r) => r.target === relatedConcept.id)) {
-                 semanticRels.push({
-                   type: "RELATED_TO",
-                   target: relatedConcept.id,
-                   confidence: 0.65, // Lower than keyword, but significant
-                   source: "semantic_search",
-                 });
-                 totalSemanticRels++;
-               }
-             }
-           }
-         } catch (_e) {
-           // Fail silently to normal flow
-         }
-       }
-
-       const candidateRels = [...explicitRels, ...keywordRels, ...semanticRels];
-
-       // Auto-validate high-confidence relationships
-       const validatedRels = candidateRels
-         .filter((rel) => rel.confidence >= 0.75) // High confidence threshold
-         .map((rel) => ({
-           type: rel.type,
-           target: rel.target,
-           source: rel.source,
-           validated: true,
-           validator:
-             rel.source === "explicit_tag" ? "auto" : "confidence_threshold",
-           validated_at: new Date().toISOString(),
-         }));
-
-       cdaEntries.push({
-         id: entry.id,
-         type: "directive",
-         title: entry.term || entry.id,
-         definition: entry.definition || "",
-         section: section.section,
-         explicit_tags: entry.tags || [],
-         extracted_keywords: keywords,
-         candidate_relationships: candidateRels,
-         validated_relationships: validatedRels,
-         meta: {},
-       });
-     }
-   }
-
-   const enrichedCda: EnrichedCdaDocument = {
-     version: "1.0.0",
-     generated_at: new Date().toISOString(),
-     source_files: {
-       cda: cdaPath,
-       lexicon: lexiconPath,
-     },
-     entries: cdaEntries,
-     stats: {
-       total_entries: cdaEntries.length,
-       entries_with_tags: cdaEntries.filter((e) => e.explicit_tags.length > 0)
-         .length,
-       total_explicit_tags: cdaEntries.reduce(
-         (sum, e) => sum + e.explicit_tags.length,
-         0,
-       ),
-       total_keywords_extracted: cdaEntries.reduce(
-         (sum, e) => sum + e.extracted_keywords.length,
-         0,
-       ),
-       total_candidate_relationships: cdaEntries.reduce(
-         (sum, e) => sum + e.candidate_relationships.length,
-         0,
-       ),
-       total_validated_relationships: cdaEntries.reduce(
-         (sum, e) => sum + e.validated_relationships.length,
-         0,
-       ),
-     },
-   };
-
-   console.log(` ✅ ${cdaEntries.length} directives enriched`);
-   console.log(
-     ` ✅ ${enrichedCda.stats.total_keywords_extracted} keywords extracted`,
-   );
-   console.log(
-     ` ✅ ${enrichedCda.stats.total_candidate_relationships} candidate relationships generated`,
-   );
-   console.log(
-     ` ✅ ${enrichedCda.stats.total_validated_relationships} relationships validated`,
-   );
-
-   console.log(
-     ` ✅ ${enrichedCda.stats.total_validated_relationships} relationships validated`,
-   );
-   console.log(
-     ` ✨ ${totalSemanticRels} SWL (Semantic Soft Links) discovered`,
-   );
-
-   // Write output
-   console.log("\n💾 Writing enriched artifacts...");
-   const outputDir = join(root, ".resonance", "artifacts");
-   await Bun.write(
-     join(outputDir, "lexicon-enriched.json"),
-     JSON.stringify(enrichedLexicon, null, 2),
-   );
-   await Bun.write(
-     join(outputDir, "cda-enriched.json"),
-     JSON.stringify(enrichedCda, null, 2),
-   );
-
-   console.log(` ✅ Lexicon: .resonance/artifacts/lexicon-enriched.json`);
-   console.log(` ✅ CDA: .resonance/artifacts/cda-enriched.json`);
-
-   console.log(`\n${"═".repeat(60)}`);
-   console.log("✅ Transformation Complete");
- }
-
- main().catch(console.error);
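The heart of the removed `transform/cda.ts` above is its keyword-to-concept matcher: a title hit scores 0.85, an alias hit 0.75, a bare keyword hit 0.5, and only the strongest candidate per target concept survives deduplication. Note that the original ran the alias assignment after the title assignment, so an alias match (0.75) quietly overrode a stronger title match (0.85). A minimal self-contained sketch of the intended tiering, with types simplified from the deleted `enriched-cda` definitions (illustrative only, not part of any published version):

```typescript
// Sketch of the candidate-edge scoring from the deleted pipeline.
// Tiering here is title > alias > keyword; the original file's statement
// order let an alias match (0.75) override a title match (0.85).
type Candidate = { target: string; confidence: number; source: string };

interface Concept {
  id: string;
  title: string;
  aliases: string[];
  extracted_keywords: string[];
}

function matchKeywordsToConcepts(
  keywords: string[],
  concepts: Concept[],
): Candidate[] {
  // One edge per target concept; highest confidence wins.
  const best = new Map<string, Candidate>();
  for (const keyword of keywords) {
    for (const c of concepts) {
      let confidence = 0;
      if (c.title.toLowerCase().includes(keyword)) confidence = 0.85;
      else if (c.aliases.some((a) => a.toLowerCase().includes(keyword)))
        confidence = 0.75;
      else if (c.extracted_keywords.includes(keyword)) confidence = 0.5;
      if (confidence === 0) continue;

      const existing = best.get(c.id);
      if (!existing || confidence > existing.confidence) {
        best.set(c.id, { target: c.id, confidence, source: "keyword_match" });
      }
    }
  }
  return [...best.values()];
}

// Example: a title hit (0.85) beats a keyword hit (0.5) on the same concept.
const concepts = [
  { id: "zero-magic", title: "Zero Magic", aliases: [], extracted_keywords: ["stack"] },
];
console.log(matchKeywordsToConcepts(["magic", "stack"], concepts));
// → [{ target: "zero-magic", confidence: 0.85, source: "keyword_match" }]
```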
package/src/utils/EnvironmentVerifier.ts DELETED
@@ -1,67 +0,0 @@
- import { existsSync } from "node:fs";
- import { resolve } from "node:path";
- import settings from "../../polyvis.settings.json";
-
- /**
-  * EnvironmentVerifier
-  *
-  * Responsible for verifying that the runtime environment matches the
-  * expectations set in polyvis.settings.json.
-  *
-  * Usage:
-  * await EnvironmentVerifier.verifyOrExit();
-  */
- export const EnvironmentVerifier = {
-   async verifyOrExit(): Promise<void> {
-     const errors: string[] = [];
-     const cwd = process.cwd();
-
-     console.error(`🛡️ [Env] Verifying filesystem context in: ${cwd}`);
-
-     // 1. Verify Database Directory
-     const dbPath = settings.paths.database.resonance;
-     const dbDir = resolve(cwd, dbPath, "..");
-     if (!existsSync(dbDir)) {
-       errors.push(`Database directory missing: ${dbDir}`);
-     }
-
-     // 2. Verify Source Directories
-     // We iterate over the 'sources' config to ensure target folders exist
-     const experienceSources = settings.paths.sources.experience;
-     for (const source of experienceSources) {
-       const absPath = resolve(cwd, source.path);
-       if (!existsSync(absPath)) {
-         console.warn(
-           ` ⚠️ Optional source directory missing: ${source.path} (created automatically by some tools, but worth noting)`,
-         );
-         // We don't hard fail on standard folders, as they might be empty/missing in a fresh repo.
-         // But specifically for 'docs' or critical ones we might want to be stricter.
-         // For now, we WARN.
-       }
-     }
-
-     // 3. Verify Static Assets (Lexicon/CDA)
-     const lexiconPath = resolve(cwd, settings.paths.sources.persona.lexicon);
-     if (!existsSync(lexiconPath)) {
-       errors.push(`Critical Artifact missing: Lexicon (${lexiconPath})`);
-     }
-
-     const cdaPath = resolve(cwd, settings.paths.sources.persona.cda);
-     if (!existsSync(cdaPath)) {
-       errors.push(`Critical Artifact missing: CDA (${cdaPath})`);
-     }
-
-     if (errors.length > 0) {
-       console.error("\n❌ Environment Verification Failed:");
-       errors.forEach((e) => {
-         console.error(` - ${e}`);
-       });
-       console.error(
-         "\nPlease ensure you are running from the project root and all assets are present.",
-       );
-       process.exit(1);
-     }
-
-     console.error(" ✅ Environment Verified.");
-   },
- };
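The useful pattern in this deleted verifier is collect-then-fail: gather every missing-path error before printing and exiting once, so the user sees all problems in a single run rather than fixing them one at a time. A stripped-down sketch of that pattern in isolation (the path list is illustrative, not taken from `polyvis.settings.json`):

```typescript
import { existsSync } from "node:fs";
import { resolve } from "node:path";

// Collect-then-fail: report every missing path at once, then exit.
function verifyOrExit(requiredPaths: string[]): void {
  const errors = requiredPaths
    .map((p) => resolve(process.cwd(), p))
    .filter((abs) => !existsSync(abs))
    .map((abs) => `Required path missing: ${abs}`);

  if (errors.length > 0) {
    console.error("❌ Environment Verification Failed:");
    for (const e of errors) console.error(`  - ${e}`);
    console.error("\nRun from the project root with all assets present.");
    process.exit(1);
  }
  console.error("✅ Environment Verified.");
}

// Hypothetical paths, for illustration only:
verifyOrExit([".resonance", "persona/lexicon.json", "persona/cda.json"]);
```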
package/substack/substack-playbook-1.md DELETED
@@ -1,95 +0,0 @@
- This is an exciting moment. Launching a Substack is effectively "shipping" your philosophy, not just your code.
-
- Based on my analysis of the **PolyVis repository**—specifically the focus on "Structural Information Engineering," "Zero Magic" design, and the "Ctx" persona—your project has a very distinct voice. It is rigorous, slightly contrarian (rejecting modern complexity), and deeply focused on **order**.
-
- Here is a strategy and a draft for your first post.
-
- ### The Strategy: "The Manifesto" Approach
-
- Substack readers love a strong point of view. Since PolyVis is about **reducing entropy** and **finding structure**, your first post should define the *problem* (AI creates a mess/hairball) and your *solution* (PolyVis/Graphs).
-
- **Key Themes from your Repo to highlight:**
- 1. **"Stuff into Things"**: The core act of Mentation.
- 2. **The "Hairball" Problem**: Vector databases give you vibes; Graphs give you maps.
- 3. **Low-Tech Defence**: Why you built this with Alpine/Bun/SQLite instead of a heavy React stack.
-
- ---
-
- ### First Draft: PolyVis Substack Post #1
-
- **Headline:** Don't Just Search the Machine. Map It.
- **Subtitle:** Why I built PolyVis to turn AI "stuff" into structural "things."
-
- ***
-
- We are drowning in context.
-
- Every time we interact with an LLM, we generate a stream of brilliance, hallucinations, code snippets, and philosophical debates. But the moment the chat window closes, that intelligence evaporates. It becomes a "hairball"—a tangled mess of unstructured text.
-
- We have access to the most powerful intelligence in human history, but we lack the map to navigate it.
-
- **Hello. I am pjsvis.**
-
- I am a builder focused on **Structural Information Engineering**. For the past few months, I have been working on a project called **PolyVis**.
-
- ### The Problem: The "Vector Bottleneck"
-
- The current industry obsession is RAG (Retrieval Augmented Generation). We take our documents, chop them into chunks, throw them into a vector database, and hope the AI can "fish" out the right answer based on similarity.
-
- It works... mostly. But it’s probabilistic. It guesses. It gives you "fuzzy vibes" rather than hard facts.
-
- I wanted something deterministic. I didn't want to just *search* my knowledge base; I wanted to *see* it. I wanted to see how a "Directive" connects to a "Heuristic," and how that Heuristic informs a specific piece of "Code."
-
- ### Enter PolyVis
-
- PolyVis is a **Neuro-Symbolic Graph Visualizer**.
-
- That sounds fancy, but the philosophy is simple: **Turn "Stuff" (unstructured inputs) into "Things" (coherent, connected objects).**
-
- It is a tool that sits on top of my AI interactions. It creates a "Psychogeography" of the code—a map where I can see the highways, the ghettos, and the fortresses of my project.
-
-
- ### The "Zero Magic" Philosophy
-
- PolyVis isn't just about *what* it does, but *how* it's built.
-
- In an era of bloat, PolyVis follows the **Principle of Effective Low-Tech Defence**.
- * **No Build Step:** It runs on standard web technologies.
- * **No React:** It uses Alpine.js for lightweight reactivity.
- * **Local First:** It runs on a local SQLite database (via WASM) because your data belongs to you, not a cloud provider.
-
- I call this **"Zero Magic."** If I can't explain how a pixel got to the screen without referencing a black-box library, it doesn't belong in the stack.
-
- ### What to Expect from This Substack
-
- I am using this space to document the engineering and philosophy behind PolyVis. Future posts will cover:
-
- 1. **The Hejlsberg Inversion:** Why you should ask AI to write programs, not just answers.
- 2. **The Tailwind Paradox:** Why AI agents prefer "ugly" utility classes over "clean" semantic CSS.
- 3. **Persona Engineering:** How to architect cognitive constraints to stop your AI from hallucinating.
- 4. **Playbooks:** Practical, copy-paste guides for "Low-Tech" web development.
-
- If you are an engineer, a knowledge architect, or just someone tired of the "black box" nature of modern AI, welcome aboard.
-
- Let’s reduce some entropy.
-
- — **pjsvis**
-
- ***
-
- ### Pre-Publishing Checklist (Substack Specifics)
-
- Based on general Substack best practices, here are a few settings you should tweak before hitting publish to match the "PolyVis" aesthetic:
-
- 1. **The "Welcome" Email:**
-    * *Default:* "Thanks for subscribing."
-    * *PolyVis Style:* Change this to something like: "Welcome to the structure. You have successfully lowered your input activation threshold."
- 2. **The "About" Page:**
-    * Paste the `CIP-1` definition from your `lexicon.md` here. Define yourself as "methodically exploring conceptual space."
- 3. **Visual Style:**
-    * Go to Settings -> Style.
-    * Choose "Mono" or "Clean" font settings.
-    * Use the hex code `#202124` (Your "Google Dark Gray") for accent colors if possible, to match your app's `theme.css`.
-
- ### Next Step for You
- Does this draft feel aligned with your voice? We can make it more "Grumpy" (cynical about modern tech) or more "Academic" (focusing on the Scottish Enlightenment roots) depending on your preference.
package/substack/substack-playbook-2.md DELETED
@@ -1,78 +0,0 @@
- This revised draft incorporates your specific narrative points ("The Bits," "Persona Engineering Origins," "The Artifact Stack") and adopts the "Fun/Academic/Experienced" tone. It positions you not just as a coder, but as a methodologist.
-
- ***
-
- ### **Title:** Don't Just Search the Machine. Map It.
- **Subtitle:** Moving from "Prompt Engineering" to "Structural Information Engineering."
-
- We have access to the most powerful intelligence in human history, and all we seem to be interested in is the size of its bits.
-
- *(And yes, that is a pun. Make of it what you will.)*
-
- We are drowning in context. Every time we open a chat window, we generate a stream of brilliance, hallucinations, code snippets, and philosophical debates. But the moment the window closes, that intelligence evaporates. It becomes a "hairball"—a tangled mess of unstructured text.
-
- We have built the Oracle, but we forgot to build the Library.
-
- **Hello. I am pjsvis.**
-
- I am a builder focused on **Structural Information Engineering**. For the past few months, I have been working on a project called **PolyVis** to solve a specific problem: **How do we engineer a "Persona" that actually remembers who it is?**
-
- ### The Origin: Taming the Sycophant
-
- My journey into **Persona Engineering** didn't start with a grand vision. It started with annoyance.
-
- Like many of you, I was tired of the default "Substrate" behavior of modern LLMs. You know the type: over-effusive, sycophantic, eager to please, and prone to making things up just to keep the conversation flowing. I didn't want a cheerleader; I wanted a colleague.
-
- I realized that "Prompt Engineering" was too shallow. To fix this, I needed to architect a **Persona Stack**—a set of constraints (Directives, Heuristics, Lexicons) that forced the AI to behave with "Mentational Humility."
-
- As we overcame those initial annoying tendencies, a realization hit: **Substrates are more aligned with our intentions when they have a persona that is aligned with our values and methodologies.**
-
- When I treated the AI like a Junior Engineer with a very specific "Employee Handbook," the hallucinations dropped, and the utility skyrocketed.
-
- ### Agile for the AI Age
-
- As we moved into **AI-Assisted Coding**, we found that traditional Agile didn't quite fit. You can't just throw user stories at an LLM and expect clean code. You need a new set of artifacts to manage the "fuzzy" nature of probabilistic intelligence.
-
- We developed a process rooted in **Context-Driven Design**, relying on four key artifacts:
-
- 1. **The Brief:** A "Weaponised Happy Path" for a task. These tend to be ephemeral; they often don't survive contact with the reality of development, but they set the vector.
- 2. **The `_CURRENT_TASK.md`:** A safety net living in the root of the project. It tracks exactly where we are, protecting us from the "context rot" that happens when you step away for coffee (or sleep).
- 3. **The Debrief:** Formed strictly of **Accomplishments**, **Problems**, and **Lessons Learned**. This is where we admit what broke.
- 4. **The Playbook:** The destination for those lessons. We moved from a monolith of "Notes" to categorized Playbooks (e.g., `css-playbook.md`, `bun-playbook.md`). This is our crystallized wisdom.
-
- This isn't just "coding." It's a new form of project management where the "team" is synthetic, but the discipline is human.
-
- ### Enter PolyVis
-
- So, what is PolyVis?
-
- It is the visualizer for this methodology. It is a **Neuro-Symbolic Graph Explorer** that takes all that "Stuff" (Directives, Heuristics, Playbooks) and turns them into "Things" (Nodes and Edges).
-
- The current industry obsession is RAG (Retrieval Augmented Generation) and Vector Databases. They work... mostly. But they give you "fuzzy vibes" based on similarity.
-
- I wanted something deterministic. I wanted to see the **Structure**. I wanted to see how a specific **Heuristic** (like *OH-104: Local Memory Block Protocol*) connects to a specific **Piece of Code**.
-
-
-
- ### The "Zero Magic" Philosophy
-
- PolyVis is built on the **Principle of Effective Low-Tech Defence**.
- * **No Build Step:** It runs on standard web technologies.
- * **No React:** It uses **Alpine.js** for lightweight reactivity.
- * **Local First:** It runs on a local SQLite database (via WASM).
-
- I call this **"Zero Magic."** If I can't explain how a pixel got to the screen without referencing a black-box library, it doesn't belong in the stack.
-
- ### What's Next?
-
- I am using this Substack to document the engineering, the philosophy, and the "bon mots" discovered while building PolyVis. Future posts will cover:
-
- 1. **The Hejlsberg Inversion:** Why you should ask AI to write programs to do jobs, rather than asking it to do the job itself.
- 2. **The Tailwind Paradox:** Why AI agents prefer "ugly" utility classes over "clean" semantic CSS.
- 3. **The Bestiary of Substrate Tendencies:** A catalogue of the weird behaviors we've identified in the wild.
-
- If you are an engineer, a knowledge architect, or just someone tired of the "black box" nature of modern AI, welcome aboard.
-
- Let’s reduce some entropy.
-
- — **pjsvis**
package/tasks/ui-investigation.md DELETED
@@ -1,26 +0,0 @@
- # UI Investigation & Snag List
-
- **Status**: Open
- **Owner**: User
- **Created**: 2025-12-31
-
- ## Context
- A list of UI/UX issues, edge cases, and layout bugs discovered after the "Terminal Brutalist" overhaul.
-
- ## Issues to Investigate
-
- ### Investigation Log
- - [x] **Home Page:** Ensure "Polyvis" in header is visible. (Fixed: Added `.nav-brand` class).
- - [x] **Home Page:** Center content and remove horizontal line.
- - [x] **Home Page:** Ensure font consistency.
- - [x] **Home Page:** Remove Graph Explorer arrow and dotted line.
- - [x] **Home Page:** Fix button hover contrast (Fixed: Semantic Inversion).
- - [x] **Home Page:** Reorder buttons (Docs, Graph, About).
- - [x] **Home Page:** "Vertical Monolith" Layout (5:8 Ratio).
- - [x] **Home Page:** Uniform Gaps (Integer Scale).
- - [x] **Docs:** Fix `showSource` error.
- - [x] **All Pages:** Update CSS Playbooks.
-
- ## Outstanding
- - [ ] **Mobile:** Ensure workable on touch devices.
- - [ ] **About Page:** Implement "Back" button and align visual system (currently FAFCAS Core specific).
package/test-db DELETED
Binary file
package/test-db-shm DELETED
Binary file
package/test-db-wal DELETED
File without changes