modular-studio 0.2.4 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/README.md +41 -261
  2. package/dist/assets/{graphPopulator-C6jg83nL.js → graphPopulator-CKGQeaUP.js} +1 -1
  3. package/dist/assets/{index-CXhIX28x.js → index-DD78gpRw.js} +193 -165
  4. package/dist/assets/index-dSu60FDQ.css +1 -0
  5. package/dist/assets/{jszip.min-BlpRodxc.js → jszip.min-aECPDEdt.js} +1 -1
  6. package/dist/index.html +2 -2
  7. package/dist-server/server/index.d.ts.map +1 -1
  8. package/dist-server/server/index.js +2 -0
  9. package/dist-server/server/index.js.map +1 -1
  10. package/dist-server/server/routes/conversations.d.ts +6 -0
  11. package/dist-server/server/routes/conversations.d.ts.map +1 -0
  12. package/dist-server/server/routes/conversations.js +111 -0
  13. package/dist-server/server/routes/conversations.js.map +1 -0
  14. package/dist-server/server/routes/embeddings.d.ts.map +1 -1
  15. package/dist-server/server/routes/embeddings.js +18 -8
  16. package/dist-server/server/routes/embeddings.js.map +1 -1
  17. package/dist-server/server/routes/repo-index.d.ts.map +1 -1
  18. package/dist-server/server/routes/repo-index.js +4 -3
  19. package/dist-server/server/routes/repo-index.js.map +1 -1
  20. package/dist-server/server/services/__tests__/embeddingService.test.js +13 -5
  21. package/dist-server/server/services/__tests__/embeddingService.test.js.map +1 -1
  22. package/dist-server/server/services/embeddingService.d.ts +3 -2
  23. package/dist-server/server/services/embeddingService.d.ts.map +1 -1
  24. package/dist-server/server/services/embeddingService.js +14 -1
  25. package/dist-server/server/services/embeddingService.js.map +1 -1
  26. package/dist-server/server/services/sqliteStore.d.ts +22 -0
  27. package/dist-server/server/services/sqliteStore.d.ts.map +1 -0
  28. package/dist-server/server/services/sqliteStore.js +101 -0
  29. package/dist-server/server/services/sqliteStore.js.map +1 -0
  30. package/dist-server/tsconfig.server.tsbuildinfo +1 -1
  31. package/package.json +3 -2
  32. package/dist/assets/index-CeNF0r-K.css +0 -1
package/README.md CHANGED
@@ -1,261 +1,41 @@
1
- # Modular Studio
2
-
3
- > The Context Engineering Layer — an IDE for designing AI agent knowledge pipelines.
4
-
5
- [![License: Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0)
6
- [![Tests: 646](https://img.shields.io/badge/tests-646_passing-green.svg)]()
7
- [![Version: 0.2.0](https://img.shields.io/badge/version-0.2.0-blue.svg)]()
8
-
9
- ## What is this?
10
-
11
- Modular Studio is a **context engineering IDE** — a visual workspace for building AI agents through structured knowledge pipelines rather than monolithic prompts.
12
-
13
- Instead of writing one massive system prompt and hoping for the best, you design a pipeline:
14
-
15
- ```
16
- Sources → Tree Index → Budget Allocator → Agent Navigator → Compress → Context Assembly → LLM
17
- ```
18
-
19
- Every source (markdown files, Notion pages, HubSpot records, Slack threads, GitHub repos) becomes a **tree of headings** that an agent navigates per-task, selecting branches at the right depth. An epistemic budget allocator ensures ground-truth sources get priority over hypotheses. The result is dense, relevant context assembled within a token budget.
20
-
21
- ## Architecture
22
-
23
- ```
24
- ┌─────────────────────────────────────────────────────────────────────────┐
25
- │ Modular Studio IDE │
26
- │ │
27
- │ ┌──────────────┐ ┌──────────────────┐ ┌────────────────────────┐ │
28
- │ │ Sources Panel │ │ Agent Builder │ │ Test & Export Panel │ │
29
- │ │ │ │ │ │ │ │
30
- │ │ Knowledge │ │ Identity │ │ Chat Testing │ │
31
- │ │ MCP Servers │ │ Instructions │ │ Execution Traces │ │
32
- │ │ Skills │ │ Constraints │ │ Export (6 formats) │ │
33
- │ │ Memory │ │ Workflow │ │ Security Badges │ │
34
- │ │ Fact Insights│ │ Tools │ │ │ │
35
- │ └──────┬───────┘ └────────┬─────────┘ └──────────┬─────────────┘ │
36
- │ │ │ │ │
37
- │ ───────┴────────────────────┴─────────────────────────┴──────────────
38
- │ │
39
- │ ┌─────────────────── Context Engineering Pipeline ──────────────────┐ │
40
- │ │ │ │
41
- │ │ Sources ──► Tree ──► Budget ──► Agent ──► Compress │ │
42
- │ │ Index Allocator Navigator │ │
43
- │ │ (4 connectors) (epistemic (LLM-driven (semantic │ │
44
- │ │ weights) branch sel.) dedup) │ │
45
- │ │ │ │ │ │
46
- │ │ ▼ ▼ │ │
47
- │ │ Contradiction Corrective Re-Nav │ │
48
- │ │ Detection + HyDE │ │
49
- │ │ │ │ │ │
50
- │ │ └──────┬───────┘ │ │
51
- │ │ ▼ │ │
52
- │ │ Attention-Ordered │ │
53
- │ │ Context Assembly │ │
54
- │ └───────────────────────────────────────────────────────────────────┘ │
55
- │ │
56
- │ ┌─────────────────── Memory System ─────────────────────────────────┐ │
57
- │ │ Fact Extraction ──► Three-Factor Retrieval ──► Consolidation │ │
58
- │ │ (pattern + LLM) (relevance + recency (prune, merge, │ │
59
- │ │ + importance) promote) │ │
60
- │ └───────────────────────────────────────────────────────────────────┘ │
61
- │ │
62
- │ ┌─────────────────── Team Runtime ──────────────────────────────────┐ │
63
- │ │ POST /api/runtime/team ──► Parallel Agents ──► Shared Facts │ │
64
- │ │ (SSE streaming) (Claude Agent SDK) (deduplicated) │ │
65
- │ └───────────────────────────────────────────────────────────────────┘ │
66
- │ │
67
- │ Backend: Express 5 + TypeScript Frontend: React 19 + Zustand │
68
- │ Port 4800 Port 5173 (dev) │
69
- └─────────────────────────────────────────────────────────────────────────┘
70
- ```
71
-
72
- ## Features
73
-
74
- ### Pipeline
75
- - **Epistemic Budget Allocator** — Token budgets by knowledge type (ground-truth 30%, evidence 20%, framework 15%, guideline 15%, signal 12%, hypothesis 8%)
76
- - **4 Source Connectors** — Markdown, Structured, Chronological, Flat — normalize any source to a navigable tree
77
- - **Agent Navigator** — LLM reads tree headlines and selects relevant branches per task
78
- - **Attention-Aware Ordering** — Sources reordered to exploit LLM primacy/recency attention bias
79
- - **Contradiction Detection** — Heuristic entity extraction + epistemic priority resolution, no LLM calls
80
- - **Context Compression** — Semantic dedup, filler removal, code compression (inspired by [rtk-ai/rtk](https://github.com/rtk-ai/rtk))
81
- - **Corrective Re-Navigation** — Critique pass identifies gaps, re-navigates with 20% budget cap
82
- - **HyDE Navigation** — Hypothetical ideal answer improves heading matching for complex queries
83
-
84
- ### Memory
85
- - **Fact Extraction** — Pattern-based + LLM-based fact extraction with epistemic typing
86
- - **Three-Factor Retrieval** — `score = relevance + 0.5×recency + 0.5×importance`
87
- - **Ebbinghaus Decay** — Strength = importance × e^(-days/halfLife), half-life extends with access frequency
88
- - **Consolidation** — Prune weak facts, merge similar ones, promote validated hypotheses
89
-
90
- ### Runtime
91
- - **Team Execution** — Multi-agent parallel runs with SSE streaming and per-agent system prompts
92
- - **Cross-Agent Facts** — Extracted facts shared across team members, deduplicated by confidence
93
- - **Claude Agent SDK** — Full integration with built-in tools (Read, Edit, Bash, Grep, Glob, WebSearch, WebFetch)
94
- - **Maximizable Results** — Full-screen overlay for reading long agent outputs
95
- - **Contrastive Retrieval** — For analytical queries, automatically pulls both supporting AND contradicting evidence
96
-
97
- ### IDE
98
- - **Knowledge Type System** — 6 types with classification rules and visual color coding
99
- - **MCP Server Registry** — 100+ pre-configured servers with live health probes
100
- - **Skills Marketplace** — Searchable catalog with security badges (GEN, Socket, Snyk)
101
- - **Agent Directory Format** — Primary export: ZIP with agent.yaml + SOUL.md + INSTRUCTIONS.md + TOOLS.md + KNOWLEDGE.md + MEMORY.md. Git-friendly, human-readable, portable.
102
- - **Universal Export** — Claude Code, Amp, Codex, Vibe Kanban, OpenClaw, Generic JSON
103
- - **Execution Traces** — Timeline of LLM calls, tool invocations, retrievals
104
- - **Automatic Versioning** — Semantic diffs on every agent change
105
- - **AI-Powered Generation** — Generate full agent configs from plain-language descriptions
106
-
107
- ## Quick Start
108
-
109
- ```bash
110
- # Run directly
111
- npx modular-studio
112
-
113
- # Or install globally
114
- npm install -g modular-studio
115
- modular-studio
116
-
117
- # Development
118
- git clone https://github.com/VictorGjn/modular-patchbay.git
119
- cd modular-patchbay
120
- npm install --legacy-peer-deps
121
- npm run dev # Frontend on :5173, backend on :4800
122
- npm run build:all # Full production build
123
- npm test # 646 tests
124
- ```
125
-
126
- ### First steps
127
-
128
- 1. Add an LLM provider (Anthropic, OpenAI, OpenRouter, or Google)
129
- 2. Create an agent — set identity, persona, objectives, constraints
130
- 3. Add knowledge sources — assign knowledge types (ground-truth, evidence, framework, etc.)
131
- 4. Test with the chat panel — the pipeline assembles context automatically
132
- 5. Export to your preferred format (Claude Code, Amp, Codex, etc.)
133
-
134
- ## Agent Definition Format
135
-
136
- ```yaml
137
- version: '1.0'
138
- kind: agent
139
-
140
- identity:
141
- name: fleet-monitor
142
- display_name: Fleet Monitor
143
- description: Real-time vessel performance monitoring agent
144
- tags: ['maritime', 'fleet', 'monitoring']
145
-
146
- instructions:
147
- persona: |
148
- You are a maritime fleet performance analyst.
149
- Monitor vessel metrics and flag anomalies.
150
- constraints:
151
- - Never recommend speed changes without fuel impact analysis
152
- - Always include CII rating context
153
- objectives:
154
- primary: Detect performance anomalies early
155
- success_criteria:
156
- - Flag fuel overconsumption within 4 hours
157
- - Correlate weather impact on route efficiency
158
-
159
- context:
160
- knowledge:
161
- - type: file
162
- ref: fleet-specs.md
163
- knowledge_type: ground-truth
164
- depth: 0
165
- - type: structured
166
- ref: hubspot://deals
167
- knowledge_type: signal
168
- depth: 2
169
-
170
- mcp_servers:
171
- - name: github
172
- transport: stdio
173
- command: npx @modelcontextprotocol/server-github
174
-
175
- workflow:
176
- steps:
177
- - id: ingest
178
- action: Read latest vessel telemetry
179
- condition: always
180
- - id: analyze
181
- action: Compare against baseline performance
182
- - id: alert
183
- action: Flag anomalies with severity and recommended actions
184
- ```
185
-
186
- ## Export Targets
187
-
188
- | Target | Format | Use case |
189
- |--------|--------|----------|
190
- | Claude Code | `AGENTS.md` | Direct CLI integration |
191
- | Amp | YAML | Sourcegraph agent definitions |
192
- | Codex | JSON | OpenAI-compatible configs |
193
- | Vibe Kanban | YAML | BloopAI task automation |
194
- | OpenClaw | YAML | Open-source agent runtime |
195
- | Generic | JSON | Custom integrations |
196
-
197
- ## Tech Stack
198
-
199
- - **Frontend**: React 18 + TypeScript + Vite
200
- - **Styling**: Tailwind CSS + custom design system
201
- - **State**: Zustand (9 stores with persist middleware)
202
- - **Backend**: Express + TypeScript (LLM proxy, MCP manager, team runner)
203
- - **Agent SDK**: @anthropic-ai/claude-agent-sdk
204
- - **Testing**: Vitest (unit) + Playwright (E2E) — 646 tests
205
- - **Fonts**: Geist Sans (body) + Geist Mono (labels)
206
-
207
- ## Documentation
208
-
209
- | Document | Description |
210
- |---|---|
211
- | [Release Notes v0.2.0](docs/RELEASE-v0.2.0.md) | What's new in v0.2.0 |
212
- | [Usage Guide](docs/USAGE-GUIDE.md) | Comprehensive usage guide |
213
- | [Hurricane Use Case](docs/USE-CASE-HURRICANE.md) | End-to-end maritime hurricane response validation |
214
- | [Dogfood Review](docs/DOGFOOD-REVIEW.md) | Can Modular Studio improve itself? |
215
- | [Agent Architecture](docs/AGENT-ARCHITECTURE.md) | Platform design and agent definition format |
216
- | [Context Engineering Vision](docs/CONTEXT-ENGINEERING-VISION.md) | Product vision and value proposition |
217
- | [Knowledge Pipeline v2](docs/KNOWLEDGE-PIPELINE-V2.md) | Pipeline architecture spec |
218
- | [Memory System](docs/MEMORY-SYSTEM-ANALYSIS.md) | Memory management design |
219
-
220
- ## Contributing
221
-
222
- We use conventional commits:
223
-
224
- ```
225
- feat: add embedding-based navigation
226
- fix: budget allocator overflow on empty sources
227
- docs: add hurricane use case validation
228
- refactor: simplify depth filter to budget multiplier
229
- test: add contradiction detector edge cases
230
- ```
231
-
232
- ### Development workflow
233
-
234
- 1. Fork and clone the repository
235
- 2. `npm install --legacy-peer-deps`
236
- 3. `npm run dev` — starts frontend + backend
237
- 4. Make changes, write tests
238
- 5. `npm test` — ensure all tests pass
239
- 6. `npx tsc --noEmit` — verify TypeScript compiles
240
- 7. Submit a PR with conventional commit title
241
-
242
- ### Code quality
243
-
244
- - No dead code — if it's not used, delete it
245
- - DRY + KISS — prefer simplicity over abstraction
246
- - Continuous refactoring — leave code better than you found it
247
- - Squash-and-merge for PRs
248
-
249
- ## Acknowledgments
250
-
251
- - [rtk-ai/rtk](https://github.com/rtk-ai/rtk) — Rust Token Killer. Our context compression module is inspired by RTK's approach to minimizing LLM token consumption.
252
- - [ReactFlow](https://reactflow.dev) — Used for the visual canvas mode.
253
- - [Anthropic](https://anthropic.com) — Claude Agent SDK for backend agent execution.
254
-
255
- ## License
256
-
257
- Apache License 2.0 — see [LICENSE](LICENSE) for details.
258
-
259
- ---
260
-
261
- *Context engineering is the layer every AI platform needs. Modular Studio is that layer.*
1
+ # Modular Studio
2
+
3
+ **The context engineering IDE for AI agents**
4
+
5
+ Build knowledge pipelines, not just prompts. Design AI systems that understand your codebase through tree-aware retrieval and deep contextual intelligence.
6
+
7
+ ## Quick Start
8
+
9
+ ```bash
10
+ npx modular-studio
11
+ ```
12
+
13
+ Open [localhost:4800](http://localhost:4800) and start building.
14
+
15
+ ## Key Features
16
+
17
+ - **🌳 Tree-Aware Retrieval** — Semantic search that understands code structure
18
+ - **🔌 150+ MCP Connectors** — Integrate with any system or service
19
+ - **⚡ Built-in Tools** — GitHub indexing, file management, embeddings out of the box
20
+ - **📊 Pipeline Visibility** — See exactly how context flows through your system
21
+ - **🤖 Multi-Agent** — Coordinate multiple AI agents with shared knowledge
22
+ - **📤 Export Anywhere** — Generate prompts for Claude, ChatGPT, or any LLM
23
+
24
+ ## How It Compares
25
+
26
+ | | **Traditional RAG** | **Prompt Engineering** | **Modular Studio** |
27
+ |---|---|---|---|
28
+ | **Context Quality** | Document chunks | Manual examples | Tree-aware semantic |
29
+ | **Knowledge Integration** | Static embeddings | Copy-paste | Live repository sync |
30
+ | **Collaboration** | Team wikis | Shared prompts | Version-controlled pipelines |
31
+ | **Debugging** | Black box | Trial and error | Visual pipeline inspection |
32
+ | **Scalability** | Database scaling | Prompt size limits | Distributed knowledge graphs |
33
+
34
+ ## Requirements
35
+
36
+ - **Node 18+**
37
+ - **API Key** (Anthropic, OpenAI, or compatible)
38
+
39
+ ## License
40
+
41
+ Apache 2.0 — Free for commercial and open source use.
@@ -1 +1 @@
1
- import{u as l}from"./index-CXhIX28x.js";function c(o,n){const t=[],a=[];t.push({name:o,entityType:"repository",observations:[`Tech stack: ${n.stack.join(", ")}`,`Total files: ${n.totalFiles}`,`Total tokens: ${n.totalTokens}`,...(n.conventions??[]).map(e=>`Convention: ${e}`)]});for(const e of n.features){const s=`${o}/${e.name}`;t.push({name:s,entityType:"feature",observations:[`Key files: ${e.keyFiles.join(", ")}`,...(e.stores??[]).map(i=>`Store: ${i}`),...(e.routes??[]).map(i=>`Route: ${i}`),...e.componentCount?[`Components: ${e.componentCount}`]:[]]}),a.push({from:o,to:s,relationType:"has_feature"});for(const i of e.keyFiles.slice(0,5)){const r=`${o}:${i}`;t.push({name:r,entityType:"file",observations:[`Path: ${i}`,`Feature: ${e.name}`]}),a.push({from:s,to:r,relationType:"contains_file"})}}for(const e of n.modules??[]){const s=`${o}/module:${e.name}`;t.push({name:s,entityType:"module",observations:[`Files: ${e.files.length}`,`Key files: ${e.files.slice(0,5).join(", ")}`]}),a.push({from:o,to:s,relationType:"has_module"})}return{entities:t,relations:a}}async function f(o,n){const t=l.getState(),a=t.servers.find(i=>i.status==="connected"&&i.tools.some(r=>r.name==="create_entities"));if(!a)return;const{entities:e,relations:s}=c(o,n);try{e.length>0&&await t.callTool(a.id,"create_entities",{entities:e}),s.length>0&&await t.callTool(a.id,"create_relations",{relations:s})}catch{}}async function m(o){for(const{name:n,scan:t}of o)await f(n,t)}export{m as populateGraphFromMultiScan,f as populateGraphFromScan};
1
// Graph populator: mirrors repository scan results into a connected MCP
// knowledge-graph server (one exposing a `create_entities` tool).
// NOTE(review): this is a generated dist bundle; the canonical source lives
// elsewhere in the repository — confirm before diverging from it.
import { u as mcpStore } from "./index-DD78gpRw.js";

/**
 * Build knowledge-graph entities and relations from one repository scan.
 * @param {string} repoName - name used for the root repository entity.
 * @param {object} scan - scan result with `stack`, `totalFiles`,
 *   `totalTokens`, `features`, and optional `conventions` / `modules`.
 * @returns {{entities: object[], relations: object[]}}
 */
function buildGraph(repoName, scan) {
  const entities = [];
  const relations = [];

  entities.push({
    name: repoName,
    entityType: "repository",
    observations: [
      `Tech stack: ${scan.stack.join(", ")}`,
      `Total files: ${scan.totalFiles}`,
      `Total tokens: ${scan.totalTokens}`,
      ...(scan.conventions ?? []).map((conv) => `Convention: ${conv}`),
    ],
  });

  for (const feature of scan.features) {
    const featureName = `${repoName}/${feature.name}`;
    entities.push({
      name: featureName,
      entityType: "feature",
      observations: [
        `Key files: ${feature.keyFiles.join(", ")}`,
        ...(feature.stores ?? []).map((store) => `Store: ${store}`),
        ...(feature.routes ?? []).map((route) => `Route: ${route}`),
        // componentCount is optional; a falsy count adds no observation.
        ...(feature.componentCount ? [`Components: ${feature.componentCount}`] : []),
      ],
    });
    relations.push({ from: repoName, to: featureName, relationType: "has_feature" });

    // Only the first five key files become file entities.
    for (const filePath of feature.keyFiles.slice(0, 5)) {
      const fileEntityName = `${repoName}:${filePath}`;
      entities.push({
        name: fileEntityName,
        entityType: "file",
        observations: [`Path: ${filePath}`, `Feature: ${feature.name}`],
      });
      relations.push({ from: featureName, to: fileEntityName, relationType: "contains_file" });
    }
  }

  for (const mod of scan.modules ?? []) {
    const moduleEntityName = `${repoName}/module:${mod.name}`;
    entities.push({
      name: moduleEntityName,
      entityType: "module",
      observations: [
        `Files: ${mod.files.length}`,
        `Key files: ${mod.files.slice(0, 5).join(", ")}`,
      ],
    });
    relations.push({ from: repoName, to: moduleEntityName, relationType: "has_module" });
  }

  return { entities, relations };
}

/**
 * Push one repository scan into the first connected MCP server that offers
 * the `create_entities` tool. Silently returns when no such server is
 * connected; tool-call failures are deliberately swallowed (best-effort
 * sync that must never break the caller).
 * @param {string} repoName
 * @param {object} scan
 */
async function populateGraphFromScan(repoName, scan) {
  const state = mcpStore.getState();
  const server = state.servers.find(
    (srv) =>
      srv.status === "connected" &&
      srv.tools.some((tool) => tool.name === "create_entities"),
  );
  if (!server) return;

  const { entities, relations } = buildGraph(repoName, scan);
  try {
    if (entities.length > 0) {
      await state.callTool(server.id, "create_entities", { entities });
    }
    // A failed entity call above skips relation creation, as before.
    if (relations.length > 0) {
      await state.callTool(server.id, "create_relations", { relations });
    }
  } catch {
    // Best-effort: graph population errors are intentionally ignored.
  }
}

/**
 * Populate the graph sequentially for a list of `{ name, scan }` pairs.
 * @param {Array<{name: string, scan: object}>} scans
 */
async function populateGraphFromMultiScan(scans) {
  for (const { name, scan } of scans) {
    await populateGraphFromScan(name, scan);
  }
}

export { populateGraphFromMultiScan, populateGraphFromScan };