@vibes.diy/prompts 0.0.0-dev-fresh-data

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73) hide show
  1. package/LICENSE.md +232 -0
  2. package/README.md +29 -0
  3. package/catalog.d.ts +1 -0
  4. package/catalog.js +4 -0
  5. package/catalog.js.map +1 -0
  6. package/chat.d.ts +144 -0
  7. package/chat.js +2 -0
  8. package/chat.js.map +1 -0
  9. package/component-export-transforms.d.ts +12 -0
  10. package/component-export-transforms.js +32 -0
  11. package/component-export-transforms.js.map +1 -0
  12. package/component-transforms.d.ts +3 -0
  13. package/component-transforms.js +327 -0
  14. package/component-transforms.js.map +1 -0
  15. package/index.d.ts +11 -0
  16. package/index.js +12 -0
  17. package/index.js.map +1 -0
  18. package/json-docs.d.ts +21 -0
  19. package/json-docs.js +25 -0
  20. package/json-docs.js.map +1 -0
  21. package/llms/callai.d.ts +2 -0
  22. package/llms/callai.js +10 -0
  23. package/llms/callai.js.map +1 -0
  24. package/llms/callai.txt +455 -0
  25. package/llms/d3.d.ts +2 -0
  26. package/llms/d3.js +10 -0
  27. package/llms/d3.js.map +1 -0
  28. package/llms/d3.md +679 -0
  29. package/llms/fireproof.d.ts +2 -0
  30. package/llms/fireproof.js +10 -0
  31. package/llms/fireproof.js.map +1 -0
  32. package/llms/fireproof.txt +451 -0
  33. package/llms/image-gen.d.ts +2 -0
  34. package/llms/image-gen.js +10 -0
  35. package/llms/image-gen.js.map +1 -0
  36. package/llms/image-gen.txt +128 -0
  37. package/llms/index.d.ts +8 -0
  38. package/llms/index.js +21 -0
  39. package/llms/index.js.map +1 -0
  40. package/llms/three-js.d.ts +2 -0
  41. package/llms/three-js.js +10 -0
  42. package/llms/three-js.js.map +1 -0
  43. package/llms/three-js.md +2232 -0
  44. package/llms/types.d.ts +10 -0
  45. package/llms/types.js +2 -0
  46. package/llms/types.js.map +1 -0
  47. package/llms/web-audio.d.ts +2 -0
  48. package/llms/web-audio.js +9 -0
  49. package/llms/web-audio.js.map +1 -0
  50. package/llms/web-audio.txt +220 -0
  51. package/load-docs.d.ts +2 -0
  52. package/load-docs.js +17 -0
  53. package/load-docs.js.map +1 -0
  54. package/package.json +39 -0
  55. package/prompts.d.ts +43 -0
  56. package/prompts.js +315 -0
  57. package/prompts.js.map +1 -0
  58. package/segment-parser.d.ts +4 -0
  59. package/segment-parser.js +135 -0
  60. package/segment-parser.js.map +1 -0
  61. package/settings.d.ts +16 -0
  62. package/settings.js +2 -0
  63. package/settings.js.map +1 -0
  64. package/style-prompts.d.ts +7 -0
  65. package/style-prompts.js +63 -0
  66. package/style-prompts.js.map +1 -0
  67. package/tsconfig.json +21 -0
  68. package/txt-docs.d.ts +15 -0
  69. package/txt-docs.js +53 -0
  70. package/txt-docs.js.map +1 -0
  71. package/view-state.d.ts +17 -0
  72. package/view-state.js +2 -0
  73. package/view-state.js.map +1 -0
@@ -0,0 +1,10 @@
1
/**
 * One entry in the LLM documentation catalog: how a docs module is
 * identified/labeled and how its helper is imported into generated code.
 */
export interface LlmConfig {
  // Machine name of the docs module (e.g. "web-audio").
  name: string;
  // Human-readable label (e.g. "Web Audio API").
  label: string;
  // Module identifier used to look up the bundled docs.
  module: string;
  // Short summary of what the docs cover.
  description: string;
  // Module specifier used in generated import statements.
  importModule: string;
  // Symbol name imported from `importModule`.
  importName: string;
  // Optional import binding style for the generated import.
  // NOTE(review): behavior when omitted is decided by consumers — confirm in prompts.js.
  importType?: "named" | "namespace" | "default";
  // Optional URL of an external llms.txt document for this module.
  llmsTxtUrl?: string;
}
package/llms/types.js ADDED
@@ -0,0 +1,2 @@
1
// Intentionally empty runtime module: llms/types.ts only declares types,
// and this `export {}` keeps the emitted file a valid ES module.
export {};
//# sourceMappingURL=types.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"types.js","sourceRoot":"","sources":["../../jsr/llms/types.ts"],"names":[],"mappings":""}
@@ -0,0 +1,2 @@
1
import type { LlmConfig } from "./types.js";
/** Catalog entry for the bundled Web Audio API documentation module. */
export declare const webAudioConfig: LlmConfig;
@@ -0,0 +1,9 @@
1
// Summary of the topics covered by the bundled web-audio docs.
const WEB_AUDIO_DESCRIPTION =
  "Web Audio fundamentals; echo/delay with effects in the feedback path; mic monitoring with a metronome; audio‑clock scheduling; timing design for multi‑channel drum machines and MIDI synths with accurate voice overlap.";

/**
 * Catalog entry for the Web Audio API documentation module.
 * Shape matches the LlmConfig interface declared in ./types.js.
 */
export const webAudioConfig = {
  name: "web-audio",
  label: "Web Audio API",
  module: "web-audio",
  description: WEB_AUDIO_DESCRIPTION,
  importModule: "web-audio",
  importName: "WebAudioAPI",
};
//# sourceMappingURL=web-audio.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"web-audio.js","sourceRoot":"","sources":["../../jsr/llms/web-audio.ts"],"names":[],"mappings":"AAEA,MAAM,CAAC,MAAM,cAAc,GAAc;IACvC,IAAI,EAAE,WAAW;IACjB,KAAK,EAAE,eAAe;IACtB,MAAM,EAAE,WAAW;IACnB,WAAW,EACT,+NAA2N;IAC7N,YAAY,EAAE,WAAW;IACzB,UAAU,EAAE,aAAa;CAC1B,CAAC"}
@@ -0,0 +1,220 @@
1
+ # Web Audio API: Fundamentals, Echo with FX-in-Feedback, Mic Monitoring + Metronome, and Timing Architecture
2
+
3
+ Authoritative source: Issue #228 research threads — comments 3192681700, 3192696052, 3192806626.
4
+
5
+ ## 1) Fundamentals and Core Nodes
6
+
7
+ - AudioContext — master interface and clock (`audioCtx.currentTime`). Resume on a user gesture.
8
+ - OscillatorNode — synthesis; set `type` and `frequency`.
9
+ - AudioBufferSourceNode — decoded-file playback; schedule with `.start(when, offset?, duration?)`.
10
+ - GainNode — volume control and envelopes.
11
+ - BiquadFilterNode — EQ/tonal shaping (`type`, `frequency`, `Q`, etc.).
12
+ - AnalyserNode — FFT/time-domain visualization.
13
+
14
+ Examples
15
+
16
+ ```js
17
+ // 1) Context (user gesture required in many browsers)
18
+ const audioCtx = new (window.AudioContext || window.webkitAudioContext)();
19
+
20
+ // Start/resume only in direct response to a user gesture (e.g., a Play button)
21
+ document.querySelector('#start-audio')?.addEventListener('click', async () => {
22
+ if (audioCtx.state !== 'running') await audioCtx.resume();
23
+ // now safe to create/start nodes
24
+ });
25
+
26
+ // 2) Simple tone
27
+ const osc = audioCtx.createOscillator();
28
+ osc.type = 'sine';
29
+ osc.frequency.value = 440;
30
+ osc.connect(audioCtx.destination);
31
+ osc.start();
32
+ osc.stop(audioCtx.currentTime + 1);
33
+
34
+ // 3) Load/decode and play a file
35
+ const buf = await fetch('/path/audio.mp3').then(r => r.arrayBuffer()).then(b => audioCtx.decodeAudioData(b));
36
+ const src = audioCtx.createBufferSource();
37
+ src.buffer = buf;
38
+ src.connect(audioCtx.destination);
39
+ src.start();
40
+
41
+ // 4) Gain and Filter in series
42
+ const gain = audioCtx.createGain();
43
+ gain.gain.value = 0.5;
44
+ const filter = audioCtx.createBiquadFilter();
45
+ filter.type = 'lowpass';
46
+ filter.frequency.value = 1000;
47
+ osc.disconnect();
48
+ osc.connect(filter).connect(gain).connect(audioCtx.destination);
49
+ ```
50
+
51
+ Practical: clean up disconnected nodes; check browser support; use headphones to avoid feedback when monitoring.
52
+
53
+ ## 2) Echo/Delay with Effects Inside the Feedback Loop
54
+
55
+ Graph (node names are exact):
56
+
57
+ - Dry: `source → dryGain:GainNode → destination`
58
+ - Wet: `source → delay:DelayNode → wetGain:GainNode → destination`
59
+ - Feedback loop with FX: `delay → filter:BiquadFilterNode → distortion:WaveShaperNode → reverb:ConvolverNode → feedbackGain:GainNode → delay`
60
+
61
+ Parameters to expose
62
+
63
+ - `delay.delayTime` (s), `feedbackGain.gain` (0–1, keep < 1.0)
64
+ - `filter.type`, `filter.frequency`
65
+ - `distortion.curve` (Float32Array)
66
+ - `convolver.buffer` (IR AudioBuffer)
67
+ - `wetGain.gain`, `dryGain.gain`
68
+
69
+ Notes: Prevent runaway by capping feedback below 1.0; `ConvolverNode` requires a loaded impulse response; zero-delay cycles are disallowed.
70
+
71
+ ```js
72
+ const delay = audioCtx.createDelay(5.0);
73
+ const feedbackGain = audioCtx.createGain();
74
+ const filter = audioCtx.createBiquadFilter();
75
+ const distortion = audioCtx.createWaveShaper();
76
+ const reverb = audioCtx.createConvolver();
77
+ const wetGain = audioCtx.createGain();
78
+ const dryGain = audioCtx.createGain();
79
+
80
+ delay.delayTime.value = 0.35;
81
+ feedbackGain.gain.value = 0.5; // < 1.0
82
+ filter.type = 'lowpass';
83
+ filter.frequency.value = 8000;
84
+ // distortion.curve = yourFloat32Curve;
85
+ // reverb.buffer = yourImpulseResponseAudioBuffer;
86
+ wetGain.gain.value = 0.4;
87
+ dryGain.gain.value = 1.0;
88
+
89
+ // Dry and wet
90
+ source.connect(dryGain).connect(audioCtx.destination);
91
+ source.connect(delay);
92
+ delay.connect(wetGain).connect(audioCtx.destination);
93
+
94
+ // Feedback with FX
95
+ delay.connect(filter);
96
+ filter.connect(distortion);
97
+ distortion.connect(reverb);
98
+ reverb.connect(feedbackGain);
99
+ feedbackGain.connect(delay);
100
+ ```
101
+
102
+ Helper (load IR):
103
+
104
+ ```js
105
+ async function loadImpulseResponse(url) {
106
+ const res = await fetch(url, { mode: 'cors' });
107
+ if (!res.ok) throw new Error(`Failed to fetch IR ${url}: ${res.status} ${res.statusText}`);
108
+ const ab = await res.arrayBuffer();
109
+ try {
110
+ return await audioCtx.decodeAudioData(ab);
111
+ } catch (err) {
112
+ console.error('decodeAudioData failed for IR', url, err);
113
+ throw err; // Surface decoding/CORS-related failures clearly
114
+ }
115
+ }
116
+ ```
117
+
118
+ ## 3) Microphone Monitoring + Metronome Overlay
119
+
120
+ Mic capture: request permission with `navigator.mediaDevices.getUserMedia({ audio: { echoCancellation, noiseSuppression, autoGainControl } })`. Create `MediaStreamAudioSourceNode` and route to a `GainNode` → destination.
121
+
122
+ Metronome: synthesize a short click (e.g., square/sine burst through a gain envelope). Schedule by audio clock at `AudioContext.currentTime` with lookahead.
123
+
124
+ Mix graph: `micGain + metronomeGain → master → destination`.
125
+
126
+ ```js
127
+ const master = audioCtx.createGain();
128
+ master.connect(audioCtx.destination);
129
+ const micGain = audioCtx.createGain();
130
+ const metronomeGain = audioCtx.createGain();
131
+ micGain.connect(master);
132
+ metronomeGain.connect(master);
133
+
134
+ async function initMic() {
135
+ const stream = await navigator.mediaDevices.getUserMedia({ audio: { echoCancellation: true, noiseSuppression: true, autoGainControl: false } });
136
+ const micSrc = audioCtx.createMediaStreamSource(stream);
137
+ micSrc.connect(micGain);
138
+ }
139
+
140
+ function scheduleClick(atTime, downbeat = false) {
141
+ const osc = audioCtx.createOscillator();
142
+ const env = audioCtx.createGain();
143
+ osc.type = 'square';
144
+ osc.frequency.setValueAtTime(downbeat ? 2000 : 1600, atTime);
145
+ env.gain.setValueAtTime(0.0001, atTime);
146
+ env.gain.exponentialRampToValueAtTime(1.0, atTime + 0.001);
147
+ env.gain.exponentialRampToValueAtTime(0.0001, atTime + 0.03);
148
+ osc.connect(env).connect(metronomeGain);
149
+ osc.start(atTime);
150
+ osc.stop(atTime + 0.05);
151
+ // Cleanup to avoid accumulating nodes during long sessions
152
+ osc.onended = () => {
153
+ try { osc.disconnect(); } catch {}
154
+ try { env.disconnect(); } catch {}
155
+ };
156
+ }
157
+
158
+ function startMetronome({ bpm = 120, beatsPerBar = 4 } = {}) {
159
+ const spb = 60 / bpm; // seconds per beat
160
+ let next = audioCtx.currentTime + 0.1;
161
+ let beat = 0;
162
+ const lookaheadMs = 25, ahead = 0.2;
163
+ const id = setInterval(() => {
164
+ while (next < audioCtx.currentTime + ahead) {
165
+ scheduleClick(next, beat % beatsPerBar === 0);
166
+ next += spb; beat = (beat + 1) % beatsPerBar;
167
+ }
168
+ }, lookaheadMs);
169
+ return () => clearInterval(id);
170
+ }
171
+ ```
172
+
173
+ Latency and safety: start/resume on user gesture; clean up per-tick nodes after `ended` to prevent buildup in long-running metronomes; use headphones while monitoring; mobile devices have higher base latency.
174
+
175
+ ## 4) Time Synchronization and Scheduling Model
176
+
177
+ Clocks/time domains
178
+
179
+ - Master: `AudioContext.currentTime` — sample-accurate; schedule everything on this timeline.
180
+ - UI/high-res: `performance.now()` — for UI timers and Web MIDI timestamps.
181
+ - Mapping: capture `(tPerf0 = performance.now(), tAudio0 = audioCtx.currentTime)`, convert MIDI/perf timestamps with `tAudio = tAudio0 + (timeStamp - tPerf0)/1000`.
182
+ - Hints: `audioCtx.baseLatency`, `audioCtx.getOutputTimestamp?.()` — estimate DAC/output delay if aligning to “heard” time.
183
+
184
+ Scheduling primitives
185
+
186
+ - `AudioBufferSourceNode.start(when, offset?, duration?)` for one-shots/loops.
187
+ - `AudioParam` automation (`setValueAtTime`, `linearRampToValueAtTime`, `setTargetAtTime`, `setValueCurveAtTime`).
188
+ - Avoid `requestAnimationFrame`/`setTimeout` for timing; use an AudioWorklet for custom DSP/tight jitter when needed.
189
+
190
+ Tempo transport and lookahead
191
+
192
+ - Tempo mapping: `secondsPerBeat = 60 / bpm`; compute bars:beats:ticks → seconds on the audio clock (choose PPQ, e.g., 480/960).
193
+ - Lookahead window: maintain ~50–200 ms rolling schedule; enqueue with absolute `when` times in audio seconds.
194
+
195
+ Multi‑channel drum machine
196
+
197
+ - Pre‑decode all samples; never decode on hit.
198
+ - Per hit: create a fresh `AudioBufferSourceNode` and call `.start(when)`.
199
+ - For phase‑aligned layers (kick+clap, etc.), schedule all sources with the same `when` to guarantee sample‑accurate overlap.
200
+ - Routing: per‑track `GainNode`/optional FX → master bus; allow overlapping retriggers; compute flams as small `when` offsets.
201
+ - Pattern changes: compute the next bar boundary on the audio clock and enqueue new pattern hits relative to that time.
202
+
203
+ MIDI synth playback
204
+
205
+ - Live input: map `MIDIMessageEvent.timeStamp` (perf.now domain) → audio clock as above; buffer a short lookahead (5–20 ms) to reduce jitter.
206
+ - SMF playback: convert PPQ ticks using the tempo map; schedule noteOn/noteOff separately; sustain (CC64) defers noteOff until pedal release.
207
+ - Voice management: one voice per active note; allow overlapping envelopes; define voice‑steal policy if a polyphony cap is hit.
208
+
209
+ External sync and drift
210
+
211
+ - For MIDI Clock/MTC, derive BPM/phase from incoming ticks, convert to audio time, and drive the transport. Correct small phase error between beats with bounded micro‑nudges—avoid discontinuities.
212
+
213
+ ## 5) Practical Notes
214
+
215
+ - User gesture required to start/resume `AudioContext` and to access the mic.
216
+ - Convolver IRs: host with CORS if cross‑origin; decode before use.
217
+ - Latency budget: device `baseLatency` + your lookahead + any Worklet buffering.
218
+ - Headphones recommended for monitoring to avoid acoustic feedback.
219
+
220
+ — End —
package/load-docs.d.ts ADDED
@@ -0,0 +1,2 @@
1
import { CoerceURI, Result } from "@adviser/cement";
/**
 * Fetches the documentation file at `localPath`, resolved against `baseUrl`.
 * Resolves to Result.Ok(text) on success, or Result.Err(message) on a
 * non-OK HTTP status or any fetch/read error (never rejects).
 */
export declare function loadDocs(localPath: string, baseUrl: CoerceURI): Promise<Result<string>>;
package/load-docs.js ADDED
@@ -0,0 +1,17 @@
1
+ import { Result } from "@adviser/cement";
2
+ import { joinUrlParts } from "call-ai";
3
/**
 * Fetch a docs file located at `localPath` relative to `baseUrl`.
 * Never throws: HTTP failures and network/read errors are both folded
 * into a Result.Err with a descriptive message.
 */
export async function loadDocs(localPath, baseUrl) {
  // Build the absolute URL; an absent base degrades to the bare path.
  const url = joinUrlParts(baseUrl?.toString() || "", localPath);
  try {
    const res = await fetch(url);
    if (!res.ok) {
      return Result.Err(`Failed to fetch ${url}: ${res.status} ${res.statusText}`);
    }
    // res.text() may also reject; the surrounding try converts that to Err too.
    return Result.Ok(await res.text());
  } catch (err) {
    return Result.Err(`Error fetching ${url}: ${err}`);
  }
}
//# sourceMappingURL=load-docs.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"load-docs.js","sourceRoot":"","sources":["../jsr/load-docs.ts"],"names":[],"mappings":"AAAA,OAAO,EAAa,MAAM,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,EAAE,YAAY,EAAE,MAAM,SAAS,CAAC;AAEvC,MAAM,CAAC,KAAK,UAAU,QAAQ,CAC5B,SAAiB,EACjB,OAAkB,EACO;IACzB,MAAM,GAAG,GAAG,YAAY,CAAC,OAAO,EAAE,QAAQ,EAAE,IAAI,EAAE,EAAE,SAAS,CAAC,CAAC;IAC/D,IAAI,CAAC;QACH,MAAM,QAAQ,GAAG,MAAM,KAAK,CAAC,GAAG,CAAC,CAAC;QAClC,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE,CAAC;YACjB,OAAO,MAAM,CAAC,GAAG,CACf,mBAAmB,GAAG,KAAK,QAAQ,CAAC,MAAM,IAAI,QAAQ,CAAC,UAAU,EAAE,CACpE,CAAC;QACJ,CAAC;QACD,MAAM,IAAI,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;QACnC,OAAO,MAAM,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC;IACzB,CAAC;IAAC,OAAO,KAAK,EAAE,CAAC;QACf,OAAO,MAAM,CAAC,GAAG,CAAC,kBAAkB,GAAG,KAAK,KAAK,EAAE,CAAC,CAAC;IACvD,CAAC;AAAA,CACF"}
package/package.json ADDED
@@ -0,0 +1,39 @@
1
+ {
2
+ "name": "@vibes.diy/prompts",
3
+ "version": "0.0.0-dev-fresh-data",
4
+ "type": "module",
5
+ "description": "",
6
+ "keywords": [
7
+ "ai",
8
+ "dom",
9
+ "micro-app",
10
+ "generator",
11
+ "web",
12
+ "esm",
13
+ "typescript"
14
+ ],
15
+ "contributors": [
16
+ "J Chris Anderson",
17
+ "Meno Abels"
18
+ ],
19
+ "license": "Apache-2.0",
20
+ "dependencies": {
21
+ "@adviser/cement": "^0.4.53",
22
+ "@fireproof/core-types-base": "^0.23.15",
23
+ "@vibes.diy/use-vibes-types": "^0.0.0-dev-fresh-data",
24
+ "call-ai": "^0.0.0-dev-fresh-data"
25
+ },
26
+ "peerDependencies": {
27
+ "react": ">=19.1.0"
28
+ },
29
+ "devDependencies": {
30
+ "@fireproof/core-cli": "^0.23.15",
31
+ "@vitest/browser-playwright": "^4.0.2",
32
+ "typescript": "^5.9.3",
33
+ "typescript-eslint": "^8.46.1"
34
+ },
35
+ "scripts": {
36
+ "build": "core-cli tsc",
37
+ "test": "vitest run"
38
+ }
39
+ }
package/prompts.d.ts ADDED
@@ -0,0 +1,43 @@
1
+ import { Mocks } from "call-ai";
2
+ import type { HistoryMessage, UserSettings } from "./settings.js";
3
+ import { CoerceURI } from "@adviser/cement";
4
+ import { LlmCatalogEntry } from "./json-docs.js";
5
+ export declare const DEFAULT_CODING_MODEL: "anthropic/claude-sonnet-4.5";
6
+ export declare const RAG_DECISION_MODEL: "openai/gpt-4o";
7
+ export declare function defaultCodingModel(): Promise<"anthropic/claude-sonnet-4.5">;
8
+ export declare function normalizeModelId(id: unknown): string | undefined;
9
+ export declare function isPermittedModelId(id: unknown): id is string;
10
+ export declare function resolveEffectiveModel(settingsDoc?: {
11
+ model?: string;
12
+ }, vibeDoc?: {
13
+ selectedModel?: string;
14
+ }): Promise<string>;
15
+ export interface SystemPromptResult {
16
+ systemPrompt: string;
17
+ dependencies: string[];
18
+ instructionalText: boolean;
19
+ demoData: boolean;
20
+ model: string;
21
+ }
22
+ export interface LlmSelectionDecisions {
23
+ selected: string[];
24
+ instructionalText: boolean;
25
+ demoData: boolean;
26
+ }
27
+ export interface LlmSelectionOptions {
28
+ readonly appMode?: "test" | "production";
29
+ readonly callAiEndpoint?: CoerceURI;
30
+ readonly fallBackUrl?: CoerceURI;
31
+ readonly getAuthToken?: () => Promise<string>;
32
+ readonly mock?: Mocks;
33
+ }
34
+ export type LlmSelectionWithFallbackUrl = Omit<Omit<LlmSelectionOptions, "fallBackUrl">, "callAiEndpoint"> & {
35
+ readonly fallBackUrl: CoerceURI;
36
+ readonly callAiEndpoint?: CoerceURI;
37
+ };
38
+ export declare function selectLlmsAndOptions(model: string, userPrompt: string, history: HistoryMessage[], iopts: LlmSelectionOptions): Promise<LlmSelectionDecisions>;
39
+ export declare function generateImportStatements(llms: LlmCatalogEntry[]): string;
40
+ export declare function makeBaseSystemPrompt(model: string, sessionDoc: Partial<UserSettings> & LlmSelectionOptions): Promise<SystemPromptResult>;
41
+ export declare const RESPONSE_FORMAT: {
42
+ structure: string[];
43
+ };