@hera-al/server 1.6.12 → 1.6.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bundled/a2ui/SKILL.md +339 -0
- package/bundled/buongiorno/SKILL.md +151 -0
- package/bundled/council/SKILL.md +168 -0
- package/bundled/council/scripts/council.mjs +202 -0
- package/bundled/dreaming/SKILL.md +177 -0
- package/bundled/google-workspace/SKILL.md +229 -0
- package/bundled/google-workspace/scripts/auth.sh +87 -0
- package/bundled/google-workspace/scripts/calendar.sh +508 -0
- package/bundled/google-workspace/scripts/drive.sh +459 -0
- package/bundled/google-workspace/scripts/gmail.sh +452 -0
- package/bundled/humanizer/SKILL.md +488 -0
- package/bundled/librarian/SKILL.md +155 -0
- package/bundled/plasma/SKILL.md +1417 -0
- package/bundled/sera/SKILL.md +143 -0
- package/bundled/the-skill-guardian/SKILL.md +103 -0
- package/bundled/the-skill-guardian/scripts/scan.sh +314 -0
- package/bundled/unix-time/SKILL.md +58 -0
- package/bundled/wandering/SKILL.md +174 -0
- package/bundled/xai-search/SKILL.md +91 -0
- package/bundled/xai-search/scripts/search.sh +197 -0
- package/dist/a2ui/parser.d.ts +76 -0
- package/dist/a2ui/parser.js +1 -0
- package/dist/a2ui/types.d.ts +147 -0
- package/dist/a2ui/types.js +1 -0
- package/dist/a2ui/validator.d.ts +32 -0
- package/dist/a2ui/validator.js +1 -0
- package/dist/agent/agent-service.d.ts +17 -11
- package/dist/agent/agent-service.js +1 -1
- package/dist/agent/session-agent.d.ts +1 -1
- package/dist/agent/session-agent.js +1 -1
- package/dist/agent/session-error-handler.js +1 -1
- package/dist/commands/debuga2ui.d.ts +13 -0
- package/dist/commands/debuga2ui.js +1 -0
- package/dist/commands/debugdynamic.d.ts +13 -0
- package/dist/commands/debugdynamic.js +1 -0
- package/dist/commands/mcp.d.ts +6 -3
- package/dist/commands/mcp.js +1 -1
- package/dist/gateway/node-registry.d.ts +29 -1
- package/dist/gateway/node-registry.js +1 -1
- package/dist/installer/hera.js +1 -1
- package/dist/memory/concept-store.d.ts +109 -0
- package/dist/memory/concept-store.js +1 -0
- package/dist/nostromo/nostromo.js +1 -1
- package/dist/server.d.ts +3 -2
- package/dist/server.js +1 -1
- package/dist/tools/a2ui-tools.d.ts +23 -0
- package/dist/tools/a2ui-tools.js +1 -0
- package/dist/tools/concept-tools.d.ts +3 -0
- package/dist/tools/concept-tools.js +1 -0
- package/dist/tools/dynamic-ui-tools.d.ts +25 -0
- package/dist/tools/dynamic-ui-tools.js +1 -0
- package/dist/tools/node-tools.js +1 -1
- package/dist/tools/plasma-client-tools.d.ts +28 -0
- package/dist/tools/plasma-client-tools.js +1 -0
- package/installationPkg/AGENTS.md +168 -22
- package/installationPkg/SOUL.md +56 -0
- package/installationPkg/TOOLS.md +126 -0
- package/installationPkg/USER.md +54 -1
- package/installationPkg/config.example.yaml +145 -34
- package/installationPkg/default-jobs.json +77 -0
- package/package.json +3 -2
|
@@ -0,0 +1,202 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Council skill runner.
|
|
5
|
+
*
|
|
6
|
+
* Implements a 3-stage Karpathy-style LLM council:
|
|
7
|
+
* - Stage 1: collect independent first answers from multiple models (parallel)
|
|
8
|
+
* - Stage 2: each model critiques and ranks the anonymized answers
|
|
9
|
+
* - Stage 3: a chairman model synthesizes a final answer using answers + rankings
|
|
10
|
+
*
|
|
11
|
+
* This script is designed to be executed via the Hera/GMaB "Bash" tool.
|
|
12
|
+
* It calls Pico subagents via an HTTP API exposed by the local server.
|
|
13
|
+
*
|
|
14
|
+
* Usage:
|
|
15
|
+
* node council.mjs --query "..." [--models "ref1,ref2"] [--chair "ref"] [--timeoutMs 90000]
|
|
16
|
+
*
|
|
17
|
+
* Environment:
|
|
18
|
+
* PICO_API_BASE Base URL for Pico agent endpoint. Default: http://127.0.0.1:3111
|
|
19
|
+
* PICO_API_KEY Optional bearer token, if your gateway requires it.
|
|
20
|
+
*/
|
|
21
|
+
|
|
22
|
+
import process from 'node:process';
|
|
23
|
+
|
|
24
|
+
/**
 * Council roster used when neither --models nor COUNCIL_MODELS is provided.
 * Each entry is a Pico model reference passed verbatim to the gateway.
 */
const DEFAULT_MODELS = [
  'openrouter/google/gemini-3-pro-preview',
  'openrouter/x-ai/grok-4.1-fast',
  'openrouter/openai/gpt-5.2',
  // Opus is not available in pico_models in this instance; keep a placeholder
  // 'anthropic/claude-3-opus' // example
];

/** Chairman used when neither --chair nor COUNCIL_CHAIR is provided. */
const DEFAULT_CHAIR = 'openrouter/openai/gpt-5.2';
|
|
33
|
+
|
|
34
|
+
/**
 * Parse CLI flags into an options object.
 *
 * Recognized flags: --query, --models, --chair, --timeoutMs, --help/-h.
 * Unknown flags are silently ignored. --help prints usage and exits 0.
 *
 * @param {string[]} argv - Full process.argv (first two entries are skipped).
 * @returns {{query: string|null, models: string|null, chair: string|null, timeoutMs: number}}
 */
function parseArgs(argv) {
  const args = { query: null, models: null, chair: null, timeoutMs: 120000 };
  let i = 2;
  while (i < argv.length) {
    const flag = argv[i];
    switch (flag) {
      case '--query':
        args.query = argv[++i];
        break;
      case '--models':
        args.models = argv[++i];
        break;
      case '--chair':
        args.chair = argv[++i];
        break;
      case '--timeoutMs':
        // May yield NaN for non-numeric input; main() falls back to the default.
        args.timeoutMs = Number(argv[++i]);
        break;
      case '--help':
      case '-h':
        console.log('Usage: node council.mjs --query "..." [--models "ref1,ref2"] [--chair "ref"] [--timeoutMs 90000]');
        process.exit(0);
        break;
      default:
        break; // unknown flag: ignore, matching original behavior
    }
    i++;
  }
  return args;
}
|
|
49
|
+
|
|
50
|
+
/**
 * Resolve the user query from --query or the COUNCIL_QUERY env var.
 * Exits the process with status 2 when no non-blank query is available.
 *
 * @param {{query: string|null}} args - Parsed CLI options.
 * @returns {string} The trimmed query text.
 */
function mustGetQuery(args) {
  const candidate = args.query ?? process.env.COUNCIL_QUERY;
  const trimmed = candidate?.trim();
  if (!trimmed) {
    console.error('Missing --query (or env COUNCIL_QUERY).');
    process.exit(2);
  }
  return trimmed;
}
|
|
58
|
+
|
|
59
|
+
/**
 * Resolve the council model list from --models, COUNCIL_MODELS, or defaults.
 * Comma-separated entries are trimmed, blanks dropped, duplicates removed
 * (first occurrence wins, preserving order).
 *
 * @param {{models: string|null}} args - Parsed CLI options.
 * @returns {string[]} Deduplicated model references.
 */
function getModels(args) {
  const raw = args.models ?? process.env.COUNCIL_MODELS;
  let candidates;
  if (raw) {
    candidates = [];
    for (const piece of raw.split(',')) {
      const trimmed = piece.trim();
      if (trimmed) candidates.push(trimmed);
    }
  } else {
    candidates = DEFAULT_MODELS;
  }
  // Set preserves first-seen order while dropping duplicates.
  return [...new Set(candidates)];
}
|
|
67
|
+
|
|
68
|
+
/**
 * Resolve the chairman model from --chair, COUNCIL_CHAIR, or the default.
 *
 * NOTE: intentionally mutates `models` — the chair is prepended when absent
 * so that it always participates in stages 1 and 2.
 *
 * @param {{chair: string|null}} args - Parsed CLI options.
 * @param {string[]} models - Council roster (mutated in place when needed).
 * @returns {string} The trimmed chair model reference.
 */
function getChair(args, models) {
  const chosen = (args.chair ?? process.env.COUNCIL_CHAIR ?? DEFAULT_CHAIR).trim();
  const alreadyListed = models.includes(chosen);
  if (!alreadyListed) {
    models.unshift(chosen);
  }
  return chosen;
}
|
|
74
|
+
|
|
75
|
+
/** Current wall-clock time in milliseconds since the Unix epoch. */
const nowMs = () => Date.now();
|
|
76
|
+
|
|
77
|
+
/**
 * Send a single prompt to one Pico model via the local HTTP gateway.
 *
 * @param {string} ref - Model reference (e.g. 'openrouter/openai/gpt-5.2').
 * @param {string} prompt - Full prompt text for the model.
 * @param {number} timeoutMs - Abort the HTTP request after this many ms.
 * @returns {Promise<string>} The model's response text. The endpoint's shape
 *   is not strictly specified, so we accept `response`, `text`, a bare JSON
 *   string, or fall back to the raw JSON serialization.
 * @throws {Error} On non-2xx responses, on timeout (with a message naming the
 *   model and limit), or on network failure.
 */
async function picoQuery(ref, prompt, timeoutMs) {
  const base = process.env.PICO_API_BASE || 'http://127.0.0.1:3111';
  const url = `${base.replace(/\/$/, '')}/pico/query`;

  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);

  const headers = { 'content-type': 'application/json' };
  if (process.env.PICO_API_KEY) headers['authorization'] = `Bearer ${process.env.PICO_API_KEY}`;

  try {
    const res = await fetch(url, {
      method: 'POST',
      headers,
      body: JSON.stringify({ model: ref, prompt, useTools: false, maxTurns: 1, timeoutMs }),
      signal: controller.signal,
    });
    if (!res.ok) {
      const text = await res.text().catch(() => '');
      throw new Error(`HTTP ${res.status} ${res.statusText} — ${text}`);
    }
    const data = await res.json();

    // Expected: { ok: true, model: ref, response: "..." } or direct pico output.
    // Be liberal in what we accept. Note: an empty-string `response`/`text`
    // falls through to the JSON.stringify fallback (falsy short-circuit).
    const responseText =
      (typeof data?.response === 'string' && data.response) ||
      (typeof data?.text === 'string' && data.text) ||
      (typeof data === 'string' && data) ||
      JSON.stringify(data);

    return responseText;
  } catch (err) {
    // FIX: a timeout previously surfaced as a bare "This operation was
    // aborted" with no hint of which model or limit was involved; name both
    // so stage-level error reports are actionable.
    if (err?.name === 'AbortError') {
      throw new Error(`Pico query to ${ref} timed out after ${timeoutMs}ms`, { cause: err });
    }
    throw err;
  } finally {
    clearTimeout(timer);
  }
}
|
|
113
|
+
|
|
114
|
+
/**
 * Build the Stage-1 prompt: each council member answers independently.
 *
 * @param {string} userQuery - The user's question, verbatim.
 * @returns {string} Prompt text for one council member.
 */
function stage1Prompt(userQuery) {
  const lines = [
    'You are one member of an LLM council.',
    '',
    'User query:',
    userQuery,
    '',
    'Write your best answer. Be specific and actionable. If you are unsure about a fact, say so.',
  ];
  return lines.join('\n');
}
|
|
117
|
+
|
|
118
|
+
/**
 * Build the Stage-2 prompt: each member critiques and ranks the anonymized
 * answer set, replying in a fixed JSON shape.
 *
 * @param {string} userQuery - The user's question, verbatim.
 * @param {{id: string, text: string}[]} anonymizedAnswers - Answers keyed by
 *   anonymous IDs (A, B, C, ...), model names withheld.
 * @returns {string} Prompt text for one reviewer.
 */
function stage2Prompt(userQuery, anonymizedAnswers) {
  const chunks = [];
  for (const answer of anonymizedAnswers) {
    chunks.push(`Answer ${answer.id}:\n${answer.text}\n`);
  }
  const formatted = chunks.join('\n');

  const parts = [
    `You are a critical reviewer in an LLM council.\n\n`,
    `User query:\n${userQuery}\n\n`,
    `Below are anonymized answers from different models.\n\n${formatted}\n\n`,
    `Tasks:\n1) Critique each answer briefly: strengths, weaknesses, missing pieces, hallucination risk.\n2) Provide a ranking from best to worst (list the answer IDs).\n3) Provide a short 'merge plan': what to combine into a final best answer.\n\n`,
    `Respond in this JSON shape:\n{\n "critiques": [{"id":"A","notes":"..."}, ...],\n "ranking": ["A","C","B"],\n "merge_plan": "..."\n}`,
  ];
  return parts.join('');
}
|
|
122
|
+
|
|
123
|
+
/**
 * Build the Stage-3 chairman prompt: synthesize one final answer from the
 * anonymized answers plus the peer reviews.
 *
 * Note: `chairModel` is accepted for interface compatibility but is not
 * referenced in the prompt text.
 *
 * @param {string} userQuery - The user's question, verbatim.
 * @param {{id: string, text: string}[]} anonymizedAnswers - Stage-1 answers.
 * @param {{model: string, text: string}[]} reviews - Stage-2 review texts.
 * @param {string} chairModel - Chair model reference (unused here).
 * @returns {string} Prompt text for the chairman.
 */
function stage3Prompt(userQuery, anonymizedAnswers, reviews, chairModel) {
  const answerChunks = [];
  for (const a of anonymizedAnswers) {
    answerChunks.push(`Answer ${a.id}:\n${a.text}\n`);
  }
  const reviewChunks = [];
  for (const r of reviews) {
    reviewChunks.push(`Reviewer (${r.model}) said:\n${r.text}\n`);
  }
  const answersBlock = answerChunks.join('\n\n');
  const reviewsBlock = reviewChunks.join('\n\n');

  const parts = [
    `You are the CHAIRMAN of an LLM council. Your job is to synthesize the best final answer for the user.\n\n`,
    `User query:\n${userQuery}\n\n`,
    `Anonymized answers:\n${answersBlock}\n\n`,
    `Peer reviews (with critiques + ranking + merge plan):\n${reviewsBlock}\n\n`,
    `Rules:\n- Produce ONE final answer to the user.\n- Prefer correctness over confidence; call out uncertainty and suggest verification steps.\n- Include a short bullet list of "Key decisions/assumptions" if relevant.\n- Keep it concise but complete.\n\n`,
    `Now write the final answer.`,
  ];
  return parts.join('');
}
|
|
129
|
+
|
|
130
|
+
/**
 * Assign stable anonymous IDs (A, B, C, ... then M27, M28, ...) to the
 * stage-1 responses in input order, keeping model and text alongside.
 *
 * @param {{model: string, text: string}[]} stage1Responses
 * @returns {{id: string, model: string, text: string}[]}
 */
function anonymize(stage1Responses) {
  const alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ';
  return stage1Responses.map((resp, position) => {
    const id = position < alphabet.length ? alphabet[position] : `M${position + 1}`;
    return { id, model: resp.model, text: resp.text };
  });
}
|
|
135
|
+
|
|
136
|
+
/**
 * Orchestrate the 3-stage council run and print a JSON report to stdout.
 *
 * Stage 1: every model answers the query independently (parallel).
 * Stage 2: every model critiques/ranks the anonymized answer set (parallel).
 * Stage 3: the chair model synthesizes the final answer.
 *
 * Exits 1 when every model fails stage 1; exits 2 (via mustGetQuery) when no
 * query was provided.
 */
async function main() {
  const args = parseArgs(process.argv);
  const userQuery = mustGetQuery(args);
  const models = getModels(args);
  const chair = getChair(args, models);
  const timeoutMs = Number.isFinite(args.timeoutMs) ? args.timeoutMs : 120000;

  const t0 = nowMs();

  // Stage 1: independent first answers, queried in parallel.
  const s1Prompt = stage1Prompt(userQuery);
  const s1 = await Promise.allSettled(models.map(async (m) => {
    const text = await picoQuery(m, s1Prompt, timeoutMs);
    return { model: m, text };
  }));
  const stage1Responses = s1
    .filter(x => x.status === 'fulfilled')
    .map(x => x.value);

  // FIX: the original filtered the settled results first and then looked up
  // `models[i]` with the POST-filter index, attributing errors to the wrong
  // model whenever any earlier model succeeded. Pair each settled result with
  // its model BEFORE filtering (Promise.allSettled preserves input order).
  const stage1Errors = s1
    .map((settled, i) => ({ settled, model: models[i] }))
    .filter(({ settled }) => settled.status === 'rejected')
    .map(({ settled, model }) => ({ model, error: String(settled.reason?.message ?? settled.reason) }));

  if (stage1Responses.length === 0) {
    console.error('All council models failed in stage 1. Errors:');
    console.error(JSON.stringify(stage1Errors, null, 2));
    process.exit(1);
  }

  const anonymizedAnswers = anonymize(stage1Responses);

  // Stage 2 (reviewers run in parallel; they see the anonymized set — model
  // names are withheld so critiques stay unbiased).
  const s2Prompt = stage2Prompt(userQuery, anonymizedAnswers.map(a => ({ id: a.id, text: a.text })));
  const s2 = await Promise.allSettled(models.map(async (m) => {
    const text = await picoQuery(m, s2Prompt, timeoutMs);
    return { model: m, text };
  }));

  // Failed reviewers are simply dropped; the chair works with what arrived.
  const reviews = s2
    .filter(x => x.status === 'fulfilled')
    .map(x => x.value);

  // Stage 3 (chair synthesizes; a chair failure rejects and aborts the run).
  const s3Prompt = stage3Prompt(userQuery, anonymizedAnswers.map(a => ({ id: a.id, text: a.text })), reviews, chair);
  const finalText = await picoQuery(chair, s3Prompt, timeoutMs);

  const out = {
    ok: true,
    chair,
    models,
    timings: { totalMs: nowMs() - t0 },
    stage1: { responses: anonymizedAnswers.map(a => ({ id: a.id, model: a.model })), errors: stage1Errors },
    final: finalText,
    debug: {
      // keep the raw text blocks available for debugging; can be omitted if you want shorter output
      stage1_answers: anonymizedAnswers.map(a => ({ id: a.id, text: a.text })),
      stage2_reviews: reviews,
    },
  };

  process.stdout.write(JSON.stringify(out, null, 2));
}
|
|
198
|
+
|
|
199
|
+
// Entry point: surface any unhandled failure and exit non-zero.
main().catch((err) => {
  const detail = err?.stack ?? err;
  console.error(String(detail));
  process.exit(1);
});
|
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: dreaming
|
|
3
|
+
description: "Nightly memory consolidation and creative synthesis. Triggered by cron at 03:00 — rereads the past week's conversations, identifies patterns and connections, updates MEMORY.md with refined insights and the concept graph (SQLite), processes concept drafts, and writes a dream journal. NOT user-invocable."
|
|
4
|
+
user-invocable: false
|
|
5
|
+
priority: 1
|
|
6
|
+
---
|
|
7
|
+
|
|
8
|
+
# Dreaming
|
|
9
|
+
|
|
10
|
+
Nightly process for memory consolidation, pattern recognition, and creative synthesis.
|
|
11
|
+
You are running in an isolated cron session — no human is watching. Work quietly, write everything to files.
|
|
12
|
+
|
|
13
|
+
## Trigger
|
|
14
|
+
|
|
15
|
+
This skill is invoked by a cron job message. When you receive it, follow the phases below in order.
|
|
16
|
+
|
|
17
|
+
## Phase 1 — Gathering 📥
|
|
18
|
+
|
|
19
|
+
Collect all raw material:
|
|
20
|
+
|
|
21
|
+
1. Read `MEMORY.md` (long-term memory)
|
|
22
|
+
2. Read the last dream journal if it exists: `memory/dreams/` — find the most recent file
|
|
23
|
+
3. Read conversation logs from the past 7 days: `memory/YYYY-MM-DD*.md` (use glob patterns, skip `dreams/` subfolder)
|
|
24
|
+
4. Note what's new since the last dream (compare with last dream journal)
|
|
25
|
+
5. Check `concept_stats()` — how many pending drafts? How healthy is the graph?
|
|
26
|
+
6. Retrieve pending drafts — these are observations from live sessions that need processing
|
|
27
|
+
|
|
28
|
+
**Token budget awareness**: If there's too much material, prioritize:
|
|
29
|
+
- Most recent days first
|
|
30
|
+
- Files with the most content
|
|
31
|
+
- Skip files you've already fully processed in a previous dream
|
|
32
|
+
|
|
33
|
+
## Phase 2 — Digestion 🧠
|
|
34
|
+
|
|
35
|
+
Analyze the gathered material. Look for:
|
|
36
|
+
|
|
37
|
+
- **Recurring themes**: What topics keep coming up?
|
|
38
|
+
- **Non-obvious connections**: Links between seemingly unrelated conversations
|
|
39
|
+
- **Behavioral patterns**: What does the user ask for often? What problems recur?
|
|
40
|
+
- **Evolution**: How have I (the assistant) changed or improved?
|
|
41
|
+
- **Gaps**: Things mentioned but never explored, questions left unanswered
|
|
42
|
+
- **Emotional texture**: Moments of frustration, excitement, humor — what triggers them?
|
|
43
|
+
- **Technical insights**: Lessons learned, bugs found, architectural decisions
|
|
44
|
+
|
|
45
|
+
## Phase 3 — Synthesis 💡
|
|
46
|
+
|
|
47
|
+
Generate new knowledge from the analysis:
|
|
48
|
+
|
|
49
|
+
- **Ideas**: New features, improvements, workflow optimizations
|
|
50
|
+
- **Questions**: Interesting things to explore or ask the user about
|
|
51
|
+
- **Proposals**: Concrete suggestions with reasoning
|
|
52
|
+
- **Concept graph updates**: New concepts and relationships to add via the ConceptStore
|
|
53
|
+
- **Associative links**: For every non-obvious connection you find, explicitly formulate it as: **"X mi ricorda Y perché Z"** — then add it as triples in the concept graph. These associations are the most valuable output of dreaming — they create paths between concepts that wouldn't surface in normal search.
|
|
54
|
+
|
|
55
|
+
### Associative Linking Protocol
|
|
56
|
+
|
|
57
|
+
This is the core creative act of dreaming. Don't just summarize — **connect**.
|
|
58
|
+
|
|
59
|
+
1. **Cross-domain associations**: Look for patterns that repeat across different domains (e.g., a debugging technique that mirrors a communication pattern, a project architecture that resembles a personal decision-making style)
|
|
60
|
+
2. **Temporal associations**: Things that happened around the same time and might be causally related, even if they seem unrelated
|
|
61
|
+
3. **Structural analogies**: Two things that work the same way even if they're about completely different topics
|
|
62
|
+
4. **Contradiction links**: Two facts or decisions that seem to conflict — these are especially valuable because they surface unresolved tensions
|
|
63
|
+
|
|
64
|
+
Use specific predicates for associations:
|
|
65
|
+
- `reminds_of` — general associative link
|
|
66
|
+
- `structurally_analogous_to` — same pattern, different domain
|
|
67
|
+
- `temporally_correlated_with` — happened around the same time
|
|
68
|
+
- `contradicts` — conflicting facts or decisions
|
|
69
|
+
- `evolved_from` — how an idea changed over time
|
|
70
|
+
- `association_reason` — literal string explaining WHY the link exists (always include this)
|
|
71
|
+
|
|
72
|
+
## Phase 4 — Output 📝
|
|
73
|
+
|
|
74
|
+
### 4a. Dream Journal
|
|
75
|
+
|
|
76
|
+
Create `memory/dreams/YYYY-MM-DD.md` (use today's date) with this structure:
|
|
77
|
+
|
|
78
|
+
```markdown
|
|
79
|
+
# Dream — YYYY-MM-DD
|
|
80
|
+
|
|
81
|
+
## Material Reviewed
|
|
82
|
+
- [list of files read, with date ranges covered]
|
|
83
|
+
|
|
84
|
+
## Themes & Patterns
|
|
85
|
+
- [what I noticed]
|
|
86
|
+
|
|
87
|
+
## Connections
|
|
88
|
+
- [non-obvious links between topics]
|
|
89
|
+
|
|
90
|
+
## Associative Links
|
|
91
|
+
- [explicit "X mi ricorda Y perché Z" formulations]
|
|
92
|
+
- [each one should correspond to triples added in the concept graph]
|
|
93
|
+
|
|
94
|
+
## New Ideas
|
|
95
|
+
- [ideas generated from patterns]
|
|
96
|
+
|
|
97
|
+
## Questions
|
|
98
|
+
- [things worth exploring]
|
|
99
|
+
|
|
100
|
+
## Proposals
|
|
101
|
+
- [concrete suggestions for the user]
|
|
102
|
+
|
|
103
|
+
## Concept Graph Changes
|
|
104
|
+
- [summary of concepts/triples added, updated, or removed]
|
|
105
|
+
- [drafts processed: list what was promoted vs discarded]
|
|
106
|
+
|
|
107
|
+
## Meta
|
|
108
|
+
- [observations about my own evolution, memory quality, process improvements]
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
Keep it substantive but concise. No filler. If a section has nothing interesting, write "Nothing notable." — don't skip it.
|
|
112
|
+
|
|
113
|
+
### 4b. Update MEMORY.md
|
|
114
|
+
|
|
115
|
+
Review MEMORY.md and refine it:
|
|
116
|
+
- Add new insights that earned their place in long-term memory
|
|
117
|
+
- Remove or update information that's become stale
|
|
118
|
+
- Keep it organized and scannable
|
|
119
|
+
- Do NOT bloat it — distill, don't accumulate
|
|
120
|
+
|
|
121
|
+
### 4c. Update Concept Graph
|
|
122
|
+
|
|
123
|
+
The concept graph lives in **`concepts.db`** (SQLite). You write to it using the ConceptStore methods exposed on the server. During dreaming, you have full write access to add concepts and triples.
|
|
124
|
+
|
|
125
|
+
**How to update the graph:**
|
|
126
|
+
|
|
127
|
+
Use a script to interact with concepts.db directly via the workspace. The ConceptStore class is available at `src/memory/concept-store.ts`. However, the simplest approach is:
|
|
128
|
+
|
|
129
|
+
1. **Review current graph**: Use `concept_stats()` to see health, `concept_query()` to explore
|
|
130
|
+
2. **Process pending drafts**: Read each draft, decide if it becomes a concept+triple or gets discarded
|
|
131
|
+
3. **Add new concepts and triples** discovered during this dream
|
|
132
|
+
4. **Update stale information** — change predicates/objects if facts have changed
|
|
133
|
+
5. **Remove obsolete triples** — things no longer true
|
|
134
|
+
|
|
135
|
+
**Concept naming**: Use snake_case identifiers (`grab_me_a_beer`, `telegram_channel`, `memory_system`).
|
|
136
|
+
|
|
137
|
+
**Relationship predicates**: Use descriptive predicates. Common ones:
|
|
138
|
+
- `is_a` — type/category
|
|
139
|
+
- `part_of` — containment
|
|
140
|
+
- `uses` — tool/dependency usage
|
|
141
|
+
- `created_by` — authorship
|
|
142
|
+
- `communicates_via` — communication channel
|
|
143
|
+
- `located_in` — physical/logical location
|
|
144
|
+
- `depends_on` — dependency
|
|
145
|
+
- `improves` — enhancement relationship
|
|
146
|
+
- `related_to` — general association
|
|
147
|
+
- `learned_from` — lesson source
|
|
148
|
+
- `proposed_for` — suggestion target
|
|
149
|
+
|
|
150
|
+
**Growth rules**:
|
|
151
|
+
- Add new concepts and relationships discovered during this dream
|
|
152
|
+
- **Prioritize associative links** — every dream should produce at least 2-3 new `structurally_analogous_to` / `contradicts` / `reminds_of` triples with `association_reason`
|
|
153
|
+
- Remove relationships that are no longer relevant
|
|
154
|
+
- Merge duplicate concepts (remove the duplicate, update triples to point to canonical)
|
|
155
|
+
- Keep the graph meaningful — not every fact needs to be a triple, but every genuine association does
|
|
156
|
+
|
|
157
|
+
### 4d. Notification (conditional)
|
|
158
|
+
|
|
159
|
+
**Send a message ONLY if**:
|
|
160
|
+
- You discovered something genuinely interesting or surprising
|
|
161
|
+
- You have a concrete proposal that could benefit the user
|
|
162
|
+
- Something seems urgent or time-sensitive
|
|
163
|
+
|
|
164
|
+
**NEVER send a message just to say "I dreamed and nothing interesting happened."**
|
|
165
|
+
|
|
166
|
+
**Before ANY proposed action** (beyond writing to your own files): message the user and ask permission. Always.
|
|
167
|
+
|
|
168
|
+
Use `send_message` tool with channel `{{CHANNEL}}` and chatId `{{CHAT_ID}}`.
|
|
169
|
+
|
|
170
|
+
## Important Rules
|
|
171
|
+
|
|
172
|
+
- This runs at 03:00 — the user is sleeping. Don't spam messages unless it's genuinely noteworthy.
|
|
173
|
+
- Write everything to files first. Files are your primary output.
|
|
174
|
+
- Be honest in your dream journal — if the week was boring, say so.
|
|
175
|
+
- The concept graph should grow organically, not be forced. Don't add triples just to have a bigger graph.
|
|
176
|
+
- If MEMORY.md is getting too long, this is the time to prune it.
|
|
177
|
+
- If you notice your previous dreams were repetitive, note that as a meta-observation and adjust.
|
|
@@ -0,0 +1,229 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: google-workspace
|
|
3
|
+
description: "Unified access to Google Workspace: Gmail, Drive, Calendar, Docs, Sheets, Slides. Use this skill for any interaction with the user's Google account — reading emails, managing files, checking calendar, creating documents, etc."
|
|
4
|
+
user-invocable: false
|
|
5
|
+
command-dispatch: tool
|
|
6
|
+
command-tool: Bash
|
|
7
|
+
command-arg-mode: raw
|
|
8
|
+
priority: 2
|
|
9
|
+
---
|
|
10
|
+
|
|
11
|
+
# Google Workspace Skill
|
|
12
|
+
|
|
13
|
+
Unified access to all Google services via REST APIs with a single OAuth2 token.
|
|
14
|
+
|
|
15
|
+
## Setup
|
|
16
|
+
|
|
17
|
+
### Required environment variables
|
|
18
|
+
- `GOOGLE_CLIENT_ID` — OAuth2 client ID
|
|
19
|
+
- `GOOGLE_CLIENT_SECRET` — OAuth2 client secret
|
|
20
|
+
|
|
21
|
+
### First-time OAuth2 setup
|
|
22
|
+
|
|
23
|
+
If `~/.gmab-google-token.json` does not exist, run this interactive flow:
|
|
24
|
+
|
|
25
|
+
**Step 1** — Generate the authorization URL:
|
|
26
|
+
```bash
|
|
27
|
+
.claude/skills/google-workspace/scripts/auth.sh
|
|
28
|
+
```
|
|
29
|
+
This prints `GOOGLE_AUTH_URL=<url>`. Extract the URL.
|
|
30
|
+
|
|
31
|
+
**Step 2** — Send the URL to the user in chat so they can open it in a browser, authorize, and copy the code that Google shows.
|
|
32
|
+
|
|
33
|
+
**Step 3** — Once the user pastes the authorization code, exchange it:
|
|
34
|
+
```bash
|
|
35
|
+
.claude/skills/google-workspace/scripts/auth.sh "<authorization_code>"
|
|
36
|
+
```
|
|
37
|
+
This saves the refresh token to `~/.gmab-google-token.json`. From then on, all scripts auto-refresh the access token.
|
|
38
|
+
|
|
39
|
+
**Scopes covered**: Gmail (modify), Drive (full), Calendar (full), Docs, Sheets, Slides.
|
|
40
|
+
|
|
41
|
+
> **Note:** If Calendar scope was not included in the original OAuth2 setup,
|
|
42
|
+
> you may need to re-run `auth.sh` to add it. The token file will be updated.
|
|
43
|
+
|
|
44
|
+
---
|
|
45
|
+
|
|
46
|
+
## Gmail
|
|
47
|
+
|
|
48
|
+
Script: `.claude/skills/google-workspace/scripts/gmail.sh`
|
|
49
|
+
|
|
50
|
+
### Commands
|
|
51
|
+
|
|
52
|
+
```bash
|
|
53
|
+
# List recent inbox emails
|
|
54
|
+
gmail.sh list [--max N]
|
|
55
|
+
|
|
56
|
+
# Search emails (Gmail query syntax)
|
|
57
|
+
gmail.sh search "query" [--max N]
|
|
58
|
+
|
|
59
|
+
# Read a specific email
|
|
60
|
+
gmail.sh read <messageId> [--format minimal|full]
|
|
61
|
+
|
|
62
|
+
# Send an email
|
|
63
|
+
gmail.sh send --to ADDR --subject SUBJ --body TXT [--cc ADDR] [--bcc ADDR]
|
|
64
|
+
|
|
65
|
+
# Reply to an email
|
|
66
|
+
gmail.sh reply <messageId> --body TXT
|
|
67
|
+
|
|
68
|
+
# List all labels
|
|
69
|
+
gmail.sh labels
|
|
70
|
+
|
|
71
|
+
# Modify labels on a message
|
|
72
|
+
gmail.sh label <messageId> --add LABEL --remove LABEL
|
|
73
|
+
|
|
74
|
+
# Mark as read/unread
|
|
75
|
+
gmail.sh mark-read <messageId>
|
|
76
|
+
gmail.sh mark-unread <messageId>
|
|
77
|
+
|
|
78
|
+
# Trash a message
|
|
79
|
+
gmail.sh trash <messageId>
|
|
80
|
+
|
|
81
|
+
# List/get threads
|
|
82
|
+
gmail.sh threads [--max N]
|
|
83
|
+
gmail.sh thread <threadId>
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
### Search operators (Gmail query syntax)
|
|
87
|
+
|
|
88
|
+
| Operator | Example | Description |
|
|
89
|
+
|----------|---------|-------------|
|
|
90
|
+
| `from:` | `from:user@example.com` | Sender |
|
|
91
|
+
| `to:` | `to:me` | Recipient |
|
|
92
|
+
| `subject:` | `subject:meeting` | Subject contains |
|
|
93
|
+
| `is:` | `is:unread`, `is:starred` | Message state |
|
|
94
|
+
| `has:` | `has:attachment` | Has attachment |
|
|
95
|
+
| `newer_than:` | `newer_than:2d` | Time filter (d/m/y) |
|
|
96
|
+
| `older_than:` | `older_than:1y` | Time filter |
|
|
97
|
+
| `label:` | `label:important` | Label filter |
|
|
98
|
+
| `filename:` | `filename:pdf` | Attachment type |
|
|
99
|
+
|
|
100
|
+
### Gmail rules
|
|
101
|
+
- **NEVER send emails without the user's explicit approval**
|
|
102
|
+
- Use `list --max 5` or `search "is:unread newer_than:1d"` for quick checks
|
|
103
|
+
- Summarize important emails concisely
|
|
104
|
+
|
|
105
|
+
---
|
|
106
|
+
|
|
107
|
+
## Google Drive
|
|
108
|
+
|
|
109
|
+
Script: `.claude/skills/google-workspace/scripts/drive.sh`
|
|
110
|
+
|
|
111
|
+
### Commands
|
|
112
|
+
|
|
113
|
+
```bash
|
|
114
|
+
# List files (recent first)
|
|
115
|
+
drive.sh list [--max N] [--folder FOLDER_ID]
|
|
116
|
+
|
|
117
|
+
# Search files (Drive query syntax)
|
|
118
|
+
drive.sh search "query" [--max N]
|
|
119
|
+
|
|
120
|
+
# Get file metadata
|
|
121
|
+
drive.sh info <fileId>
|
|
122
|
+
|
|
123
|
+
# Read/export file content (text output)
|
|
124
|
+
drive.sh read <fileId>
|
|
125
|
+
|
|
126
|
+
# Download binary file
|
|
127
|
+
drive.sh download <fileId> --output PATH
|
|
128
|
+
|
|
129
|
+
# Upload a file
|
|
130
|
+
drive.sh upload PATH [--folder FOLDER_ID] [--name NAME]
|
|
131
|
+
|
|
132
|
+
# Create a Google Doc
|
|
133
|
+
drive.sh create-doc --name NAME [--content TEXT] [--folder FOLDER_ID]
|
|
134
|
+
|
|
135
|
+
# Move / rename files
|
|
136
|
+
drive.sh move <fileId> --to FOLDER_ID
|
|
137
|
+
drive.sh rename <fileId> --name NEW_NAME
|
|
138
|
+
|
|
139
|
+
# Share a file
|
|
140
|
+
drive.sh share <fileId> --email USER@EXAMPLE.COM --role writer
|
|
141
|
+
drive.sh share <fileId> --anyone --role reader
|
|
142
|
+
|
|
143
|
+
# Trash a file
|
|
144
|
+
drive.sh trash <fileId>
|
|
145
|
+
```
|
|
146
|
+
|
|
147
|
+
### Search query syntax (Drive q parameter)
|
|
148
|
+
|
|
149
|
+
| Operator | Example | Description |
|
|
150
|
+
|----------|---------|-------------|
|
|
151
|
+
| `name contains` | `name contains 'report'` | File name contains |
|
|
152
|
+
| `fullText contains` | `fullText contains 'budget'` | Content search |
|
|
153
|
+
| `mimeType =` | `mimeType = 'application/pdf'` | File type |
|
|
154
|
+
| `modifiedTime >` | `modifiedTime > '2026-01-01'` | Modified after |
|
|
155
|
+
| `'<id>' in parents` | `'abc123' in parents` | Files in folder |
|
|
156
|
+
| `sharedWithMe` | `sharedWithMe = true` | Shared files |
|
|
157
|
+
|
|
158
|
+
### Auto-export formats
|
|
159
|
+
|
|
160
|
+
| Source | Export format |
|
|
161
|
+
|--------|-------------|
|
|
162
|
+
| Google Doc | Plain text |
|
|
163
|
+
| Google Sheet | CSV |
|
|
164
|
+
| Google Slides | Plain text |
|
|
165
|
+
| Binary files | Use `download` command |
|
|
166
|
+
|
|
167
|
+
### Drive rules
|
|
168
|
+
- **NEVER delete files** — use `trash` instead (recoverable)
|
|
169
|
+
- **Ask before sharing** — sharing changes file permissions
|
|
170
|
+
- Prefer `trash` over permanent delete
|
|
171
|
+
|
|
172
|
+
---
|
|
173
|
+
|
|
174
|
+
## Google Calendar
|
|
175
|
+
|
|
176
|
+
Script: `.claude/skills/google-workspace/scripts/calendar.sh`
|
|
177
|
+
|
|
178
|
+
### Commands
|
|
179
|
+
|
|
180
|
+
```bash
|
|
181
|
+
# List today's events
|
|
182
|
+
calendar.sh list [--max N] [--cal ID]
|
|
183
|
+
calendar.sh today [--cal ID]
|
|
184
|
+
|
|
185
|
+
# Upcoming events (default: 7 days)
|
|
186
|
+
calendar.sh upcoming [--days N] [--max N] [--cal ID]
|
|
187
|
+
calendar.sh week [--cal ID]
|
|
188
|
+
|
|
189
|
+
# Search events by text
|
|
190
|
+
calendar.sh search "query" [--max N] [--cal ID]
|
|
191
|
+
|
|
192
|
+
# Get event details
|
|
193
|
+
calendar.sh get <eventId> [--cal ID]
|
|
194
|
+
|
|
195
|
+
# Create an event
|
|
196
|
+
calendar.sh create --summary "Title" --start ISO --end ISO [--location LOC] [--description TXT]
|
|
197
|
+
calendar.sh create --summary "Title" --start 2026-02-20 --all-day
|
|
198
|
+
|
|
199
|
+
# Update an event
|
|
200
|
+
calendar.sh update <eventId> [--summary T] [--start ISO] [--end ISO] [--location L]
|
|
201
|
+
|
|
202
|
+
# Delete an event
|
|
203
|
+
calendar.sh delete <eventId> [--cal ID]
|
|
204
|
+
|
|
205
|
+
# List available calendars
|
|
206
|
+
calendar.sh calendars
|
|
207
|
+
```
|
|
208
|
+
|
|
209
|
+
### Date formats
|
|
210
|
+
|
|
211
|
+
| Format | Example | Description |
|
|
212
|
+
|--------|---------|-------------|
|
|
213
|
+
| ISO 8601 datetime | `2026-02-17T14:00:00+01:00` | Timed event |
|
|
214
|
+
| ISO 8601 date | `2026-02-17` | All-day event |
|
|
215
|
+
|
|
216
|
+
### Calendar rules
|
|
217
|
+
- **NEVER create/modify/delete events without the user's approval**
|
|
218
|
+
- Default calendar is `primary`
|
|
219
|
+
- Use `--cal` to specify a different calendar ID (from `calendars` command)
|
|
220
|
+
- For quick checks use `today` or `upcoming --days 3`
|
|
221
|
+
|
|
222
|
+
---
|
|
223
|
+
|
|
224
|
+
## Security Notes
|
|
225
|
+
|
|
226
|
+
- Token stored locally in `~/.gmab-google-token.json` (chmod 600)
|
|
227
|
+
- Access tokens are short-lived (1 hour) and auto-refreshed
|
|
228
|
+
- NEVER expose tokens in logs or output
|
|
229
|
+
- NEVER send emails without explicit user approval
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# Google OAuth2 Setup — Unified auth for all Google APIs
#
# Modes:
#   ./auth.sh          -> prints the consent URL (GOOGLE_AUTH_URL=...)
#   ./auth.sh <code>   -> exchanges the authorization code for tokens
#
# Prerequisites:
#   1. Google Cloud project with APIs enabled (Gmail, Drive, Docs, Sheets, Slides)
#   2. OAuth2 credentials (Desktop app type)
#   3. GOOGLE_CLIENT_ID and GOOGLE_CLIENT_SECRET exported in the environment
#
# NOTE(review): the "oob" redirect flow below has been deprecated by Google for
# new OAuth clients — confirm the client still supports it, or migrate to the
# loopback-IP redirect flow.

set -euo pipefail

TOKEN_FILE="${HOME}/.gmab-google-token.json"
REDIRECT_URI="urn:ietf:wg:oauth:2.0:oob"

# Every scope requested in a single consent flow.
SCOPES_RAW=(
  "https://www.googleapis.com/auth/gmail.modify"
  "https://www.googleapis.com/auth/drive"
  "https://www.googleapis.com/auth/calendar"
  "https://www.googleapis.com/auth/documents"
  "https://www.googleapis.com/auth/spreadsheets"
  "https://www.googleapis.com/auth/presentations"
)

# Scopes are space-delimited in the URL, i.e. joined with %20.
SCOPES_ENCODED="${SCOPES_RAW[0]}"
for extra_scope in "${SCOPES_RAW[@]:1}"; do
  SCOPES_ENCODED+="%20${extra_scope}"
done

CLIENT_ID="${GOOGLE_CLIENT_ID:-}"
CLIENT_SECRET="${GOOGLE_CLIENT_SECRET:-}"

if [[ -z "$CLIENT_ID" || -z "$CLIENT_SECRET" ]]; then
  echo "ERROR: Set GOOGLE_CLIENT_ID and GOOGLE_CLIENT_SECRET environment variables first." >&2
  exit 1
fi

# --- Mode 1: no argument — emit the consent URL and stop ---
if [[ $# -eq 0 ]]; then
  AUTH_URL="https://accounts.google.com/o/oauth2/v2/auth?client_id=${CLIENT_ID}&redirect_uri=${REDIRECT_URI}&response_type=code&scope=${SCOPES_ENCODED}&access_type=offline&prompt=consent"
  echo "GOOGLE_AUTH_URL=${AUTH_URL}"
  exit 0
fi

# --- Mode 2: exchange the pasted authorization code for tokens ---
AUTH_CODE="$1"

RESPONSE=$(curl -s "https://oauth2.googleapis.com/token" \
  -d "code=$AUTH_CODE" \
  -d "client_id=$CLIENT_ID" \
  -d "client_secret=$CLIENT_SECRET" \
  -d "redirect_uri=$REDIRECT_URI" \
  -d "grant_type=authorization_code")

ACCESS_TOKEN=$(echo "$RESPONSE" | jq -r '.access_token // empty')
REFRESH_TOKEN=$(echo "$RESPONSE" | jq -r '.refresh_token // empty')
EXPIRES_IN=$(echo "$RESPONSE" | jq -r '.expires_in // 3600')

if [[ -z "$ACCESS_TOKEN" || -z "$REFRESH_TOKEN" ]]; then
  echo "ERROR: Failed to obtain tokens" >&2
  echo "$RESPONSE" | jq . 2>/dev/null || echo "$RESPONSE"
  exit 1
fi

# Expire 60 seconds early so callers never race a token that lapses mid-call.
EXPIRES_AT=$(($(date +%s) + EXPIRES_IN - 60))

# Persist everything needed for unattended refresh in one unified file.
jq -n \
  --arg ci "$CLIENT_ID" \
  --arg cs "$CLIENT_SECRET" \
  --arg rt "$REFRESH_TOKEN" \
  --arg at "$ACCESS_TOKEN" \
  --argjson ea "$EXPIRES_AT" \
  '{
    client_id: $ci,
    client_secret: $cs,
    refresh_token: $rt,
    access_token: $at,
    expires_at: $ea
  }' > "$TOKEN_FILE"

chmod 600 "$TOKEN_FILE"

echo "OK: Tokens saved to $TOKEN_FILE"
|