@looplia/looplia-cli 0.6.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/dist/index.d.ts +2 -0
  2. package/dist/index.js +33216 -0
  3. package/package.json +55 -0
  4. package/plugins/looplia-core/.claude-plugin/plugin.json +10 -0
  5. package/plugins/looplia-core/commands/build-workflow.md +92 -0
  6. package/plugins/looplia-core/commands/build.md +71 -0
  7. package/plugins/looplia-core/commands/list-workflows.md +55 -0
  8. package/plugins/looplia-core/commands/run.md +50 -0
  9. package/plugins/looplia-core/hooks/hooks.json +27 -0
  10. package/plugins/looplia-core/scripts/hooks/compact-inject-state.sh +36 -0
  11. package/plugins/looplia-core/scripts/hooks/post-write-validate.sh +81 -0
  12. package/plugins/looplia-core/scripts/hooks/stop-guard.sh +56 -0
  13. package/plugins/looplia-core/skills/plugin-registry-scanner/SKILL.md +108 -0
  14. package/plugins/looplia-core/skills/plugin-registry-scanner/scripts/scan-plugins.ts +221 -0
  15. package/plugins/looplia-core/skills/plugin-registry-scanner/test/scan-plugins.test.ts +256 -0
  16. package/plugins/looplia-core/skills/search/SKILL.md +174 -0
  17. package/plugins/looplia-core/skills/skill-capability-matcher/SKILL.md +378 -0
  18. package/plugins/looplia-core/skills/workflow-executor/SKILL.md +469 -0
  19. package/plugins/looplia-core/skills/workflow-executor-inline/SKILL.md +217 -0
  20. package/plugins/looplia-core/skills/workflow-schema-composer/SCHEMA.md +214 -0
  21. package/plugins/looplia-core/skills/workflow-schema-composer/SKILL.md +373 -0
  22. package/plugins/looplia-core/skills/workflow-schema-composer/templates/workflow.md.template +44 -0
  23. package/plugins/looplia-core/skills/workflow-validator/SKILL.md +171 -0
  24. package/plugins/looplia-core/skills/workflow-validator/scripts/validate.ts +244 -0
  25. package/plugins/looplia-writer/.claude-plugin/plugin.json +10 -0
  26. package/plugins/looplia-writer/README.md +107 -0
  27. package/plugins/looplia-writer/skills/content-documenter/SKILL.md +189 -0
  28. package/plugins/looplia-writer/skills/id-generator/SKILL.md +120 -0
  29. package/plugins/looplia-writer/skills/idea-synthesis/SKILL.md +162 -0
  30. package/plugins/looplia-writer/skills/media-reviewer/SKILL.md +105 -0
  31. package/plugins/looplia-writer/skills/user-profile-reader/SKILL.md +94 -0
  32. package/plugins/looplia-writer/skills/writing-enhancer/SKILL.md +34 -0
  33. package/plugins/looplia-writer/skills/writing-kit-assembler/SKILL.md +206 -0
  34. package/plugins/looplia-writer/workflows/writing-kit.md +134 -0
package/plugins/looplia-core/skills/plugin-registry-scanner/scripts/scan-plugins.ts
@@ -0,0 +1,221 @@
+ #!/usr/bin/env bun
+ /**
+  * Plugin Registry Scanner
+  *
+  * Deterministic script to scan installed plugins and catalog available skills.
+  * Outputs a JSON registry to stdout.
+  *
+  * Usage: bun plugins/looplia-core/skills/plugin-registry-scanner/scripts/scan-plugins.ts [plugins-dir]
+  */
+
+ import { readdir, readFile, stat } from "node:fs/promises";
+ import { basename, join } from "node:path";
+ import { parse as parseYaml } from "yaml";
+
+ // Top-level regex for frontmatter extraction
+ export const FRONTMATTER_REGEX = /^---\n([\s\S]*?)\n---/;
+
+ // Capability inference lookup table
+ export const CAPABILITY_PATTERNS: [string, string][] = [
+   ["analy", "content analysis"],
+   ["review", "content review"],
+   ["theme", "theme extraction"],
+   ["extract", "theme extraction"],
+   ["quote", "quote identification"],
+   ["generat", "content generation"],
+   ["idea", "idea generation"],
+   ["hook", "idea generation"],
+   ["outline", "outline creation"],
+   ["transform", "content transformation"],
+   ["document", "structured output"],
+   ["structur", "structured output"],
+   ["assembl", "content assembly"],
+   ["combin", "content assembly"],
+   ["profile", "personalization"],
+   ["user", "personalization"],
+   ["valid", "validation"],
+ ];
+
+ export type SkillInfo = {
+   name: string;
+   description: string;
+   tools?: string[];
+   model?: string;
+   capabilities: string[];
+ };
+
+ export type PluginInfo = {
+   name: string;
+   path: string;
+   skills: SkillInfo[];
+ };
+
+ export type Registry = {
+   plugins: PluginInfo[];
+   summary: {
+     totalPlugins: number;
+     totalSkills: number;
+   };
+ };
+
+ /**
+  * Extract YAML frontmatter from markdown content
+  */
+ export function extractFrontmatter(
+   content: string
+ ): Record<string, unknown> | null {
+   const match = content.match(FRONTMATTER_REGEX);
+   if (!match) {
+     return null;
+   }
+
+   try {
+     return parseYaml(match[1]) as Record<string, unknown>;
+   } catch {
+     return null;
+   }
+ }
+
+ /**
+  * Infer capabilities from skill description using pattern matching
+  */
+ export function inferCapabilities(description: string): string[] {
+   const descLower = description.toLowerCase();
+   const capabilities = new Set<string>();
+
+   for (const [pattern, capability] of CAPABILITY_PATTERNS) {
+     if (descLower.includes(pattern)) {
+       capabilities.add(capability);
+     }
+   }
+
+   if (capabilities.size === 0) {
+     capabilities.add("general processing");
+   }
+
+   return [...capabilities];
+ }
+
+ /**
+  * Scan a single skill directory
+  */
+ async function scanSkill(skillPath: string): Promise<SkillInfo | null> {
+   const skillMdPath = join(skillPath, "SKILL.md");
+
+   try {
+     const content = await readFile(skillMdPath, "utf-8");
+     const frontmatter = extractFrontmatter(content);
+
+     if (!(frontmatter?.name && frontmatter.description)) {
+       console.error(`Warning: Invalid frontmatter in ${skillMdPath}`);
+       return null;
+     }
+
+     const description =
+       typeof frontmatter.description === "string"
+         ? frontmatter.description.trim()
+         : String(frontmatter.description);
+
+     let tools: string[] | undefined;
+     if (Array.isArray(frontmatter.tools)) {
+       tools = frontmatter.tools.map(String);
+     } else if (typeof frontmatter.tools === "string") {
+       tools = frontmatter.tools.split(",").map((t: string) => t.trim());
+     }
+
+     return {
+       name: String(frontmatter.name),
+       description,
+       tools,
+       model: frontmatter.model ? String(frontmatter.model) : undefined,
+       capabilities: inferCapabilities(description),
+     };
+   } catch {
+     // SKILL.md doesn't exist or can't be read
+     return null;
+   }
+ }
+
+ /**
+  * Scan a single plugin directory
+  */
+ async function scanPlugin(pluginPath: string): Promise<PluginInfo | null> {
+   const skillsDir = join(pluginPath, "skills");
+
+   try {
+     const skillsStat = await stat(skillsDir);
+     if (!skillsStat.isDirectory()) {
+       return null;
+     }
+   } catch {
+     // No skills directory
+     return null;
+   }
+
+   const skillDirs = await readdir(skillsDir);
+   const skills: SkillInfo[] = [];
+
+   for (const skillDir of skillDirs) {
+     const skillPath = join(skillsDir, skillDir);
+     const skillStat = await stat(skillPath);
+
+     if (skillStat.isDirectory()) {
+       const skill = await scanSkill(skillPath);
+       if (skill) {
+         skills.push(skill);
+       }
+     }
+   }
+
+   if (skills.length === 0) {
+     return null;
+   }
+
+   return {
+     name: basename(pluginPath),
+     path: pluginPath,
+     skills,
+   };
+ }
+
+ /**
+  * Main scanner function
+  */
+ export async function scanPlugins(pluginsPath: string): Promise<Registry> {
+   const plugins: PluginInfo[] = [];
+
+   try {
+     const entries = await readdir(pluginsPath);
+
+     for (const entry of entries) {
+       const pluginPath = join(pluginsPath, entry);
+       const entryStat = await stat(pluginPath);
+
+       if (entryStat.isDirectory() && !entry.startsWith(".")) {
+         const plugin = await scanPlugin(pluginPath);
+         if (plugin) {
+           plugins.push(plugin);
+         }
+       }
+     }
+   } catch (error) {
+     console.error(`Error scanning plugins directory: ${error}`);
+   }
+
+   const totalSkills = plugins.reduce((sum, p) => sum + p.skills.length, 0);
+
+   return {
+     plugins,
+     summary: {
+       totalPlugins: plugins.length,
+       totalSkills,
+     },
+   };
+ }
+
+ // Main execution (only when run directly, not when imported)
+ if (import.meta.main) {
+   const pluginsDir = process.argv[2] || "plugins";
+   const registry = await scanPlugins(pluginsDir);
+   console.log(JSON.stringify(registry, null, 2));
+ }
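
Beyond the CLI entry point, the exported functions above can be imported directly. A minimal usage sketch, assuming Bun, the `yaml` dependency, and an illustrative relative import path:

```ts
// Usage sketch only: the import path and invocation are illustrative,
// based on the exports shown in the diff above.
import { inferCapabilities, scanPlugins } from "./scan-plugins";

// Scan the default plugins directory and print a one-line summary.
const registry = await scanPlugins("plugins");
console.log(
  `Found ${registry.summary.totalSkills} skills across ${registry.summary.totalPlugins} plugins`
);

// The capability heuristic can be exercised on its own:
console.log(inferCapabilities("Validate and analyze workflow outputs"));
// -> ["content analysis", "validation"]
```

When run directly with Bun, the `import.meta.main` guard instead prints the same registry JSON to stdout.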
package/plugins/looplia-core/skills/plugin-registry-scanner/test/scan-plugins.test.ts
@@ -0,0 +1,256 @@
+ import { describe, expect, it } from "bun:test";
+ import {
+   CAPABILITY_PATTERNS,
+   extractFrontmatter,
+   inferCapabilities,
+   scanPlugins,
+ } from "../scripts/scan-plugins";
+
+ describe("scan-plugins", () => {
+   describe("extractFrontmatter", () => {
+     it("should extract valid YAML frontmatter", () => {
+       const content = `---
+ name: test-skill
+ description: A test skill
+ tools: Read, Write
+ ---
+
+ # Test Skill
+
+ This is the body.`;
+
+       const result = extractFrontmatter(content);
+       expect(result).not.toBeNull();
+       expect(result?.name).toBe("test-skill");
+       expect(result?.description).toBe("A test skill");
+       expect(result?.tools).toBe("Read, Write");
+     });
+
+     it("should extract multiline description with pipe syntax", () => {
+       const content = `---
+ name: multi-line
+ description: |
+   This is a multiline
+   description with details.
+ model: claude-haiku-4-5-20251001
+ ---
+
+ # Skill`;
+
+       const result = extractFrontmatter(content);
+       expect(result).not.toBeNull();
+       expect(result?.name).toBe("multi-line");
+       expect(result?.description).toContain("multiline");
+       expect(result?.model).toBe("claude-haiku-4-5-20251001");
+     });
+
+     it("should return null for content without frontmatter", () => {
+       const content = `# Just a heading
+
+ No frontmatter here.`;
+
+       const result = extractFrontmatter(content);
+       expect(result).toBeNull();
+     });
+
+     it("should return null for invalid YAML", () => {
+       const content = `---
+ name: test
+ invalid: yaml: syntax: here
+ ---
+
+ # Test`;
+
+       const result = extractFrontmatter(content);
+       expect(result).toBeNull();
+     });
+
+     it("should return null for unclosed frontmatter", () => {
+       const content = `---
+ name: unclosed
+ description: Missing closing delimiter
+
+ # Content`;
+
+       const result = extractFrontmatter(content);
+       expect(result).toBeNull();
+     });
+   });
+
+   describe("inferCapabilities", () => {
+     it("should infer content analysis capability", () => {
+       const result = inferCapabilities("Deep content analysis for videos");
+       expect(result).toContain("content analysis");
+     });
+
+     it("should infer theme extraction capability", () => {
+       const result = inferCapabilities("Extract key themes from documents");
+       expect(result).toContain("theme extraction");
+     });
+
+     it("should infer idea generation capability", () => {
+       const result = inferCapabilities("Generate creative ideas and hooks");
+       expect(result).toContain("idea generation");
+     });
+
+     it("should infer multiple capabilities from description", () => {
+       const result = inferCapabilities(
+         "Analyze content, generate ideas, and create outlines"
+       );
+       expect(result).toContain("content analysis");
+       expect(result).toContain("idea generation");
+       expect(result).toContain("outline creation");
+     });
+
+     it("should infer validation capability", () => {
+       const result = inferCapabilities("Validate workflow outputs");
+       expect(result).toContain("validation");
+     });
+
+     it("should infer personalization capability", () => {
+       const result = inferCapabilities("Read user profile for context");
+       expect(result).toContain("personalization");
+     });
+
+     it("should infer structured output capability", () => {
+       const result = inferCapabilities("Generate structured JSON documents");
+       expect(result).toContain("structured output");
+     });
+
+     it("should infer content assembly capability", () => {
+       const result = inferCapabilities("Assemble and combine content pieces");
+       expect(result).toContain("content assembly");
+     });
+
+     it("should return general processing for unknown descriptions", () => {
+       const result = inferCapabilities("Something completely different");
+       expect(result).toContain("general processing");
+       expect(result.length).toBe(1);
+     });
+
+     it("should be case-insensitive", () => {
+       const result = inferCapabilities("ANALYZE CONTENT");
+       expect(result).toContain("content analysis");
+     });
+
+     it("should not duplicate capabilities", () => {
+       // "analyze" and "analysis" should not create duplicate entries
+       const result = inferCapabilities("Analyze using analysis techniques");
+       const analysisCount = result.filter(
+         (c) => c === "content analysis"
+       ).length;
+       expect(analysisCount).toBe(1);
+     });
+   });
+
+   describe("CAPABILITY_PATTERNS", () => {
+     it("should have expected patterns defined", () => {
+       const patterns = CAPABILITY_PATTERNS.map(([p]) => p);
+       expect(patterns).toContain("analy");
+       expect(patterns).toContain("valid");
+       expect(patterns).toContain("generat");
+       expect(patterns).toContain("theme");
+     });
+
+     it("should map patterns to capabilities", () => {
+       const patternMap = Object.fromEntries(CAPABILITY_PATTERNS);
+       expect(patternMap.analy).toBe("content analysis");
+       expect(patternMap.valid).toBe("validation");
+       expect(patternMap.outline).toBe("outline creation");
+     });
+   });
+
+   describe("scanPlugins", () => {
+     it("should discover looplia-core skills", async () => {
+       const result = await scanPlugins("plugins");
+       const corePlugin = result.plugins.find((p) => p.name === "looplia-core");
+
+       expect(corePlugin).toBeDefined();
+       expect(corePlugin?.skills.length).toBeGreaterThan(0);
+
+       const skillNames = corePlugin?.skills.map((s) => s.name) ?? [];
+       expect(skillNames).toContain("plugin-registry-scanner");
+       expect(skillNames).toContain("workflow-executor");
+       expect(skillNames).toContain("skill-capability-matcher");
+     });
+
+     it("should discover looplia-writer skills", async () => {
+       const result = await scanPlugins("plugins");
+       const writerPlugin = result.plugins.find(
+         (p) => p.name === "looplia-writer"
+       );
+
+       expect(writerPlugin).toBeDefined();
+       expect(writerPlugin?.skills.length).toBeGreaterThan(0);
+
+       const skillNames = writerPlugin?.skills.map((s) => s.name) ?? [];
+       expect(skillNames).toContain("media-reviewer");
+       expect(skillNames).toContain("idea-synthesis");
+       expect(skillNames).toContain("writing-kit-assembler");
+     });
+
+     it("should return valid registry JSON schema", async () => {
+       const result = await scanPlugins("plugins");
+
+       // Check top-level structure
+       expect(result).toHaveProperty("plugins");
+       expect(result).toHaveProperty("summary");
+       expect(Array.isArray(result.plugins)).toBe(true);
+
+       // Check summary
+       expect(result.summary).toHaveProperty("totalPlugins");
+       expect(result.summary).toHaveProperty("totalSkills");
+       expect(typeof result.summary.totalPlugins).toBe("number");
+       expect(typeof result.summary.totalSkills).toBe("number");
+
+       // Check skill structure
+       const firstSkill = result.plugins[0]?.skills[0];
+       if (firstSkill) {
+         expect(firstSkill).toHaveProperty("name");
+         expect(firstSkill).toHaveProperty("description");
+         expect(firstSkill).toHaveProperty("capabilities");
+         expect(Array.isArray(firstSkill.capabilities)).toBe(true);
+       }
+     });
+
+     it("should include tools and model when specified", async () => {
+       const result = await scanPlugins("plugins");
+       const corePlugin = result.plugins.find((p) => p.name === "looplia-core");
+       const scanner = corePlugin?.skills.find(
+         (s) => s.name === "plugin-registry-scanner"
+       );
+
+       expect(scanner?.tools).toBeDefined();
+       expect(scanner?.tools).toContain("Bash");
+       expect(scanner?.model).toBe("claude-haiku-4-5-20251001");
+     });
+
+     it("should handle non-existent plugins directory gracefully", async () => {
+       const result = await scanPlugins("non-existent-dir");
+
+       expect(result.plugins).toEqual([]);
+       expect(result.summary.totalPlugins).toBe(0);
+       expect(result.summary.totalSkills).toBe(0);
+     });
+
+     it("should calculate correct totals", async () => {
+       const result = await scanPlugins("plugins");
+
+       const calculatedTotal = result.plugins.reduce(
+         (sum, p) => sum + p.skills.length,
+         0
+       );
+
+       expect(result.summary.totalSkills).toBe(calculatedTotal);
+       expect(result.summary.totalPlugins).toBe(result.plugins.length);
+     });
+
+     it("should discover exactly 14 skills after adding workflow-executor-inline", async () => {
+       const result = await scanPlugins("plugins");
+
+       // After adding workflow-executor-inline skill, we should have all 14 skills
+       expect(result.summary.totalSkills).toBe(14);
+       expect(result.summary.totalPlugins).toBe(2);
+     });
+   });
+ });
package/plugins/looplia-core/skills/search/SKILL.md
@@ -0,0 +1,174 @@
+ ---
+ name: search
+ description: |
+   This skill should be used when the user needs to search for information, find files,
+   look up content online, or retrieve data without providing input files.
+
+   Trigger phrases include:
+   - "search for", "find", "look up", "research"
+   - "search the web", "search online", "fetch from URL"
+   - "search files", "scan codebase", "find in project", "grep for"
+   - "get news from", "read hacker news", "check what's trending"
+
+   Supports multiple search modes:
+   - local: Search local filesystem using Glob, Grep, Read, Bash
+   - web: Search web content using WebSearch, WebFetch
+   - future: Pluggable providers (jina.ai, firecrawl, exa.ai)
+
+   This is an input-less skill - it executes search missions autonomously.
+ model: claude-haiku-4-5-20251001
+ tools:
+   - Glob
+   - Grep
+   - Read
+   - Bash
+   - WebSearch
+   - WebFetch
+ ---
+
+ # Search Skill
+
+ Execute search missions and compile structured results from various sources.
+
+ ## Purpose
+
+ Provide a unified search interface that can:
+ 1. Search local files, codebases, and directories
+ 2. Search web content (via WebSearch/WebFetch or future provider integrations)
+ 3. Compile results into structured JSON output
+
+ This skill operates **without user-provided input files** - it receives a search mission and executes it autonomously.
+
+ ## Search Modes
+
+ ### Local Mode
+
+ Search the local environment:
+
+ | Tool | Use Case |
+ |------|----------|
+ | `Glob` | Find files by pattern (e.g., `**/*.md`, `src/**/*.ts`) |
+ | `Grep` | Search file contents for patterns |
+ | `Read` | Read matched files for content extraction |
+ | `Bash` | Execute find, ls, or other search commands |
+
+ ### Web Mode
+
+ Search web content:
+
+ | Tool | Use Case |
+ |------|----------|
+ | `WebSearch` | Search the web for topics, news, documentation |
+ | `WebFetch` | Fetch and extract content from URLs |
+
+ **Future Providers** (planned integration):
+ - `jina.ai` - AI-powered web search and extraction
+ - `firecrawl` - Web crawling and scraping
+ - `exa.ai` - Neural search engine
+
+ ## Process
+
+ ### Step 1: Parse Mission
+
+ Extract from the mission description:
+ - **Search target**: What to search for (keywords, patterns, topics)
+ - **Search mode**: local, web, or auto-detect
+ - **Scope**: Directories, file types, domains, date range
+ - **Compile format**: How to structure results
+
+ ### Step 2: Execute Search
+
+ Based on mode:
+
+ **Local:**
+ ```
+ 1. Use Glob to find matching files
+ 2. Use Grep to search content patterns
+ 3. Use Read to extract relevant sections
+ 4. Compile matched results
+ ```
+
+ **Web:**
+ ```
+ 1. Use WebSearch to find relevant pages
+ 2. Use WebFetch to extract content
+ 3. Parse and filter results
+ 4. Compile into structured format
+ ```
+
+ ### Step 3: Compile Results
+
+ Structure findings into the output schema with:
+ - Source attribution (file path or URL)
+ - Relevance scoring
+ - Extracted content snippets
+ - Metadata (date, author if available)
+
+ ## Input
+
+ The skill receives a **mission description** specifying:
+ - What to search for
+ - Where to search (mode/scope)
+ - What to extract and compile
+
+ Example missions:
+ ```
+ Search for all TODO comments in the src/ directory and compile a list with file locations.
+ ```
+ ```
+ Search Hacker News for the top 5 trending AI stories today and summarize each.
+ ```
+ ```
+ Find all TypeScript files that import from '@looplia-core/core' and list their exports.
+ ```
+
+ ## Output Schema
+
+ ```json
+ {
+   "query": "original search query/mission",
+   "mode": "local | web",
+   "results": [
+     {
+       "source": "path/to/file.ts | https://example.com/page",
+       "type": "file | url",
+       "title": "Optional title or filename",
+       "content": "Extracted content or snippet",
+       "relevance": 0.95,
+       "metadata": {
+         "lineNumber": 42,
+         "matchContext": "surrounding context",
+         "date": "2025-12-24",
+         "author": "optional"
+       }
+     }
+   ],
+   "summary": "Brief summary of findings",
+   "totalMatches": 15,
+   "compiledAt": "2025-12-24T10:30:00Z"
+ }
+ ```
+
+ ## Usage in Workflows
+
+ This skill enables **input-less workflow steps**:
+
+ ```yaml
+ steps:
+   - id: find-news
+     skill: search
+     mission: |
+       Search Hacker News for today's top 3 AI stories.
+       Extract title, URL, points, and brief summary for each.
+     output: outputs/news.json
+ ```
+
+ No `input:` field required - the skill operates autonomously.
+
+ ## Important Rules
+
+ 1. **Always attribute sources** - Include file paths or URLs for all results
+ 2. **Respect scope** - Only search within specified boundaries
+ 3. **Compile, don't dump** - Structure results, don't return raw search output
+ 4. **Handle empty results** - Return valid JSON even when no matches found
+ 5. **Rate limit web searches** - Be mindful of API limits for web providers
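
For consumers wiring this skill's output into TypeScript tooling, the documented output schema maps naturally onto a type definition. A minimal sketch; the type names are illustrative and not part of the published package:

```ts
// Illustrative types mirroring the search skill's documented output schema.
type SearchResultMetadata = {
  lineNumber?: number; // local mode: line of the match
  matchContext?: string; // surrounding context for the match
  date?: string; // publication or match date, if available
  author?: string; // optional author attribution
};

type SearchResult = {
  source: string; // file path (local) or URL (web)
  type: "file" | "url";
  title?: string; // optional title or filename
  content: string; // extracted content or snippet
  relevance: number; // relevance score, e.g. 0.95
  metadata?: SearchResultMetadata;
};

type SearchOutput = {
  query: string; // original search query/mission
  mode: "local" | "web";
  results: SearchResult[];
  summary: string; // brief summary of findings
  totalMatches: number;
  compiledAt: string; // ISO 8601 timestamp
};
```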