cognitive-modules 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cognitive/__init__.py ADDED
@@ -0,0 +1,12 @@
1
+ """
2
+ Cognitive Modules - Structured LLM task runner with schema validation.
3
+
4
+ Usage:
5
+ cog list # List installed modules
6
+ cog run <module> <input> # Run a module
7
+ cog validate <module> # Validate module structure
8
+ cog install <source> # Install module from git/local
9
+ cog doctor # Check environment setup
10
+ """
11
+
12
+ __version__ = "0.1.0"
cognitive/cli.py ADDED
@@ -0,0 +1,423 @@
1
+ """
2
+ Cognitive CLI - Main entry point for the cog command.
3
+
4
+ Commands:
5
+ cog list List installed modules
6
+ cog run <module> <input> Run a module
7
+ cog validate <module> Validate module structure
8
+ cog install <source> Install module from git/local/registry
9
+ cog uninstall <module> Remove an installed module
10
+ cog init <name> Create a new module from template
11
+ cog search <query> Search the public registry
12
+ cog doctor Check environment setup
13
+ cog info <module> Show module details
14
+ """
15
+
16
+ import json
17
+ import sys
18
+ from pathlib import Path
19
+ from typing import Optional
20
+
21
+ import typer
22
+ from rich import print as rprint
23
+ from rich.console import Console
24
+ from rich.table import Table
25
+
26
+ from . import __version__
27
+ from .registry import (
28
+ list_modules,
29
+ find_module,
30
+ install_module,
31
+ uninstall_module,
32
+ search_registry,
33
+ fetch_registry,
34
+ USER_MODULES_DIR,
35
+ )
36
+ from .loader import load_module, detect_format
37
+ from .runner import run_module
38
+ from .subagent import run_with_subagents
39
+ from .validator import validate_module
40
+ from .templates import create_module
41
+ from .providers import check_provider_status
42
+
43
# Single Typer application object; every `@app.command` below registers on
# it. Shell completion is disabled via add_completion=False.
app = typer.Typer(
    name="cog",
    help="Cognitive Modules CLI - Structured LLM task runner",
    add_completion=False,
)

# Shared Rich console used for table output across commands.
console = Console()
49
+
50
+
51
@app.command("list")
def list_cmd(
    format: str = typer.Option("table", "--format", "-f", help="Output format: table, json"),
):
    """List all installed cognitive modules."""
    modules = list_modules()

    # Nothing installed: point the user at the install/init commands.
    if not modules:
        rprint("[yellow]No modules found.[/yellow]")
        rprint(f"\nInstall modules with:")
        rprint(f" [cyan]cog install <source>[/cyan]")
        rprint(f" [cyan]cog init <name>[/cyan]")
        return

    if format == "json":
        # Machine-readable output: only the stable identifying keys.
        payload = [
            {"name": entry["name"], "location": entry["location"], "format": entry["format"]}
            for entry in modules
        ]
        print(json.dumps(payload, indent=2))
        return

    table = Table(title="Installed Modules")
    for header, style in (("Name", "cyan"), ("Location", "green"), ("Format", "dim"), ("Path", None)):
        table.add_column(header, style=style)

    for entry in modules:
        table.add_row(entry["name"], entry["location"], entry["format"], str(entry["path"]))

    console.print(table)
79
+
80
+
81
@app.command("run")
def run_cmd(
    module: str = typer.Argument(..., help="Module name or path"),
    input_file: Optional[Path] = typer.Argument(None, help="Input JSON file (optional if using --args)"),
    output: Optional[Path] = typer.Option(None, "--output", "-o", help="Output file"),
    args: Optional[str] = typer.Option(None, "--args", "-a", help="Direct text input (replaces $ARGUMENTS in prompt)"),
    pretty: bool = typer.Option(False, "--pretty", help="Pretty-print JSON output"),
    no_validate: bool = typer.Option(False, "--no-validate", help="Skip validation"),
    subagent: bool = typer.Option(False, "--subagent", "-s", help="Enable subagent mode (@call support)"),
    model: Optional[str] = typer.Option(None, "--model", "-m", help="LLM model override"),
):
    """Run a cognitive module with input data or direct arguments.

    Input comes either from --args (raw text; input-schema validation is
    skipped) or from a JSON input file.  The JSON result is printed to
    stdout or written to --output, and a colorized confidence line is
    shown when the result carries a "confidence" field.
    """
    # Determine input source.  Compare against None rather than truthiness
    # so an explicit empty string (--args "") still counts as provided
    # input instead of falling through to the "no input" error.
    skip_input_validation = False
    if args is not None:
        # Direct text input via --args (skip input schema validation)
        input_data = {"$ARGUMENTS": args, "query": args}
        skip_input_validation = True
    elif input_file:
        if not input_file.exists():
            rprint(f"[red]Error: Input file not found: {input_file}[/red]")
            raise typer.Exit(1)
        with open(input_file, 'r', encoding='utf-8') as f:
            input_data = json.load(f)
    else:
        rprint("[red]Error: Provide either input file or --args[/red]")
        raise typer.Exit(1)

    mode_str = " [dim](subagent mode)[/dim]" if subagent else ""
    rprint(f"[cyan]→[/cyan] Running module: [bold]{module}[/bold]{mode_str}")

    try:
        if subagent:
            # Use subagent orchestrator for @call support
            result = run_with_subagents(
                module,
                input_data,
                model=model,
                validate_input=not no_validate and not skip_input_validation,
                validate_output=not no_validate,
            )
        else:
            result = run_module(
                module,
                input_data,
                validate_input=not no_validate and not skip_input_validation,
                validate_output=not no_validate,
                model=model,
            )

        indent = 2 if pretty else None
        output_json = json.dumps(result, indent=indent, ensure_ascii=False)

        if output:
            with open(output, 'w', encoding='utf-8') as f:
                f.write(output_json)
            rprint(f"[green]✓[/green] Output saved to: {output}")
        else:
            print(output_json)

        # Traffic-light coloring: >=0.8 green, >=0.6 yellow, else red.
        if "confidence" in result:
            conf = result["confidence"]
            color = "green" if conf >= 0.8 else "yellow" if conf >= 0.6 else "red"
            rprint(f"[{color}]Confidence: {conf:.2f}[/{color}]")

    except typer.Exit:
        # typer.Exit subclasses Exception (via click); never convert an
        # intentional exit into a generic "✗ Error" message.
        raise
    except Exception as e:
        rprint(f"[red]✗ Error: {e}[/red]")
        raise typer.Exit(1)
149
+
150
+
151
@app.command("validate")
def validate_cmd(
    module: str = typer.Argument(..., help="Module name or path"),
):
    """Validate a cognitive module's structure and examples."""
    rprint(f"[cyan]→[/cyan] Validating module: [bold]{module}[/bold]\n")

    is_valid, errors, warnings = validate_module(module)

    # Warnings are informational only; they never fail the command.
    if warnings:
        rprint(f"[yellow]⚠ Warnings ({len(warnings)}):[/yellow]")
        for warning in warnings:
            rprint(f" - {warning}")
        print()

    if not is_valid:
        rprint(f"[red]✗ Validation failed ({len(errors)} errors):[/red]")
        for error in errors:
            rprint(f" - {error}")
        raise typer.Exit(1)

    rprint(f"[green]✓ Module '{module}' is valid[/green]")
173
+
174
+
175
@app.command("install")
def install_cmd(
    source: str = typer.Argument(..., help="Source: github:org/repo/path, registry:name, or local path"),
    name: Optional[str] = typer.Option(None, "--name", "-n", help="Override module name"),
):
    """Install a cognitive module from git, registry, or local path.

    The freshly installed module is validated immediately; on validation
    failure it is uninstalled again and the command exits non-zero.
    """
    rprint(f"[cyan]→[/cyan] Installing from: {source}")

    try:
        target = install_module(source, name)

        is_valid, errors, warnings = validate_module(str(target))

        if not is_valid:
            rprint(f"[red]✗ Installed module failed validation:[/red]")
            for e in errors:
                rprint(f" - {e}")
            # Roll back the broken install before exiting.
            uninstall_module(target.name)
            raise typer.Exit(1)

        rprint(f"[green]✓ Installed: {target.name}[/green]")
        rprint(f" Location: {target}")

        if warnings:
            rprint(f"[yellow] Warnings: {len(warnings)}[/yellow]")

    except typer.Exit:
        # typer.Exit subclasses RuntimeError (via click), so without this
        # clause the rollback exit above would be caught below and reported
        # a second time as the misleading "Install failed: 1".
        raise
    except Exception as e:
        rprint(f"[red]✗ Install failed: {e}[/red]")
        raise typer.Exit(1)
204
+
205
+
206
@app.command("uninstall")
def uninstall_cmd(
    module: str = typer.Argument(..., help="Module name to uninstall"),
):
    """Uninstall a cognitive module."""
    target = USER_MODULES_DIR / module

    # Only user-global installs are removable; project-local modules are
    # managed in-tree and are deliberately left alone.
    if not target.exists():
        rprint(f"[red]Module not found in global location: {module}[/red]")
        rprint(f" (Only modules in ~/.cognitive/modules can be uninstalled)")
        raise typer.Exit(1)

    if not uninstall_module(module):
        rprint(f"[red]✗ Failed to uninstall: {module}[/red]")
        raise typer.Exit(1)

    rprint(f"[green]✓ Uninstalled: {module}[/green]")
223
+
224
+
225
@app.command("init")
def init_cmd(
    name: str = typer.Argument(..., help="Module name (lowercase, hyphenated)"),
    responsibility: str = typer.Option("(描述模块职责)", "--desc", "-d", help="One-line description"),
    target: Path = typer.Option(Path("./cognitive/modules"), "--target", "-t", help="Target directory"),
    no_examples: bool = typer.Option(False, "--no-examples", help="Skip creating examples"),
):
    """Create a new cognitive module from template."""
    # The name must be purely alphanumeric once separators are removed.
    separators_removed = name.replace("-", "").replace("_", "")
    if not separators_removed.isalnum():
        rprint(f"[red]Invalid module name: {name}[/red]")
        rprint(" Use lowercase letters, numbers, and hyphens only")
        raise typer.Exit(1)

    name = name.lower()

    rprint(f"[cyan]→[/cyan] Creating module: [bold]{name}[/bold]")

    try:
        module_path = create_module(
            name=name,
            target_dir=target,
            responsibility=responsibility,
            with_examples=not no_examples,
        )

        rprint(f"[green]✓ Created module at: {module_path}[/green]")
        rprint(f"\nFiles created:")
        rprint(f" - MODULE.md (edit this)")
        rprint(f" - schema.json")
        if not no_examples:
            rprint(f" - examples/input.json")
            rprint(f" - examples/output.json")
        rprint(f"\nNext steps:")
        rprint(f" 1. Edit [cyan]MODULE.md[/cyan] to add your instructions")
        rprint(f" 2. Edit [cyan]schema.json[/cyan] to define input/output")
        rprint(f" 3. Run [cyan]cog validate {name}[/cyan] to check")

    except Exception as e:
        rprint(f"[red]✗ Failed to create module: {e}[/red]")
        raise typer.Exit(1)
266
+
267
+
268
@app.command("search")
def search_cmd(
    query: str = typer.Argument(..., help="Search query"),
):
    """Search the public module registry."""
    rprint(f"[cyan]→[/cyan] Searching registry for: {query}\n")

    results = search_registry(query)

    if not results:
        rprint("[yellow]No modules found.[/yellow]")
        return

    table = Table(title=f"Search Results ({len(results)})")
    for header, style in (("Name", "cyan"), ("Description", None), ("Version", "dim")):
        table.add_column(header, style=style)

    for hit in results:
        table.add_row(hit["name"], hit["description"], hit["version"])

    console.print(table)
    rprint(f"\nInstall with: [cyan]cog install registry:<name>[/cyan]")
291
+
292
+
293
@app.command("registry")
def registry_cmd(
    refresh: bool = typer.Option(False, "--refresh", "-r", help="Force refresh from remote"),
):
    """Show public registry status and modules."""
    rprint("[cyan]→[/cyan] Fetching registry...\n")

    registry = fetch_registry(use_cache=not refresh)

    # fetch_registry reports problems in-band through an "error" key
    # rather than raising; surface them as a warning and keep going.
    if "error" in registry:
        rprint(f"[yellow]⚠ Registry fetch warning: {registry['error']}[/yellow]")

    modules = registry.get("modules", {})
    if not modules:
        rprint("[yellow]Registry is empty or unavailable.[/yellow]")
        return

    table = Table(title=f"Public Registry ({len(modules)} modules)")
    for header, style in (("Name", "cyan"), ("Description", None), ("Version", "dim")):
        table.add_column(header, style=style)

    for module_name, entry in modules.items():
        table.add_row(module_name, entry.get("description", ""), entry.get("version", ""))

    console.print(table)
320
+
321
+
322
@app.command("doctor")
def doctor_cmd():
    """Check environment setup and provider availability."""
    rprint("[cyan]Cognitive Modules - Environment Check[/cyan]\n")

    status = check_provider_status()

    # Provider availability matrix: one row per known provider.
    table = Table(title="LLM Providers")
    table.add_column("Provider", style="cyan")
    table.add_column("Installed")
    table.add_column("Configured")

    check_mark = "[green]✓[/green]"
    for provider in ("openai", "anthropic", "minimax", "ollama"):
        details = status[provider]
        table.add_row(
            provider,
            check_mark if details["installed"] else "[red]✗[/red]",
            check_mark if details["configured"] else "[yellow]–[/yellow]",
        )

    console.print(table)

    rprint(f"\nCurrent provider: [cyan]{status['current_provider']}[/cyan]")
    rprint(f"Current model: [cyan]{status['current_model']}[/cyan]")

    rprint("\n[cyan]Module Search Paths:[/cyan]")
    rprint(f" 1. ./cognitive/modules (project-local)")
    rprint(f" 2. ~/.cognitive/modules (user-global)")

    installed = list_modules()
    rprint(f"\n[cyan]Installed Modules:[/cyan] {len(installed)}")

    # The stub provider means no real LLM is wired up; show setup hints.
    if status["current_provider"] == "stub":
        rprint("\n[yellow]⚠ Using stub provider (no real LLM)[/yellow]")
        rprint(" Set LLM_PROVIDER and API key to use a real LLM:")
        rprint(" [dim]export LLM_PROVIDER=openai[/dim]")
        rprint(" [dim]export OPENAI_API_KEY=sk-...[/dim]")
357
+
358
+
359
@app.command("info")
def info_cmd(
    module: str = typer.Argument(..., help="Module name or path"),
):
    """Show detailed information about a module."""
    # Resolve the target: an existing directory path wins; otherwise look
    # the name up in the installed-module search locations.
    candidate = Path(module)
    if candidate.is_dir():
        module_path = candidate
    else:
        module_path = find_module(module)
        if not module_path:
            rprint(f"[red]Module not found: {module}[/red]")
            raise typer.Exit(1)

    try:
        m = load_module(module_path)
    except Exception as e:
        rprint(f"[red]Failed to load module: {e}[/red]")
        raise typer.Exit(1)

    meta = m["metadata"]

    rprint(f"[bold cyan]{meta.get('name', module)}[/bold cyan] v{meta.get('version', '?')}")
    rprint(f"[dim]Format: {m['format']}[/dim]")

    rprint(f"\n[bold]Responsibility:[/bold]")
    rprint(f" {meta.get('responsibility', 'Not specified')}")

    if 'excludes' in meta:
        rprint(f"\n[bold]Excludes:[/bold]")
        for item in meta['excludes']:
            rprint(f" - {item}")

    if 'context' in meta:
        ctx = meta['context']
        ctx_desc = "隔离执行" if ctx == "fork" else "共享执行"
        rprint(f"\n[bold]Context:[/bold] {ctx} ({ctx_desc})")

    if 'constraints' in meta:
        rprint(f"\n[bold]Constraints:[/bold]")
        for key, enabled in meta['constraints'].items():
            marker = "[green]✓[/green]" if enabled else "[red]✗[/red]"
            rprint(f" {marker} {key}")

    rprint(f"\n[bold]Path:[/bold] {m['path']}")
    rprint(f"[bold]Prompt size:[/bold] {len(m['prompt'])} chars")
406
+
407
+
408
@app.callback(invoke_without_command=True)
def main(
    ctx: typer.Context,
    version: bool = typer.Option(False, "--version", "-v", help="Show version"),
):
    """Cognitive Modules CLI - Structured LLM task runner."""
    # `cog --version` prints and exits before any subcommand dispatch.
    if version:
        rprint(f"cog version {__version__}")
        raise typer.Exit()

    # Bare `cog` with no subcommand: show help instead of doing nothing.
    if ctx.invoked_subcommand is None:
        rprint(ctx.get_help())
420
+
421
+
422
# Support direct execution (`python -m cognitive.cli`) in addition to the
# installed console-script entry point.
if __name__ == "__main__":
    app()
cognitive/loader.py ADDED
@@ -0,0 +1,133 @@
1
+ """
2
+ Module Loader - Load cognitive modules in both old and new formats.
3
+
4
+ Old format (6 files):
5
+ - module.md (YAML frontmatter)
6
+ - input.schema.json
7
+ - output.schema.json
8
+ - constraints.yaml
9
+ - prompt.txt
10
+ - examples/
11
+
12
+ New format (2 files):
13
+ - MODULE.md (YAML frontmatter + prompt)
14
+ - schema.json (input + output combined)
15
+ """
16
+
17
+ import json
18
+ from pathlib import Path
19
+ from typing import Optional
20
+
21
+ import yaml
22
+
23
+
24
def detect_format(module_path: Path) -> str:
    """Detect module format: 'new' or 'old'.

    MODULE.md (new, 2-file layout) takes precedence over module.md
    (old, 6-file layout).

    Raises:
        FileNotFoundError: when neither manifest file is present.
    """
    for manifest, fmt in (("MODULE.md", "new"), ("module.md", "old")):
        if (module_path / manifest).exists():
            return fmt
    raise FileNotFoundError(f"No MODULE.md or module.md found in {module_path}")
32
+
33
+
34
def parse_frontmatter(content: str) -> tuple[dict, str]:
    """Parse YAML frontmatter from markdown content.

    Returns:
        (metadata, body).  metadata is {} when the content has no leading
        ``---`` fence, the fence is unterminated, or the frontmatter does
        not parse to a mapping; body is the text after the closing fence,
        stripped of surrounding whitespace (or the full content when no
        frontmatter is found).
    """
    if not content.startswith('---'):
        return {}, content

    # maxsplit=2: any further '---' sequences belong to the body.
    parts = content.split('---', 2)
    if len(parts) < 3:
        return {}, content

    loaded = yaml.safe_load(parts[1]) or {}
    # Honor the declared return type: a frontmatter block that parses to a
    # scalar or list is not usable metadata, so treat it as absent rather
    # than leaking a non-dict to callers that do metadata.get(...).
    frontmatter = loaded if isinstance(loaded, dict) else {}
    body = parts[2].strip()
    return frontmatter, body
46
+
47
+
48
def load_new_format(module_path: Path) -> dict:
    """Load module in new format (MODULE.md + schema.json)."""
    # MODULE.md carries both the YAML frontmatter and the prompt body.
    content = (module_path / "MODULE.md").read_text(encoding='utf-8')
    metadata, prompt = parse_frontmatter(content)

    # Expand the (optional) flat `constraints:` mapping from the
    # frontmatter into the canonical two-level structure; every flag
    # defaults to True when not declared.
    declared = metadata.get("constraints", {})
    constraints = {
        "operational": {
            "no_external_network": declared.get("no_network", True),
            "no_side_effects": declared.get("no_side_effects", True),
            "no_inventing_data": declared.get("no_inventing_data", True),
        },
        "output_quality": {
            "require_confidence": declared.get("require_confidence", True),
            "require_rationale": declared.get("require_rationale", True),
        },
    }

    # schema.json holds both halves; missing file means "no schemas".
    input_schema: dict = {}
    output_schema: dict = {}
    schema_path = module_path / "schema.json"
    if schema_path.exists():
        schema = json.loads(schema_path.read_text(encoding='utf-8'))
        input_schema = schema.get("input", {})
        output_schema = schema.get("output", {})

    return {
        "name": metadata.get("name", module_path.name),
        "path": module_path,
        "format": "new",
        "metadata": metadata,
        "input_schema": input_schema,
        "output_schema": output_schema,
        "constraints": constraints,
        "prompt": prompt,
    }
90
+
91
+
92
def load_old_format(module_path: Path) -> dict:
    """Load module in old format (6 files).

    Reads module.md (frontmatter only), input.schema.json,
    output.schema.json, constraints.yaml and prompt.txt.

    Raises:
        FileNotFoundError: if any of the required files is missing.
    """
    # module.md: only the YAML frontmatter matters here; the markdown body
    # is discarded because the prompt lives in prompt.txt in this layout.
    with open(module_path / "module.md", 'r', encoding='utf-8') as f:
        content = f.read()

    metadata, _ = parse_frontmatter(content)

    # Load schemas
    with open(module_path / "input.schema.json", 'r', encoding='utf-8') as f:
        input_schema = json.load(f)

    with open(module_path / "output.schema.json", 'r', encoding='utf-8') as f:
        output_schema = json.load(f)

    # Load constraints.  An empty constraints.yaml makes safe_load return
    # None; normalize to {} so callers can rely on a mapping, matching the
    # guarantee the new-format loader gives.
    with open(module_path / "constraints.yaml", 'r', encoding='utf-8') as f:
        constraints = yaml.safe_load(f) or {}

    # Load prompt
    with open(module_path / "prompt.txt", 'r', encoding='utf-8') as f:
        prompt = f.read()

    return {
        "name": metadata.get("name", module_path.name),
        "path": module_path,
        "format": "old",
        "metadata": metadata,
        "input_schema": input_schema,
        "output_schema": output_schema,
        "constraints": constraints,
        "prompt": prompt,
    }
125
+
126
+
127
def load_module(module_path: Path) -> dict:
    """Load a module, auto-detecting format."""
    # Dispatch on the detected layout ('new' or 'old').
    loaders = {"new": load_new_format, "old": load_old_format}
    return loaders[detect_format(module_path)](module_path)