cognitive-modules 0.1.0__py3-none-any.whl

cognitive/subagent.py ADDED
@@ -0,0 +1,245 @@
+ """
+ Subagent - Orchestrate module calls with isolated execution contexts.
+
+ Supports:
+ - @call:module-name - Call another module
+ - @call:module-name(args) - Call with arguments
+ - context: fork - Isolated execution (no shared state)
+ - context: main - Shared execution (default)
+ """
+
+ import re
+ import json
+ import copy
+ from pathlib import Path
+ from typing import Optional, Any
+ from dataclasses import dataclass, field
+
+
+ @dataclass
+ class SubagentContext:
+     """Execution context for a module run."""
+     parent_id: Optional[str] = None
+     depth: int = 0
+     max_depth: int = 5
+     results: dict = field(default_factory=dict)
+     isolated: bool = False
+
+     def fork(self, module_name: str) -> "SubagentContext":
+         """Create a child context (isolated)."""
+         return SubagentContext(
+             parent_id=module_name,
+             depth=self.depth + 1,
+             max_depth=self.max_depth,
+             results={},  # Isolated - no inherited results
+             isolated=True,
+         )
+
+     def extend(self, module_name: str) -> "SubagentContext":
+         """Create a child context (shared)."""
+         return SubagentContext(
+             parent_id=module_name,
+             depth=self.depth + 1,
+             max_depth=self.max_depth,
+             results=copy.copy(self.results),  # Shared results
+             isolated=False,
+         )
+
+
+ # Pattern to match @call:module-name or @call:module-name(args)
+ CALL_PATTERN = re.compile(r'@call:([a-zA-Z0-9_-]+)(?:\(([^)]*)\))?')
+
+
+ def parse_calls(text: str) -> list[dict]:
+     """
+     Parse @call directives from text.
+
+     Returns list of:
+     {"module": "name", "args": "optional args", "match": "full match string"}
+     """
+     calls = []
+     for match in CALL_PATTERN.finditer(text):
+         calls.append({
+             "module": match.group(1),
+             "args": match.group(2) or "",
+             "match": match.group(0),
+         })
+     return calls
+
+
+ def substitute_call_results(text: str, call_results: dict) -> str:
+     """
+     Replace @call directives with their results.
+
+     call_results: {"@call:module-name": result_dict, ...}
+     """
+     for call_str, result in call_results.items():
+         if isinstance(result, dict):
+             # Inject as JSON
+             result_str = json.dumps(result, indent=2, ensure_ascii=False)
+         else:
+             result_str = str(result)
+         text = text.replace(call_str, f"[Result from {call_str}]:\n{result_str}")
+     return text
+
+
+ class SubagentOrchestrator:
+     """
+     Orchestrates module execution with subagent support.
+
+     Usage:
+         orchestrator = SubagentOrchestrator()
+         result = orchestrator.run("parent-module", input_data)
+     """
+
+     def __init__(self, model: Optional[str] = None):
+         self.model = model
+         self._running = set()  # Prevent circular calls
+
+     def run(
+         self,
+         module_name: str,
+         input_data: dict,
+         context: Optional[SubagentContext] = None,
+         validate_input: bool = True,
+         validate_output: bool = True,
+     ) -> dict:
+         """
+         Run a module with subagent support.
+
+         Recursively resolves @call directives before final execution.
+         """
+         from .registry import find_module
+         from .loader import load_module
+         from .runner import (
+             validate_data,
+             substitute_arguments,
+             build_prompt,
+             parse_llm_response,
+         )
+         from .providers import call_llm
+
+         # Initialize context
+         if context is None:
+             context = SubagentContext()
+
+         # Check depth limit
+         if context.depth > context.max_depth:
+             raise RecursionError(
+                 f"Max subagent depth ({context.max_depth}) exceeded. "
+                 f"Check for circular calls."
+             )
+
+         # Prevent circular calls
+         if module_name in self._running:
+             raise RecursionError(f"Circular call detected: {module_name}")
+
+         self._running.add(module_name)
+
+         try:
+             # Find and load module
+             path = Path(module_name)
+             if path.exists() and path.is_dir():
+                 module_path = path
+             else:
+                 module_path = find_module(module_name)
+                 if not module_path:
+                     raise FileNotFoundError(f"Module not found: {module_name}")
+
+             module = load_module(module_path)
+
+             # Check if this module wants isolated execution
+             module_context_mode = module.get("metadata", {}).get("context", "main")
+
+             # Validate input
+             if validate_input and module["input_schema"]:
+                 errors = validate_data(input_data, module["input_schema"], "Input")
+                 if errors:
+                     raise ValueError(f"Input validation failed: {errors}")
+
+             # Get prompt and substitute arguments
+             prompt = substitute_arguments(module["prompt"], input_data)
+
+             # Parse and resolve @call directives
+             calls = parse_calls(prompt)
+             call_results = {}
+
+             for call in calls:
+                 child_module = call["module"]
+                 child_args = call["args"]
+
+                 # Prepare child input
+                 if child_args:
+                     child_input = {"$ARGUMENTS": child_args, "query": child_args}
+                 else:
+                     # Pass through parent input
+                     child_input = input_data
+
+                 # Determine child context based on module's context setting
+                 if module_context_mode == "fork":
+                     child_context = context.fork(module_name)
+                 else:
+                     child_context = context.extend(module_name)
+
+                 # Recursively run child module
+                 child_result = self.run(
+                     child_module,
+                     child_input,
+                     context=child_context,
+                     validate_input=False,  # Skip validation for @call args
+                     validate_output=validate_output,
+                 )
+
+                 call_results[call["match"]] = child_result
+
+             # Substitute call results into prompt
+             if call_results:
+                 prompt = substitute_call_results(prompt, call_results)
+                 # Rebuild full prompt with substituted content
+                 module["prompt"] = prompt
+
+             # Build final prompt and call LLM
+             full_prompt = build_prompt(module, input_data)
+
+             # Add context info if there are subagent results
+             if call_results:
+                 full_prompt += "\n\n## Subagent Results Available\n"
+                 full_prompt += "The @call results have been injected above. Use them in your response.\n"
+
+             response = call_llm(full_prompt, model=self.model)
+
+             # Parse response
+             output_data = parse_llm_response(response)
+
+             # Validate output
+             if validate_output and module["output_schema"]:
+                 errors = validate_data(output_data, module["output_schema"], "Output")
+                 if errors:
+                     raise ValueError(f"Output validation failed: {errors}")
+
+             # Store result in context
+             context.results[module_name] = output_data
+
+             return output_data
+
+         finally:
+             self._running.discard(module_name)
+
+
+ def run_with_subagents(
+     module_name: str,
+     input_data: dict,
+     model: Optional[str] = None,
+     validate_input: bool = True,
+     validate_output: bool = True,
+ ) -> dict:
+     """
+     Convenience function to run a module with subagent support.
+     """
+     orchestrator = SubagentOrchestrator(model=model)
+     return orchestrator.run(
+         module_name,
+         input_data,
+         validate_input=validate_input,
+         validate_output=validate_output,
+     )
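A minimal usage sketch for the orchestrator above, assuming a parent module named `summarize-report` is installed in the registry and its MODULE.md prompt embeds a directive such as `@call:fact-finder(Q3 revenue)` (both module names are hypothetical; resolution goes through the package's own `registry`, `loader`, `runner`, and `providers` modules):

    from cognitive.subagent import run_with_subagents

    # Hypothetical parent module whose prompt contains "@call:fact-finder(Q3 revenue)".
    # The orchestrator runs fact-finder first, replaces the directive with the child's
    # JSON result via substitute_call_results(), then sends the combined prompt to the LLM.
    result = run_with_subagents(
        "summarize-report",
        {"query": "Summarize Q3 performance"},
        model=None,  # fall back to the provider's default model
    )
    print(result.get("confidence"))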
cognitive/templates.py ADDED
@@ -0,0 +1,186 @@
+ """
+ Module Templates - Generate skeleton for new cognitive modules.
+ """
+
+ import json
+ from pathlib import Path
+
+ MODULE_MD_TEMPLATE = '''---
+ name: {name}
+ version: 1.0.0
+ responsibility: {responsibility}
+
+ excludes:
+   - Fabricating data that was not provided
+   - Accessing external networks
+   - Producing side effects
+
+ constraints:
+   no_network: true
+   no_side_effects: true
+   no_inventing_data: true
+   require_confidence: true
+   require_rationale: true
+
+ invocation:
+   user_invocable: true
+   agent_invocable: true
+ ---
+
+ # {name}
+
+ You are a {name} module. {responsibility}
+
+ ## Input
+
+ The user will provide:
+ - (describe the expected input)
+
+ ## Process
+
+ 1. Analyze the input
+ 2. Execute the main logic
+ 3. Produce structured output
+
+ ## Output Requirements
+
+ The output JSON contains:
+ - `result`: the main result
+ - `rationale`: explanation of decisions
+ - `confidence`: confidence score, 0-1
+
+ ## Constraints
+
+ - Do not fabricate information that was not provided (mark it as unknown)
+ - Do not access external resources
+ - Report confidence honestly
+ '''
+
+ EXAMPLE_INPUT = {
+     "query": "Example input",
+     "context": {}
+ }
+
+ EXAMPLE_OUTPUT = {
+     "result": {
+         "summary": "Example output result"
+     },
+     "rationale": {
+         "decisions": [
+             {
+                 "aspect": "Example decision",
+                 "decision": "What was done",
+                 "reasoning": "Why it was done this way"
+             }
+         ],
+         "assumptions": [],
+         "open_questions": []
+     },
+     "confidence": 0.8
+ }
+
+
+ def get_schema_template(name: str) -> dict:
+     """Generate schema template as dict."""
+     return {
+         "$schema": "https://cognitive-modules.io/schema/v1",
+         "$id": name,
+         "title": f"{name.replace('-', ' ').title()} Schema",
+         "input": {
+             "type": "object",
+             "required": ["query"],
+             "additionalProperties": False,
+             "properties": {
+                 "query": {
+                     "type": "string",
+                     "description": "User input"
+                 },
+                 "context": {
+                     "type": "object",
+                     "description": "Optional context"
+                 }
+             }
+         },
+         "output": {
+             "type": "object",
+             "required": ["result", "rationale", "confidence"],
+             "additionalProperties": False,
+             "properties": {
+                 "result": {
+                     "type": "object",
+                     "description": "Main output result"
+                 },
+                 "rationale": {
+                     "type": "object",
+                     "required": ["decisions"],
+                     "properties": {
+                         "decisions": {
+                             "type": "array",
+                             "items": {
+                                 "type": "object",
+                                 "required": ["aspect", "decision", "reasoning"]
+                             }
+                         },
+                         "assumptions": {"type": "array"},
+                         "open_questions": {"type": "array"}
+                     }
+                 },
+                 "confidence": {
+                     "type": "number",
+                     "minimum": 0,
+                     "maximum": 1
+                 }
+             }
+         }
+     }
+
+
+ def create_module(
+     name: str,
+     target_dir: Path,
+     responsibility: str = "(describe the module's responsibility)",
+     with_examples: bool = True,
+ ) -> Path:
+     """
+     Create a new cognitive module from template.
+
+     Args:
+         name: Module name (lowercase, hyphenated)
+         target_dir: Directory to create module in
+         responsibility: One-line description
+         with_examples: Whether to create examples directory
+
+     Returns:
+         Path to created module directory
+     """
+     module_path = target_dir / name
+     module_path.mkdir(parents=True, exist_ok=True)
+
+     # Create MODULE.md
+     module_md = MODULE_MD_TEMPLATE.format(
+         name=name,
+         responsibility=responsibility,
+     )
+     (module_path / "MODULE.md").write_text(module_md, encoding='utf-8')
+
+     # Create schema.json
+     schema = get_schema_template(name)
+     (module_path / "schema.json").write_text(
+         json.dumps(schema, indent=2, ensure_ascii=False),
+         encoding='utf-8'
+     )
+
+     # Create examples
+     if with_examples:
+         examples_path = module_path / "examples"
+         examples_path.mkdir(exist_ok=True)
+         (examples_path / "input.json").write_text(
+             json.dumps(EXAMPLE_INPUT, indent=2, ensure_ascii=False),
+             encoding='utf-8'
+         )
+         (examples_path / "output.json").write_text(
+             json.dumps(EXAMPLE_OUTPUT, indent=2, ensure_ascii=False),
+             encoding='utf-8'
+         )
+
+     return module_path
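And a small sketch of scaffolding a new module with the template generator (the `risk-analyzer` name and the `modules/` target directory are made up for illustration):

    from pathlib import Path
    from cognitive.templates import create_module

    # Creates modules/risk-analyzer/ containing MODULE.md, schema.json and examples/.
    path = create_module(
        name="risk-analyzer",
        target_dir=Path("modules"),
        responsibility="Assess risk factors in a given plan",
    )
    print(path)  # modules/risk-analyzer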