cognitive-modules 0.2.0__tar.gz → 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/PKG-INFO +1 -1
  2. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/pyproject.toml +1 -1
  3. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/src/cognitive/loader.py +16 -5
  4. cognitive_modules-0.3.0/src/cognitive/runner.py +276 -0
  5. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/src/cognitive_modules.egg-info/PKG-INFO +1 -1
  6. cognitive_modules-0.2.0/src/cognitive/runner.py +0 -140
  7. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/LICENSE +0 -0
  8. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/README.md +0 -0
  9. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/setup.cfg +0 -0
  10. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/src/cognitive/__init__.py +0 -0
  11. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/src/cognitive/cli.py +0 -0
  12. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/src/cognitive/providers/__init__.py +0 -0
  13. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/src/cognitive/registry.py +0 -0
  14. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/src/cognitive/subagent.py +0 -0
  15. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/src/cognitive/templates.py +0 -0
  16. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/src/cognitive/validator.py +0 -0
  17. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/src/cognitive_modules.egg-info/SOURCES.txt +0 -0
  18. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/src/cognitive_modules.egg-info/dependency_links.txt +0 -0
  19. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/src/cognitive_modules.egg-info/entry_points.txt +0 -0
  20. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/src/cognitive_modules.egg-info/requires.txt +0 -0
  21. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/src/cognitive_modules.egg-info/top_level.txt +0 -0
  22. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/tests/test_cli.py +0 -0
  23. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/tests/test_loader.py +0 -0
  24. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/tests/test_registry.py +0 -0
  25. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/tests/test_runner.py +0 -0
  26. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/tests/test_subagent.py +0 -0
  27. {cognitive_modules-0.2.0 → cognitive_modules-0.3.0}/tests/test_validator.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: cognitive-modules
3
- Version: 0.2.0
3
+ Version: 0.3.0
4
4
  Summary: Structured LLM task runner with schema validation, confidence scoring, and subagent orchestration
5
5
  Author: ziel-io
6
6
  License: MIT
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "cognitive-modules"
7
- version = "0.2.0"
7
+ version = "0.3.0"
8
8
  description = "Structured LLM task runner with schema validation, confidence scoring, and subagent orchestration"
9
9
  readme = "README.md"
10
10
  license = {text = "MIT"}
@@ -79,22 +79,28 @@ def load_v2_format(module_path: Path) -> dict:
79
79
  output_schema = {}
80
80
  error_schema = {}
81
81
 
82
- # Extract constraints
82
+ # Extract constraints (supports both old and new format)
83
83
  constraints_raw = manifest.get("constraints", {})
84
+ policies_raw = manifest.get("policies", {})
85
+
84
86
  constraints = {
85
87
  "operational": {
86
- "no_external_network": constraints_raw.get("no_network", True),
87
- "no_side_effects": constraints_raw.get("no_side_effects", True),
88
- "no_file_write": constraints_raw.get("no_file_write", True),
88
+ "no_external_network": constraints_raw.get("no_network", True) or policies_raw.get("network") == "deny",
89
+ "no_side_effects": constraints_raw.get("no_side_effects", True) or policies_raw.get("side_effects") == "deny",
90
+ "no_file_write": constraints_raw.get("no_file_write", True) or policies_raw.get("filesystem_write") == "deny",
89
91
  "no_inventing_data": constraints_raw.get("no_inventing_data", True),
90
92
  },
91
93
  "output_quality": {
92
94
  "require_confidence": manifest.get("output", {}).get("require_confidence", True),
93
95
  "require_rationale": manifest.get("output", {}).get("require_rationale", True),
94
96
  "require_behavior_equivalence": manifest.get("output", {}).get("require_behavior_equivalence", False),
95
- }
97
+ },
98
+ "behavior_equivalence_false_max_confidence": constraints_raw.get("behavior_equivalence_false_max_confidence", 0.7),
96
99
  }
97
100
 
101
+ # Extract policies (v2.1)
102
+ policies = manifest.get("policies", {})
103
+
98
104
  # Extract tools policy
99
105
  tools = manifest.get("tools", {})
100
106
 
@@ -104,6 +110,9 @@ def load_v2_format(module_path: Path) -> dict:
104
110
  # Extract failure contract
105
111
  failure_contract = manifest.get("failure", {})
106
112
 
113
+ # Extract runtime requirements
114
+ runtime_requirements = manifest.get("runtime_requirements", {})
115
+
107
116
  return {
108
117
  "name": manifest.get("name", module_path.name),
109
118
  "version": manifest.get("version", "1.0.0"),
@@ -116,9 +125,11 @@ def load_v2_format(module_path: Path) -> dict:
116
125
  "output_schema": output_schema,
117
126
  "error_schema": error_schema,
118
127
  "constraints": constraints,
128
+ "policies": policies,
119
129
  "tools": tools,
120
130
  "output_contract": output_contract,
121
131
  "failure_contract": failure_contract,
132
+ "runtime_requirements": runtime_requirements,
122
133
  "prompt": prompt,
123
134
  }
124
135
 
@@ -0,0 +1,276 @@
1
+ """
2
+ Module Runner - Execute cognitive modules with validation.
3
+ Supports v2 envelope format and legacy formats.
4
+ """
5
+
6
+ import json
7
+ from pathlib import Path
8
+ from typing import Optional, TypedDict, Union
9
+
10
+ import jsonschema
11
+ import yaml
12
+
13
+ from .registry import find_module
14
+ from .loader import load_module
15
+ from .providers import call_llm
16
+
17
+
18
class EnvelopeError(TypedDict):
    # Machine-readable error code (e.g. "MODULE_NOT_FOUND", "PARSE_ERROR").
    code: str
    # Human-readable description of the failure.
    message: str


class EnvelopeSuccess(TypedDict):
    ok: bool  # True — success envelopes always carry ok=True
    data: dict  # validated module output payload


class EnvelopeFailure(TypedDict):
    ok: bool  # False — failure envelopes always carry ok=False
    error: EnvelopeError  # structured error details
    partial_data: Optional[dict]  # best-effort partial output, when available


# Discriminated union over the boolean "ok" field: inspect result["ok"]
# to know whether "data" or "error" is present.
EnvelopeResponse = Union[EnvelopeSuccess, EnvelopeFailure]
35
+
36
+
37
def validate_data(data: dict, schema: dict, label: str = "Data") -> list[str]:
    """Validate *data* against a JSON Schema.

    Args:
        data: The object to validate.
        schema: JSON Schema dict; an empty/falsy schema skips validation.
        label: Prefix used in error messages (e.g. "Input", "Output").

    Returns:
        A list of human-readable error strings; empty when valid.
    """
    errors: list[str] = []
    if not schema:
        return errors
    try:
        # Use iter_errors() so ALL violations are reported, not just the
        # first one jsonschema.validate() happens to raise.
        validator_cls = jsonschema.validators.validator_for(schema)
        validator_cls.check_schema(schema)
        for err in validator_cls(schema).iter_errors(data):
            errors.append(f"{label} validation error: {err.message} at {list(err.absolute_path)}")
    except jsonschema.SchemaError as e:
        errors.append(f"Schema error: {e.message}")
    return errors
49
+
50
+
51
def substitute_arguments(text: str, input_data: dict) -> str:
    """Substitute $ARGUMENTS, $ARGUMENTS[N], and $N placeholders in *text*.

    The argument string is taken from input_data["$ARGUMENTS"], falling back
    to "query", then "code", then the empty string.

    Args:
        text: Prompt text containing placeholders.
        input_data: Module input dict supplying the argument value.

    Returns:
        *text* with all placeholders replaced.
    """
    args_value = input_data.get("$ARGUMENTS", input_data.get("query", input_data.get("code", "")))

    if isinstance(args_value, str):
        args_list = args_value.split()
        # Indexed placeholders must be handled BEFORE the bare $ARGUMENTS
        # replacement (otherwise "$ARGUMENTS[0]" is mangled into "<args>[0]"),
        # and in descending index order so "$1" cannot clobber the prefix
        # of "$10".
        for i in range(len(args_list) - 1, -1, -1):
            text = text.replace(f"$ARGUMENTS[{i}]", args_list[i])
            text = text.replace(f"${i}", args_list[i])

    # Replace the bare $ARGUMENTS placeholder last.
    text = text.replace("$ARGUMENTS", str(args_value))

    return text
67
+
68
+
69
def build_prompt(module: dict, input_data: dict, use_envelope: bool = False) -> str:
    """Build the full prompt text sent to the LLM.

    Combines the module prompt (with $ARGUMENTS substituted), the module
    constraints rendered as YAML, the JSON-encoded input, and instructions
    describing the expected response format (envelope or plain JSON).
    """
    body = substitute_arguments(module["prompt"], input_data)
    constraints_yaml = yaml.dump(module["constraints"], default_flow_style=False)
    input_json = json.dumps(input_data, indent=2, ensure_ascii=False)

    if use_envelope:
        tail = (
            "\n## Response Format (Envelope)\n"
            "You MUST wrap your response in the envelope format:\n"
            "- Success: { \"ok\": true, \"data\": { ...your output... } }\n"
            "- Error: { \"ok\": false, \"error\": { \"code\": \"ERROR_CODE\", \"message\": \"...\" } }\n"
            "Return ONLY valid JSON.\n"
        )
    else:
        tail = (
            "\n## Instructions\n"
            "Analyze the input and generate output matching the required schema."
            "Return ONLY valid JSON. Do not include any text before or after the JSON."
        )

    return (
        body
        + "\n\n## Constraints\n"
        + constraints_yaml
        + "\n\n## Input\n"
        + "```json\n"
        + input_json
        + "\n```\n"
        + tail
    )
100
+
101
+
102
def parse_llm_response(response: str) -> dict:
    """Parse an LLM response as JSON, stripping a markdown code fence if present.

    Args:
        response: Raw LLM output, possibly wrapped in ```...``` fences.

    Returns:
        The parsed JSON object.

    Raises:
        json.JSONDecodeError: If the (unfenced) text is not valid JSON.
    """
    text = response.strip()

    # Remove markdown code blocks if present
    if text.startswith("```"):
        lines = text.split("\n")
        # Default to keeping everything after the opening fence so that a
        # response missing its closing fence does not lose its last line.
        end = len(lines)
        for i in range(1, len(lines)):
            if lines[i].strip() == "```":
                end = i
                break
        text = "\n".join(lines[1:end])

    return json.loads(text)
118
+
119
+
120
def is_envelope_response(data: dict) -> bool:
    """Return True when *data* carries a boolean envelope discriminator."""
    flag = data.get("ok")
    return flag is True or flag is False
123
+
124
+
125
def parse_envelope_response(data: dict) -> EnvelopeResponse:
    """Normalize a raw envelope dict into a well-formed EnvelopeResponse."""
    if data.get("ok") is not True:
        # Anything other than an explicit ok=True is treated as a failure;
        # supply a generic error when the model omitted one.
        fallback = {"code": "UNKNOWN", "message": "Unknown error"}
        return {
            "ok": False,
            "error": data.get("error", fallback),
            "partial_data": data.get("partial_data"),
        }
    return {
        "ok": True,
        "data": data.get("data", {}),
    }
138
+
139
+
140
def convert_to_envelope(data: dict, is_error: bool = False) -> EnvelopeResponse:
    """Convert a legacy (non-envelope) module response into envelope format.

    Args:
        data: Raw output dict produced by a legacy module.
        is_error: Force treating *data* as an error even without an "error" key.

    Returns:
        A success envelope wrapping *data*, or a failure envelope built from
        its "error" field.
    """
    if is_error or "error" in data:
        error = data.get("error", {})
        # Legacy modules may report the error as a bare string (or other
        # non-dict); normalize it so the .get() calls below cannot raise.
        if not isinstance(error, dict):
            error = {"message": str(error)}
        return {
            "ok": False,
            "error": {
                "code": error.get("code", "UNKNOWN"),
                "message": error.get("message", str(error))
            },
            "partial_data": None
        }
    else:
        return {
            "ok": True,
            "data": data
        }
157
+
158
+
159
def run_module(
    name_or_path: str,
    input_data: dict,
    validate_input: bool = True,
    validate_output: bool = True,
    model: Optional[str] = None,
    use_envelope: Optional[bool] = None,
) -> EnvelopeResponse:
    """
    Run a cognitive module with the given input.
    Returns envelope format response.

    Args:
        name_or_path: Module name or path to module directory
        input_data: Input data dictionary
        validate_input: Whether to validate input against schema
        validate_output: Whether to validate output against schema
        model: Optional model override
        use_envelope: Force envelope format (auto-detect if None)

    Returns:
        EnvelopeResponse with ok=True/False and data/error
    """
    # Find module path: prefer an existing directory path, otherwise fall
    # back to a registry lookup by module name.
    path = Path(name_or_path)
    if path.exists() and path.is_dir():
        module_path = path
    else:
        module_path = find_module(name_or_path)
        if not module_path:
            # Lookup failures are reported as a failure envelope rather than
            # an exception, so callers always get a uniform response shape.
            return {
                "ok": False,
                "error": {"code": "MODULE_NOT_FOUND", "message": f"Module not found: {name_or_path}"},
                "partial_data": None
            }

    # Load module (auto-detects format)
    module = load_module(module_path)

    # Determine if we should use envelope format
    should_use_envelope = use_envelope
    if should_use_envelope is None:
        # Auto-detect: use envelope for v2 format or if output.envelope is True
        output_contract = module.get("output_contract", {})
        should_use_envelope = (
            module.get("format") == "v2" or
            output_contract.get("envelope", False)
        )

    # Validate input against the module's schema BEFORE spending an LLM call
    # on data that cannot succeed.
    if validate_input and module["input_schema"]:
        errors = validate_data(input_data, module["input_schema"], "Input")
        if errors:
            return {
                "ok": False,
                "error": {"code": "INVALID_INPUT", "message": str(errors)},
                "partial_data": None
            }

    # Build prompt and call LLM
    full_prompt = build_prompt(module, input_data, use_envelope=should_use_envelope)
    response = call_llm(full_prompt, model=model)

    # Parse response; malformed JSON becomes a PARSE_ERROR failure envelope.
    try:
        output_data = parse_llm_response(response)
    except json.JSONDecodeError as e:
        return {
            "ok": False,
            "error": {"code": "PARSE_ERROR", "message": f"Failed to parse JSON: {e}"},
            "partial_data": None
        }

    # Handle envelope format
    if is_envelope_response(output_data):
        result = parse_envelope_response(output_data)
    else:
        # Convert legacy format to envelope
        result = convert_to_envelope(output_data)

    # Validate output (only for success responses); a schema violation turns
    # the success into a failure envelope while preserving the rejected data
    # as partial_data for debugging.
    if result["ok"] and validate_output and module["output_schema"]:
        data_to_validate = result.get("data", {})
        errors = validate_data(data_to_validate, module["output_schema"], "Output")
        if errors:
            return {
                "ok": False,
                "error": {"code": "OUTPUT_VALIDATION_ERROR", "message": str(errors)},
                "partial_data": data_to_validate
            }

    return result
251
+
252
+
253
def run_module_legacy(
    name_or_path: str,
    input_data: dict,
    validate_input: bool = True,
    validate_output: bool = True,
    model: Optional[str] = None,
) -> dict:
    """Run a cognitive module and return its raw output dict.

    Backward-compatible wrapper around run_module(): unwraps a success
    envelope into its data payload and raises ValueError on failure.
    """
    envelope = run_module(
        name_or_path,
        input_data,
        validate_input=validate_input,
        validate_output=validate_output,
        model=model,
        use_envelope=False
    )

    if not envelope["ok"]:
        err = envelope["error"]
        raise ValueError(f"{err['code']}: {err['message']}")
    return envelope["data"]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: cognitive-modules
3
- Version: 0.2.0
3
+ Version: 0.3.0
4
4
  Summary: Structured LLM task runner with schema validation, confidence scoring, and subagent orchestration
5
5
  Author: ziel-io
6
6
  License: MIT
@@ -1,140 +0,0 @@
1
- """
2
- Module Runner - Execute cognitive modules with validation.
3
- Supports both old and new module formats.
4
- """
5
-
6
- import json
7
- from pathlib import Path
8
- from typing import Optional
9
-
10
- import jsonschema
11
- import yaml
12
-
13
- from .registry import find_module
14
- from .loader import load_module
15
- from .providers import call_llm
16
-
17
-
18
- def validate_data(data: dict, schema: dict, label: str = "Data") -> list[str]:
19
- """Validate data against schema. Returns list of errors."""
20
- errors = []
21
- if not schema:
22
- return errors
23
- try:
24
- jsonschema.validate(instance=data, schema=schema)
25
- except jsonschema.ValidationError as e:
26
- errors.append(f"{label} validation error: {e.message} at {list(e.absolute_path)}")
27
- except jsonschema.SchemaError as e:
28
- errors.append(f"Schema error: {e.message}")
29
- return errors
30
-
31
-
32
- def substitute_arguments(text: str, input_data: dict) -> str:
33
- """Substitute $ARGUMENTS and $N placeholders in text."""
34
- # Get arguments
35
- args_value = input_data.get("$ARGUMENTS", input_data.get("query", ""))
36
-
37
- # Replace $ARGUMENTS
38
- text = text.replace("$ARGUMENTS", str(args_value))
39
-
40
- # Replace $ARGUMENTS[N] and $N for indexed access
41
- if isinstance(args_value, str):
42
- args_list = args_value.split()
43
- for i, arg in enumerate(args_list):
44
- text = text.replace(f"$ARGUMENTS[{i}]", arg)
45
- text = text.replace(f"${i}", arg)
46
-
47
- return text
48
-
49
-
50
- def build_prompt(module: dict, input_data: dict) -> str:
51
- """Build the complete prompt for the LLM."""
52
- # Substitute $ARGUMENTS in prompt
53
- prompt = substitute_arguments(module["prompt"], input_data)
54
-
55
- parts = [
56
- prompt,
57
- "\n\n## Constraints\n",
58
- yaml.dump(module["constraints"], default_flow_style=False),
59
- "\n\n## Input\n",
60
- "```json\n",
61
- json.dumps(input_data, indent=2, ensure_ascii=False),
62
- "\n```\n",
63
- "\n## Instructions\n",
64
- "Analyze the input and generate output matching the required schema.",
65
- "Return ONLY valid JSON. Do not include any text before or after the JSON.",
66
- ]
67
- return "".join(parts)
68
-
69
-
70
- def parse_llm_response(response: str) -> dict:
71
- """Parse LLM response, handling potential markdown code blocks."""
72
- text = response.strip()
73
-
74
- # Remove markdown code blocks if present
75
- if text.startswith("```"):
76
- lines = text.split("\n")
77
- start = 1
78
- end = len(lines) - 1
79
- for i, line in enumerate(lines[1:], 1):
80
- if line.strip() == "```":
81
- end = i
82
- break
83
- text = "\n".join(lines[start:end])
84
-
85
- return json.loads(text)
86
-
87
-
88
- def run_module(
89
- name_or_path: str,
90
- input_data: dict,
91
- validate_input: bool = True,
92
- validate_output: bool = True,
93
- model: Optional[str] = None,
94
- ) -> dict:
95
- """
96
- Run a cognitive module with the given input.
97
- Supports both old and new module formats.
98
-
99
- Args:
100
- name_or_path: Module name or path to module directory
101
- input_data: Input data dictionary
102
- validate_input: Whether to validate input against schema
103
- validate_output: Whether to validate output against schema
104
- model: Optional model override
105
-
106
- Returns:
107
- The module output as a dictionary
108
- """
109
- # Find module path
110
- path = Path(name_or_path)
111
- if path.exists() and path.is_dir():
112
- module_path = path
113
- else:
114
- module_path = find_module(name_or_path)
115
- if not module_path:
116
- raise FileNotFoundError(f"Module not found: {name_or_path}")
117
-
118
- # Load module (auto-detects format)
119
- module = load_module(module_path)
120
-
121
- # Validate input
122
- if validate_input and module["input_schema"]:
123
- errors = validate_data(input_data, module["input_schema"], "Input")
124
- if errors:
125
- raise ValueError(f"Input validation failed: {errors}")
126
-
127
- # Build prompt and call LLM
128
- full_prompt = build_prompt(module, input_data)
129
- response = call_llm(full_prompt, model=model)
130
-
131
- # Parse response
132
- output_data = parse_llm_response(response)
133
-
134
- # Validate output
135
- if validate_output and module["output_schema"]:
136
- errors = validate_data(output_data, module["output_schema"], "Output")
137
- if errors:
138
- raise ValueError(f"Output validation failed: {errors}")
139
-
140
- return output_data