cognitive-modules 0.2.0-py3-none-any.whl → 0.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cognitive/loader.py +16 -5
- cognitive/runner.py +155 -19
- {cognitive_modules-0.2.0.dist-info → cognitive_modules-0.3.0.dist-info}/METADATA +1 -1
- {cognitive_modules-0.2.0.dist-info → cognitive_modules-0.3.0.dist-info}/RECORD +8 -8
- {cognitive_modules-0.2.0.dist-info → cognitive_modules-0.3.0.dist-info}/WHEEL +0 -0
- {cognitive_modules-0.2.0.dist-info → cognitive_modules-0.3.0.dist-info}/entry_points.txt +0 -0
- {cognitive_modules-0.2.0.dist-info → cognitive_modules-0.3.0.dist-info}/licenses/LICENSE +0 -0
- {cognitive_modules-0.2.0.dist-info → cognitive_modules-0.3.0.dist-info}/top_level.txt +0 -0
cognitive/loader.py
CHANGED
@@ -79,22 +79,28 @@ def load_v2_format(module_path: Path) -> dict:
     output_schema = {}
     error_schema = {}
 
-    # Extract constraints
+    # Extract constraints (supports both old and new format)
     constraints_raw = manifest.get("constraints", {})
+    policies_raw = manifest.get("policies", {})
+
     constraints = {
         "operational": {
-            "no_external_network": constraints_raw.get("no_network", True),
-            "no_side_effects": constraints_raw.get("no_side_effects", True),
-            "no_file_write": constraints_raw.get("no_file_write", True),
+            "no_external_network": constraints_raw.get("no_network", True) or policies_raw.get("network") == "deny",
+            "no_side_effects": constraints_raw.get("no_side_effects", True) or policies_raw.get("side_effects") == "deny",
+            "no_file_write": constraints_raw.get("no_file_write", True) or policies_raw.get("filesystem_write") == "deny",
            "no_inventing_data": constraints_raw.get("no_inventing_data", True),
         },
         "output_quality": {
             "require_confidence": manifest.get("output", {}).get("require_confidence", True),
             "require_rationale": manifest.get("output", {}).get("require_rationale", True),
             "require_behavior_equivalence": manifest.get("output", {}).get("require_behavior_equivalence", False),
-        }
+        },
+        "behavior_equivalence_false_max_confidence": constraints_raw.get("behavior_equivalence_false_max_confidence", 0.7),
     }
 
+    # Extract policies (v2.1)
+    policies = manifest.get("policies", {})
+
     # Extract tools policy
     tools = manifest.get("tools", {})
 
@@ -104,6 +110,9 @@ def load_v2_format(module_path: Path) -> dict:
     # Extract failure contract
     failure_contract = manifest.get("failure", {})
 
+    # Extract runtime requirements
+    runtime_requirements = manifest.get("runtime_requirements", {})
+
     return {
         "name": manifest.get("name", module_path.name),
         "version": manifest.get("version", "1.0.0"),
@@ -116,9 +125,11 @@ def load_v2_format(module_path: Path) -> dict:
         "output_schema": output_schema,
         "error_schema": error_schema,
         "constraints": constraints,
+        "policies": policies,
         "tools": tools,
         "output_contract": output_contract,
         "failure_contract": failure_contract,
+        "runtime_requirements": runtime_requirements,
         "prompt": prompt,
     }
 
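Note on the new constraint/policy mapping: the legacy "constraints" keys still default to True, so a v2.1 "policies" value of "deny" can only tighten a constraint; it never relaxes one unless the legacy key is explicitly set to False. The sketch below mirrors the mapping added in load_v2_format; the sample manifest values ("example-module" and its policy/constraint settings) are illustrative, not taken from the package.

# Minimal sketch of the v2.1 policy -> constraint mapping (hypothetical manifest values).
def map_operational_constraints(manifest: dict) -> dict:
    constraints_raw = manifest.get("constraints", {})
    policies_raw = manifest.get("policies", {})
    return {
        "no_external_network": constraints_raw.get("no_network", True) or policies_raw.get("network") == "deny",
        "no_side_effects": constraints_raw.get("no_side_effects", True) or policies_raw.get("side_effects") == "deny",
        "no_file_write": constraints_raw.get("no_file_write", True) or policies_raw.get("filesystem_write") == "deny",
        "no_inventing_data": constraints_raw.get("no_inventing_data", True),
    }

manifest = {
    "name": "example-module",               # hypothetical module
    "constraints": {"no_network": False},   # legacy key explicitly relaxed
    "policies": {"network": "deny"},        # v2.1 policy tightens it again
}
print(map_operational_constraints(manifest))
# -> {'no_external_network': True, 'no_side_effects': True, 'no_file_write': True, 'no_inventing_data': True}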
cognitive/runner.py
CHANGED
@@ -1,11 +1,11 @@
 """
 Module Runner - Execute cognitive modules with validation.
-Supports
+Supports v2 envelope format and legacy formats.
 """
 
 import json
 from pathlib import Path
-from typing import Optional
+from typing import Optional, TypedDict, Union
 
 import jsonschema
 import yaml
@@ -15,6 +15,25 @@ from .loader import load_module
 from .providers import call_llm
 
 
+class EnvelopeError(TypedDict):
+    code: str
+    message: str
+
+
+class EnvelopeSuccess(TypedDict):
+    ok: bool  # True
+    data: dict
+
+
+class EnvelopeFailure(TypedDict):
+    ok: bool  # False
+    error: EnvelopeError
+    partial_data: Optional[dict]
+
+
+EnvelopeResponse = Union[EnvelopeSuccess, EnvelopeFailure]
+
+
 def validate_data(data: dict, schema: dict, label: str = "Data") -> list[str]:
     """Validate data against schema. Returns list of errors."""
     errors = []
@@ -32,7 +51,7 @@ def validate_data(data: dict, schema: dict, label: str = "Data") -> list[str]:
 def substitute_arguments(text: str, input_data: dict) -> str:
     """Substitute $ARGUMENTS and $N placeholders in text."""
     # Get arguments
-    args_value = input_data.get("$ARGUMENTS", input_data.get("query", ""))
+    args_value = input_data.get("$ARGUMENTS", input_data.get("query", input_data.get("code", "")))
 
     # Replace $ARGUMENTS
     text = text.replace("$ARGUMENTS", str(args_value))
@@ -47,7 +66,7 @@ def substitute_arguments(text: str, input_data: dict) -> str:
     return text
 
 
-def build_prompt(module: dict, input_data: dict) -> str:
+def build_prompt(module: dict, input_data: dict, use_envelope: bool = False) -> str:
     """Build the complete prompt for the LLM."""
     # Substitute $ARGUMENTS in prompt
     prompt = substitute_arguments(module["prompt"], input_data)
@@ -60,10 +79,23 @@ def build_prompt(module: dict, input_data: dict) -> str:
         "```json\n",
         json.dumps(input_data, indent=2, ensure_ascii=False),
         "\n```\n",
-        "\n## Instructions\n",
-        "Analyze the input and generate output matching the required schema.",
-        "Return ONLY valid JSON. Do not include any text before or after the JSON.",
     ]
+
+    if use_envelope:
+        parts.extend([
+            "\n## Response Format (Envelope)\n",
+            "You MUST wrap your response in the envelope format:\n",
+            "- Success: { \"ok\": true, \"data\": { ...your output... } }\n",
+            "- Error: { \"ok\": false, \"error\": { \"code\": \"ERROR_CODE\", \"message\": \"...\" } }\n",
+            "Return ONLY valid JSON.\n",
+        ])
+    else:
+        parts.extend([
+            "\n## Instructions\n",
+            "Analyze the input and generate output matching the required schema.",
+            "Return ONLY valid JSON. Do not include any text before or after the JSON.",
+        ])
+
     return "".join(parts)
 
 
@@ -85,16 +117,56 @@ def parse_llm_response(response: str) -> dict:
     return json.loads(text)
 
 
+def is_envelope_response(data: dict) -> bool:
+    """Check if response is in envelope format."""
+    return isinstance(data.get("ok"), bool)
+
+
+def parse_envelope_response(data: dict) -> EnvelopeResponse:
+    """Parse and normalize envelope response."""
+    if data.get("ok") is True:
+        return {
+            "ok": True,
+            "data": data.get("data", {})
+        }
+    else:
+        return {
+            "ok": False,
+            "error": data.get("error", {"code": "UNKNOWN", "message": "Unknown error"}),
+            "partial_data": data.get("partial_data")
+        }
+
+
+def convert_to_envelope(data: dict, is_error: bool = False) -> EnvelopeResponse:
+    """Convert legacy format to envelope format."""
+    if is_error or "error" in data:
+        error = data.get("error", {})
+        return {
+            "ok": False,
+            "error": {
+                "code": error.get("code", "UNKNOWN"),
+                "message": error.get("message", str(error))
+            },
+            "partial_data": None
+        }
+    else:
+        return {
+            "ok": True,
+            "data": data
+        }
+
+
 def run_module(
     name_or_path: str,
     input_data: dict,
     validate_input: bool = True,
     validate_output: bool = True,
     model: Optional[str] = None,
-
+    use_envelope: Optional[bool] = None,
+) -> EnvelopeResponse:
     """
     Run a cognitive module with the given input.
-
+    Returns envelope format response.
 
     Args:
         name_or_path: Module name or path to module directory
@@ -102,9 +174,10 @@ def run_module(
         validate_input: Whether to validate input against schema
         validate_output: Whether to validate output against schema
        model: Optional model override
+        use_envelope: Force envelope format (auto-detect if None)
 
     Returns:
-
+        EnvelopeResponse with ok=True/False and data/error
     """
     # Find module path
     path = Path(name_or_path)
@@ -113,28 +186,91 @@ def run_module(
     else:
         module_path = find_module(name_or_path)
         if not module_path:
-
+            return {
+                "ok": False,
+                "error": {"code": "MODULE_NOT_FOUND", "message": f"Module not found: {name_or_path}"},
+                "partial_data": None
+            }
 
     # Load module (auto-detects format)
     module = load_module(module_path)
 
+    # Determine if we should use envelope format
+    should_use_envelope = use_envelope
+    if should_use_envelope is None:
+        # Auto-detect: use envelope for v2 format or if output.envelope is True
+        output_contract = module.get("output_contract", {})
+        should_use_envelope = (
+            module.get("format") == "v2" or
+            output_contract.get("envelope", False)
+        )
+
     # Validate input
     if validate_input and module["input_schema"]:
         errors = validate_data(input_data, module["input_schema"], "Input")
         if errors:
-
+            return {
+                "ok": False,
+                "error": {"code": "INVALID_INPUT", "message": str(errors)},
+                "partial_data": None
+            }
 
     # Build prompt and call LLM
-    full_prompt = build_prompt(module, input_data)
+    full_prompt = build_prompt(module, input_data, use_envelope=should_use_envelope)
     response = call_llm(full_prompt, model=model)
 
     # Parse response
-
+    try:
+        output_data = parse_llm_response(response)
+    except json.JSONDecodeError as e:
+        return {
+            "ok": False,
+            "error": {"code": "PARSE_ERROR", "message": f"Failed to parse JSON: {e}"},
+            "partial_data": None
+        }
 
-    #
-    if
-
+    # Handle envelope format
+    if is_envelope_response(output_data):
+        result = parse_envelope_response(output_data)
+    else:
+        # Convert legacy format to envelope
+        result = convert_to_envelope(output_data)
+
+    # Validate output (only for success responses)
+    if result["ok"] and validate_output and module["output_schema"]:
+        data_to_validate = result.get("data", {})
+        errors = validate_data(data_to_validate, module["output_schema"], "Output")
         if errors:
-
+            return {
+                "ok": False,
+                "error": {"code": "OUTPUT_VALIDATION_ERROR", "message": str(errors)},
+                "partial_data": data_to_validate
+            }
+
+    return result
+
+
+def run_module_legacy(
+    name_or_path: str,
+    input_data: dict,
+    validate_input: bool = True,
+    validate_output: bool = True,
+    model: Optional[str] = None,
+) -> dict:
+    """
+    Run a cognitive module (legacy API, returns raw output).
+    For backward compatibility.
+    """
+    result = run_module(
+        name_or_path,
+        input_data,
+        validate_input=validate_input,
+        validate_output=validate_output,
+        model=model,
+        use_envelope=False
+    )
 
-
+    if result["ok"]:
+        return result["data"]
+    else:
+        raise ValueError(f"{result['error']['code']}: {result['error']['message']}")
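For orientation, a usage sketch of the new API (the module name "summarize", its input payload, and a configured LLM provider behind call_llm are assumptions; only run_module and run_module_legacy come from the diff above). Callers of 0.3.0 branch on the envelope's "ok" flag and can inspect the error codes introduced here (MODULE_NOT_FOUND, INVALID_INPUT, PARSE_ERROR, OUTPUT_VALIDATION_ERROR), while run_module_legacy keeps the 0.2.x behaviour of returning the raw data dict and raising on failure.

# Usage sketch only; "summarize" and the input payload are hypothetical.
from cognitive.runner import run_module, run_module_legacy

result = run_module("summarize", {"query": "Summarize the 0.3.0 changes."})

if result["ok"]:
    # Success envelope: {"ok": True, "data": {...}}
    print(result["data"])
else:
    # Failure envelope: {"ok": False, "error": {"code": ..., "message": ...}, "partial_data": ...}
    print(result["error"]["code"], result["error"]["message"])

# Legacy wrapper: returns the bare data dict and raises ValueError on any failure envelope.
try:
    data = run_module_legacy("summarize", {"query": "Summarize the 0.3.0 changes."})
except ValueError as exc:
    print(f"module failed: {exc}")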
{cognitive_modules-0.2.0.dist-info → cognitive_modules-0.3.0.dist-info}/RECORD
CHANGED
@@ -1,15 +1,15 @@
 cognitive/__init__.py,sha256=uSX5NuOWyW0qtq1bnxbzabZ0OQakAtsjF0MWbjQBvwE,401
 cognitive/cli.py,sha256=q0vHCHFmig9gQ85KRyIrX6vrJZFm5nXF_bW5qQRGEPU,14608
-cognitive/loader.py,sha256=
+cognitive/loader.py,sha256=3_ShEfefOMP2EJjpQ1VGTGio4K_GUWLZMQHa0RgvBwg,8715
 cognitive/registry.py,sha256=aBkkpg5PtL_tKDIOXpv6vblB7x_Ax0eVaWgejuQTfCE,8851
-cognitive/runner.py,sha256=
+cognitive/runner.py,sha256=KFO_pV7YTluYaL98_5bPp_HTNgwsTvlUjgNYlI8mo-w,8467
 cognitive/subagent.py,sha256=fb7LWwNF6YcJtC_T1dK0EvzqWMBnav-kiCIpvVohEBw,8142
 cognitive/templates.py,sha256=lKC197X9aQIA-npUvVCaplSwvhxjsH_KYVCQtrTZrL4,4712
 cognitive/validator.py,sha256=1v1HUHYOlAc2sYCkIq_gnUMMnca0fdtQwr7UMBFpp04,12200
 cognitive/providers/__init__.py,sha256=hqhVA1IEXpVtyCAteXhO5yD8a8ikQpVIPEKJVHLtRFY,7492
-cognitive_modules-0.
-cognitive_modules-0.
-cognitive_modules-0.
-cognitive_modules-0.
-cognitive_modules-0.
-cognitive_modules-0.
+cognitive_modules-0.3.0.dist-info/licenses/LICENSE,sha256=NXFYUy2hPJdh3NHRxMChTnMiQD9k8zFxkmR7gWefexc,1064
+cognitive_modules-0.3.0.dist-info/METADATA,sha256=PcUIJeGz7wr78MHPEdmDRxqKuFLEzX5aEgpMRHfGN-w,11381
+cognitive_modules-0.3.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+cognitive_modules-0.3.0.dist-info/entry_points.txt,sha256=PKHlfrFmve5K2349ryipySKbOOsKxo_vIq1NNT-iBV0,42
+cognitive_modules-0.3.0.dist-info/top_level.txt,sha256=kGIfDucCKylo8cRBtxER_v3DHIea-Sol9x9YSJo1u3Y,10
+cognitive_modules-0.3.0.dist-info/RECORD,,
{cognitive_modules-0.2.0.dist-info → cognitive_modules-0.3.0.dist-info}/WHEEL, entry_points.txt, licenses/LICENSE, top_level.txt
Files without changes