cognitive-modules 2.2.1.tar.gz → 2.2.2.tar.gz
This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/PKG-INFO +11 -8
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/README.md +10 -7
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/pyproject.toml +1 -1
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive/loader.py +8 -4
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive/providers/__init__.py +152 -1
- cognitive_modules-2.2.2/src/cognitive/runner.py +1192 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive/validator.py +2 -4
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive_modules.egg-info/PKG-INFO +11 -8
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/tests/test_runner.py +309 -7
- cognitive_modules-2.2.1/src/cognitive/runner.py +0 -654
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/LICENSE +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/setup.cfg +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive/__init__.py +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive/cli.py +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive/mcp_server.py +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive/migrate.py +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive/registry.py +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive/server.py +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive/subagent.py +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive/templates.py +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive_modules.egg-info/SOURCES.txt +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive_modules.egg-info/dependency_links.txt +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive_modules.egg-info/entry_points.txt +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive_modules.egg-info/requires.txt +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/src/cognitive_modules.egg-info/top_level.txt +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/tests/test_cli.py +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/tests/test_loader.py +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/tests/test_migrate.py +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/tests/test_registry.py +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/tests/test_subagent.py +0 -0
- {cognitive_modules-2.2.1 → cognitive_modules-2.2.2}/tests/test_validator.py +0 -0
--- cognitive_modules-2.2.1/PKG-INFO
+++ cognitive_modules-2.2.2/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cognitive-modules
-Version: 2.2.1
+Version: 2.2.2
 Summary: Structured LLM task runner with schema validation, confidence scoring, and subagent orchestration
 Author: ziel-io
 License: MIT
@@ -59,7 +59,10 @@ Dynamic: license-file
 # Cognitive Modules
 
 [](https://github.com/ziel-io/cognitive-modules/actions/workflows/ci.yml)
-[](https://www.npmjs.com/package/cognitive-modules-cli)
+[](https://pypi.org/project/cognitive-modules/)
+[](https://www.npmjs.com/package/cognitive-modules-cli)
+[](https://nodejs.org/)
 [](https://www.python.org/downloads/)
 [](https://opensource.org/licenses/MIT)
 
@@ -94,13 +97,13 @@ Cognitive Modules is an AI task definition specification designed for generation
 
 | Version | Spec | npm | PyPI | Status |
 |---------|------|-----|------|--------|
-| **v2.2** | v2.2 | `2.2.0` | `2.2.0` | ✅ Stable (recommended) |
+| **v2.2** | v2.2 | `2.2.0` | `2.2.1` | ✅ Stable (recommended) |
 | **v2.5** | v2.5 | `2.5.0-beta.x` | `2.5.0bx` | 🧪 Beta (streaming + multimodal) |
 
 ```bash
 # Install stable v2.2
 npm install cognitive-modules-cli@2.2.0
-pip install cognitive-modules==2.2.0
+pip install cognitive-modules==2.2.1
 
 # Install beta v2.5 (streaming + multimodal)
 npm install cognitive-modules-cli@beta
@@ -122,12 +125,12 @@ npm install -g cognitive-modules-cli@2.2.0
 ### Python (pip)
 
 ```bash
-pip install cognitive-modules==2.2.0
+pip install cognitive-modules==2.2.1
 
 # With LLM support
-pip install "cognitive-modules[openai]==2.2.0"
-pip install "cognitive-modules[anthropic]==2.2.0"
-pip install "cognitive-modules[all]==2.2.0"
+pip install "cognitive-modules[openai]==2.2.1"     # OpenAI
+pip install "cognitive-modules[anthropic]==2.2.1"  # Claude
+pip install "cognitive-modules[all]==2.2.1"        # All providers
 ```
 
 | Platform | Package | Command | Features |
--- cognitive_modules-2.2.1/README.md
+++ cognitive_modules-2.2.2/README.md
@@ -1,7 +1,10 @@
 # Cognitive Modules
 
 [](https://github.com/ziel-io/cognitive-modules/actions/workflows/ci.yml)
-[](https://www.npmjs.com/package/cognitive-modules-cli)
+[](https://pypi.org/project/cognitive-modules/)
+[](https://www.npmjs.com/package/cognitive-modules-cli)
+[](https://nodejs.org/)
 [](https://www.python.org/downloads/)
 [](https://opensource.org/licenses/MIT)
 
@@ -36,13 +39,13 @@ Cognitive Modules is an AI task definition specification designed for generation
 
 | Version | Spec | npm | PyPI | Status |
 |---------|------|-----|------|--------|
-| **v2.2** | v2.2 | `2.2.0` | `2.2.0` | ✅ Stable (recommended) |
+| **v2.2** | v2.2 | `2.2.0` | `2.2.1` | ✅ Stable (recommended) |
 | **v2.5** | v2.5 | `2.5.0-beta.x` | `2.5.0bx` | 🧪 Beta (streaming + multimodal) |
 
 ```bash
 # Install stable v2.2
 npm install cognitive-modules-cli@2.2.0
-pip install cognitive-modules==2.2.0
+pip install cognitive-modules==2.2.1
 
 # Install beta v2.5 (streaming + multimodal)
 npm install cognitive-modules-cli@beta
@@ -64,12 +67,12 @@ npm install -g cognitive-modules-cli@2.2.0
 ### Python (pip)
 
 ```bash
-pip install cognitive-modules==2.2.0
+pip install cognitive-modules==2.2.1
 
 # With LLM support
-pip install "cognitive-modules[openai]==2.2.0"
-pip install "cognitive-modules[anthropic]==2.2.0"
-pip install "cognitive-modules[all]==2.2.0"
+pip install "cognitive-modules[openai]==2.2.1"     # OpenAI
+pip install "cognitive-modules[anthropic]==2.2.1"  # Claude
+pip install "cognitive-modules[all]==2.2.1"        # All providers
 ```
 
 | Platform | Package | Command | Features |
--- cognitive_modules-2.2.1/pyproject.toml
+++ cognitive_modules-2.2.2/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "cognitive-modules"
-version = "2.2.1"
+version = "2.2.2"
 description = "Structured LLM task runner with schema validation, confidence scoring, and subagent orchestration"
 readme = "README.md"
 license = {text = "MIT"}
--- cognitive_modules-2.2.1/src/cognitive/loader.py
+++ cognitive_modules-2.2.2/src/cognitive/loader.py
@@ -184,16 +184,20 @@ def load_v2_format(module_path: Path) -> dict:
         "require_suggested_mapping": overflow_raw.get("require_suggested_mapping", True)
     }
 
-    enums = manifest.get("enums", {
+    # Merge enums with defaults (ensure defaults are applied even if partial config exists)
+    enums_defaults = {
         "strategy": "extensible" if tier in ("decision", "exploration") else "strict",
         "unknown_tag": "custom"
-    })
+    }
+    enums = {**enums_defaults, **manifest.get("enums", {})}
 
-    compat = manifest.get("compat", {
+    # Merge compat with defaults (ensure defaults are applied even if partial config exists)
+    compat_defaults = {
         "accepts_v21_payload": True,
         "runtime_auto_wrap": True,
         "schema_output_alias": "data"
-    })
+    }
+    compat = {**compat_defaults, **manifest.get("compat", {})}
 
     io_config = manifest.get("io", {})
     tests = manifest.get("tests", [])
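The functional change in loader.py: per the new comments, 2.2.1 resolved enums/compat with a plain `manifest.get(key, defaults)`, which returns the manifest's dict untouched whenever the key exists at all, so a partial config block silently dropped the remaining defaults. 2.2.2 merges defaults with the manifest values instead. A standalone sketch of the difference, using hypothetical manifest data rather than package imports:

```python
# Illustrative only: mirrors the loader.py fix with hypothetical manifest data.
defaults = {"strategy": "strict", "unknown_tag": "custom"}
manifest = {"enums": {"strategy": "extensible"}}  # partial config: omits "unknown_tag"

# Old behavior: defaults apply only when "enums" is entirely absent.
enums_old = manifest.get("enums", defaults)
assert enums_old == {"strategy": "extensible"}    # "unknown_tag" default is lost

# New behavior: manifest values override defaults key by key.
enums_new = {**defaults, **manifest.get("enums", {})}
assert enums_new == {"strategy": "extensible", "unknown_tag": "custom"}
```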
--- cognitive_modules-2.2.1/src/cognitive/providers/__init__.py
+++ cognitive_modules-2.2.2/src/cognitive/providers/__init__.py
@@ -1,11 +1,15 @@
 """
 LLM Providers - Unified interface for calling different LLM backends.
+
+Supports both synchronous and streaming modes:
+- call_llm(): Synchronous, returns complete response
+- call_llm_stream(): Streaming, yields response chunks
 """
 
 import json
 import os
 from pathlib import Path
-from typing import Optional
+from typing import Optional, Iterator
 
 
 def call_llm(prompt: str, model: Optional[str] = None) -> str:
@@ -38,6 +42,153 @@ def call_llm(prompt: str, model: Optional[str] = None) -> str:
     return _call_stub(prompt)
 
 
+def call_llm_stream(prompt: str, model: Optional[str] = None) -> Iterator[str]:
+    """
+    Call the configured LLM with streaming output.
+
+    Yields chunks of the response as they are generated.
+
+    Args:
+        prompt: The prompt to send
+        model: Optional model override
+
+    Yields:
+        String chunks of the LLM's response
+    """
+    provider = os.environ.get("LLM_PROVIDER", "stub").lower()
+
+    if provider == "openai":
+        yield from _stream_openai(prompt, model)
+    elif provider == "anthropic":
+        yield from _stream_anthropic(prompt, model)
+    elif provider == "ollama":
+        yield from _stream_ollama(prompt, model)
+    elif provider == "minimax":
+        yield from _stream_minimax(prompt, model)
+    else:
+        # Stub doesn't support streaming, return full response
+        yield _call_stub(prompt)
+
+
+def _stream_openai(prompt: str, model: Optional[str] = None) -> Iterator[str]:
+    """Stream from OpenAI API."""
+    try:
+        from openai import OpenAI
+    except ImportError:
+        raise ImportError("OpenAI not installed. Run: pip install cognitive[openai]")
+
+    api_key = os.environ.get("OPENAI_API_KEY")
+    if not api_key:
+        raise ValueError("OPENAI_API_KEY environment variable not set")
+
+    client = OpenAI(api_key=api_key)
+    model = model or os.environ.get("LLM_MODEL", "gpt-4o")
+
+    stream = client.chat.completions.create(
+        model=model,
+        messages=[
+            {"role": "system", "content": "You output only valid JSON matching the required schema."},
+            {"role": "user", "content": prompt}
+        ],
+        temperature=0.2,
+        stream=True
+    )
+
+    for chunk in stream:
+        if chunk.choices and chunk.choices[0].delta.content:
+            yield chunk.choices[0].delta.content
+
+
+def _stream_anthropic(prompt: str, model: Optional[str] = None) -> Iterator[str]:
+    """Stream from Anthropic Claude API."""
+    try:
+        import anthropic
+    except ImportError:
+        raise ImportError("Anthropic not installed. Run: pip install cognitive[anthropic]")
+
+    api_key = os.environ.get("ANTHROPIC_API_KEY")
+    if not api_key:
+        raise ValueError("ANTHROPIC_API_KEY environment variable not set")
+
+    client = anthropic.Anthropic(api_key=api_key)
+    model = model or os.environ.get("LLM_MODEL", "claude-sonnet-4-20250514")
+
+    with client.messages.stream(
+        model=model,
+        max_tokens=8192,
+        system="You output only valid JSON matching the required schema.",
+        messages=[{"role": "user", "content": prompt}]
+    ) as stream:
+        for text in stream.text_stream:
+            yield text
+
+
+def _stream_minimax(prompt: str, model: Optional[str] = None) -> Iterator[str]:
+    """Stream from MiniMax API (OpenAI-compatible)."""
+    try:
+        from openai import OpenAI
+    except ImportError:
+        raise ImportError("OpenAI SDK not installed. Run: pip install openai")
+
+    api_key = os.environ.get("MINIMAX_API_KEY")
+    if not api_key:
+        raise ValueError("MINIMAX_API_KEY environment variable not set")
+
+    client = OpenAI(
+        api_key=api_key,
+        base_url="https://api.minimax.chat/v1"
+    )
+    model = model or os.environ.get("LLM_MODEL", "MiniMax-Text-01")
+
+    stream = client.chat.completions.create(
+        model=model,
+        messages=[
+            {"role": "system", "content": "You output only valid JSON matching the required schema."},
+            {"role": "user", "content": prompt}
+        ],
+        temperature=0.2,
+        stream=True
+    )
+
+    for chunk in stream:
+        if chunk.choices and chunk.choices[0].delta.content:
+            yield chunk.choices[0].delta.content
+
+
+def _stream_ollama(prompt: str, model: Optional[str] = None) -> Iterator[str]:
+    """Stream from local Ollama instance."""
+    try:
+        import requests
+    except ImportError:
+        raise ImportError("Requests not installed. Run: pip install cognitive[ollama]")
+
+    host = os.environ.get("OLLAMA_HOST", "http://localhost:11434")
+    model = model or os.environ.get("LLM_MODEL", "llama3.1")
+
+    response = requests.post(
+        f"{host}/api/generate",
+        json={
+            "model": model,
+            "prompt": prompt,
+            "stream": True,
+            "format": "json",
+            "options": {"temperature": 0.2}
+        },
+        stream=True
+    )
+    response.raise_for_status()
+
+    for line in response.iter_lines():
+        if line:
+            try:
+                data = json.loads(line)
+                if "response" in data:
+                    yield data["response"]
+            except json.JSONDecodeError:
+                # Skip malformed JSON lines
+                continue
+
+
 def _call_openai(prompt: str, model: Optional[str] = None) -> str:
     """Call OpenAI API."""
     try: