cognitive-modules 0.4.0__py3-none-any.whl → 0.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cognitive/__init__.py +1 -1
- cognitive/cli.py +173 -18
- cognitive/loader.py +191 -14
- cognitive/mcp_server.py +245 -0
- cognitive/migrate.py +624 -0
- cognitive/runner.py +443 -80
- cognitive/server.py +294 -0
- cognitive/validator.py +380 -122
- {cognitive_modules-0.4.0.dist-info → cognitive_modules-0.5.1.dist-info}/METADATA +194 -177
- cognitive_modules-0.5.1.dist-info/RECORD +18 -0
- cognitive_modules-0.4.0.dist-info/RECORD +0 -15
- {cognitive_modules-0.4.0.dist-info → cognitive_modules-0.5.1.dist-info}/WHEEL +0 -0
- {cognitive_modules-0.4.0.dist-info → cognitive_modules-0.5.1.dist-info}/entry_points.txt +0 -0
- {cognitive_modules-0.4.0.dist-info → cognitive_modules-0.5.1.dist-info}/licenses/LICENSE +0 -0
- {cognitive_modules-0.4.0.dist-info → cognitive_modules-0.5.1.dist-info}/top_level.txt +0 -0
cognitive/server.py
ADDED
|
@@ -0,0 +1,294 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Cognitive Modules HTTP API Server
|
|
3
|
+
|
|
4
|
+
提供 RESTful API 接口,支持工作流平台集成(包括 Coze 插件)。
|
|
5
|
+
|
|
6
|
+
启动方式:
|
|
7
|
+
cogn serve --port 8000
|
|
8
|
+
|
|
9
|
+
或直接运行:
|
|
10
|
+
uvicorn cognitive.server:app --host 0.0.0.0 --port 8000
|
|
11
|
+
|
|
12
|
+
环境变量:
|
|
13
|
+
COGNITIVE_API_KEY - API Key 认证(可选,不设置则无需认证)
|
|
14
|
+
LLM_PROVIDER - LLM 提供商 (openai, anthropic, deepseek, minimax)
|
|
15
|
+
OPENAI_API_KEY - OpenAI API Key
|
|
16
|
+
ANTHROPIC_API_KEY - Anthropic API Key
|
|
17
|
+
DEEPSEEK_API_KEY - DeepSeek API Key
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
import os
import secrets
from typing import Optional, Any, Dict, List

from fastapi import FastAPI, HTTPException, Depends, Security
from fastapi.middleware.cors import CORSMiddleware
from fastapi.security import APIKeyHeader
from pydantic import BaseModel, Field

from .registry import list_modules, find_module
from .loader import load_module
from .runner import run_module as execute_module
|
|
30
|
+
|
|
31
|
+
# ============================================================
|
|
32
|
+
# API Key 认证
|
|
33
|
+
# ============================================================
|
|
34
|
+
|
|
35
|
+
API_KEY_NAME = "Authorization"
|
|
36
|
+
api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
async def verify_api_key(api_key: Optional[str] = Security(api_key_header)) -> Optional[str]:
    """
    Verify the request's API key against the COGNITIVE_API_KEY env var.

    If COGNITIVE_API_KEY is not set, authentication is disabled and None is
    returned. Otherwise the request must carry an Authorization header; the
    conventional "Bearer <key>" prefix is accepted (and stripped), as is a
    bare key.

    Returns:
        The validated key, or None when authentication is disabled.

    Raises:
        HTTPException: 401 when the key is missing or does not match.
    """
    expected_key = os.environ.get("COGNITIVE_API_KEY")

    # No server-side key configured -> skip authentication entirely.
    if not expected_key:
        return None

    if not api_key:
        raise HTTPException(
            status_code=401,
            detail="Missing API Key. Use header: Authorization: Bearer <your-api-key>"
        )

    # Accept the "Bearer <token>" convention.
    if api_key.startswith("Bearer "):
        api_key = api_key[7:]

    # Bug fix: use a constant-time comparison instead of `!=` so the check
    # does not leak key bytes through response timing.
    if not secrets.compare_digest(api_key, expected_key):
        raise HTTPException(status_code=401, detail="Invalid API Key")

    return api_key
|
|
66
|
+
|
|
67
|
+
# ============================================================
|
|
68
|
+
# App Setup
|
|
69
|
+
# ============================================================
|
|
70
|
+
|
|
71
|
+
app = FastAPI(
    title="Cognitive Modules API",
    description="可验证的结构化 AI 任务规范 - HTTP API",
    # Bug fix: was still "0.4.0" in the 0.5.1 release — keep in sync with
    # the package version.
    version="0.5.1",
    docs_url="/docs",
    redoc_url="/redoc",
)

# CORS configuration.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# overly permissive — Starlette will echo the request's Origin to satisfy
# browsers, effectively letting any site make credentialed requests.
# Restrict origins for production deployments.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
|
87
|
+
|
|
88
|
+
# ============================================================
|
|
89
|
+
# Request/Response Models
|
|
90
|
+
# ============================================================
|
|
91
|
+
|
|
92
|
+
class RunRequest(BaseModel):
    """Request body for POST /run: which module to execute and with what input."""
    # NOTE(review): Field(example=...) is deprecated in Pydantic v2 in favor
    # of `examples`/`json_schema_extra` — confirm the pydantic version in use.
    module: str = Field(..., description="模块名称", example="code-reviewer")
    args: str = Field(..., description="输入参数", example="def foo(): pass")
    provider: Optional[str] = Field(None, description="LLM 提供商", example="openai")
    model: Optional[str] = Field(None, description="模型名称", example="gpt-4o")
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
class RunResponse(BaseModel):
    """Response body for POST /run; execution errors are reported in-band via `ok`/`error`."""
    ok: bool = Field(..., description="是否成功")
    data: Optional[Dict[str, Any]] = Field(None, description="成功时的结果")
    error: Optional[str] = Field(None, description="失败时的错误信息")
    module: str = Field(..., description="模块名称")
    provider: Optional[str] = Field(None, description="使用的 LLM 提供商")
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
class ModuleInfo(BaseModel):
    """Metadata describing one installed cognitive module."""
    name: str
    version: Optional[str] = None
    description: Optional[str] = None
    format: str  # module spec format: v0, v1, v2 (or "unknown" when not detected)
    path: str
    responsibility: Optional[str] = None
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
class ModuleListResponse(BaseModel):
    """Response body for GET /modules."""
    modules: List[ModuleInfo]
    count: int  # convenience field: len(modules)
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
class HealthResponse(BaseModel):
    """Response body for GET /health."""
    status: str
    version: str
    # provider name -> whether its API-key environment variable is set
    providers: Dict[str, bool]
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
# ============================================================
|
|
133
|
+
# API Endpoints
|
|
134
|
+
# ============================================================
|
|
135
|
+
|
|
136
|
+
@app.get("/", tags=["Info"])
|
|
137
|
+
async def root():
|
|
138
|
+
"""API 根路径"""
|
|
139
|
+
return {
|
|
140
|
+
"name": "Cognitive Modules API",
|
|
141
|
+
"version": "0.4.0",
|
|
142
|
+
"docs": "/docs",
|
|
143
|
+
"endpoints": {
|
|
144
|
+
"run": "POST /run",
|
|
145
|
+
"modules": "GET /modules",
|
|
146
|
+
"module_info": "GET /modules/{name}",
|
|
147
|
+
"health": "GET /health",
|
|
148
|
+
}
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
@app.get("/health", response_model=HealthResponse, tags=["Info"])
|
|
153
|
+
async def health():
|
|
154
|
+
"""健康检查"""
|
|
155
|
+
providers = {
|
|
156
|
+
"openai": bool(os.environ.get("OPENAI_API_KEY")),
|
|
157
|
+
"anthropic": bool(os.environ.get("ANTHROPIC_API_KEY")),
|
|
158
|
+
"minimax": bool(os.environ.get("MINIMAX_API_KEY")),
|
|
159
|
+
"deepseek": bool(os.environ.get("DEEPSEEK_API_KEY")),
|
|
160
|
+
}
|
|
161
|
+
return HealthResponse(
|
|
162
|
+
status="healthy",
|
|
163
|
+
version="0.4.0",
|
|
164
|
+
providers=providers,
|
|
165
|
+
)
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
@app.get("/modules", response_model=ModuleListResponse, tags=["Modules"])
|
|
169
|
+
async def get_modules():
|
|
170
|
+
"""列出所有已安装模块"""
|
|
171
|
+
modules_data = list_modules()
|
|
172
|
+
modules = []
|
|
173
|
+
|
|
174
|
+
for m in modules_data:
|
|
175
|
+
try:
|
|
176
|
+
module = load_module(m["name"])
|
|
177
|
+
modules.append(ModuleInfo(
|
|
178
|
+
name=m["name"],
|
|
179
|
+
version=module.get("version"),
|
|
180
|
+
description=module.get("description") or module.get("responsibility"),
|
|
181
|
+
format=m.get("format", "unknown"),
|
|
182
|
+
path=m["path"],
|
|
183
|
+
responsibility=module.get("responsibility"),
|
|
184
|
+
))
|
|
185
|
+
except Exception:
|
|
186
|
+
modules.append(ModuleInfo(
|
|
187
|
+
name=m["name"],
|
|
188
|
+
format=m.get("format", "unknown"),
|
|
189
|
+
path=m["path"],
|
|
190
|
+
))
|
|
191
|
+
|
|
192
|
+
return ModuleListResponse(modules=modules, count=len(modules))
|
|
193
|
+
|
|
194
|
+
|
|
195
|
+
@app.get("/modules/{name}", response_model=ModuleInfo, tags=["Modules"])
|
|
196
|
+
async def get_module(name: str):
|
|
197
|
+
"""获取单个模块信息"""
|
|
198
|
+
module_path = find_module(name)
|
|
199
|
+
if not module_path:
|
|
200
|
+
raise HTTPException(status_code=404, detail=f"Module '{name}' not found")
|
|
201
|
+
|
|
202
|
+
try:
|
|
203
|
+
module = load_module(name)
|
|
204
|
+
modules_data = list_modules()
|
|
205
|
+
module_meta = next((m for m in modules_data if m["name"] == name), {})
|
|
206
|
+
|
|
207
|
+
return ModuleInfo(
|
|
208
|
+
name=name,
|
|
209
|
+
version=module.get("version"),
|
|
210
|
+
description=module.get("description") or module.get("responsibility"),
|
|
211
|
+
format=module_meta.get("format", "unknown"),
|
|
212
|
+
path=str(module_path),
|
|
213
|
+
responsibility=module.get("responsibility"),
|
|
214
|
+
)
|
|
215
|
+
except Exception as e:
|
|
216
|
+
raise HTTPException(status_code=500, detail=str(e))
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
@app.post("/run", response_model=RunResponse, tags=["Execution"])
|
|
220
|
+
async def run_module(
|
|
221
|
+
request: RunRequest,
|
|
222
|
+
api_key: Optional[str] = Depends(verify_api_key)
|
|
223
|
+
):
|
|
224
|
+
"""
|
|
225
|
+
运行 Cognitive Module
|
|
226
|
+
|
|
227
|
+
示例:
|
|
228
|
+
```json
|
|
229
|
+
{
|
|
230
|
+
"module": "code-reviewer",
|
|
231
|
+
"args": "def login(u,p): return db.query(f'SELECT * FROM users WHERE name={u}')"
|
|
232
|
+
}
|
|
233
|
+
```
|
|
234
|
+
|
|
235
|
+
认证:
|
|
236
|
+
如果服务器设置了 COGNITIVE_API_KEY,需要在请求头中携带:
|
|
237
|
+
Authorization: Bearer <your-api-key>
|
|
238
|
+
"""
|
|
239
|
+
# 检查模块是否存在
|
|
240
|
+
module_path = find_module(request.module)
|
|
241
|
+
if not module_path:
|
|
242
|
+
raise HTTPException(status_code=404, detail=f"Module '{request.module}' not found")
|
|
243
|
+
|
|
244
|
+
# 设置 provider(如果指定)
|
|
245
|
+
original_provider = os.environ.get("LLM_PROVIDER")
|
|
246
|
+
original_model = os.environ.get("LLM_MODEL")
|
|
247
|
+
|
|
248
|
+
try:
|
|
249
|
+
if request.provider:
|
|
250
|
+
os.environ["LLM_PROVIDER"] = request.provider
|
|
251
|
+
if request.model:
|
|
252
|
+
os.environ["LLM_MODEL"] = request.model
|
|
253
|
+
|
|
254
|
+
# 执行模块
|
|
255
|
+
result = execute_module(request.module, args=request.args)
|
|
256
|
+
|
|
257
|
+
return RunResponse(
|
|
258
|
+
ok=True,
|
|
259
|
+
data=result,
|
|
260
|
+
module=request.module,
|
|
261
|
+
provider=request.provider or os.environ.get("LLM_PROVIDER", "openai"),
|
|
262
|
+
)
|
|
263
|
+
except Exception as e:
|
|
264
|
+
return RunResponse(
|
|
265
|
+
ok=False,
|
|
266
|
+
error=str(e),
|
|
267
|
+
module=request.module,
|
|
268
|
+
provider=request.provider,
|
|
269
|
+
)
|
|
270
|
+
finally:
|
|
271
|
+
# 恢复原始环境变量
|
|
272
|
+
if original_provider:
|
|
273
|
+
os.environ["LLM_PROVIDER"] = original_provider
|
|
274
|
+
elif "LLM_PROVIDER" in os.environ and request.provider:
|
|
275
|
+
del os.environ["LLM_PROVIDER"]
|
|
276
|
+
|
|
277
|
+
if original_model:
|
|
278
|
+
os.environ["LLM_MODEL"] = original_model
|
|
279
|
+
elif "LLM_MODEL" in os.environ and request.model:
|
|
280
|
+
del os.environ["LLM_MODEL"]
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
# ============================================================
|
|
284
|
+
# 启动入口
|
|
285
|
+
# ============================================================
|
|
286
|
+
|
|
287
|
+
def serve(host: str = "0.0.0.0", port: int = 8000):
    """Start the HTTP API server with uvicorn."""
    # Local import: uvicorn is only required when actually serving.
    import uvicorn

    uvicorn.run(app, host=host, port=port)
|
|
291
|
+
|
|
292
|
+
|
|
293
|
+
if __name__ == "__main__":
    # Allow running the server directly (python -m cognitive.server) with defaults.
    serve()
|