cognitive-modules-cli 2.2.1 → 2.2.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. package/CHANGELOG.md +11 -0
  2. package/LICENSE +21 -0
  3. package/README.md +35 -29
  4. package/dist/cli.js +513 -22
  5. package/dist/commands/add.d.ts +33 -14
  6. package/dist/commands/add.js +222 -13
  7. package/dist/commands/compose.js +60 -23
  8. package/dist/commands/index.d.ts +4 -0
  9. package/dist/commands/index.js +4 -0
  10. package/dist/commands/init.js +23 -1
  11. package/dist/commands/migrate.d.ts +30 -0
  12. package/dist/commands/migrate.js +650 -0
  13. package/dist/commands/pipe.d.ts +1 -0
  14. package/dist/commands/pipe.js +31 -11
  15. package/dist/commands/remove.js +33 -2
  16. package/dist/commands/run.d.ts +1 -0
  17. package/dist/commands/run.js +37 -27
  18. package/dist/commands/search.d.ts +28 -0
  19. package/dist/commands/search.js +143 -0
  20. package/dist/commands/test.d.ts +65 -0
  21. package/dist/commands/test.js +454 -0
  22. package/dist/commands/update.d.ts +1 -0
  23. package/dist/commands/update.js +106 -14
  24. package/dist/commands/validate.d.ts +36 -0
  25. package/dist/commands/validate.js +97 -0
  26. package/dist/errors/index.d.ts +218 -0
  27. package/dist/errors/index.js +412 -0
  28. package/dist/mcp/server.js +84 -79
  29. package/dist/modules/composition.js +97 -32
  30. package/dist/modules/loader.js +4 -2
  31. package/dist/modules/runner.d.ts +65 -0
  32. package/dist/modules/runner.js +293 -49
  33. package/dist/modules/subagent.d.ts +6 -1
  34. package/dist/modules/subagent.js +18 -13
  35. package/dist/modules/validator.js +14 -6
  36. package/dist/providers/anthropic.d.ts +15 -0
  37. package/dist/providers/anthropic.js +147 -5
  38. package/dist/providers/base.d.ts +11 -0
  39. package/dist/providers/base.js +18 -0
  40. package/dist/providers/gemini.d.ts +15 -0
  41. package/dist/providers/gemini.js +122 -5
  42. package/dist/providers/ollama.d.ts +15 -0
  43. package/dist/providers/ollama.js +111 -3
  44. package/dist/providers/openai.d.ts +11 -0
  45. package/dist/providers/openai.js +133 -0
  46. package/dist/registry/client.d.ts +204 -0
  47. package/dist/registry/client.js +356 -0
  48. package/dist/registry/index.d.ts +4 -0
  49. package/dist/registry/index.js +4 -0
  50. package/dist/server/http.js +173 -42
  51. package/dist/types.d.ts +32 -1
  52. package/dist/types.js +4 -1
  53. package/dist/version.d.ts +1 -0
  54. package/dist/version.js +4 -0
  55. package/package.json +31 -7
  56. package/dist/modules/composition.test.d.ts +0 -11
  57. package/dist/modules/composition.test.js +0 -450
  58. package/dist/modules/policy.test.d.ts +0 -10
  59. package/dist/modules/policy.test.js +0 -369
  60. package/src/cli.ts +0 -471
  61. package/src/commands/add.ts +0 -315
  62. package/src/commands/compose.ts +0 -185
  63. package/src/commands/index.ts +0 -13
  64. package/src/commands/init.ts +0 -94
  65. package/src/commands/list.ts +0 -33
  66. package/src/commands/pipe.ts +0 -76
  67. package/src/commands/remove.ts +0 -57
  68. package/src/commands/run.ts +0 -80
  69. package/src/commands/update.ts +0 -130
  70. package/src/commands/versions.ts +0 -79
  71. package/src/index.ts +0 -90
  72. package/src/mcp/index.ts +0 -5
  73. package/src/mcp/server.ts +0 -403
  74. package/src/modules/composition.test.ts +0 -558
  75. package/src/modules/composition.ts +0 -1674
  76. package/src/modules/index.ts +0 -9
  77. package/src/modules/loader.ts +0 -508
  78. package/src/modules/policy.test.ts +0 -455
  79. package/src/modules/runner.ts +0 -1983
  80. package/src/modules/subagent.ts +0 -277
  81. package/src/modules/validator.ts +0 -700
  82. package/src/providers/anthropic.ts +0 -89
  83. package/src/providers/base.ts +0 -29
  84. package/src/providers/deepseek.ts +0 -83
  85. package/src/providers/gemini.ts +0 -117
  86. package/src/providers/index.ts +0 -78
  87. package/src/providers/minimax.ts +0 -81
  88. package/src/providers/moonshot.ts +0 -82
  89. package/src/providers/ollama.ts +0 -83
  90. package/src/providers/openai.ts +0 -84
  91. package/src/providers/qwen.ts +0 -82
  92. package/src/server/http.ts +0 -316
  93. package/src/server/index.ts +0 -6
  94. package/src/types.ts +0 -599
  95. package/tsconfig.json +0 -17
@@ -1,5 +1,7 @@
1
1
  /**
2
2
  * Ollama Provider - Local LLM via Ollama
3
+ *
4
+ * Supports both streaming and non-streaming invocation.
3
5
  */
4
6
  import { BaseProvider } from './base.js';
5
7
  export class OllamaProvider extends BaseProvider {
@@ -14,8 +16,16 @@ export class OllamaProvider extends BaseProvider {
14
16
  isConfigured() {
15
17
  return true; // Ollama doesn't need API key
16
18
  }
17
- async invoke(params) {
18
- const url = `${this.baseUrl}/api/chat`;
19
+ /**
20
+ * Ollama supports streaming.
21
+ */
22
+ supportsStreaming() {
23
+ return true;
24
+ }
25
+ /**
26
+ * Build request body for Ollama API
27
+ */
28
+ buildRequestBody(params, stream) {
19
29
  let messages = params.messages.map(m => ({ role: m.role, content: m.content }));
20
30
  // Add JSON mode if schema provided
21
31
  if (params.jsonSchema) {
@@ -31,7 +41,7 @@ export class OllamaProvider extends BaseProvider {
31
41
  const body = {
32
42
  model: this.model,
33
43
  messages,
34
- stream: false,
44
+ stream,
35
45
  options: {
36
46
  temperature: params.temperature ?? 0.7,
37
47
  num_predict: params.maxTokens ?? 4096,
@@ -41,6 +51,11 @@ export class OllamaProvider extends BaseProvider {
41
51
  if (params.jsonSchema) {
42
52
  body.format = 'json';
43
53
  }
54
+ return body;
55
+ }
56
+ async invoke(params) {
57
+ const url = `${this.baseUrl}/api/chat`;
58
+ const body = this.buildRequestBody(params, false);
44
59
  const response = await fetch(url, {
45
60
  method: 'POST',
46
61
  headers: { 'Content-Type': 'application/json' },
@@ -61,4 +76,97 @@ export class OllamaProvider extends BaseProvider {
61
76
  },
62
77
  };
63
78
  }
79
+ /**
80
+ * Stream-based invoke using Ollama's streaming API.
81
+ * Yields content chunks as they arrive from the API.
82
+ */
83
+ async *invokeStream(params) {
84
+ const url = `${this.baseUrl}/api/chat`;
85
+ const body = this.buildRequestBody(params, true);
86
+ const response = await fetch(url, {
87
+ method: 'POST',
88
+ headers: { 'Content-Type': 'application/json' },
89
+ body: JSON.stringify(body),
90
+ });
91
+ if (!response.ok) {
92
+ const error = await response.text();
93
+ throw new Error(`Ollama API error: ${response.status} - ${error}`);
94
+ }
95
+ if (!response.body) {
96
+ throw new Error('Ollama API returned no body for streaming request');
97
+ }
98
+ const reader = response.body.getReader();
99
+ const decoder = new TextDecoder('utf-8');
100
+ const collectedChunks = [];
101
+ let promptTokens = 0;
102
+ let completionTokens = 0;
103
+ let buffer = '';
104
+ try {
105
+ while (true) {
106
+ const { done, value } = await reader.read();
107
+ if (done)
108
+ break;
109
+ buffer += decoder.decode(value, { stream: true });
110
+ // Process complete lines from the buffer (NDJSON format)
111
+ const lines = buffer.split('\n');
112
+ buffer = lines.pop() || ''; // Keep incomplete line in buffer
113
+ for (const line of lines) {
114
+ const trimmed = line.trim();
115
+ if (!trimmed)
116
+ continue;
117
+ try {
118
+ const data = JSON.parse(trimmed);
119
+ // Extract content chunk
120
+ const content = data.message?.content;
121
+ if (content) {
122
+ collectedChunks.push(content);
123
+ yield content;
124
+ }
125
+ // Extract usage info (in the final message when done=true)
126
+ if (data.done) {
127
+ promptTokens = data.prompt_eval_count || 0;
128
+ completionTokens = data.eval_count || 0;
129
+ }
130
+ }
131
+ catch {
132
+ // Skip invalid JSON lines
133
+ }
134
+ }
135
+ }
136
+ // Flush decoder and process trailing buffered data even without trailing newline.
137
+ buffer += decoder.decode();
138
+ for (const line of buffer.split('\n')) {
139
+ const trimmed = line.trim();
140
+ if (!trimmed)
141
+ continue;
142
+ try {
143
+ const data = JSON.parse(trimmed);
144
+ const content = data.message?.content;
145
+ if (content) {
146
+ collectedChunks.push(content);
147
+ yield content;
148
+ }
149
+ if (data.done) {
150
+ promptTokens = data.prompt_eval_count || 0;
151
+ completionTokens = data.eval_count || 0;
152
+ }
153
+ }
154
+ catch {
155
+ // Skip invalid JSON lines
156
+ }
157
+ }
158
+ }
159
+ finally {
160
+ reader.releaseLock();
161
+ }
162
+ const fullContent = collectedChunks.join('');
163
+ return {
164
+ content: fullContent,
165
+ usage: {
166
+ promptTokens,
167
+ completionTokens,
168
+ totalTokens: promptTokens + completionTokens,
169
+ },
170
+ };
171
+ }
64
172
  }
@@ -1,5 +1,7 @@
1
1
  /**
2
2
  * OpenAI Provider - OpenAI API (and compatible APIs)
3
+ *
4
+ * Supports both streaming and non-streaming invocation.
3
5
  */
4
6
  import { BaseProvider } from './base.js';
5
7
  import type { InvokeParams, InvokeResult } from '../types.js';
@@ -10,5 +12,14 @@ export declare class OpenAIProvider extends BaseProvider {
10
12
  private baseUrl;
11
13
  constructor(apiKey?: string, model?: string, baseUrl?: string);
12
14
  isConfigured(): boolean;
15
+ /**
16
+ * OpenAI supports streaming.
17
+ */
18
+ supportsStreaming(): boolean;
13
19
  invoke(params: InvokeParams): Promise<InvokeResult>;
20
+ /**
21
+ * Stream-based invoke using OpenAI's streaming API.
22
+ * Yields content chunks as they arrive from the API.
23
+ */
24
+ invokeStream(params: InvokeParams): AsyncGenerator<string, InvokeResult, unknown>;
14
25
  }
@@ -1,5 +1,7 @@
1
1
  /**
2
2
  * OpenAI Provider - OpenAI API (and compatible APIs)
3
+ *
4
+ * Supports both streaming and non-streaming invocation.
3
5
  */
4
6
  import { BaseProvider } from './base.js';
5
7
  export class OpenAIProvider extends BaseProvider {
@@ -16,6 +18,12 @@ export class OpenAIProvider extends BaseProvider {
16
18
  isConfigured() {
17
19
  return !!this.apiKey;
18
20
  }
21
+ /**
22
+ * OpenAI supports streaming.
23
+ */
24
+ supportsStreaming() {
25
+ return true;
26
+ }
19
27
  async invoke(params) {
20
28
  if (!this.isConfigured()) {
21
29
  throw new Error('OpenAI API key not configured. Set OPENAI_API_KEY environment variable.');
@@ -64,4 +72,129 @@ export class OpenAIProvider extends BaseProvider {
64
72
  } : undefined,
65
73
  };
66
74
  }
75
+ /**
76
+ * Stream-based invoke using OpenAI's streaming API.
77
+ * Yields content chunks as they arrive from the API.
78
+ */
79
+ async *invokeStream(params) {
80
+ if (!this.isConfigured()) {
81
+ throw new Error('OpenAI API key not configured. Set OPENAI_API_KEY environment variable.');
82
+ }
83
+ const url = `${this.baseUrl}/chat/completions`;
84
+ const body = {
85
+ model: this.model,
86
+ messages: params.messages,
87
+ temperature: params.temperature ?? 0.7,
88
+ max_tokens: params.maxTokens ?? 4096,
89
+ stream: true,
90
+ stream_options: { include_usage: true },
91
+ };
92
+ // Add JSON mode if schema provided
93
+ if (params.jsonSchema) {
94
+ body.response_format = { type: 'json_object' };
95
+ // Append schema instruction to last user message
96
+ const lastUserIdx = params.messages.findLastIndex(m => m.role === 'user');
97
+ if (lastUserIdx >= 0) {
98
+ const messages = [...params.messages];
99
+ messages[lastUserIdx] = {
100
+ ...messages[lastUserIdx],
101
+ content: messages[lastUserIdx].content + this.buildJsonPrompt(params.jsonSchema),
102
+ };
103
+ body.messages = messages;
104
+ }
105
+ }
106
+ const response = await fetch(url, {
107
+ method: 'POST',
108
+ headers: {
109
+ 'Content-Type': 'application/json',
110
+ 'Authorization': `Bearer ${this.apiKey}`,
111
+ },
112
+ body: JSON.stringify(body),
113
+ });
114
+ if (!response.ok) {
115
+ const error = await response.text();
116
+ throw new Error(`OpenAI API error: ${response.status} - ${error}`);
117
+ }
118
+ if (!response.body) {
119
+ throw new Error('OpenAI API returned no body for streaming request');
120
+ }
121
+ const reader = response.body.getReader();
122
+ const decoder = new TextDecoder('utf-8');
123
+ const collectedChunks = [];
124
+ let usage;
125
+ let buffer = '';
126
+ try {
127
+ while (true) {
128
+ const { done, value } = await reader.read();
129
+ if (done)
130
+ break;
131
+ buffer += decoder.decode(value, { stream: true });
132
+ // Process complete lines from the buffer
133
+ const lines = buffer.split('\n');
134
+ buffer = lines.pop() || ''; // Keep incomplete line in buffer
135
+ for (const line of lines) {
136
+ const trimmed = line.trim();
137
+ if (!trimmed || trimmed === 'data: [DONE]')
138
+ continue;
139
+ if (trimmed.startsWith('data: ')) {
140
+ try {
141
+ const data = JSON.parse(trimmed.slice(6));
142
+ // Extract content chunk
143
+ const content = data.choices?.[0]?.delta?.content;
144
+ if (content) {
145
+ collectedChunks.push(content);
146
+ yield content;
147
+ }
148
+ // Extract usage info (usually in the last chunk)
149
+ if (data.usage) {
150
+ usage = {
151
+ promptTokens: data.usage.prompt_tokens || 0,
152
+ completionTokens: data.usage.completion_tokens || 0,
153
+ totalTokens: data.usage.total_tokens || 0,
154
+ };
155
+ }
156
+ }
157
+ catch {
158
+ // Skip invalid JSON chunks
159
+ }
160
+ }
161
+ }
162
+ }
163
+ // Flush decoder and process trailing buffered data even without trailing newline.
164
+ buffer += decoder.decode();
165
+ for (const line of buffer.split('\n')) {
166
+ const trimmed = line.trim();
167
+ if (!trimmed || trimmed === 'data: [DONE]')
168
+ continue;
169
+ if (trimmed.startsWith('data: ')) {
170
+ try {
171
+ const data = JSON.parse(trimmed.slice(6));
172
+ const content = data.choices?.[0]?.delta?.content;
173
+ if (content) {
174
+ collectedChunks.push(content);
175
+ yield content;
176
+ }
177
+ if (data.usage) {
178
+ usage = {
179
+ promptTokens: data.usage.prompt_tokens || 0,
180
+ completionTokens: data.usage.completion_tokens || 0,
181
+ totalTokens: data.usage.total_tokens || 0,
182
+ };
183
+ }
184
+ }
185
+ catch {
186
+ // Skip invalid JSON chunks
187
+ }
188
+ }
189
+ }
190
+ }
191
+ finally {
192
+ reader.releaseLock();
193
+ }
194
+ const fullContent = collectedChunks.join('');
195
+ return {
196
+ content: fullContent,
197
+ usage,
198
+ };
199
+ }
67
200
  }
@@ -0,0 +1,204 @@
1
+ /**
2
+ * Registry Client - Fetch and manage modules from Cognitive Modules Registry
3
+ *
4
+ * Supports both v1 and v2 registry formats.
5
+ *
6
+ * Usage:
7
+ * const client = new RegistryClient();
8
+ * const modules = await client.listModules();
9
+ * const module = await client.getModule('code-reviewer');
10
+ */
11
+ /** v1 Registry Format (current cognitive-registry.json) */
12
+ export interface RegistryV1 {
13
+ $schema?: string;
14
+ version: string;
15
+ updated: string;
16
+ modules: {
17
+ [name: string]: {
18
+ description: string;
19
+ version: string;
20
+ source: string;
21
+ tags: string[];
22
+ author: string;
23
+ };
24
+ };
25
+ categories?: {
26
+ [key: string]: {
27
+ name: string;
28
+ description: string;
29
+ modules: string[];
30
+ };
31
+ };
32
+ }
33
+ /** v2 Registry Format (new format per REGISTRY-PROTOCOL.md) */
34
+ export interface RegistryV2 {
35
+ $schema?: string;
36
+ version: string;
37
+ updated: string;
38
+ modules: {
39
+ [name: string]: RegistryEntryV2;
40
+ };
41
+ categories?: {
42
+ [key: string]: {
43
+ name: string;
44
+ name_zh?: string;
45
+ description: string;
46
+ modules: string[];
47
+ };
48
+ };
49
+ featured?: string[];
50
+ stats?: {
51
+ total_modules: number;
52
+ total_downloads: number;
53
+ last_updated: string;
54
+ };
55
+ }
56
+ /** v2 Registry Entry */
57
+ export interface RegistryEntryV2 {
58
+ identity: {
59
+ name: string;
60
+ namespace: string;
61
+ version: string;
62
+ spec_version: string;
63
+ };
64
+ metadata: {
65
+ description: string;
66
+ description_zh?: string;
67
+ author: string;
68
+ license?: string;
69
+ repository?: string;
70
+ documentation?: string;
71
+ homepage?: string;
72
+ keywords?: string[];
73
+ tier?: 'exec' | 'decision' | 'exploration';
74
+ };
75
+ quality?: {
76
+ conformance_level?: number;
77
+ test_coverage?: number;
78
+ test_vector_pass?: boolean;
79
+ verified?: boolean;
80
+ verified_by?: string;
81
+ verified_at?: string;
82
+ downloads_30d?: number;
83
+ stars?: number;
84
+ badges?: string[];
85
+ deprecated?: boolean;
86
+ successor?: string;
87
+ deprecation_reason?: string;
88
+ };
89
+ dependencies: {
90
+ runtime_min: string;
91
+ modules: string[];
92
+ };
93
+ distribution: {
94
+ tarball?: string;
95
+ checksum?: string;
96
+ size_bytes?: number;
97
+ files?: string[];
98
+ source?: string;
99
+ };
100
+ timestamps?: {
101
+ created_at?: string;
102
+ updated_at?: string;
103
+ deprecated_at?: string;
104
+ };
105
+ }
106
+ /** Normalized module info (works with both v1 and v2) */
107
+ export interface ModuleInfo {
108
+ name: string;
109
+ version: string;
110
+ description: string;
111
+ author: string;
112
+ source: string;
113
+ keywords: string[];
114
+ tier?: string;
115
+ namespace?: string;
116
+ license?: string;
117
+ repository?: string;
118
+ conformance_level?: number;
119
+ verified?: boolean;
120
+ deprecated?: boolean;
121
+ }
122
+ /** Search result */
123
+ export interface SearchResult {
124
+ name: string;
125
+ description: string;
126
+ version: string;
127
+ score: number;
128
+ keywords: string[];
129
+ }
130
+ export declare class RegistryClient {
131
+ private registryUrl;
132
+ private cache;
133
+ constructor(registryUrl?: string);
134
+ private parseRegistryResponse;
135
+ /**
136
+ * Generate a unique cache filename based on registry URL
137
+ */
138
+ private getCacheFileName;
139
+ /**
140
+ * Fetch registry index (with caching)
141
+ */
142
+ fetchRegistry(forceRefresh?: boolean): Promise<RegistryV1 | RegistryV2>;
143
+ /**
144
+ * Check if registry is v2 format
145
+ */
146
+ private isV2Registry;
147
+ /**
148
+ * Normalize module entry to unified format
149
+ */
150
+ private normalizeModule;
151
+ /**
152
+ * List all modules in registry
153
+ */
154
+ listModules(): Promise<ModuleInfo[]>;
155
+ /**
156
+ * Get a specific module by name
157
+ */
158
+ getModule(name: string): Promise<ModuleInfo | null>;
159
+ /**
160
+ * Search modules by query
161
+ */
162
+ search(query: string): Promise<SearchResult[]>;
163
+ /**
164
+ * Get categories
165
+ */
166
+ getCategories(): Promise<{
167
+ [key: string]: {
168
+ name: string;
169
+ description: string;
170
+ modules: string[];
171
+ };
172
+ }>;
173
+ /**
174
+ * Parse GitHub source string
175
+ * Format: github:<owner>/<repo>[/<path>][@<ref>]
176
+ */
177
+ parseGitHubSource(source: string): {
178
+ org: string;
179
+ repo: string;
180
+ path?: string;
181
+ ref?: string;
182
+ } | null;
183
+ /**
184
+ * Verify checksum of downloaded file
185
+ */
186
+ verifyChecksum(filePath: string, expected: string): Promise<boolean>;
187
+ /**
188
+ * Get the download URL for a module
189
+ */
190
+ getDownloadUrl(moduleName: string): Promise<{
191
+ url: string;
192
+ isGitHub: boolean;
193
+ githubInfo?: {
194
+ org: string;
195
+ repo: string;
196
+ path?: string;
197
+ ref?: string;
198
+ };
199
+ }>;
200
+ }
201
+ export declare const defaultRegistry: RegistryClient;
202
+ export declare function listRegistryModules(): Promise<ModuleInfo[]>;
203
+ export declare function getRegistryModule(name: string): Promise<ModuleInfo | null>;
204
+ export declare function searchRegistry(query: string): Promise<SearchResult[]>;