cognitive-modules-cli 1.4.1 → 2.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/dist/cli.js +65 -12
  2. package/dist/commands/compose.d.ts +31 -0
  3. package/dist/commands/compose.js +148 -0
  4. package/dist/commands/index.d.ts +1 -0
  5. package/dist/commands/index.js +1 -0
  6. package/dist/index.d.ts +2 -2
  7. package/dist/index.js +5 -1
  8. package/dist/modules/composition.d.ts +251 -0
  9. package/dist/modules/composition.js +1265 -0
  10. package/dist/modules/composition.test.d.ts +11 -0
  11. package/dist/modules/composition.test.js +450 -0
  12. package/dist/modules/index.d.ts +2 -0
  13. package/dist/modules/index.js +2 -0
  14. package/dist/modules/loader.d.ts +22 -2
  15. package/dist/modules/loader.js +167 -4
  16. package/dist/modules/policy.test.d.ts +10 -0
  17. package/dist/modules/policy.test.js +369 -0
  18. package/dist/modules/runner.d.ts +348 -34
  19. package/dist/modules/runner.js +1263 -708
  20. package/dist/modules/subagent.js +2 -0
  21. package/dist/modules/validator.d.ts +28 -0
  22. package/dist/modules/validator.js +629 -0
  23. package/dist/providers/base.d.ts +1 -45
  24. package/dist/providers/base.js +0 -67
  25. package/dist/providers/openai.d.ts +3 -27
  26. package/dist/providers/openai.js +3 -175
  27. package/dist/types.d.ts +93 -316
  28. package/dist/types.js +1 -120
  29. package/package.json +2 -1
  30. package/src/cli.ts +73 -12
  31. package/src/commands/compose.ts +185 -0
  32. package/src/commands/index.ts +1 -0
  33. package/src/index.ts +35 -0
  34. package/src/modules/composition.test.ts +558 -0
  35. package/src/modules/composition.ts +1674 -0
  36. package/src/modules/index.ts +2 -0
  37. package/src/modules/loader.ts +196 -6
  38. package/src/modules/policy.test.ts +455 -0
  39. package/src/modules/runner.ts +1692 -998
  40. package/src/modules/subagent.ts +2 -0
  41. package/src/modules/validator.ts +700 -0
  42. package/src/providers/base.ts +1 -86
  43. package/src/providers/openai.ts +4 -226
  44. package/src/types.ts +113 -462
  45. package/tsconfig.json +1 -1
@@ -1,17 +1,8 @@
1
1
  /**
2
2
  * Base Provider - Abstract class for all LLM providers
3
- * v2.5: Added streaming and multimodal support
4
3
  */
5
4
 
6
- import type {
7
- Provider,
8
- InvokeParams,
9
- InvokeResult,
10
- ProviderV25,
11
- InvokeParamsV25,
12
- StreamingInvokeResult,
13
- ModalityType
14
- } from '../types.js';
5
+ import type { Provider, InvokeParams, InvokeResult } from '../types.js';
15
6
 
16
7
  export abstract class BaseProvider implements Provider {
17
8
  abstract name: string;
@@ -36,79 +27,3 @@ export abstract class BaseProvider implements Provider {
36
27
  }
37
28
  }
38
29
  }
39
-
40
- /**
41
- * Base Provider with v2.5 streaming and multimodal support
42
- */
43
- export abstract class BaseProviderV25 extends BaseProvider implements ProviderV25 {
44
- /**
45
- * Check if this provider supports streaming
46
- * Override in subclass to enable streaming
47
- */
48
- supportsStreaming(): boolean {
49
- return false;
50
- }
51
-
52
- /**
53
- * Check if this provider supports multimodal input/output
54
- * Override in subclass to enable multimodal
55
- */
56
- supportsMultimodal(): { input: ModalityType[]; output: ModalityType[] } {
57
- return {
58
- input: ['text'],
59
- output: ['text']
60
- };
61
- }
62
-
63
- /**
64
- * Invoke with streaming response
65
- * Override in subclass to implement streaming
66
- */
67
- async invokeStream(params: InvokeParamsV25): Promise<StreamingInvokeResult> {
68
- // Default: fallback to non-streaming with async generator wrapper
69
- const result = await this.invoke(params);
70
-
71
- async function* generateChunks(): AsyncIterable<string> {
72
- yield result.content;
73
- }
74
-
75
- return {
76
- stream: generateChunks(),
77
- usage: result.usage
78
- };
79
- }
80
-
81
- /**
82
- * Format media inputs for the specific provider API
83
- * Override in subclass for provider-specific formatting
84
- */
85
- protected formatMediaForProvider(
86
- images?: Array<{ type: string; url?: string; data?: string; media_type?: string }>,
87
- _audio?: Array<{ type: string; url?: string; data?: string; media_type?: string }>,
88
- _video?: Array<{ type: string; url?: string; data?: string; media_type?: string }>
89
- ): unknown[] {
90
- // Default implementation for image-only providers (like OpenAI Vision)
91
- if (!images || images.length === 0) {
92
- return [];
93
- }
94
-
95
- return images.map(img => {
96
- if (img.type === 'url' && img.url) {
97
- return {
98
- type: 'image_url',
99
- image_url: {
100
- url: img.url
101
- }
102
- };
103
- } else if (img.type === 'base64' && img.data && img.media_type) {
104
- return {
105
- type: 'image_url',
106
- image_url: {
107
- url: `data:${img.media_type};base64,${img.data}`
108
- }
109
- };
110
- }
111
- return null;
112
- }).filter(Boolean);
113
- }
114
- }
@@ -1,35 +1,17 @@
1
1
  /**
2
2
  * OpenAI Provider - OpenAI API (and compatible APIs)
3
- * v2.5: Added streaming and multimodal (vision) support
4
3
  */
5
4
 
6
- import { BaseProviderV25 } from './base.js';
7
- import type {
8
- InvokeParams,
9
- InvokeResult,
10
- InvokeParamsV25,
11
- StreamingInvokeResult,
12
- ModalityType,
13
- MediaInput
14
- } from '../types.js';
5
+ import { BaseProvider } from './base.js';
6
+ import type { InvokeParams, InvokeResult } from '../types.js';
15
7
 
16
- // Type for OpenAI message content
17
- type OpenAIContentPart =
18
- | { type: 'text'; text: string }
19
- | { type: 'image_url'; image_url: { url: string; detail?: 'low' | 'high' | 'auto' } };
20
-
21
- type OpenAIMessage = {
22
- role: 'system' | 'user' | 'assistant';
23
- content: string | OpenAIContentPart[];
24
- };
25
-
26
- export class OpenAIProvider extends BaseProviderV25 {
8
+ export class OpenAIProvider extends BaseProvider {
27
9
  name = 'openai';
28
10
  private apiKey: string;
29
11
  private model: string;
30
12
  private baseUrl: string;
31
13
 
32
- constructor(apiKey?: string, model = 'gpt-4o', baseUrl = 'https://api.openai.com/v1') {
14
+ constructor(apiKey?: string, model = 'gpt-5.2', baseUrl = 'https://api.openai.com/v1') {
33
15
  super();
34
16
  this.apiKey = apiKey || process.env.OPENAI_API_KEY || '';
35
17
  this.model = model;
@@ -40,27 +22,6 @@ export class OpenAIProvider extends BaseProviderV25 {
40
22
  return !!this.apiKey;
41
23
  }
42
24
 
43
- /**
44
- * Check if streaming is supported (always true for OpenAI)
45
- */
46
- supportsStreaming(): boolean {
47
- return true;
48
- }
49
-
50
- /**
51
- * Check multimodal support (vision models)
52
- */
53
- supportsMultimodal(): { input: ModalityType[]; output: ModalityType[] } {
54
- // Vision models support image input
55
- const visionModels = ['gpt-4o', 'gpt-4-vision', 'gpt-4-turbo', 'gpt-4o-mini'];
56
- const supportsVision = visionModels.some(m => this.model.includes(m));
57
-
58
- return {
59
- input: supportsVision ? ['text', 'image'] : ['text'],
60
- output: ['text'] // DALL-E would be separate
61
- };
62
- }
63
-
64
25
  async invoke(params: InvokeParams): Promise<InvokeResult> {
65
26
  if (!this.isConfigured()) {
66
27
  throw new Error('OpenAI API key not configured. Set OPENAI_API_KEY environment variable.');
@@ -120,187 +81,4 @@ export class OpenAIProvider extends BaseProviderV25 {
120
81
  } : undefined,
121
82
  };
122
83
  }
123
-
124
- /**
125
- * Invoke with streaming response
126
- */
127
- async invokeStream(params: InvokeParamsV25): Promise<StreamingInvokeResult> {
128
- if (!this.isConfigured()) {
129
- throw new Error('OpenAI API key not configured. Set OPENAI_API_KEY environment variable.');
130
- }
131
-
132
- const url = `${this.baseUrl}/chat/completions`;
133
-
134
- // Build messages with multimodal content if present
135
- const messages = this.buildMessagesWithMedia(params);
136
-
137
- const body: Record<string, unknown> = {
138
- model: this.model,
139
- messages,
140
- temperature: params.temperature ?? 0.7,
141
- max_tokens: params.maxTokens ?? 4096,
142
- stream: true,
143
- };
144
-
145
- // Add JSON mode if schema provided
146
- if (params.jsonSchema) {
147
- body.response_format = { type: 'json_object' };
148
- }
149
-
150
- const response = await fetch(url, {
151
- method: 'POST',
152
- headers: {
153
- 'Content-Type': 'application/json',
154
- 'Authorization': `Bearer ${this.apiKey}`,
155
- },
156
- body: JSON.stringify(body),
157
- });
158
-
159
- if (!response.ok) {
160
- const error = await response.text();
161
- throw new Error(`OpenAI API error: ${response.status} - ${error}`);
162
- }
163
-
164
- const bodyReader = response.body?.getReader();
165
- if (!bodyReader) {
166
- throw new Error('No response body');
167
- }
168
-
169
- const decoder = new TextDecoder();
170
- let usage: { promptTokens: number; completionTokens: number; totalTokens: number } | undefined;
171
-
172
- // Capture reader reference for closure
173
- const reader = bodyReader;
174
-
175
- // Create async generator for streaming
176
- async function* streamGenerator(): AsyncIterable<string> {
177
- let buffer = '';
178
-
179
- while (true) {
180
- const { done, value } = await reader.read();
181
-
182
- if (done) break;
183
-
184
- buffer += decoder.decode(value, { stream: true });
185
-
186
- // Parse SSE events
187
- const lines = buffer.split('\n');
188
- buffer = lines.pop() || '';
189
-
190
- for (const line of lines) {
191
- if (line.startsWith('data: ')) {
192
- const data = line.slice(6);
193
-
194
- if (data === '[DONE]') {
195
- return;
196
- }
197
-
198
- try {
199
- const parsed = JSON.parse(data) as {
200
- choices?: Array<{ delta?: { content?: string } }>;
201
- usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number };
202
- };
203
-
204
- const content = parsed.choices?.[0]?.delta?.content;
205
- if (content) {
206
- yield content;
207
- }
208
-
209
- // Capture usage if available
210
- if (parsed.usage) {
211
- usage = {
212
- promptTokens: parsed.usage.prompt_tokens || 0,
213
- completionTokens: parsed.usage.completion_tokens || 0,
214
- totalTokens: parsed.usage.total_tokens || 0,
215
- };
216
- }
217
- } catch {
218
- // Skip malformed JSON
219
- }
220
- }
221
- }
222
- }
223
- }
224
-
225
- return {
226
- stream: streamGenerator(),
227
- usage
228
- };
229
- }
230
-
231
- /**
232
- * Build messages with multimodal content (images)
233
- */
234
- private buildMessagesWithMedia(params: InvokeParamsV25): OpenAIMessage[] {
235
- const hasImages = params.images && params.images.length > 0;
236
-
237
- if (!hasImages) {
238
- return params.messages;
239
- }
240
-
241
- // Find the last user message and add images to it
242
- const messages: OpenAIMessage[] = [];
243
- const lastUserIdx = params.messages.findLastIndex(m => m.role === 'user');
244
-
245
- for (let i = 0; i < params.messages.length; i++) {
246
- const msg = params.messages[i];
247
-
248
- if (i === lastUserIdx && hasImages) {
249
- // Convert to multimodal content
250
- const content: OpenAIContentPart[] = [
251
- { type: 'text', text: msg.content }
252
- ];
253
-
254
- // Add images
255
- for (const img of params.images!) {
256
- const imageUrl = this.mediaInputToUrl(img);
257
- if (imageUrl) {
258
- content.push({
259
- type: 'image_url',
260
- image_url: { url: imageUrl, detail: 'auto' }
261
- });
262
- }
263
- }
264
-
265
- messages.push({ role: msg.role, content });
266
- } else {
267
- messages.push({ role: msg.role, content: msg.content });
268
- }
269
- }
270
-
271
- // Add JSON schema instruction if needed
272
- if (params.jsonSchema && lastUserIdx >= 0) {
273
- const lastMsg = messages[lastUserIdx];
274
- if (typeof lastMsg.content === 'string') {
275
- lastMsg.content = lastMsg.content + this.buildJsonPrompt(params.jsonSchema);
276
- } else {
277
- // Content is array, append to text part
278
- const textPart = lastMsg.content.find(p => p.type === 'text');
279
- if (textPart && textPart.type === 'text') {
280
- textPart.text = textPart.text + this.buildJsonPrompt(params.jsonSchema);
281
- }
282
- }
283
- }
284
-
285
- return messages;
286
- }
287
-
288
- /**
289
- * Convert MediaInput to URL for OpenAI API
290
- */
291
- private mediaInputToUrl(media: MediaInput): string | null {
292
- switch (media.type) {
293
- case 'url':
294
- return media.url;
295
- case 'base64':
296
- return `data:${media.media_type};base64,${media.data}`;
297
- case 'file':
298
- // File paths would need to be loaded first
299
- // This should be handled by the runner before calling the provider
300
- console.warn('[cognitive] File media input not pre-loaded, skipping');
301
- return null;
302
- default:
303
- return null;
304
- }
305
- }
306
84
  }