cognitive-modules-cli 1.4.0 → 2.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/providers/base.d.ts CHANGED
@@ -1,8 +1,7 @@
  /**
  * Base Provider - Abstract class for all LLM providers
- * v2.5: Added streaming and multimodal support
  */
- import type { Provider, InvokeParams, InvokeResult, ProviderV25, InvokeParamsV25, StreamingInvokeResult, ModalityType } from '../types.js';
+ import type { Provider, InvokeParams, InvokeResult } from '../types.js';
  export declare abstract class BaseProvider implements Provider {
  abstract name: string;
  abstract invoke(params: InvokeParams): Promise<InvokeResult>;
@@ -10,46 +9,3 @@ export declare abstract class BaseProvider implements Provider {
  protected buildJsonPrompt(schema: object): string;
  protected parseJsonResponse(content: string): unknown;
  }
- /**
- * Base Provider with v2.5 streaming and multimodal support
- */
- export declare abstract class BaseProviderV25 extends BaseProvider implements ProviderV25 {
- /**
- * Check if this provider supports streaming
- * Override in subclass to enable streaming
- */
- supportsStreaming(): boolean;
- /**
- * Check if this provider supports multimodal input/output
- * Override in subclass to enable multimodal
- */
- supportsMultimodal(): {
- input: ModalityType[];
- output: ModalityType[];
- };
- /**
- * Invoke with streaming response
- * Override in subclass to implement streaming
- */
- invokeStream(params: InvokeParamsV25): Promise<StreamingInvokeResult>;
- /**
- * Format media inputs for the specific provider API
- * Override in subclass for provider-specific formatting
- */
- protected formatMediaForProvider(images?: Array<{
- type: string;
- url?: string;
- data?: string;
- media_type?: string;
- }>, _audio?: Array<{
- type: string;
- url?: string;
- data?: string;
- media_type?: string;
- }>, _video?: Array<{
- type: string;
- url?: string;
- data?: string;
- media_type?: string;
- }>): unknown[];
- }
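
With `BaseProviderV25` removed, the declaration file keeps only the synchronous `Provider` contract. A minimal sketch of a custom provider against the retained surface (the `EchoProvider` name is hypothetical, the relative imports assume code sitting next to the package's dist files, and the params/result shapes are inferred from this diff):

```ts
// Sketch only: a hypothetical subclass of the retained BaseProvider.
import { BaseProvider } from './base.js';
import type { InvokeParams, InvokeResult } from '../types.js';

export class EchoProvider extends BaseProvider {
  name = 'echo';

  // A real provider would call an LLM API here; this one echoes the last message.
  async invoke(params: InvokeParams): Promise<InvokeResult> {
    const last = params.messages[params.messages.length - 1];
    // The cast hedges fields of InvokeResult not visible in this diff.
    return { content: last?.content ?? '' } as InvokeResult;
  }
}
```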
package/dist/providers/base.js CHANGED
@@ -1,6 +1,5 @@
  /**
  * Base Provider - Abstract class for all LLM providers
- * v2.5: Added streaming and multimodal support
  */
  export class BaseProvider {
  buildJsonPrompt(schema) {
@@ -18,69 +17,3 @@ export class BaseProvider {
  }
  }
  }
- /**
- * Base Provider with v2.5 streaming and multimodal support
- */
- export class BaseProviderV25 extends BaseProvider {
- /**
- * Check if this provider supports streaming
- * Override in subclass to enable streaming
- */
- supportsStreaming() {
- return false;
- }
- /**
- * Check if this provider supports multimodal input/output
- * Override in subclass to enable multimodal
- */
- supportsMultimodal() {
- return {
- input: ['text'],
- output: ['text']
- };
- }
- /**
- * Invoke with streaming response
- * Override in subclass to implement streaming
- */
- async invokeStream(params) {
- // Default: fallback to non-streaming with async generator wrapper
- const result = await this.invoke(params);
- async function* generateChunks() {
- yield result.content;
- }
- return {
- stream: generateChunks(),
- usage: result.usage
- };
- }
- /**
- * Format media inputs for the specific provider API
- * Override in subclass for provider-specific formatting
- */
- formatMediaForProvider(images, _audio, _video) {
- // Default implementation for image-only providers (like OpenAI Vision)
- if (!images || images.length === 0) {
- return [];
- }
- return images.map(img => {
- if (img.type === 'url' && img.url) {
- return {
- type: 'image_url',
- image_url: {
- url: img.url
- }
- };
- }
- else if (img.type === 'base64' && img.data && img.media_type) {
- return {
- type: 'image_url',
- image_url: {
- url: `data:${img.media_type};base64,${img.data}`
- }
- };
- }
- return null;
- }).filter(Boolean);
- }
- }
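
The deleted `invokeStream` default did no real streaming: it awaited `invoke()` and wrapped the full response in a one-shot async generator. Callers that relied on that fallback can reproduce it in user code. A sketch assuming only the `invoke()` shape visible above (`invokeAsStream` and `MinimalProvider` are illustrative names):

```ts
// Sketch: userland replacement for the removed single-chunk streaming fallback.
interface MinimalProvider {
  invoke(params: unknown): Promise<{ content: string; usage?: unknown }>;
}

async function invokeAsStream(provider: MinimalProvider, params: unknown) {
  const result = await provider.invoke(params);
  // Yield the complete response as a single chunk, as the removed default did.
  async function* chunks(): AsyncGenerator<string> {
    yield result.content;
  }
  return { stream: chunks(), usage: result.usage };
}
```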
package/dist/providers/openai.d.ts CHANGED
@@ -1,38 +1,14 @@
  /**
  * OpenAI Provider - OpenAI API (and compatible APIs)
- * v2.5: Added streaming and multimodal (vision) support
  */
- import { BaseProviderV25 } from './base.js';
- import type { InvokeParams, InvokeResult, InvokeParamsV25, StreamingInvokeResult, ModalityType } from '../types.js';
- export declare class OpenAIProvider extends BaseProviderV25 {
+ import { BaseProvider } from './base.js';
+ import type { InvokeParams, InvokeResult } from '../types.js';
+ export declare class OpenAIProvider extends BaseProvider {
  name: string;
  private apiKey;
  private model;
  private baseUrl;
  constructor(apiKey?: string, model?: string, baseUrl?: string);
  isConfigured(): boolean;
- /**
- * Check if streaming is supported (always true for OpenAI)
- */
- supportsStreaming(): boolean;
- /**
- * Check multimodal support (vision models)
- */
- supportsMultimodal(): {
- input: ModalityType[];
- output: ModalityType[];
- };
  invoke(params: InvokeParams): Promise<InvokeResult>;
- /**
- * Invoke with streaming response
- */
- invokeStream(params: InvokeParamsV25): Promise<StreamingInvokeResult>;
- /**
- * Build messages with multimodal content (images)
- */
- private buildMessagesWithMedia;
- /**
- * Convert MediaInput to URL for OpenAI API
- */
- private mediaInputToUrl;
  }
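
What remains public on `OpenAIProvider` is the constructor, `isConfigured()`, and `invoke()`. A short usage sketch (the argument defaults and error text come from openai.js below; the message shape is the one used throughout this diff):

```ts
// Sketch: gating on configuration before invoking the retained API.
const provider = new OpenAIProvider(); // falls back to process.env.OPENAI_API_KEY
if (!provider.isConfigured()) {
  throw new Error('OpenAI API key not configured. Set OPENAI_API_KEY environment variable.');
}
const result = await provider.invoke({
  messages: [{ role: 'user', content: 'Summarize this diff.' }],
});
console.log(result.content);
```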
package/dist/providers/openai.js CHANGED
@@ -1,14 +1,13 @@
  /**
  * OpenAI Provider - OpenAI API (and compatible APIs)
- * v2.5: Added streaming and multimodal (vision) support
  */
- import { BaseProviderV25 } from './base.js';
- export class OpenAIProvider extends BaseProviderV25 {
+ import { BaseProvider } from './base.js';
+ export class OpenAIProvider extends BaseProvider {
  name = 'openai';
  apiKey;
  model;
  baseUrl;
- constructor(apiKey, model = 'gpt-4o', baseUrl = 'https://api.openai.com/v1') {
+ constructor(apiKey, model = 'gpt-5.2', baseUrl = 'https://api.openai.com/v1') {
  super();
  this.apiKey = apiKey || process.env.OPENAI_API_KEY || '';
  this.model = model;
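
The default model changes from 'gpt-4o' to 'gpt-5.2' in this hunk. Callers that depended on the old default can pin it explicitly, per the constructor signature in openai.d.ts:

```ts
// Sketch: pin model and baseUrl so the changed default does not alter behavior.
const provider = new OpenAIProvider(
  process.env.OPENAI_API_KEY, // optional; the constructor falls back to the env var
  'gpt-4o',                   // the pre-2.2.0 default model
  'https://api.openai.com/v1' // unchanged default, shown for completeness
);
```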
@@ -17,24 +16,6 @@ export class OpenAIProvider extends BaseProviderV25 {
  isConfigured() {
  return !!this.apiKey;
  }
- /**
- * Check if streaming is supported (always true for OpenAI)
- */
- supportsStreaming() {
- return true;
- }
- /**
- * Check multimodal support (vision models)
- */
- supportsMultimodal() {
- // Vision models support image input
- const visionModels = ['gpt-4o', 'gpt-4-vision', 'gpt-4-turbo', 'gpt-4o-mini'];
- const supportsVision = visionModels.some(m => this.model.includes(m));
- return {
- input: supportsVision ? ['text', 'image'] : ['text'],
- output: ['text'] // DALL-E would be separate
- };
- }
  async invoke(params) {
  if (!this.isConfigured()) {
  throw new Error('OpenAI API key not configured. Set OPENAI_API_KEY environment variable.');
@@ -83,157 +64,4 @@ export class OpenAIProvider extends BaseProviderV25 {
  } : undefined,
  };
  }
- /**
- * Invoke with streaming response
- */
- async invokeStream(params) {
- if (!this.isConfigured()) {
- throw new Error('OpenAI API key not configured. Set OPENAI_API_KEY environment variable.');
- }
- const url = `${this.baseUrl}/chat/completions`;
- // Build messages with multimodal content if present
- const messages = this.buildMessagesWithMedia(params);
- const body = {
- model: this.model,
- messages,
- temperature: params.temperature ?? 0.7,
- max_tokens: params.maxTokens ?? 4096,
- stream: true,
- };
- // Add JSON mode if schema provided
- if (params.jsonSchema) {
- body.response_format = { type: 'json_object' };
- }
- const response = await fetch(url, {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json',
- 'Authorization': `Bearer ${this.apiKey}`,
- },
- body: JSON.stringify(body),
- });
- if (!response.ok) {
- const error = await response.text();
- throw new Error(`OpenAI API error: ${response.status} - ${error}`);
- }
- const bodyReader = response.body?.getReader();
- if (!bodyReader) {
- throw new Error('No response body');
- }
- const decoder = new TextDecoder();
- let usage;
- // Capture reader reference for closure
- const reader = bodyReader;
- // Create async generator for streaming
- async function* streamGenerator() {
- let buffer = '';
- while (true) {
- const { done, value } = await reader.read();
- if (done)
- break;
- buffer += decoder.decode(value, { stream: true });
- // Parse SSE events
- const lines = buffer.split('\n');
- buffer = lines.pop() || '';
- for (const line of lines) {
- if (line.startsWith('data: ')) {
- const data = line.slice(6);
- if (data === '[DONE]') {
- return;
- }
- try {
- const parsed = JSON.parse(data);
- const content = parsed.choices?.[0]?.delta?.content;
- if (content) {
- yield content;
- }
- // Capture usage if available
- if (parsed.usage) {
- usage = {
- promptTokens: parsed.usage.prompt_tokens || 0,
- completionTokens: parsed.usage.completion_tokens || 0,
- totalTokens: parsed.usage.total_tokens || 0,
- };
- }
- }
- catch {
- // Skip malformed JSON
- }
- }
- }
- }
- }
- return {
- stream: streamGenerator(),
- usage
- };
- }
- /**
- * Build messages with multimodal content (images)
- */
- buildMessagesWithMedia(params) {
- const hasImages = params.images && params.images.length > 0;
- if (!hasImages) {
- return params.messages;
- }
- // Find the last user message and add images to it
- const messages = [];
- const lastUserIdx = params.messages.findLastIndex(m => m.role === 'user');
- for (let i = 0; i < params.messages.length; i++) {
- const msg = params.messages[i];
- if (i === lastUserIdx && hasImages) {
- // Convert to multimodal content
- const content = [
- { type: 'text', text: msg.content }
- ];
- // Add images
- for (const img of params.images) {
- const imageUrl = this.mediaInputToUrl(img);
- if (imageUrl) {
- content.push({
- type: 'image_url',
- image_url: { url: imageUrl, detail: 'auto' }
- });
- }
- }
- messages.push({ role: msg.role, content });
- }
- else {
- messages.push({ role: msg.role, content: msg.content });
- }
- }
- // Add JSON schema instruction if needed
- if (params.jsonSchema && lastUserIdx >= 0) {
- const lastMsg = messages[lastUserIdx];
- if (typeof lastMsg.content === 'string') {
- lastMsg.content = lastMsg.content + this.buildJsonPrompt(params.jsonSchema);
- }
- else {
- // Content is array, append to text part
- const textPart = lastMsg.content.find(p => p.type === 'text');
- if (textPart && textPart.type === 'text') {
- textPart.text = textPart.text + this.buildJsonPrompt(params.jsonSchema);
- }
- }
- }
- return messages;
- }
- /**
- * Convert MediaInput to URL for OpenAI API
- */
- mediaInputToUrl(media) {
- switch (media.type) {
- case 'url':
- return media.url;
- case 'base64':
- return `data:${media.media_type};base64,${media.data}`;
- case 'file':
- // File paths would need to be loaded first
- // This should be handled by the runner before calling the provider
- console.warn('[cognitive] File media input not pre-loaded, skipping');
- return null;
- default:
- return null;
- }
- }
  }
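
With the SSE parser above deleted, consumers who still need token-level output must consume the Chat Completions stream themselves. A self-contained sketch mirroring the removed logic (the endpoint, `data:` framing, and `[DONE]` sentinel all come from the deleted code):

```ts
// Sketch: stream chat completion deltas directly, reproducing the removed parser.
async function* streamChat(apiKey: string, body: object): AsyncGenerator<string> {
  const response = await fetch('https://api.openai.com/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`,
    },
    body: JSON.stringify({ ...body, stream: true }),
  });
  if (!response.ok || !response.body) {
    throw new Error(`OpenAI API error: ${response.status}`);
  }
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop() ?? ''; // keep any partial SSE line for the next read
    for (const line of lines) {
      if (!line.startsWith('data: ')) continue;
      const data = line.slice(6);
      if (data === '[DONE]') return;
      try {
        const content = JSON.parse(data).choices?.[0]?.delta?.content;
        if (content) yield content;
      } catch {
        // Skip malformed JSON lines, as the removed implementation did.
      }
    }
  }
}
```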
package/dist/types.d.ts CHANGED
@@ -1,6 +1,6 @@
  /**
  * Cognitive Runtime - Core Types
- * Version 2.5 - With streaming response and multimodal support
+ * Version 2.2 - With Control/Data plane separation, tier, overflow, extensible enums
  */
  export interface Provider {
  name: string;
@@ -282,210 +282,3 @@ export declare function extractMeta<T>(response: EnvelopeResponse<T>, riskRule?:
  export declare function aggregateRisk(data: Record<string, unknown>, riskRule?: RiskRule): RiskLevel;
  /** Check if result should be escalated to human review */
  export declare function shouldEscalate<T>(response: EnvelopeResponse<T>, confidenceThreshold?: number): boolean;
- /** Response mode configuration */
- export type ResponseMode = 'sync' | 'streaming' | 'both';
- /** Chunk type for streaming */
- export type ChunkType = 'delta' | 'snapshot';
- /** Response configuration in module.yaml */
- export interface ResponseConfig {
- mode: ResponseMode;
- chunk_type?: ChunkType;
- buffer_size?: number;
- heartbeat_interval_ms?: number;
- max_duration_ms?: number;
- }
- /** Meta chunk - initial streaming response */
- export interface MetaChunk {
- ok: true;
- streaming: true;
- session_id: string;
- meta: Partial<EnvelopeMeta>;
- }
- /** Delta chunk - incremental content */
- export interface DeltaChunk {
- chunk: {
- seq: number;
- type: 'delta';
- field?: string;
- delta: string;
- };
- }
- /** Snapshot chunk - full state replacement */
- export interface SnapshotChunk {
- chunk: {
- seq: number;
- type: 'snapshot';
- field?: string;
- data: unknown;
- };
- }
- /** Progress chunk - progress update */
- export interface ProgressChunk {
- progress: {
- percent: number;
- stage?: string;
- message?: string;
- };
- }
- /** Final chunk - completion signal */
- export interface FinalChunk {
- final: true;
- meta: EnvelopeMeta;
- data: ModuleResultData;
- usage?: {
- input_tokens: number;
- output_tokens: number;
- total_tokens: number;
- };
- }
- /** Error chunk during streaming */
- export interface ErrorChunk {
- ok: false;
- streaming: true;
- session_id?: string;
- error: {
- code: string;
- message: string;
- recoverable?: boolean;
- };
- partial_data?: unknown;
- }
- /** Union of all streaming chunk types */
- export type StreamingChunk = MetaChunk | DeltaChunk | SnapshotChunk | ProgressChunk | FinalChunk | ErrorChunk;
- /** Streaming session state */
- export interface StreamingSession {
- session_id: string;
- module_name: string;
- started_at: number;
- chunks_sent: number;
- accumulated_data: Record<string, unknown>;
- accumulated_text: Record<string, string>;
- }
- /** Supported modality types */
- export type ModalityType = 'text' | 'image' | 'audio' | 'video' | 'document';
- /** Modalities configuration in module.yaml */
- export interface ModalitiesConfig {
- input: ModalityType[];
- output: ModalityType[];
- constraints?: MediaConstraints;
- }
- /** Media size/duration constraints */
- export interface MediaConstraints {
- max_image_size_mb?: number;
- max_audio_size_mb?: number;
- max_video_size_mb?: number;
- max_audio_duration_s?: number;
- max_video_duration_s?: number;
- allowed_image_types?: string[];
- allowed_audio_types?: string[];
- allowed_video_types?: string[];
- }
- /** Media input - URL reference */
- export interface UrlMediaInput {
- type: 'url';
- url: string;
- media_type?: string;
- }
- /** Media input - Base64 inline */
- export interface Base64MediaInput {
- type: 'base64';
- media_type: string;
- data: string;
- }
- /** Media input - File path */
- export interface FileMediaInput {
- type: 'file';
- path: string;
- }
- /** Union of media input types */
- export type MediaInput = UrlMediaInput | Base64MediaInput | FileMediaInput;
- /** Media output with metadata */
- export interface MediaOutput {
- type: 'url' | 'base64' | 'file';
- media_type: string;
- url?: string;
- data?: string;
- path?: string;
- width?: number;
- height?: number;
- duration_ms?: number;
- expires_at?: string;
- generation_params?: Record<string, unknown>;
- }
- /** Supported image MIME types */
- export declare const SUPPORTED_IMAGE_TYPES: readonly ["image/jpeg", "image/png", "image/webp", "image/gif"];
- /** Supported audio MIME types */
- export declare const SUPPORTED_AUDIO_TYPES: readonly ["audio/mpeg", "audio/wav", "audio/ogg", "audio/webm"];
- /** Supported video MIME types */
- export declare const SUPPORTED_VIDEO_TYPES: readonly ["video/mp4", "video/webm", "video/quicktime"];
- /** v2.5 Error codes for streaming and multimodal */
- export declare const ErrorCodesV25: {
- readonly UNSUPPORTED_MEDIA_TYPE: "E1010";
- readonly MEDIA_TOO_LARGE: "E1011";
- readonly MEDIA_FETCH_FAILED: "E1012";
- readonly MEDIA_DECODE_FAILED: "E1013";
- readonly STREAM_INTERRUPTED: "E2010";
- readonly STREAM_TIMEOUT: "E2011";
- readonly STREAMING_NOT_SUPPORTED: "E4010";
- readonly MULTIMODAL_NOT_SUPPORTED: "E4011";
- };
- export type ErrorCodeV25 = typeof ErrorCodesV25[keyof typeof ErrorCodesV25];
- /** Runtime capability declaration */
- export interface RuntimeCapabilities {
- streaming: boolean;
- multimodal: {
- input: ModalityType[];
- output: ModalityType[];
- };
- max_media_size_mb: number;
- supported_transports: ('sse' | 'websocket' | 'ndjson')[];
- }
- /** Default runtime capabilities */
- export declare const DEFAULT_RUNTIME_CAPABILITIES: RuntimeCapabilities;
- /** Extended invoke params with streaming support */
- export interface InvokeParamsV25 extends InvokeParams {
- stream?: boolean;
- images?: MediaInput[];
- audio?: MediaInput[];
- video?: MediaInput[];
- }
- /** Streaming invoke result */
- export interface StreamingInvokeResult {
- stream: AsyncIterable<string>;
- usage?: {
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
- };
- }
- /** Extended provider interface for v2.5 */
- export interface ProviderV25 extends Provider {
- /** Check if provider supports streaming */
- supportsStreaming?(): boolean;
- /** Check if provider supports multimodal input */
- supportsMultimodal?(): {
- input: ModalityType[];
- output: ModalityType[];
- };
- /** Invoke with streaming */
- invokeStream?(params: InvokeParamsV25): Promise<StreamingInvokeResult>;
- }
- /** Type guard for v2.5 provider */
- export declare function isProviderV25(provider: Provider): provider is ProviderV25;
- /** Extended module interface for v2.5 */
- export interface CognitiveModuleV25 extends CognitiveModule {
- /** v2.5: Response configuration */
- response?: ResponseConfig;
- /** v2.5: Modalities configuration */
- modalities?: ModalitiesConfig;
- }
- /** Type guard for v2.5 module */
- export declare function isModuleV25(module: CognitiveModule): module is CognitiveModuleV25;
- /** Check if module supports streaming */
- export declare function moduleSupportsStreaming(module: CognitiveModule): boolean;
- /** Check if module supports multimodal input */
- export declare function moduleSupportsMultimodal(module: CognitiveModule): boolean;
- /** Get supported input modalities for module */
- export declare function getModuleInputModalities(module: CognitiveModule): ModalityType[];
- /** Get supported output modalities for module */
- export declare function getModuleOutputModalities(module: CognitiveModule): ModalityType[];
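
Because `ProviderV25` and the `isProviderV25` guard are no longer exported, downstream code cannot narrow providers through the package's types. A local duck-typing sketch (a stricter variant of the removed guard, which only checked for the presence of `invokeStream` or `supportsStreaming`; `StreamingCapable` is defined here for illustration):

```ts
// Sketch: local stand-in for the removed isProviderV25() type guard.
type StreamingCapable = {
  invokeStream(params: unknown): Promise<{ stream: AsyncIterable<string> }>;
};

function canStream(provider: unknown): provider is StreamingCapable {
  return (
    typeof provider === 'object' &&
    provider !== null &&
    typeof (provider as StreamingCapable).invokeStream === 'function'
  );
}
```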
package/dist/types.js CHANGED
@@ -1,6 +1,6 @@
  /**
  * Cognitive Runtime - Core Types
- * Version 2.5 - With streaming response and multimodal support
+ * Version 2.2 - With Control/Data plane separation, tier, overflow, extensible enums
  */
  // =============================================================================
  // Utility Types
@@ -90,84 +90,3 @@ export function shouldEscalate(response, confidenceThreshold = 0.7) {
  }
  return false;
  }
- /** Supported image MIME types */
- export const SUPPORTED_IMAGE_TYPES = [
- 'image/jpeg',
- 'image/png',
- 'image/webp',
- 'image/gif'
- ];
- /** Supported audio MIME types */
- export const SUPPORTED_AUDIO_TYPES = [
- 'audio/mpeg',
- 'audio/wav',
- 'audio/ogg',
- 'audio/webm'
- ];
- /** Supported video MIME types */
- export const SUPPORTED_VIDEO_TYPES = [
- 'video/mp4',
- 'video/webm',
- 'video/quicktime'
- ];
- // =============================================================================
- // v2.5 Error Codes
- // =============================================================================
- /** v2.5 Error codes for streaming and multimodal */
- export const ErrorCodesV25 = {
- // Media errors (E1xxx)
- UNSUPPORTED_MEDIA_TYPE: 'E1010',
- MEDIA_TOO_LARGE: 'E1011',
- MEDIA_FETCH_FAILED: 'E1012',
- MEDIA_DECODE_FAILED: 'E1013',
- // Streaming errors (E2xxx)
- STREAM_INTERRUPTED: 'E2010',
- STREAM_TIMEOUT: 'E2011',
- // Capability errors (E4xxx)
- STREAMING_NOT_SUPPORTED: 'E4010',
- MULTIMODAL_NOT_SUPPORTED: 'E4011',
- };
- /** Default runtime capabilities */
- export const DEFAULT_RUNTIME_CAPABILITIES = {
- streaming: true,
- multimodal: {
- input: ['text', 'image'],
- output: ['text']
- },
- max_media_size_mb: 20,
- supported_transports: ['sse', 'ndjson']
- };
- /** Type guard for v2.5 provider */
- export function isProviderV25(provider) {
- return 'invokeStream' in provider || 'supportsStreaming' in provider;
- }
- /** Type guard for v2.5 module */
- export function isModuleV25(module) {
- return 'response' in module || 'modalities' in module;
- }
- /** Check if module supports streaming */
- export function moduleSupportsStreaming(module) {
- if (!isModuleV25(module))
- return false;
- const mode = module.response?.mode;
- return mode === 'streaming' || mode === 'both';
- }
- /** Check if module supports multimodal input */
- export function moduleSupportsMultimodal(module) {
- if (!isModuleV25(module))
- return false;
- const modalities = module.modalities?.input ?? ['text'];
- return modalities.some(m => m !== 'text');
- }
- /** Get supported input modalities for module */
- export function getModuleInputModalities(module) {
- if (!isModuleV25(module))
- return ['text'];
- return module.modalities?.input ?? ['text'];
- }
- /** Get supported output modalities for module */
- export function getModuleOutputModalities(module) {
- if (!isModuleV25(module))
- return ['text'];
- return module.modalities?.output ?? ['text'];
- }
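
The module-level helpers go away with the provider guard. A caller that still stores a `response.mode` in its module config can inline the removed check; this sketch assumes only the 'sync' | 'streaming' | 'both' values visible in the deleted code:

```ts
// Sketch: inlined replacement for the removed moduleSupportsStreaming().
interface ModuleLike {
  response?: { mode?: 'sync' | 'streaming' | 'both' };
}

function supportsStreaming(module: ModuleLike): boolean {
  const mode = module.response?.mode;
  return mode === 'streaming' || mode === 'both';
}
```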
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "cognitive-modules-cli",
- "version": "1.4.0",
+ "version": "2.2.0",
  "description": "Cognitive Modules - Structured AI Task Execution with version management",
  "type": "module",
  "main": "dist/index.js",