cognitive-modules-cli 1.4.0 → 2.2.0

@@ -1,17 +1,8 @@
 /**
  * Base Provider - Abstract class for all LLM providers
- * v2.5: Added streaming and multimodal support
  */
 
-import type {
-  Provider,
-  InvokeParams,
-  InvokeResult,
-  ProviderV25,
-  InvokeParamsV25,
-  StreamingInvokeResult,
-  ModalityType
-} from '../types.js';
+import type { Provider, InvokeParams, InvokeResult } from '../types.js';
 
 export abstract class BaseProvider implements Provider {
   abstract name: string;
@@ -36,79 +27,3 @@ export abstract class BaseProvider implements Provider {
     }
   }
 }
-
-/**
- * Base Provider with v2.5 streaming and multimodal support
- */
-export abstract class BaseProviderV25 extends BaseProvider implements ProviderV25 {
-  /**
-   * Check if this provider supports streaming
-   * Override in subclass to enable streaming
-   */
-  supportsStreaming(): boolean {
-    return false;
-  }
-
-  /**
-   * Check if this provider supports multimodal input/output
-   * Override in subclass to enable multimodal
-   */
-  supportsMultimodal(): { input: ModalityType[]; output: ModalityType[] } {
-    return {
-      input: ['text'],
-      output: ['text']
-    };
-  }
-
-  /**
-   * Invoke with streaming response
-   * Override in subclass to implement streaming
-   */
-  async invokeStream(params: InvokeParamsV25): Promise<StreamingInvokeResult> {
-    // Default: fallback to non-streaming with async generator wrapper
-    const result = await this.invoke(params);
-
-    async function* generateChunks(): AsyncIterable<string> {
-      yield result.content;
-    }
-
-    return {
-      stream: generateChunks(),
-      usage: result.usage
-    };
-  }
-
-  /**
-   * Format media inputs for the specific provider API
-   * Override in subclass for provider-specific formatting
-   */
-  protected formatMediaForProvider(
-    images?: Array<{ type: string; url?: string; data?: string; media_type?: string }>,
-    _audio?: Array<{ type: string; url?: string; data?: string; media_type?: string }>,
-    _video?: Array<{ type: string; url?: string; data?: string; media_type?: string }>
-  ): unknown[] {
-    // Default implementation for image-only providers (like OpenAI Vision)
-    if (!images || images.length === 0) {
-      return [];
-    }
-
-    return images.map(img => {
-      if (img.type === 'url' && img.url) {
-        return {
-          type: 'image_url',
-          image_url: {
-            url: img.url
-          }
-        };
-      } else if (img.type === 'base64' && img.data && img.media_type) {
-        return {
-          type: 'image_url',
-          image_url: {
-            url: `data:${img.media_type};base64,${img.data}`
-          }
-        };
-      }
-      return null;
-    }).filter(Boolean);
-  }
-}
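On the 1.4.0 side of this diff, providers opted in to streaming and multimodal support by extending BaseProviderV25 and overriding its capability probes. A minimal sketch of that pattern — EchoProvider is hypothetical, and it assumes Provider requires name, isConfigured() and invoke(), as the context lines above suggest:

import { BaseProviderV25 } from './base.js';
import type { InvokeParams, InvokeResult } from '../types.js';

class EchoProvider extends BaseProviderV25 {
  name = 'echo';

  isConfigured(): boolean {
    return true;
  }

  // Echoes the last message back; stands in for a real API call.
  async invoke(params: InvokeParams): Promise<InvokeResult> {
    const last = params.messages[params.messages.length - 1];
    return { content: last?.content ?? '' };
  }
}

// Without overrides, the inherited defaults report no streaming and
// text-only modalities, and invokeStream() falls back to invoke()
// wrapped in a single-chunk async generator.
const p = new EchoProvider();
p.supportsStreaming();   // false
p.supportsMultimodal();  // { input: ['text'], output: ['text'] }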
@@ -1,35 +1,17 @@
 /**
  * OpenAI Provider - OpenAI API (and compatible APIs)
- * v2.5: Added streaming and multimodal (vision) support
  */
 
-import { BaseProviderV25 } from './base.js';
-import type {
-  InvokeParams,
-  InvokeResult,
-  InvokeParamsV25,
-  StreamingInvokeResult,
-  ModalityType,
-  MediaInput
-} from '../types.js';
+import { BaseProvider } from './base.js';
+import type { InvokeParams, InvokeResult } from '../types.js';
 
-// Type for OpenAI message content
-type OpenAIContentPart =
-  | { type: 'text'; text: string }
-  | { type: 'image_url'; image_url: { url: string; detail?: 'low' | 'high' | 'auto' } };
-
-type OpenAIMessage = {
-  role: 'system' | 'user' | 'assistant';
-  content: string | OpenAIContentPart[];
-};
-
-export class OpenAIProvider extends BaseProviderV25 {
+export class OpenAIProvider extends BaseProvider {
   name = 'openai';
   private apiKey: string;
   private model: string;
   private baseUrl: string;
 
-  constructor(apiKey?: string, model = 'gpt-4o', baseUrl = 'https://api.openai.com/v1') {
+  constructor(apiKey?: string, model = 'gpt-5.2', baseUrl = 'https://api.openai.com/v1') {
     super();
     this.apiKey = apiKey || process.env.OPENAI_API_KEY || '';
     this.model = model;
@@ -40,27 +22,6 @@ export class OpenAIProvider extends BaseProviderV25 {
     return !!this.apiKey;
   }
 
-  /**
-   * Check if streaming is supported (always true for OpenAI)
-   */
-  supportsStreaming(): boolean {
-    return true;
-  }
-
-  /**
-   * Check multimodal support (vision models)
-   */
-  supportsMultimodal(): { input: ModalityType[]; output: ModalityType[] } {
-    // Vision models support image input
-    const visionModels = ['gpt-4o', 'gpt-4-vision', 'gpt-4-turbo', 'gpt-4o-mini'];
-    const supportsVision = visionModels.some(m => this.model.includes(m));
-
-    return {
-      input: supportsVision ? ['text', 'image'] : ['text'],
-      output: ['text'] // DALL-E would be separate
-    };
-  }
-
   async invoke(params: InvokeParams): Promise<InvokeResult> {
     if (!this.isConfigured()) {
       throw new Error('OpenAI API key not configured. Set OPENAI_API_KEY environment variable.');
@@ -120,187 +81,4 @@ export class OpenAIProvider extends BaseProviderV25 {
       } : undefined,
     };
   }
-
-  /**
-   * Invoke with streaming response
-   */
-  async invokeStream(params: InvokeParamsV25): Promise<StreamingInvokeResult> {
-    if (!this.isConfigured()) {
-      throw new Error('OpenAI API key not configured. Set OPENAI_API_KEY environment variable.');
-    }
-
-    const url = `${this.baseUrl}/chat/completions`;
-
-    // Build messages with multimodal content if present
-    const messages = this.buildMessagesWithMedia(params);
-
-    const body: Record<string, unknown> = {
-      model: this.model,
-      messages,
-      temperature: params.temperature ?? 0.7,
-      max_tokens: params.maxTokens ?? 4096,
-      stream: true,
-    };
-
-    // Add JSON mode if schema provided
-    if (params.jsonSchema) {
-      body.response_format = { type: 'json_object' };
-    }
-
-    const response = await fetch(url, {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-        'Authorization': `Bearer ${this.apiKey}`,
-      },
-      body: JSON.stringify(body),
-    });
-
-    if (!response.ok) {
-      const error = await response.text();
-      throw new Error(`OpenAI API error: ${response.status} - ${error}`);
-    }
-
-    const bodyReader = response.body?.getReader();
-    if (!bodyReader) {
-      throw new Error('No response body');
-    }
-
-    const decoder = new TextDecoder();
-    let usage: { promptTokens: number; completionTokens: number; totalTokens: number } | undefined;
-
-    // Capture reader reference for closure
-    const reader = bodyReader;
-
-    // Create async generator for streaming
-    async function* streamGenerator(): AsyncIterable<string> {
-      let buffer = '';
-
-      while (true) {
-        const { done, value } = await reader.read();
-
-        if (done) break;
-
-        buffer += decoder.decode(value, { stream: true });
-
-        // Parse SSE events
-        const lines = buffer.split('\n');
-        buffer = lines.pop() || '';
-
-        for (const line of lines) {
-          if (line.startsWith('data: ')) {
-            const data = line.slice(6);
-
-            if (data === '[DONE]') {
-              return;
-            }
-
-            try {
-              const parsed = JSON.parse(data) as {
-                choices?: Array<{ delta?: { content?: string } }>;
-                usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number };
-              };
-
-              const content = parsed.choices?.[0]?.delta?.content;
-              if (content) {
-                yield content;
-              }
-
-              // Capture usage if available
-              if (parsed.usage) {
-                usage = {
-                  promptTokens: parsed.usage.prompt_tokens || 0,
-                  completionTokens: parsed.usage.completion_tokens || 0,
-                  totalTokens: parsed.usage.total_tokens || 0,
-                };
-              }
-            } catch {
-              // Skip malformed JSON
-            }
-          }
-        }
-      }
-    }
-
-    return {
-      stream: streamGenerator(),
-      usage
-    };
-  }
-
-  /**
-   * Build messages with multimodal content (images)
-   */
-  private buildMessagesWithMedia(params: InvokeParamsV25): OpenAIMessage[] {
-    const hasImages = params.images && params.images.length > 0;
-
-    if (!hasImages) {
-      return params.messages;
-    }
-
-    // Find the last user message and add images to it
-    const messages: OpenAIMessage[] = [];
-    const lastUserIdx = params.messages.findLastIndex(m => m.role === 'user');
-
-    for (let i = 0; i < params.messages.length; i++) {
-      const msg = params.messages[i];
-
-      if (i === lastUserIdx && hasImages) {
-        // Convert to multimodal content
-        const content: OpenAIContentPart[] = [
-          { type: 'text', text: msg.content }
-        ];
-
-        // Add images
-        for (const img of params.images!) {
-          const imageUrl = this.mediaInputToUrl(img);
-          if (imageUrl) {
-            content.push({
-              type: 'image_url',
-              image_url: { url: imageUrl, detail: 'auto' }
-            });
-          }
-        }
-
-        messages.push({ role: msg.role, content });
-      } else {
-        messages.push({ role: msg.role, content: msg.content });
-      }
-    }
-
-    // Add JSON schema instruction if needed
-    if (params.jsonSchema && lastUserIdx >= 0) {
-      const lastMsg = messages[lastUserIdx];
-      if (typeof lastMsg.content === 'string') {
-        lastMsg.content = lastMsg.content + this.buildJsonPrompt(params.jsonSchema);
-      } else {
-        // Content is array, append to text part
-        const textPart = lastMsg.content.find(p => p.type === 'text');
-        if (textPart && textPart.type === 'text') {
-          textPart.text = textPart.text + this.buildJsonPrompt(params.jsonSchema);
-        }
-      }
-    }
-
-    return messages;
-  }
-
-  /**
-   * Convert MediaInput to URL for OpenAI API
-   */
-  private mediaInputToUrl(media: MediaInput): string | null {
-    switch (media.type) {
-      case 'url':
-        return media.url;
-      case 'base64':
-        return `data:${media.media_type};base64,${media.data}`;
-      case 'file':
-        // File paths would need to be loaded first
-        // This should be handled by the runner before calling the provider
-        console.warn('[cognitive] File media input not pre-loaded, skipping');
-        return null;
-      default:
-        return null;
-    }
-  }
 }
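For callers, the removed invokeStream() surfaced the SSE deltas as an AsyncIterable<string>. A consumption sketch against the deleted v2.5 API — the model choice and prompt are illustrative:

const provider = new OpenAIProvider(process.env.OPENAI_API_KEY, 'gpt-4o');

const { stream, usage } = await provider.invokeStream({
  messages: [{ role: 'user', content: 'Describe this image' }],
  images: [{ type: 'url', url: 'https://example.com/photo.jpg' }],
});

let text = '';
for await (const delta of stream) {
  text += delta;              // each chunk is a plain content delta
  process.stdout.write(delta);
}

// Note: the removed implementation returned `usage` by value before the
// first chunk was read, so it was effectively always undefined here.
console.log('\nusage:', usage);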
package/src/types.ts CHANGED
@@ -1,6 +1,6 @@
 /**
  * Cognitive Runtime - Core Types
- * Version 2.5 - With streaming response and multimodal support
+ * Version 2.2 - With Control/Data plane separation, tier, overflow, extensible enums
  */
 
 // =============================================================================
@@ -493,321 +493,3 @@ export function shouldEscalate<T>(
 
   return false;
 }
-
-// =============================================================================
-// v2.5 Streaming Types
-// =============================================================================
-
-/** Response mode configuration */
-export type ResponseMode = 'sync' | 'streaming' | 'both';
-
-/** Chunk type for streaming */
-export type ChunkType = 'delta' | 'snapshot';
-
-/** Response configuration in module.yaml */
-export interface ResponseConfig {
-  mode: ResponseMode;
-  chunk_type?: ChunkType;
-  buffer_size?: number;
-  heartbeat_interval_ms?: number;
-  max_duration_ms?: number;
-}
-
-/** Meta chunk - initial streaming response */
-export interface MetaChunk {
-  ok: true;
-  streaming: true;
-  session_id: string;
-  meta: Partial<EnvelopeMeta>;
-}
-
-/** Delta chunk - incremental content */
-export interface DeltaChunk {
-  chunk: {
-    seq: number;
-    type: 'delta';
-    field?: string;
-    delta: string;
-  };
-}
-
-/** Snapshot chunk - full state replacement */
-export interface SnapshotChunk {
-  chunk: {
-    seq: number;
-    type: 'snapshot';
-    field?: string;
-    data: unknown;
-  };
-}
-
-/** Progress chunk - progress update */
-export interface ProgressChunk {
-  progress: {
-    percent: number;
-    stage?: string;
-    message?: string;
-  };
-}
-
-/** Final chunk - completion signal */
-export interface FinalChunk {
-  final: true;
-  meta: EnvelopeMeta;
-  data: ModuleResultData;
-  usage?: {
-    input_tokens: number;
-    output_tokens: number;
-    total_tokens: number;
-  };
-}
-
-/** Error chunk during streaming */
-export interface ErrorChunk {
-  ok: false;
-  streaming: true;
-  session_id?: string;
-  error: {
-    code: string;
-    message: string;
-    recoverable?: boolean;
-  };
-  partial_data?: unknown;
-}
-
-/** Union of all streaming chunk types */
-export type StreamingChunk =
-  | MetaChunk
-  | DeltaChunk
-  | SnapshotChunk
-  | ProgressChunk
-  | FinalChunk
-  | ErrorChunk;
-
-/** Streaming session state */
-export interface StreamingSession {
-  session_id: string;
-  module_name: string;
-  started_at: number;
-  chunks_sent: number;
-  accumulated_data: Record<string, unknown>;
-  accumulated_text: Record<string, string>;
-}
-
-// =============================================================================
-// v2.5 Multimodal Types
-// =============================================================================
-
-/** Supported modality types */
-export type ModalityType = 'text' | 'image' | 'audio' | 'video' | 'document';
-
-/** Modalities configuration in module.yaml */
-export interface ModalitiesConfig {
-  input: ModalityType[];
-  output: ModalityType[];
-  constraints?: MediaConstraints;
-}
-
-/** Media size/duration constraints */
-export interface MediaConstraints {
-  max_image_size_mb?: number;
-  max_audio_size_mb?: number;
-  max_video_size_mb?: number;
-  max_audio_duration_s?: number;
-  max_video_duration_s?: number;
-  allowed_image_types?: string[];
-  allowed_audio_types?: string[];
-  allowed_video_types?: string[];
-}
-
-/** Media input - URL reference */
-export interface UrlMediaInput {
-  type: 'url';
-  url: string;
-  media_type?: string;
-}
-
-/** Media input - Base64 inline */
-export interface Base64MediaInput {
-  type: 'base64';
-  media_type: string;
-  data: string;
-}
-
-/** Media input - File path */
-export interface FileMediaInput {
-  type: 'file';
-  path: string;
-}
-
-/** Union of media input types */
-export type MediaInput = UrlMediaInput | Base64MediaInput | FileMediaInput;
-
-/** Media output with metadata */
-export interface MediaOutput {
-  type: 'url' | 'base64' | 'file';
-  media_type: string;
-  url?: string;
-  data?: string;
-  path?: string;
-  width?: number;
-  height?: number;
-  duration_ms?: number;
-  expires_at?: string;
-  generation_params?: Record<string, unknown>;
-}
-
-/** Supported image MIME types */
-export const SUPPORTED_IMAGE_TYPES = [
-  'image/jpeg',
-  'image/png',
-  'image/webp',
-  'image/gif'
-] as const;
-
-/** Supported audio MIME types */
-export const SUPPORTED_AUDIO_TYPES = [
-  'audio/mpeg',
-  'audio/wav',
-  'audio/ogg',
-  'audio/webm'
-] as const;
-
-/** Supported video MIME types */
-export const SUPPORTED_VIDEO_TYPES = [
-  'video/mp4',
-  'video/webm',
-  'video/quicktime'
-] as const;
-
-// =============================================================================
-// v2.5 Error Codes
-// =============================================================================
-
-/** v2.5 Error codes for streaming and multimodal */
-export const ErrorCodesV25 = {
-  // Media errors (E1xxx)
-  UNSUPPORTED_MEDIA_TYPE: 'E1010',
-  MEDIA_TOO_LARGE: 'E1011',
-  MEDIA_FETCH_FAILED: 'E1012',
-  MEDIA_DECODE_FAILED: 'E1013',
-
-  // Streaming errors (E2xxx)
-  STREAM_INTERRUPTED: 'E2010',
-  STREAM_TIMEOUT: 'E2011',
-
-  // Capability errors (E4xxx)
-  STREAMING_NOT_SUPPORTED: 'E4010',
-  MULTIMODAL_NOT_SUPPORTED: 'E4011',
-} as const;
-
-export type ErrorCodeV25 = typeof ErrorCodesV25[keyof typeof ErrorCodesV25];
-
-// =============================================================================
-// v2.5 Runtime Capabilities
-// =============================================================================
-
-/** Runtime capability declaration */
-export interface RuntimeCapabilities {
-  streaming: boolean;
-  multimodal: {
-    input: ModalityType[];
-    output: ModalityType[];
-  };
-  max_media_size_mb: number;
-  supported_transports: ('sse' | 'websocket' | 'ndjson')[];
-}
-
-/** Default runtime capabilities */
-export const DEFAULT_RUNTIME_CAPABILITIES: RuntimeCapabilities = {
-  streaming: true,
-  multimodal: {
-    input: ['text', 'image'],
-    output: ['text']
-  },
-  max_media_size_mb: 20,
-  supported_transports: ['sse', 'ndjson']
-};
-
-// =============================================================================
-// v2.5 Extended Provider Interface
-// =============================================================================
-
-/** Extended invoke params with streaming support */
-export interface InvokeParamsV25 extends InvokeParams {
-  stream?: boolean;
-  images?: MediaInput[];
-  audio?: MediaInput[];
-  video?: MediaInput[];
-}
-
-/** Streaming invoke result */
-export interface StreamingInvokeResult {
-  stream: AsyncIterable<string>;
-  usage?: {
-    promptTokens: number;
-    completionTokens: number;
-    totalTokens: number;
-  };
-}
-
-/** Extended provider interface for v2.5 */
-export interface ProviderV25 extends Provider {
-  /** Check if provider supports streaming */
-  supportsStreaming?(): boolean;
-
-  /** Check if provider supports multimodal input */
-  supportsMultimodal?(): { input: ModalityType[]; output: ModalityType[] };
-
-  /** Invoke with streaming */
-  invokeStream?(params: InvokeParamsV25): Promise<StreamingInvokeResult>;
-}
-
-/** Type guard for v2.5 provider */
-export function isProviderV25(provider: Provider): provider is ProviderV25 {
-  return 'invokeStream' in provider || 'supportsStreaming' in provider;
-}
-
-// =============================================================================
-// v2.5 Module Configuration Extensions
-// =============================================================================
-
-/** Extended module interface for v2.5 */
-export interface CognitiveModuleV25 extends CognitiveModule {
-  /** v2.5: Response configuration */
-  response?: ResponseConfig;
-
-  /** v2.5: Modalities configuration */
-  modalities?: ModalitiesConfig;
-}
-
-/** Type guard for v2.5 module */
-export function isModuleV25(module: CognitiveModule): module is CognitiveModuleV25 {
-  return 'response' in module || 'modalities' in module;
-}
-
-/** Check if module supports streaming */
-export function moduleSupportsStreaming(module: CognitiveModule): boolean {
-  if (!isModuleV25(module)) return false;
-  const mode = module.response?.mode;
-  return mode === 'streaming' || mode === 'both';
-}
-
-/** Check if module supports multimodal input */
-export function moduleSupportsMultimodal(module: CognitiveModule): boolean {
-  if (!isModuleV25(module)) return false;
-  const modalities = module.modalities?.input ?? ['text'];
-  return modalities.some(m => m !== 'text');
-}
-
-/** Get supported input modalities for module */
-export function getModuleInputModalities(module: CognitiveModule): ModalityType[] {
-  if (!isModuleV25(module)) return ['text'];
-  return module.modalities?.input ?? ['text'];
-}
-
-/** Get supported output modalities for module */
-export function getModuleOutputModalities(module: CognitiveModule): ModalityType[] {
-  if (!isModuleV25(module)) return ['text'];
-  return module.modalities?.output ?? ['text'];
-}
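The deleted StreamingChunk union carried no shared tag field; each variant was meant to be discriminated structurally, by which property is present. A minimal dispatcher sketch using only the types removed above:

function handleChunk(chunk: StreamingChunk): void {
  if ('final' in chunk) {
    // FinalChunk: terminal frame carrying full meta, data and usage
    console.log('final:', chunk.data);
  } else if ('error' in chunk) {
    // ErrorChunk: may include partial_data for recovery
    console.error(chunk.error.code, chunk.error.message);
  } else if ('progress' in chunk) {
    console.log(`progress ${chunk.progress.percent}%`, chunk.progress.stage ?? '');
  } else if ('chunk' in chunk) {
    // DeltaChunk vs SnapshotChunk, narrowed by the nested type field
    if (chunk.chunk.type === 'delta') {
      process.stdout.write(chunk.chunk.delta);
    } else {
      console.log('snapshot:', chunk.chunk.data);
    }
  } else {
    // MetaChunk: first frame, opens the streaming session
    console.log('session opened:', chunk.session_id);
  }
}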