cognitive-runtime 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. package/README.md +124 -0
  2. package/dist/cli.d.ts +10 -0
  3. package/dist/cli.js +200 -0
  4. package/dist/commands/index.d.ts +7 -0
  5. package/dist/commands/index.js +7 -0
  6. package/dist/commands/init.d.ts +5 -0
  7. package/dist/commands/init.js +78 -0
  8. package/dist/commands/list.d.ts +5 -0
  9. package/dist/commands/list.js +28 -0
  10. package/dist/commands/pipe.d.ts +9 -0
  11. package/dist/commands/pipe.js +57 -0
  12. package/dist/commands/run.d.ts +12 -0
  13. package/dist/commands/run.js +48 -0
  14. package/dist/index.d.ts +9 -0
  15. package/dist/index.js +11 -0
  16. package/dist/modules/index.d.ts +5 -0
  17. package/dist/modules/index.js +5 -0
  18. package/dist/modules/loader.d.ts +8 -0
  19. package/dist/modules/loader.js +91 -0
  20. package/dist/modules/runner.d.ts +12 -0
  21. package/dist/modules/runner.js +93 -0
  22. package/dist/providers/anthropic.d.ts +14 -0
  23. package/dist/providers/anthropic.js +70 -0
  24. package/dist/providers/base.d.ts +11 -0
  25. package/dist/providers/base.js +19 -0
  26. package/dist/providers/gemini.d.ts +19 -0
  27. package/dist/providers/gemini.js +94 -0
  28. package/dist/providers/index.d.ts +13 -0
  29. package/dist/providers/index.js +38 -0
  30. package/dist/providers/openai.d.ts +14 -0
  31. package/dist/providers/openai.js +67 -0
  32. package/dist/types.d.ts +53 -0
  33. package/dist/types.js +4 -0
  34. package/package.json +33 -0
  35. package/src/cli.ts +223 -0
  36. package/src/commands/index.ts +8 -0
  37. package/src/commands/init.ts +94 -0
  38. package/src/commands/list.ts +33 -0
  39. package/src/commands/pipe.ts +74 -0
  40. package/src/commands/run.ts +65 -0
  41. package/src/index.ts +39 -0
  42. package/src/modules/index.ts +6 -0
  43. package/src/modules/loader.ts +106 -0
  44. package/src/modules/runner.ts +121 -0
  45. package/src/providers/anthropic.ts +89 -0
  46. package/src/providers/base.ts +29 -0
  47. package/src/providers/gemini.ts +117 -0
  48. package/src/providers/index.ts +43 -0
  49. package/src/providers/openai.ts +84 -0
  50. package/src/types.ts +64 -0
  51. package/tsconfig.json +17 -0
@@ -0,0 +1,121 @@
1
+ /**
2
+ * Module Runner - Execute Cognitive Modules
3
+ */
4
+
5
+ import type { Provider, CognitiveModule, ModuleResult, Message } from '../types.js';
6
+
7
/** Options controlling a single module execution. */
export interface RunOptions {
  /** Raw CLI-style argument string, exposed to the prompt as $ARGUMENTS / $N. */
  args?: string;
  /** Structured input fields, exposed to the prompt as ${key}. */
  input?: Record<string, unknown>;
  /** NOTE(review): accepted but not currently read by runModule — validation not yet enforced. */
  validateInput?: boolean;
  /** NOTE(review): accepted but not currently read by runModule — validation not yet enforced. */
  validateOutput?: boolean;
  /** When true, echo the built prompt and the raw response to stderr. */
  verbose?: boolean;
}
14
+
15
+ export async function runModule(
16
+ module: CognitiveModule,
17
+ provider: Provider,
18
+ options: RunOptions = {}
19
+ ): Promise<ModuleResult> {
20
+ const { args, input, verbose = false } = options;
21
+
22
+ // Build input data
23
+ let inputData: Record<string, unknown> = input || {};
24
+ if (args) {
25
+ inputData = { $ARGUMENTS: args, query: args };
26
+ }
27
+
28
+ // Build prompt
29
+ const prompt = buildPrompt(module, inputData);
30
+
31
+ if (verbose) {
32
+ console.error('--- Prompt ---');
33
+ console.error(prompt);
34
+ console.error('--- End Prompt ---');
35
+ }
36
+
37
+ // Build messages
38
+ const messages: Message[] = [
39
+ {
40
+ role: 'system',
41
+ content: `You are executing the "${module.name}" Cognitive Module.
42
+
43
+ RESPONSIBILITY: ${module.responsibility}
44
+
45
+ YOU MUST NOT:
46
+ ${module.excludes.map(e => `- ${e}`).join('\n')}
47
+
48
+ REQUIRED OUTPUT FORMAT:
49
+ You MUST respond with a valid JSON object. Include these fields:
50
+ - All fields required by the output schema
51
+ - "confidence": a number between 0 and 1
52
+ - "rationale": a string explaining your reasoning
53
+
54
+ Respond with ONLY valid JSON, no markdown code blocks.`,
55
+ },
56
+ {
57
+ role: 'user',
58
+ content: prompt,
59
+ },
60
+ ];
61
+
62
+ // Invoke provider
63
+ const result = await provider.invoke({
64
+ messages,
65
+ jsonSchema: module.outputSchema,
66
+ temperature: 0.3,
67
+ });
68
+
69
+ if (verbose) {
70
+ console.error('--- Response ---');
71
+ console.error(result.content);
72
+ console.error('--- End Response ---');
73
+ }
74
+
75
+ // Parse response
76
+ let output: unknown;
77
+ try {
78
+ // Try to extract JSON from markdown code blocks
79
+ const jsonMatch = result.content.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
80
+ const jsonStr = jsonMatch ? jsonMatch[1] : result.content;
81
+ output = JSON.parse(jsonStr.trim());
82
+ } catch {
83
+ throw new Error(`Failed to parse JSON response: ${result.content.substring(0, 500)}`);
84
+ }
85
+
86
+ // Extract confidence and rationale
87
+ const outputObj = output as Record<string, unknown>;
88
+ const confidence = typeof outputObj.confidence === 'number' ? outputObj.confidence : 0.5;
89
+ const rationale = typeof outputObj.rationale === 'string' ? outputObj.rationale : '';
90
+
91
+ return {
92
+ output,
93
+ confidence,
94
+ rationale,
95
+ raw: result.content,
96
+ };
97
+ }
98
+
99
+ function buildPrompt(module: CognitiveModule, inputData: Record<string, unknown>): string {
100
+ let prompt = module.prompt;
101
+
102
+ // Substitute $ARGUMENTS
103
+ const argsValue = String(inputData.$ARGUMENTS || inputData.query || '');
104
+ prompt = prompt.replace(/\$ARGUMENTS/g, argsValue);
105
+
106
+ // Substitute $N placeholders
107
+ const argsList = argsValue.split(/\s+/);
108
+ argsList.forEach((arg, i) => {
109
+ prompt = prompt.replace(new RegExp(`\\$${i}`, 'g'), arg);
110
+ prompt = prompt.replace(new RegExp(`\\$ARGUMENTS\\[${i}\\]`, 'g'), arg);
111
+ });
112
+
113
+ // Substitute other input fields
114
+ for (const [key, value] of Object.entries(inputData)) {
115
+ if (key !== '$ARGUMENTS' && key !== 'query') {
116
+ prompt = prompt.replace(new RegExp(`\\$\\{${key}\\}`, 'g'), String(value));
117
+ }
118
+ }
119
+
120
+ return prompt;
121
+ }
@@ -0,0 +1,89 @@
1
+ /**
2
+ * Anthropic Provider - Claude API
3
+ */
4
+
5
+ import { BaseProvider } from './base.js';
6
+ import type { InvokeParams, InvokeResult } from '../types.js';
7
+
8
+ export class AnthropicProvider extends BaseProvider {
9
+ name = 'anthropic';
10
+ private apiKey: string;
11
+ private model: string;
12
+ private baseUrl = 'https://api.anthropic.com/v1';
13
+
14
+ constructor(apiKey?: string, model = 'claude-sonnet-4-20250514') {
15
+ super();
16
+ this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY || '';
17
+ this.model = model;
18
+ }
19
+
20
+ isConfigured(): boolean {
21
+ return !!this.apiKey;
22
+ }
23
+
24
+ async invoke(params: InvokeParams): Promise<InvokeResult> {
25
+ if (!this.isConfigured()) {
26
+ throw new Error('Anthropic API key not configured. Set ANTHROPIC_API_KEY environment variable.');
27
+ }
28
+
29
+ const url = `${this.baseUrl}/messages`;
30
+
31
+ // Extract system message
32
+ const systemMessage = params.messages.find(m => m.role === 'system');
33
+ const otherMessages = params.messages.filter(m => m.role !== 'system');
34
+
35
+ // Add JSON schema instruction if provided
36
+ let messages = otherMessages;
37
+ if (params.jsonSchema) {
38
+ const lastUserIdx = messages.findLastIndex(m => m.role === 'user');
39
+ if (lastUserIdx >= 0) {
40
+ messages = [...messages];
41
+ messages[lastUserIdx] = {
42
+ ...messages[lastUserIdx],
43
+ content: messages[lastUserIdx].content + this.buildJsonPrompt(params.jsonSchema),
44
+ };
45
+ }
46
+ }
47
+
48
+ const body: Record<string, unknown> = {
49
+ model: this.model,
50
+ messages: messages.map(m => ({ role: m.role, content: m.content })),
51
+ max_tokens: params.maxTokens ?? 4096,
52
+ };
53
+
54
+ if (systemMessage) {
55
+ body.system = systemMessage.content;
56
+ }
57
+
58
+ const response = await fetch(url, {
59
+ method: 'POST',
60
+ headers: {
61
+ 'Content-Type': 'application/json',
62
+ 'x-api-key': this.apiKey,
63
+ 'anthropic-version': '2023-06-01',
64
+ },
65
+ body: JSON.stringify(body),
66
+ });
67
+
68
+ if (!response.ok) {
69
+ const error = await response.text();
70
+ throw new Error(`Anthropic API error: ${response.status} - ${error}`);
71
+ }
72
+
73
+ const data = await response.json() as {
74
+ content?: Array<{ text?: string }>;
75
+ usage?: { input_tokens?: number; output_tokens?: number };
76
+ };
77
+
78
+ const content = data.content?.[0]?.text || '';
79
+
80
+ return {
81
+ content,
82
+ usage: data.usage ? {
83
+ promptTokens: data.usage.input_tokens || 0,
84
+ completionTokens: data.usage.output_tokens || 0,
85
+ totalTokens: (data.usage.input_tokens || 0) + (data.usage.output_tokens || 0),
86
+ } : undefined,
87
+ };
88
+ }
89
+ }
@@ -0,0 +1,29 @@
1
+ /**
2
+ * Base Provider - Abstract class for all LLM providers
3
+ */
4
+
5
+ import type { Provider, InvokeParams, InvokeResult } from '../types.js';
6
+
7
+ export abstract class BaseProvider implements Provider {
8
+ abstract name: string;
9
+
10
+ abstract invoke(params: InvokeParams): Promise<InvokeResult>;
11
+
12
+ abstract isConfigured(): boolean;
13
+
14
+ protected buildJsonPrompt(schema: object): string {
15
+ return `\n\nYou MUST respond with valid JSON matching this schema:\n${JSON.stringify(schema, null, 2)}\n\nRespond with ONLY the JSON, no markdown code blocks.`;
16
+ }
17
+
18
+ protected parseJsonResponse(content: string): unknown {
19
+ // Try to extract JSON from markdown code blocks
20
+ const jsonMatch = content.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
21
+ const jsonStr = jsonMatch ? jsonMatch[1] : content;
22
+
23
+ try {
24
+ return JSON.parse(jsonStr.trim());
25
+ } catch {
26
+ throw new Error(`Failed to parse JSON response: ${content.substring(0, 200)}`);
27
+ }
28
+ }
29
+ }
@@ -0,0 +1,117 @@
1
+ /**
2
+ * Gemini Provider - Google Gemini API
3
+ */
4
+
5
+ import { BaseProvider } from './base.js';
6
+ import type { InvokeParams, InvokeResult } from '../types.js';
7
+
8
+ export class GeminiProvider extends BaseProvider {
9
+ name = 'gemini';
10
+ private apiKey: string;
11
+ private model: string;
12
+ private baseUrl = 'https://generativelanguage.googleapis.com/v1beta';
13
+
14
+ constructor(apiKey?: string, model = 'gemini-2.0-flash') {
15
+ super();
16
+ this.apiKey = apiKey || process.env.GEMINI_API_KEY || '';
17
+ this.model = model;
18
+ }
19
+
20
+ isConfigured(): boolean {
21
+ return !!this.apiKey;
22
+ }
23
+
24
+ /**
25
+ * Clean JSON Schema for Gemini API compatibility
26
+ * Removes unsupported fields like additionalProperties
27
+ */
28
+ private cleanSchemaForGemini(schema: object): object {
29
+ const unsupportedFields = ['additionalProperties', '$schema', 'default', 'examples'];
30
+
31
+ const clean = (obj: unknown): unknown => {
32
+ if (Array.isArray(obj)) {
33
+ return obj.map(clean);
34
+ }
35
+ if (obj && typeof obj === 'object') {
36
+ const result: Record<string, unknown> = {};
37
+ for (const [key, value] of Object.entries(obj)) {
38
+ if (!unsupportedFields.includes(key)) {
39
+ result[key] = clean(value);
40
+ }
41
+ }
42
+ return result;
43
+ }
44
+ return obj;
45
+ };
46
+
47
+ return clean(schema) as object;
48
+ }
49
+
50
+ async invoke(params: InvokeParams): Promise<InvokeResult> {
51
+ if (!this.isConfigured()) {
52
+ throw new Error('Gemini API key not configured. Set GEMINI_API_KEY environment variable.');
53
+ }
54
+
55
+ const url = `${this.baseUrl}/models/${this.model}:generateContent?key=${this.apiKey}`;
56
+
57
+ // Convert messages to Gemini format
58
+ const contents = params.messages
59
+ .filter(m => m.role !== 'system')
60
+ .map(m => ({
61
+ role: m.role === 'assistant' ? 'model' : 'user',
62
+ parts: [{ text: m.content }]
63
+ }));
64
+
65
+ // Add system instruction if present
66
+ const systemMessage = params.messages.find(m => m.role === 'system');
67
+
68
+ const body: Record<string, unknown> = {
69
+ contents,
70
+ generationConfig: {
71
+ temperature: params.temperature ?? 0.7,
72
+ maxOutputTokens: params.maxTokens ?? 8192,
73
+ }
74
+ };
75
+
76
+ if (systemMessage) {
77
+ body.systemInstruction = { parts: [{ text: systemMessage.content }] };
78
+ }
79
+
80
+ // Add JSON schema constraint if provided
81
+ if (params.jsonSchema) {
82
+ const cleanedSchema = this.cleanSchemaForGemini(params.jsonSchema);
83
+ body.generationConfig = {
84
+ ...body.generationConfig as object,
85
+ responseMimeType: 'application/json',
86
+ responseSchema: cleanedSchema,
87
+ };
88
+ }
89
+
90
+ const response = await fetch(url, {
91
+ method: 'POST',
92
+ headers: { 'Content-Type': 'application/json' },
93
+ body: JSON.stringify(body),
94
+ });
95
+
96
+ if (!response.ok) {
97
+ const error = await response.text();
98
+ throw new Error(`Gemini API error: ${response.status} - ${error}`);
99
+ }
100
+
101
+ const data = await response.json() as {
102
+ candidates?: Array<{ content?: { parts?: Array<{ text?: string }> } }>;
103
+ usageMetadata?: { promptTokenCount?: number; candidatesTokenCount?: number; totalTokenCount?: number };
104
+ };
105
+
106
+ const content = data.candidates?.[0]?.content?.parts?.[0]?.text || '';
107
+
108
+ return {
109
+ content,
110
+ usage: data.usageMetadata ? {
111
+ promptTokens: data.usageMetadata.promptTokenCount || 0,
112
+ completionTokens: data.usageMetadata.candidatesTokenCount || 0,
113
+ totalTokens: data.usageMetadata.totalTokenCount || 0,
114
+ } : undefined,
115
+ };
116
+ }
117
+ }
@@ -0,0 +1,43 @@
1
+ /**
2
+ * Provider Registry
3
+ */
4
+
5
+ import type { Provider } from '../types.js';
6
+ import { GeminiProvider } from './gemini.js';
7
+ import { OpenAIProvider } from './openai.js';
8
+ import { AnthropicProvider } from './anthropic.js';
9
+
10
+ export { BaseProvider } from './base.js';
11
+ export { GeminiProvider } from './gemini.js';
12
+ export { OpenAIProvider } from './openai.js';
13
+ export { AnthropicProvider } from './anthropic.js';
14
+
15
+ const providers: Record<string, () => Provider> = {
16
+ gemini: () => new GeminiProvider(),
17
+ openai: () => new OpenAIProvider(),
18
+ anthropic: () => new AnthropicProvider(),
19
+ };
20
+
21
+ export function getProvider(name?: string): Provider {
22
+ // Auto-detect if not specified
23
+ if (!name) {
24
+ if (process.env.GEMINI_API_KEY) return new GeminiProvider();
25
+ if (process.env.OPENAI_API_KEY) return new OpenAIProvider();
26
+ if (process.env.ANTHROPIC_API_KEY) return new AnthropicProvider();
27
+ throw new Error('No LLM provider configured. Set GEMINI_API_KEY, OPENAI_API_KEY, or ANTHROPIC_API_KEY.');
28
+ }
29
+
30
+ const factory = providers[name.toLowerCase()];
31
+ if (!factory) {
32
+ throw new Error(`Unknown provider: ${name}. Available: ${Object.keys(providers).join(', ')}`);
33
+ }
34
+
35
+ return factory();
36
+ }
37
+
38
+ export function listProviders(): Array<{ name: string; configured: boolean }> {
39
+ return Object.entries(providers).map(([name, factory]) => ({
40
+ name,
41
+ configured: factory().isConfigured(),
42
+ }));
43
+ }
@@ -0,0 +1,84 @@
1
+ /**
2
+ * OpenAI Provider - OpenAI API (and compatible APIs)
3
+ */
4
+
5
+ import { BaseProvider } from './base.js';
6
+ import type { InvokeParams, InvokeResult } from '../types.js';
7
+
8
+ export class OpenAIProvider extends BaseProvider {
9
+ name = 'openai';
10
+ private apiKey: string;
11
+ private model: string;
12
+ private baseUrl: string;
13
+
14
+ constructor(apiKey?: string, model = 'gpt-4o', baseUrl = 'https://api.openai.com/v1') {
15
+ super();
16
+ this.apiKey = apiKey || process.env.OPENAI_API_KEY || '';
17
+ this.model = model;
18
+ this.baseUrl = baseUrl;
19
+ }
20
+
21
+ isConfigured(): boolean {
22
+ return !!this.apiKey;
23
+ }
24
+
25
+ async invoke(params: InvokeParams): Promise<InvokeResult> {
26
+ if (!this.isConfigured()) {
27
+ throw new Error('OpenAI API key not configured. Set OPENAI_API_KEY environment variable.');
28
+ }
29
+
30
+ const url = `${this.baseUrl}/chat/completions`;
31
+
32
+ const body: Record<string, unknown> = {
33
+ model: this.model,
34
+ messages: params.messages,
35
+ temperature: params.temperature ?? 0.7,
36
+ max_tokens: params.maxTokens ?? 4096,
37
+ };
38
+
39
+ // Add JSON mode if schema provided
40
+ if (params.jsonSchema) {
41
+ body.response_format = { type: 'json_object' };
42
+ // Append schema instruction to last user message
43
+ const lastUserIdx = params.messages.findLastIndex(m => m.role === 'user');
44
+ if (lastUserIdx >= 0) {
45
+ const messages = [...params.messages];
46
+ messages[lastUserIdx] = {
47
+ ...messages[lastUserIdx],
48
+ content: messages[lastUserIdx].content + this.buildJsonPrompt(params.jsonSchema),
49
+ };
50
+ body.messages = messages;
51
+ }
52
+ }
53
+
54
+ const response = await fetch(url, {
55
+ method: 'POST',
56
+ headers: {
57
+ 'Content-Type': 'application/json',
58
+ 'Authorization': `Bearer ${this.apiKey}`,
59
+ },
60
+ body: JSON.stringify(body),
61
+ });
62
+
63
+ if (!response.ok) {
64
+ const error = await response.text();
65
+ throw new Error(`OpenAI API error: ${response.status} - ${error}`);
66
+ }
67
+
68
+ const data = await response.json() as {
69
+ choices?: Array<{ message?: { content?: string } }>;
70
+ usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number };
71
+ };
72
+
73
+ const content = data.choices?.[0]?.message?.content || '';
74
+
75
+ return {
76
+ content,
77
+ usage: data.usage ? {
78
+ promptTokens: data.usage.prompt_tokens || 0,
79
+ completionTokens: data.usage.completion_tokens || 0,
80
+ totalTokens: data.usage.total_tokens || 0,
81
+ } : undefined,
82
+ };
83
+ }
84
+ }
package/src/types.ts ADDED
@@ -0,0 +1,64 @@
1
+ /**
2
+ * Cognitive Runtime - Core Types
3
+ */
4
+
5
// Provider interface - all LLM providers implement this
/**
 * A pluggable LLM backend. Implementations wrap a specific vendor API
 * behind a uniform chat interface.
 */
export interface Provider {
  /** Short identifier, e.g. 'openai'. */
  name: string;
  /** Send a chat request and return the model's reply. */
  invoke(params: InvokeParams): Promise<InvokeResult>;
  /** True when the provider has the credentials it needs (e.g. an API key). */
  isConfigured(): boolean;
}

/** Parameters for a single chat request. */
export interface InvokeParams {
  /** Conversation so far, in order. */
  messages: Message[];
  /** Optional JSON Schema the reply must conform to. */
  jsonSchema?: object;
  /** Sampling temperature; provider-specific default when omitted. */
  temperature?: number;
  /** Upper bound on generated tokens; provider-specific default when omitted. */
  maxTokens?: number;
}

/** One chat message with an OpenAI-style role. */
export interface Message {
  role: 'system' | 'user' | 'assistant';
  content: string;
}

/** The provider's reply, plus token accounting when the API reports it. */
export interface InvokeResult {
  /** Raw text of the model's reply. */
  content: string;
  /** Token usage; undefined when the API response omits it. */
  usage?: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
}

// Module types
/** A loaded cognitive module definition. */
export interface CognitiveModule {
  /** Module name, interpolated into the system prompt. */
  name: string;
  version: string;
  /** One-line statement of what this module is responsible for. */
  responsibility: string;
  /** Behaviors the module is explicitly forbidden from performing. */
  excludes: string[];
  /** Execution context hint — assumed fork = isolated, main = shared; TODO confirm against loader. */
  context?: 'fork' | 'main';
  /** Prompt template containing $ARGUMENTS / $N / ${key} placeholders. */
  prompt: string;
  /** Optional JSON Schema for the module's input. */
  inputSchema?: object;
  /** Optional JSON Schema the module's output must satisfy. */
  outputSchema?: object;
  /** Where the module was loaded from. */
  location: string;
}

/** Result of executing a module: parsed output plus self-reported metadata. */
export interface ModuleResult {
  /** The parsed JSON output from the model. */
  output: unknown;
  /** Model-reported confidence in [0, 1]; defaults to 0.5 when absent. */
  confidence: number;
  /** Model-reported reasoning; empty string when absent. */
  rationale: string;
  /** Raw, unparsed response text. */
  raw?: string;
}

// Command types
/** Shared context handed to CLI command implementations. */
export interface CommandContext {
  /** Working directory the command operates in. */
  cwd: string;
  /** Provider to use for any LLM calls. */
  provider: Provider;
  /** When true, commands emit extra diagnostics. */
  verbose?: boolean;
}

/** Uniform success/error envelope returned by CLI commands. */
export interface CommandResult {
  success: boolean;
  /** Payload on success. */
  data?: unknown;
  /** Human-readable message on failure. */
  error?: string;
}
package/tsconfig.json ADDED
@@ -0,0 +1,17 @@
1
+ {
2
+ "compilerOptions": {
3
+ "target": "ES2023",
4
+ "lib": ["ES2023"],
5
+ "module": "NodeNext",
6
+ "moduleResolution": "NodeNext",
7
+ "outDir": "./dist",
8
+ "rootDir": "./src",
9
+ "strict": true,
10
+ "esModuleInterop": true,
11
+ "skipLibCheck": true,
12
+ "declaration": true,
13
+ "resolveJsonModule": true
14
+ },
15
+ "include": ["src/**/*"],
16
+ "exclude": ["node_modules", "dist"]
17
+ }