cognitive-modules 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78) hide show
  1. package/README.md +165 -0
  2. package/dist/cli.d.ts +16 -0
  3. package/dist/cli.js +335 -0
  4. package/dist/commands/add.d.ts +34 -0
  5. package/dist/commands/add.js +229 -0
  6. package/dist/commands/index.d.ts +11 -0
  7. package/dist/commands/index.js +11 -0
  8. package/dist/commands/init.d.ts +5 -0
  9. package/dist/commands/init.js +78 -0
  10. package/dist/commands/list.d.ts +5 -0
  11. package/dist/commands/list.js +28 -0
  12. package/dist/commands/pipe.d.ts +9 -0
  13. package/dist/commands/pipe.js +59 -0
  14. package/dist/commands/remove.d.ts +10 -0
  15. package/dist/commands/remove.js +47 -0
  16. package/dist/commands/run.d.ts +12 -0
  17. package/dist/commands/run.js +65 -0
  18. package/dist/commands/update.d.ts +14 -0
  19. package/dist/commands/update.js +105 -0
  20. package/dist/commands/versions.d.ts +13 -0
  21. package/dist/commands/versions.js +60 -0
  22. package/dist/index.d.ts +9 -0
  23. package/dist/index.js +11 -0
  24. package/dist/modules/index.d.ts +5 -0
  25. package/dist/modules/index.js +5 -0
  26. package/dist/modules/loader.d.ts +12 -0
  27. package/dist/modules/loader.js +197 -0
  28. package/dist/modules/runner.d.ts +12 -0
  29. package/dist/modules/runner.js +229 -0
  30. package/dist/providers/anthropic.d.ts +14 -0
  31. package/dist/providers/anthropic.js +70 -0
  32. package/dist/providers/base.d.ts +11 -0
  33. package/dist/providers/base.js +19 -0
  34. package/dist/providers/deepseek.d.ts +14 -0
  35. package/dist/providers/deepseek.js +66 -0
  36. package/dist/providers/gemini.d.ts +19 -0
  37. package/dist/providers/gemini.js +94 -0
  38. package/dist/providers/index.d.ts +19 -0
  39. package/dist/providers/index.js +74 -0
  40. package/dist/providers/minimax.d.ts +14 -0
  41. package/dist/providers/minimax.js +64 -0
  42. package/dist/providers/moonshot.d.ts +14 -0
  43. package/dist/providers/moonshot.js +65 -0
  44. package/dist/providers/ollama.d.ts +13 -0
  45. package/dist/providers/ollama.js +64 -0
  46. package/dist/providers/openai.d.ts +14 -0
  47. package/dist/providers/openai.js +67 -0
  48. package/dist/providers/qwen.d.ts +14 -0
  49. package/dist/providers/qwen.js +65 -0
  50. package/dist/types.d.ts +136 -0
  51. package/dist/types.js +5 -0
  52. package/package.json +48 -0
  53. package/src/cli.ts +375 -0
  54. package/src/commands/add.ts +315 -0
  55. package/src/commands/index.ts +12 -0
  56. package/src/commands/init.ts +94 -0
  57. package/src/commands/list.ts +33 -0
  58. package/src/commands/pipe.ts +76 -0
  59. package/src/commands/remove.ts +57 -0
  60. package/src/commands/run.ts +80 -0
  61. package/src/commands/update.ts +130 -0
  62. package/src/commands/versions.ts +79 -0
  63. package/src/index.ts +44 -0
  64. package/src/modules/index.ts +6 -0
  65. package/src/modules/loader.ts +219 -0
  66. package/src/modules/runner.ts +278 -0
  67. package/src/providers/anthropic.ts +89 -0
  68. package/src/providers/base.ts +29 -0
  69. package/src/providers/deepseek.ts +83 -0
  70. package/src/providers/gemini.ts +117 -0
  71. package/src/providers/index.ts +78 -0
  72. package/src/providers/minimax.ts +81 -0
  73. package/src/providers/moonshot.ts +82 -0
  74. package/src/providers/ollama.ts +83 -0
  75. package/src/providers/openai.ts +84 -0
  76. package/src/providers/qwen.ts +82 -0
  77. package/src/types.ts +184 -0
  78. package/tsconfig.json +17 -0
@@ -0,0 +1,117 @@
1
+ /**
2
+ * Gemini Provider - Google Gemini API
3
+ */
4
+
5
+ import { BaseProvider } from './base.js';
6
+ import type { InvokeParams, InvokeResult } from '../types.js';
7
+
8
+ export class GeminiProvider extends BaseProvider {
9
+ name = 'gemini';
10
+ private apiKey: string;
11
+ private model: string;
12
+ private baseUrl = 'https://generativelanguage.googleapis.com/v1beta';
13
+
14
+ constructor(apiKey?: string, model = 'gemini-3-flash') {
15
+ super();
16
+ this.apiKey = apiKey || process.env.GEMINI_API_KEY || '';
17
+ this.model = model;
18
+ }
19
+
20
+ isConfigured(): boolean {
21
+ return !!this.apiKey;
22
+ }
23
+
24
+ /**
25
+ * Clean JSON Schema for Gemini API compatibility
26
+ * Removes unsupported fields like additionalProperties
27
+ */
28
+ private cleanSchemaForGemini(schema: object): object {
29
+ const unsupportedFields = ['additionalProperties', '$schema', 'default', 'examples'];
30
+
31
+ const clean = (obj: unknown): unknown => {
32
+ if (Array.isArray(obj)) {
33
+ return obj.map(clean);
34
+ }
35
+ if (obj && typeof obj === 'object') {
36
+ const result: Record<string, unknown> = {};
37
+ for (const [key, value] of Object.entries(obj)) {
38
+ if (!unsupportedFields.includes(key)) {
39
+ result[key] = clean(value);
40
+ }
41
+ }
42
+ return result;
43
+ }
44
+ return obj;
45
+ };
46
+
47
+ return clean(schema) as object;
48
+ }
49
+
50
+ async invoke(params: InvokeParams): Promise<InvokeResult> {
51
+ if (!this.isConfigured()) {
52
+ throw new Error('Gemini API key not configured. Set GEMINI_API_KEY environment variable.');
53
+ }
54
+
55
+ const url = `${this.baseUrl}/models/${this.model}:generateContent?key=${this.apiKey}`;
56
+
57
+ // Convert messages to Gemini format
58
+ const contents = params.messages
59
+ .filter(m => m.role !== 'system')
60
+ .map(m => ({
61
+ role: m.role === 'assistant' ? 'model' : 'user',
62
+ parts: [{ text: m.content }]
63
+ }));
64
+
65
+ // Add system instruction if present
66
+ const systemMessage = params.messages.find(m => m.role === 'system');
67
+
68
+ const body: Record<string, unknown> = {
69
+ contents,
70
+ generationConfig: {
71
+ temperature: params.temperature ?? 0.7,
72
+ maxOutputTokens: params.maxTokens ?? 8192,
73
+ }
74
+ };
75
+
76
+ if (systemMessage) {
77
+ body.systemInstruction = { parts: [{ text: systemMessage.content }] };
78
+ }
79
+
80
+ // Add JSON schema constraint if provided
81
+ if (params.jsonSchema) {
82
+ const cleanedSchema = this.cleanSchemaForGemini(params.jsonSchema);
83
+ body.generationConfig = {
84
+ ...body.generationConfig as object,
85
+ responseMimeType: 'application/json',
86
+ responseSchema: cleanedSchema,
87
+ };
88
+ }
89
+
90
+ const response = await fetch(url, {
91
+ method: 'POST',
92
+ headers: { 'Content-Type': 'application/json' },
93
+ body: JSON.stringify(body),
94
+ });
95
+
96
+ if (!response.ok) {
97
+ const error = await response.text();
98
+ throw new Error(`Gemini API error: ${response.status} - ${error}`);
99
+ }
100
+
101
+ const data = await response.json() as {
102
+ candidates?: Array<{ content?: { parts?: Array<{ text?: string }> } }>;
103
+ usageMetadata?: { promptTokenCount?: number; candidatesTokenCount?: number; totalTokenCount?: number };
104
+ };
105
+
106
+ const content = data.candidates?.[0]?.content?.parts?.[0]?.text || '';
107
+
108
+ return {
109
+ content,
110
+ usage: data.usageMetadata ? {
111
+ promptTokens: data.usageMetadata.promptTokenCount || 0,
112
+ completionTokens: data.usageMetadata.candidatesTokenCount || 0,
113
+ totalTokens: data.usageMetadata.totalTokenCount || 0,
114
+ } : undefined,
115
+ };
116
+ }
117
+ }
@@ -0,0 +1,78 @@
1
+ /**
2
+ * Provider Registry
3
+ */
4
+
5
+ import type { Provider } from '../types.js';
6
+ import { GeminiProvider } from './gemini.js';
7
+ import { OpenAIProvider } from './openai.js';
8
+ import { AnthropicProvider } from './anthropic.js';
9
+ import { MiniMaxProvider } from './minimax.js';
10
+ import { DeepSeekProvider } from './deepseek.js';
11
+ import { MoonshotProvider } from './moonshot.js';
12
+ import { QwenProvider } from './qwen.js';
13
+ import { OllamaProvider } from './ollama.js';
14
+
15
+ export { BaseProvider } from './base.js';
16
+ export { GeminiProvider } from './gemini.js';
17
+ export { OpenAIProvider } from './openai.js';
18
+ export { AnthropicProvider } from './anthropic.js';
19
+ export { MiniMaxProvider } from './minimax.js';
20
+ export { DeepSeekProvider } from './deepseek.js';
21
+ export { MoonshotProvider } from './moonshot.js';
22
+ export { QwenProvider } from './qwen.js';
23
+ export { OllamaProvider } from './ollama.js';
24
+
25
/** Factory signature: builds a provider, optionally with a model override. */
type ProviderFactory = (model?: string) => Provider;

// Registry of provider factories keyed by lowercase provider name.
// Each factory passes `undefined` for the API key so the provider reads it
// from its own environment variable. Several keys are aliases for the same
// implementation: kimi -> moonshot, tongyi/dashscope -> qwen, local -> ollama.
const providers: Record<string, ProviderFactory> = {
  gemini: (model) => new GeminiProvider(undefined, model),
  openai: (model) => new OpenAIProvider(undefined, model),
  anthropic: (model) => new AnthropicProvider(undefined, model),
  minimax: (model) => new MiniMaxProvider(undefined, model),
  deepseek: (model) => new DeepSeekProvider(undefined, model),
  moonshot: (model) => new MoonshotProvider(undefined, model),
  kimi: (model) => new MoonshotProvider(undefined, model), // Alias
  qwen: (model) => new QwenProvider(undefined, model),
  tongyi: (model) => new QwenProvider(undefined, model), // Alias
  dashscope: (model) => new QwenProvider(undefined, model), // Alias
  ollama: (model) => new OllamaProvider(model),
  local: (model) => new OllamaProvider(model), // Alias
};
41
+
42
+ export function getProvider(name?: string, model?: string): Provider {
43
+ // Check for model override from environment
44
+ const modelOverride = model || process.env.COG_MODEL;
45
+
46
+ // Auto-detect if not specified
47
+ if (!name) {
48
+ if (process.env.GEMINI_API_KEY) return new GeminiProvider(undefined, modelOverride);
49
+ if (process.env.OPENAI_API_KEY) return new OpenAIProvider(undefined, modelOverride);
50
+ if (process.env.ANTHROPIC_API_KEY) return new AnthropicProvider(undefined, modelOverride);
51
+ if (process.env.DEEPSEEK_API_KEY) return new DeepSeekProvider(undefined, modelOverride);
52
+ if (process.env.MINIMAX_API_KEY) return new MiniMaxProvider(undefined, modelOverride);
53
+ if (process.env.MOONSHOT_API_KEY) return new MoonshotProvider(undefined, modelOverride);
54
+ if (process.env.DASHSCOPE_API_KEY || process.env.QWEN_API_KEY) return new QwenProvider(undefined, modelOverride);
55
+ // Ollama is always available as fallback if nothing else is configured
56
+ return new OllamaProvider(modelOverride);
57
+ }
58
+
59
+ const factory = providers[name.toLowerCase()];
60
+ if (!factory) {
61
+ throw new Error(`Unknown provider: ${name}. Available: ${Object.keys(providers).join(', ')}`);
62
+ }
63
+
64
+ return factory(modelOverride);
65
+ }
66
+
67
+ export function listProviders(): Array<{ name: string; configured: boolean; model: string }> {
68
+ return [
69
+ { name: 'gemini', configured: !!process.env.GEMINI_API_KEY, model: 'gemini-3-flash' },
70
+ { name: 'openai', configured: !!process.env.OPENAI_API_KEY, model: 'gpt-5.2' },
71
+ { name: 'anthropic', configured: !!process.env.ANTHROPIC_API_KEY, model: 'claude-sonnet-4.5' },
72
+ { name: 'deepseek', configured: !!process.env.DEEPSEEK_API_KEY, model: 'deepseek-v3.2' },
73
+ { name: 'minimax', configured: !!process.env.MINIMAX_API_KEY, model: 'MiniMax-M2.1' },
74
+ { name: 'moonshot', configured: !!process.env.MOONSHOT_API_KEY, model: 'kimi-k2.5' },
75
+ { name: 'qwen', configured: !!(process.env.DASHSCOPE_API_KEY || process.env.QWEN_API_KEY), model: 'qwen3-max' },
76
+ { name: 'ollama', configured: true, model: 'llama4 (local)' },
77
+ ];
78
+ }
@@ -0,0 +1,81 @@
1
+ /**
2
+ * MiniMax Provider - MiniMax API
3
+ */
4
+
5
+ import { BaseProvider } from './base.js';
6
+ import type { InvokeParams, InvokeResult } from '../types.js';
7
+
8
+ export class MiniMaxProvider extends BaseProvider {
9
+ name = 'minimax';
10
+ private apiKey: string;
11
+ private model: string;
12
+ private baseUrl = 'https://api.minimax.chat/v1';
13
+
14
+ constructor(apiKey?: string, model = 'MiniMax-M2.1') {
15
+ super();
16
+ this.apiKey = apiKey || process.env.MINIMAX_API_KEY || '';
17
+ this.model = model;
18
+ }
19
+
20
+ isConfigured(): boolean {
21
+ return !!this.apiKey;
22
+ }
23
+
24
+ async invoke(params: InvokeParams): Promise<InvokeResult> {
25
+ if (!this.isConfigured()) {
26
+ throw new Error('MiniMax API key not configured. Set MINIMAX_API_KEY environment variable.');
27
+ }
28
+
29
+ const url = `${this.baseUrl}/text/chatcompletion_v2`;
30
+
31
+ const body: Record<string, unknown> = {
32
+ model: this.model,
33
+ messages: params.messages.map(m => ({ role: m.role, content: m.content })),
34
+ temperature: params.temperature ?? 0.7,
35
+ max_tokens: params.maxTokens ?? 4096,
36
+ };
37
+
38
+ // Add JSON mode if schema provided
39
+ if (params.jsonSchema) {
40
+ const lastUserIdx = params.messages.findLastIndex(m => m.role === 'user');
41
+ if (lastUserIdx >= 0) {
42
+ const messages = [...params.messages];
43
+ messages[lastUserIdx] = {
44
+ ...messages[lastUserIdx],
45
+ content: messages[lastUserIdx].content + this.buildJsonPrompt(params.jsonSchema),
46
+ };
47
+ body.messages = messages.map(m => ({ role: m.role, content: m.content }));
48
+ }
49
+ }
50
+
51
+ const response = await fetch(url, {
52
+ method: 'POST',
53
+ headers: {
54
+ 'Content-Type': 'application/json',
55
+ 'Authorization': `Bearer ${this.apiKey}`,
56
+ },
57
+ body: JSON.stringify(body),
58
+ });
59
+
60
+ if (!response.ok) {
61
+ const error = await response.text();
62
+ throw new Error(`MiniMax API error: ${response.status} - ${error}`);
63
+ }
64
+
65
+ const data = await response.json() as {
66
+ choices?: Array<{ message?: { content?: string } }>;
67
+ usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number };
68
+ };
69
+
70
+ const content = data.choices?.[0]?.message?.content || '';
71
+
72
+ return {
73
+ content,
74
+ usage: data.usage ? {
75
+ promptTokens: data.usage.prompt_tokens || 0,
76
+ completionTokens: data.usage.completion_tokens || 0,
77
+ totalTokens: data.usage.total_tokens || 0,
78
+ } : undefined,
79
+ };
80
+ }
81
+ }
@@ -0,0 +1,82 @@
1
+ /**
2
+ * Moonshot Provider - Moonshot AI (Kimi) API
3
+ */
4
+
5
+ import { BaseProvider } from './base.js';
6
+ import type { InvokeParams, InvokeResult } from '../types.js';
7
+
8
+ export class MoonshotProvider extends BaseProvider {
9
+ name = 'moonshot';
10
+ private apiKey: string;
11
+ private model: string;
12
+ private baseUrl = 'https://api.moonshot.cn/v1';
13
+
14
+ constructor(apiKey?: string, model = 'kimi-k2.5') {
15
+ super();
16
+ this.apiKey = apiKey || process.env.MOONSHOT_API_KEY || '';
17
+ this.model = model;
18
+ }
19
+
20
+ isConfigured(): boolean {
21
+ return !!this.apiKey;
22
+ }
23
+
24
+ async invoke(params: InvokeParams): Promise<InvokeResult> {
25
+ if (!this.isConfigured()) {
26
+ throw new Error('Moonshot API key not configured. Set MOONSHOT_API_KEY environment variable.');
27
+ }
28
+
29
+ const url = `${this.baseUrl}/chat/completions`;
30
+
31
+ const body: Record<string, unknown> = {
32
+ model: this.model,
33
+ messages: params.messages.map(m => ({ role: m.role, content: m.content })),
34
+ temperature: params.temperature ?? 0.7,
35
+ max_tokens: params.maxTokens ?? 4096,
36
+ };
37
+
38
+ // Add JSON mode if schema provided
39
+ if (params.jsonSchema) {
40
+ body.response_format = { type: 'json_object' };
41
+ const lastUserIdx = params.messages.findLastIndex(m => m.role === 'user');
42
+ if (lastUserIdx >= 0) {
43
+ const messages = [...params.messages];
44
+ messages[lastUserIdx] = {
45
+ ...messages[lastUserIdx],
46
+ content: messages[lastUserIdx].content + this.buildJsonPrompt(params.jsonSchema),
47
+ };
48
+ body.messages = messages.map(m => ({ role: m.role, content: m.content }));
49
+ }
50
+ }
51
+
52
+ const response = await fetch(url, {
53
+ method: 'POST',
54
+ headers: {
55
+ 'Content-Type': 'application/json',
56
+ 'Authorization': `Bearer ${this.apiKey}`,
57
+ },
58
+ body: JSON.stringify(body),
59
+ });
60
+
61
+ if (!response.ok) {
62
+ const error = await response.text();
63
+ throw new Error(`Moonshot API error: ${response.status} - ${error}`);
64
+ }
65
+
66
+ const data = await response.json() as {
67
+ choices?: Array<{ message?: { content?: string } }>;
68
+ usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number };
69
+ };
70
+
71
+ const content = data.choices?.[0]?.message?.content || '';
72
+
73
+ return {
74
+ content,
75
+ usage: data.usage ? {
76
+ promptTokens: data.usage.prompt_tokens || 0,
77
+ completionTokens: data.usage.completion_tokens || 0,
78
+ totalTokens: data.usage.total_tokens || 0,
79
+ } : undefined,
80
+ };
81
+ }
82
+ }
@@ -0,0 +1,83 @@
1
+ /**
2
+ * Ollama Provider - Local LLM via Ollama
3
+ */
4
+
5
+ import { BaseProvider } from './base.js';
6
+ import type { InvokeParams, InvokeResult } from '../types.js';
7
+
8
+ export class OllamaProvider extends BaseProvider {
9
+ name = 'ollama';
10
+ private model: string;
11
+ private baseUrl: string;
12
+
13
+ constructor(model = 'llama4', baseUrl = 'http://localhost:11434') {
14
+ super();
15
+ this.model = process.env.OLLAMA_MODEL || model;
16
+ this.baseUrl = process.env.OLLAMA_HOST || baseUrl;
17
+ }
18
+
19
+ isConfigured(): boolean {
20
+ return true; // Ollama doesn't need API key
21
+ }
22
+
23
+ async invoke(params: InvokeParams): Promise<InvokeResult> {
24
+ const url = `${this.baseUrl}/api/chat`;
25
+
26
+ let messages = params.messages.map(m => ({ role: m.role, content: m.content }));
27
+
28
+ // Add JSON mode if schema provided
29
+ if (params.jsonSchema) {
30
+ const lastUserIdx = messages.findLastIndex(m => m.role === 'user');
31
+ if (lastUserIdx >= 0) {
32
+ messages = [...messages];
33
+ messages[lastUserIdx] = {
34
+ ...messages[lastUserIdx],
35
+ content: messages[lastUserIdx].content + this.buildJsonPrompt(params.jsonSchema),
36
+ };
37
+ }
38
+ }
39
+
40
+ const body: Record<string, unknown> = {
41
+ model: this.model,
42
+ messages,
43
+ stream: false,
44
+ options: {
45
+ temperature: params.temperature ?? 0.7,
46
+ num_predict: params.maxTokens ?? 4096,
47
+ },
48
+ };
49
+
50
+ // Request JSON format
51
+ if (params.jsonSchema) {
52
+ body.format = 'json';
53
+ }
54
+
55
+ const response = await fetch(url, {
56
+ method: 'POST',
57
+ headers: { 'Content-Type': 'application/json' },
58
+ body: JSON.stringify(body),
59
+ });
60
+
61
+ if (!response.ok) {
62
+ const error = await response.text();
63
+ throw new Error(`Ollama API error: ${response.status} - ${error}`);
64
+ }
65
+
66
+ const data = await response.json() as {
67
+ message?: { content?: string };
68
+ prompt_eval_count?: number;
69
+ eval_count?: number;
70
+ };
71
+
72
+ const content = data.message?.content || '';
73
+
74
+ return {
75
+ content,
76
+ usage: {
77
+ promptTokens: data.prompt_eval_count || 0,
78
+ completionTokens: data.eval_count || 0,
79
+ totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0),
80
+ },
81
+ };
82
+ }
83
+ }
@@ -0,0 +1,84 @@
1
+ /**
2
+ * OpenAI Provider - OpenAI API (and compatible APIs)
3
+ */
4
+
5
+ import { BaseProvider } from './base.js';
6
+ import type { InvokeParams, InvokeResult } from '../types.js';
7
+
8
+ export class OpenAIProvider extends BaseProvider {
9
+ name = 'openai';
10
+ private apiKey: string;
11
+ private model: string;
12
+ private baseUrl: string;
13
+
14
+ constructor(apiKey?: string, model = 'gpt-5.2', baseUrl = 'https://api.openai.com/v1') {
15
+ super();
16
+ this.apiKey = apiKey || process.env.OPENAI_API_KEY || '';
17
+ this.model = model;
18
+ this.baseUrl = baseUrl;
19
+ }
20
+
21
+ isConfigured(): boolean {
22
+ return !!this.apiKey;
23
+ }
24
+
25
+ async invoke(params: InvokeParams): Promise<InvokeResult> {
26
+ if (!this.isConfigured()) {
27
+ throw new Error('OpenAI API key not configured. Set OPENAI_API_KEY environment variable.');
28
+ }
29
+
30
+ const url = `${this.baseUrl}/chat/completions`;
31
+
32
+ const body: Record<string, unknown> = {
33
+ model: this.model,
34
+ messages: params.messages,
35
+ temperature: params.temperature ?? 0.7,
36
+ max_tokens: params.maxTokens ?? 4096,
37
+ };
38
+
39
+ // Add JSON mode if schema provided
40
+ if (params.jsonSchema) {
41
+ body.response_format = { type: 'json_object' };
42
+ // Append schema instruction to last user message
43
+ const lastUserIdx = params.messages.findLastIndex(m => m.role === 'user');
44
+ if (lastUserIdx >= 0) {
45
+ const messages = [...params.messages];
46
+ messages[lastUserIdx] = {
47
+ ...messages[lastUserIdx],
48
+ content: messages[lastUserIdx].content + this.buildJsonPrompt(params.jsonSchema),
49
+ };
50
+ body.messages = messages;
51
+ }
52
+ }
53
+
54
+ const response = await fetch(url, {
55
+ method: 'POST',
56
+ headers: {
57
+ 'Content-Type': 'application/json',
58
+ 'Authorization': `Bearer ${this.apiKey}`,
59
+ },
60
+ body: JSON.stringify(body),
61
+ });
62
+
63
+ if (!response.ok) {
64
+ const error = await response.text();
65
+ throw new Error(`OpenAI API error: ${response.status} - ${error}`);
66
+ }
67
+
68
+ const data = await response.json() as {
69
+ choices?: Array<{ message?: { content?: string } }>;
70
+ usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number };
71
+ };
72
+
73
+ const content = data.choices?.[0]?.message?.content || '';
74
+
75
+ return {
76
+ content,
77
+ usage: data.usage ? {
78
+ promptTokens: data.usage.prompt_tokens || 0,
79
+ completionTokens: data.usage.completion_tokens || 0,
80
+ totalTokens: data.usage.total_tokens || 0,
81
+ } : undefined,
82
+ };
83
+ }
84
+ }
@@ -0,0 +1,82 @@
1
+ /**
2
+ * Qwen Provider - Alibaba Tongyi Qianwen (通义千问) via DashScope API
3
+ */
4
+
5
+ import { BaseProvider } from './base.js';
6
+ import type { InvokeParams, InvokeResult } from '../types.js';
7
+
8
+ export class QwenProvider extends BaseProvider {
9
+ name = 'qwen';
10
+ private apiKey: string;
11
+ private model: string;
12
+ private baseUrl = 'https://dashscope.aliyuncs.com/compatible-mode/v1';
13
+
14
+ constructor(apiKey?: string, model = 'qwen3-max') {
15
+ super();
16
+ this.apiKey = apiKey || process.env.DASHSCOPE_API_KEY || process.env.QWEN_API_KEY || '';
17
+ this.model = model;
18
+ }
19
+
20
+ isConfigured(): boolean {
21
+ return !!this.apiKey;
22
+ }
23
+
24
+ async invoke(params: InvokeParams): Promise<InvokeResult> {
25
+ if (!this.isConfigured()) {
26
+ throw new Error('Qwen API key not configured. Set DASHSCOPE_API_KEY or QWEN_API_KEY environment variable.');
27
+ }
28
+
29
+ const url = `${this.baseUrl}/chat/completions`;
30
+
31
+ const body: Record<string, unknown> = {
32
+ model: this.model,
33
+ messages: params.messages.map(m => ({ role: m.role, content: m.content })),
34
+ temperature: params.temperature ?? 0.7,
35
+ max_tokens: params.maxTokens ?? 4096,
36
+ };
37
+
38
+ // Add JSON mode if schema provided
39
+ if (params.jsonSchema) {
40
+ body.response_format = { type: 'json_object' };
41
+ const lastUserIdx = params.messages.findLastIndex(m => m.role === 'user');
42
+ if (lastUserIdx >= 0) {
43
+ const messages = [...params.messages];
44
+ messages[lastUserIdx] = {
45
+ ...messages[lastUserIdx],
46
+ content: messages[lastUserIdx].content + this.buildJsonPrompt(params.jsonSchema),
47
+ };
48
+ body.messages = messages.map(m => ({ role: m.role, content: m.content }));
49
+ }
50
+ }
51
+
52
+ const response = await fetch(url, {
53
+ method: 'POST',
54
+ headers: {
55
+ 'Content-Type': 'application/json',
56
+ 'Authorization': `Bearer ${this.apiKey}`,
57
+ },
58
+ body: JSON.stringify(body),
59
+ });
60
+
61
+ if (!response.ok) {
62
+ const error = await response.text();
63
+ throw new Error(`Qwen API error: ${response.status} - ${error}`);
64
+ }
65
+
66
+ const data = await response.json() as {
67
+ choices?: Array<{ message?: { content?: string } }>;
68
+ usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number };
69
+ };
70
+
71
+ const content = data.choices?.[0]?.message?.content || '';
72
+
73
+ return {
74
+ content,
75
+ usage: data.usage ? {
76
+ promptTokens: data.usage.prompt_tokens || 0,
77
+ completionTokens: data.usage.completion_tokens || 0,
78
+ totalTokens: data.usage.total_tokens || 0,
79
+ } : undefined,
80
+ };
81
+ }
82
+ }