@juspay/neurolink 1.2.3 → 1.2.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,338 @@
1
+ /**
2
+ * Azure OpenAI Provider
3
+ *
4
+ * Enterprise-grade OpenAI integration through Microsoft Azure.
5
+ * Supports all OpenAI models with enhanced security and compliance.
6
+ */
7
+ import { AIProviderName } from '../core/types.js';
8
export class AzureOpenAIProvider {
    /** Provider identifier from the shared enum (resolved at construction). */
    name = AIProviderName.AZURE;
    apiKey;
    endpoint;
    deploymentId;
    apiVersion;

    /**
     * Reads all required Azure configuration from the environment.
     * @throws {Error} if AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT or
     *   AZURE_OPENAI_DEPLOYMENT_ID is missing.
     */
    constructor() {
        this.apiKey = this.getApiKey();
        this.endpoint = this.getEndpoint();
        this.deploymentId = this.getDeploymentId();
        this.apiVersion = process.env.AZURE_OPENAI_API_VERSION || '2024-02-15-preview';
        console.log(`[AzureOpenAIProvider] Initialized with endpoint: ${this.endpoint}, deployment: ${this.deploymentId}`);
    }

    /**
     * @returns {string} the Azure OpenAI API key.
     * @throws {Error} when AZURE_OPENAI_API_KEY is not set.
     */
    getApiKey() {
        const apiKey = process.env.AZURE_OPENAI_API_KEY;
        if (!apiKey) {
            throw new Error('AZURE_OPENAI_API_KEY environment variable is required');
        }
        return apiKey;
    }

    /**
     * @returns {string} the resource endpoint, normalized without a trailing slash.
     * @throws {Error} when AZURE_OPENAI_ENDPOINT is not set.
     */
    getEndpoint() {
        const endpoint = process.env.AZURE_OPENAI_ENDPOINT;
        if (!endpoint) {
            throw new Error('AZURE_OPENAI_ENDPOINT environment variable is required');
        }
        return endpoint.replace(/\/$/, ''); // Remove trailing slash
    }

    /**
     * @returns {string} the deployment (model) id configured in Azure.
     * @throws {Error} when AZURE_OPENAI_DEPLOYMENT_ID is not set.
     */
    getDeploymentId() {
        const deploymentId = process.env.AZURE_OPENAI_DEPLOYMENT_ID;
        if (!deploymentId) {
            throw new Error('AZURE_OPENAI_DEPLOYMENT_ID environment variable is required');
        }
        return deploymentId;
    }

    /**
     * Builds the chat-completions URL for the configured deployment.
     * @param {boolean} [stream=false] - Accepted for interface parity; Azure
     *   uses the same URL for streaming and non-streaming requests, the mode
     *   is selected by the `stream` field in the request body.
     * @returns {string} fully qualified request URL.
     */
    getApiUrl(stream = false) {
        return `${this.endpoint}/openai/deployments/${this.deploymentId}/chat/completions?api-version=${this.apiVersion}`;
    }

    /**
     * Performs the HTTP POST against Azure OpenAI.
     * @param {object} body - Chat-completions request body.
     * @param {boolean} [stream=false] - Whether this is a streaming call (logging only).
     * @returns {Promise<Response>} the raw fetch Response (status already verified).
     * @throws {Error} with status and body text when the API responds non-2xx.
     */
    async makeRequest(body, stream = false) {
        const url = this.getApiUrl(stream);
        const headers = {
            'Content-Type': 'application/json',
            'api-key': this.apiKey
        };
        console.log(`[AzureOpenAIProvider.makeRequest] ${stream ? 'Streaming' : 'Non-streaming'} request to deployment: ${this.deploymentId}`);
        // `??` (not `||`) so an explicit 0 temperature / max_tokens is logged as 0.
        console.log(`[AzureOpenAIProvider.makeRequest] Max tokens: ${body.max_tokens ?? 'default'}, Temperature: ${body.temperature ?? 'default'}`);
        const response = await fetch(url, {
            method: 'POST',
            headers,
            body: JSON.stringify(body)
        });
        if (!response.ok) {
            const errorText = await response.text();
            console.error(`[AzureOpenAIProvider.makeRequest] API error ${response.status}: ${errorText}`);
            throw new Error(`Azure OpenAI API error ${response.status}: ${errorText}`);
        }
        return response;
    }

    /**
     * Normalizes a prompt-or-options argument and builds the chat-completions
     * request body shared by generateText / streamText / generateTextStream.
     * @throws {Error} when no prompt string is provided.
     */
    static #buildChatBody(optionsOrPrompt, stream = false) {
        // Backward compatibility: a bare string is treated as { prompt }.
        const options = typeof optionsOrPrompt === 'string'
            ? { prompt: optionsOrPrompt }
            : (optionsOrPrompt ?? {});
        const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are a helpful AI assistant.' } = options;
        if (typeof prompt !== 'string') {
            throw new Error('A prompt string is required');
        }
        const messages = [];
        if (systemPrompt) {
            messages.push({ role: 'system', content: systemPrompt });
        }
        messages.push({ role: 'user', content: prompt });
        const body = { messages, temperature, max_tokens: maxTokens };
        if (stream) {
            body.stream = true;
        }
        return body;
    }

    /**
     * Parses an Azure OpenAI server-sent-events byte stream into JSON chunk
     * objects. Malformed chunks are logged and skipped. A trailing line that
     * arrives without a final newline is still processed, and the reader lock
     * is always released.
     */
    static async *#sseChunks(body) {
        const reader = body.getReader();
        const decoder = new TextDecoder();
        let buffer = '';
        // Decodes one SSE line; returns the parsed chunk or null to skip.
        const parseLine = (line) => {
            if (line.trim() === '' || !line.startsWith('data: ')) {
                return null;
            }
            const data = line.slice(6);
            if (data.trim() === '[DONE]') {
                return null;
            }
            try {
                return JSON.parse(data);
            }
            catch (parseError) {
                console.warn('[AzureOpenAIProvider] Failed to parse SSE chunk:', parseError);
                return null;
            }
        };
        try {
            while (true) {
                const { done, value } = await reader.read();
                if (done) {
                    break;
                }
                buffer += decoder.decode(value, { stream: true });
                const lines = buffer.split('\n');
                buffer = lines.pop() || '';
                for (const line of lines) {
                    const chunk = parseLine(line);
                    if (chunk !== null) {
                        yield chunk;
                    }
                }
            }
            // Flush bytes held by the decoder and process any unterminated tail line.
            buffer += decoder.decode();
            const tail = parseLine(buffer);
            if (tail !== null) {
                yield tail;
            }
        }
        finally {
            reader.releaseLock();
        }
    }

    /**
     * Generates a full (non-streaming) completion.
     * @param {string|object} optionsOrPrompt - Prompt string, or options with
     *   { prompt, temperature, maxTokens, systemPrompt }.
     * @param {*} [schema] - Accepted for interface parity; not applied here.
     * @returns {Promise<{content, provider, model, usage, finishReason}>}
     */
    async generateText(optionsOrPrompt, schema) {
        console.log('[AzureOpenAIProvider.generateText] Starting text generation');
        const requestBody = AzureOpenAIProvider.#buildChatBody(optionsOrPrompt);
        // The prompt text itself is intentionally not logged to avoid leaking
        // user content into console output.
        console.log(`[AzureOpenAIProvider.generateText] Temperature: ${requestBody.temperature}, Max tokens: ${requestBody.max_tokens}`);
        try {
            const response = await this.makeRequest(requestBody);
            const data = await response.json();
            // Guard `usage`: the API may omit it (e.g. content-filter responses).
            const usage = data.usage ?? {};
            console.log(`[AzureOpenAIProvider.generateText] Success. Generated ${usage.completion_tokens ?? 0} tokens`);
            const choice = data.choices?.[0];
            return {
                content: choice?.message?.content || '',
                provider: this.name,
                model: data.model,
                usage: {
                    promptTokens: usage.prompt_tokens ?? 0,
                    completionTokens: usage.completion_tokens ?? 0,
                    totalTokens: usage.total_tokens ?? 0
                },
                finishReason: choice?.finish_reason || 'stop'
            };
        }
        catch (error) {
            console.error('[AzureOpenAIProvider.generateText] Error:', error);
            throw error;
        }
    }

    /**
     * Starts a streaming completion and returns a StreamTextResult-like object.
     * @param {string|object} optionsOrPrompt - Same shapes as generateText.
     * @param {*} [schema] - Accepted for interface parity; not applied here.
     * @returns {Promise<{textStream, text, usage, finishReason}>} aggregate
     *   fields are placeholders — usage is unknown until the stream is consumed.
     */
    async streamText(optionsOrPrompt, schema) {
        console.log('[AzureOpenAIProvider.streamText] Starting text streaming');
        const requestBody = AzureOpenAIProvider.#buildChatBody(optionsOrPrompt, true);
        try {
            const response = await this.makeRequest(requestBody, true);
            if (!response.body) {
                throw new Error('No response body received');
            }
            return {
                textStream: this.createAsyncIterable(response.body),
                text: '',
                usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
                finishReason: 'stop'
            };
        }
        catch (error) {
            console.error('[AzureOpenAIProvider.streamText] Error:', error);
            throw error;
        }
    }

    /**
     * Yields plain text deltas from a streaming response body.
     * Kept public for backward compatibility with existing callers.
     * @param {ReadableStream} body - Raw response body from fetch.
     */
    async *createAsyncIterable(body) {
        for await (const chunk of AzureOpenAIProvider.#sseChunks(body)) {
            const text = chunk.choices?.[0]?.delta?.content;
            if (text) {
                yield text;
            }
        }
    }

    /**
     * Streams a completion, yielding { content, provider, model } objects
     * for each text delta received.
     * @param {string|object} optionsOrPrompt - Same shapes as generateText.
     */
    async *generateTextStream(optionsOrPrompt) {
        console.log('[AzureOpenAIProvider.generateTextStream] Starting text streaming');
        const requestBody = AzureOpenAIProvider.#buildChatBody(optionsOrPrompt, true);
        try {
            const response = await this.makeRequest(requestBody, true);
            if (!response.body) {
                throw new Error('No response body received');
            }
            for await (const chunk of AzureOpenAIProvider.#sseChunks(response.body)) {
                const text = chunk.choices?.[0]?.delta?.content;
                if (text) {
                    yield {
                        content: text,
                        provider: this.name,
                        model: chunk.model || this.deploymentId
                    };
                }
            }
            console.log('[AzureOpenAIProvider.generateTextStream] Streaming completed');
        }
        catch (error) {
            console.error('[AzureOpenAIProvider.generateTextStream] Error:', error);
            throw error;
        }
    }

    /**
     * Issues a tiny test generation to verify connectivity and credentials.
     * Never throws; failures are reported in the returned object.
     * @returns {Promise<{success: boolean, responseTime: number, error?: string}>}
     */
    async testConnection() {
        console.log('[AzureOpenAIProvider.testConnection] Testing connection to Azure OpenAI');
        const startTime = Date.now();
        try {
            await this.generateText({
                prompt: 'Hello',
                maxTokens: 5
            });
            const responseTime = Date.now() - startTime;
            console.log(`[AzureOpenAIProvider.testConnection] Connection test successful (${responseTime}ms)`);
            return {
                success: true,
                responseTime
            };
        }
        catch (error) {
            const responseTime = Date.now() - startTime;
            console.error(`[AzureOpenAIProvider.testConnection] Connection test failed (${responseTime}ms):`, error);
            return {
                success: false,
                error: error instanceof Error ? error.message : 'Unknown error',
                responseTime
            };
        }
    }

    /** @returns {boolean} true when all required environment variables are set. */
    isConfigured() {
        try {
            this.getApiKey();
            this.getEndpoint();
            this.getDeploymentId();
            return true;
        }
        catch {
            return false;
        }
    }

    /** @returns {string[]} names of required environment variables. */
    getRequiredConfig() {
        return ['AZURE_OPENAI_API_KEY', 'AZURE_OPENAI_ENDPOINT', 'AZURE_OPENAI_DEPLOYMENT_ID'];
    }

    /** @returns {string[]} names of optional environment variables. */
    getOptionalConfig() {
        return ['AZURE_OPENAI_API_VERSION'];
    }

    /**
     * @returns {string[]} commonly deployed model names. NOTE(review): the
     *   actual models available depend on the Azure resource's deployments.
     */
    getModels() {
        return [
            'gpt-4',
            'gpt-4-turbo',
            'gpt-4-32k',
            'gpt-35-turbo',
            'gpt-35-turbo-16k'
        ];
    }

    /** @returns {boolean} always true — SSE streaming is implemented above. */
    supportsStreaming() {
        return true;
    }

    /** @returns {boolean} true; Azure OpenAI supports JSON mode and function calling. */
    supportsSchema() {
        return true; // Azure OpenAI supports JSON mode and function calling
    }

    /** @returns {string[]} capability tags advertised by this provider. */
    getCapabilities() {
        return [
            'text-generation',
            'streaming',
            'conversation',
            'system-prompts',
            'json-mode',
            'function-calling',
            'enterprise-security',
            'content-filtering'
        ];
    }
}
@@ -5,6 +5,8 @@
5
5
  export { GoogleVertexAI } from './googleVertexAI.js';
6
6
  export { AmazonBedrock } from './amazonBedrock.js';
7
7
  export { OpenAI } from './openAI.js';
8
+ export { AnthropicProvider } from './anthropic.js';
9
+ export { AzureOpenAIProvider } from './azureOpenAI.js';
8
10
  export type { AIProvider } from '../core/types.js';
9
11
  /**
10
12
  * Provider registry for dynamic provider instantiation
@@ -13,6 +15,8 @@ export declare const PROVIDERS: {
13
15
  readonly vertex: "GoogleVertexAI";
14
16
  readonly bedrock: "AmazonBedrock";
15
17
  readonly openai: "OpenAI";
18
+ readonly anthropic: "AnthropicProvider";
19
+ readonly azure: "AzureOpenAIProvider";
16
20
  };
17
21
  /**
18
22
  * Type for valid provider names
@@ -5,13 +5,17 @@
5
5
  export { GoogleVertexAI } from './googleVertexAI.js';
6
6
  export { AmazonBedrock } from './amazonBedrock.js';
7
7
  export { OpenAI } from './openAI.js';
8
+ export { AnthropicProvider } from './anthropic.js';
9
+ export { AzureOpenAIProvider } from './azureOpenAI.js';
8
10
  /**
9
11
  * Provider registry for dynamic provider instantiation
10
12
  */
11
13
  export const PROVIDERS = {
12
14
  vertex: 'GoogleVertexAI',
13
15
  bedrock: 'AmazonBedrock',
14
- openai: 'OpenAI'
16
+ openai: 'OpenAI',
17
+ anthropic: 'AnthropicProvider',
18
+ azure: 'AzureOpenAIProvider'
15
19
  };
16
20
  /**
17
21
  * List of all available provider names
package/package.json CHANGED
@@ -1,98 +1,94 @@
1
1
  {
2
- "name": "@juspay/neurolink",
3
- "version": "1.2.3",
4
- "description": "AI toolkit with multi-provider support for OpenAI, Amazon Bedrock, and Google Vertex AI",
5
- "author": "Juspay Technologies",
6
- "license": "MIT",
7
- "scripts": {
8
- "dev": "vite dev",
9
- "build": "vite build && npm run prepack",
10
- "build:cli": "tsc src/cli/index.ts --outDir dist/cli --target es2022 --module esnext --moduleResolution bundler --allowImportingTsExtensions false --resolveJsonModule --esModuleInterop --allowSyntheticDefaultImports --strict --rootDir src/cli",
11
- "cli": "node dist/cli/index.js",
12
- "preview": "vite preview",
13
- "prepare": "svelte-kit sync || echo ''",
14
- "prepack": "svelte-kit sync && svelte-package && npm run build:cli && publint",
15
- "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
16
- "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch",
17
- "test": "vitest",
18
- "test:run": "vitest run",
19
- "lint": "prettier --check . && eslint .",
20
- "format": "prettier --write .",
21
- "changeset": "changeset",
22
- "changeset:version": "changeset version && git add --all",
23
- "publish": "npm run build && npm publish --access public"
24
- },
25
- "files": [
26
- "dist",
27
- "!dist/**/*.test.*",
28
- "!dist/**/*.spec.*"
29
- ],
30
- "sideEffects": [
31
- "**/*.css"
32
- ],
33
- "svelte": "./dist/index.js",
34
- "types": "./dist/index.d.ts",
35
- "type": "module",
36
- "main": "./dist/index.js",
37
- "bin": {
38
- "neurolink": "./dist/cli/index.js"
39
- },
40
- "exports": {
41
- ".": {
42
- "types": "./dist/index.d.ts",
43
- "svelte": "./dist/index.js",
44
- "import": "./dist/index.js",
45
- "default": "./dist/index.js"
46
- }
47
- },
48
- "peerDependencies": {
49
- "@ai-sdk/amazon-bedrock": "^1.0.0",
50
- "@ai-sdk/google-vertex": "^2.2.0",
51
- "@ai-sdk/openai": "^1.0.0",
52
- "ai": "^4.0.0",
53
- "zod": "^3.22.0"
54
- },
55
- "dependencies": {
56
- "chalk": "^5.3.0",
57
- "ora": "^7.0.1",
58
- "playwright": "^1.52.0",
59
- "yargs": "^17.7.2"
60
- },
61
- "devDependencies": {
62
- "@changesets/cli": "^2.26.2",
63
- "@sveltejs/adapter-auto": "^6.0.0",
64
- "@sveltejs/kit": "^2.16.0",
65
- "@sveltejs/package": "^2.0.0",
66
- "@sveltejs/vite-plugin-svelte": "^5.0.0",
67
- "@types/node": "^20.0.0",
68
- "@types/yargs": "^17.0.33",
69
- "eslint": "^9.0.0",
70
- "prettier": "^3.0.0",
71
- "publint": "^0.3.2",
72
- "svelte": "^5.0.0",
73
- "svelte-check": "^4.0.0",
74
- "tslib": "^2.4.1",
75
- "typescript": "^5.0.0",
76
- "vite": "^6.2.6",
77
- "vitest": "^2.0.0"
78
- },
79
- "keywords": [
80
- "ai",
81
- "llm",
82
- "openai",
83
- "anthropic",
84
- "google",
85
- "bedrock",
86
- "vertex",
87
- "streaming",
88
- "tools",
89
- "neurolink",
90
- "juspay",
91
- "svelte"
92
- ],
93
- "pnpm": {
94
- "onlyBuiltDependencies": [
95
- "esbuild"
96
- ]
97
- }
98
- }
2
+ "name": "@juspay/neurolink",
3
+ "version": "1.2.4",
4
+ "description": "AI toolkit with multi-provider support for OpenAI, Amazon Bedrock, and Google Vertex AI",
5
+ "author": "Juspay Technologies",
6
+ "license": "MIT",
7
+ "files": [
8
+ "dist",
9
+ "!dist/**/*.test.*",
10
+ "!dist/**/*.spec.*"
11
+ ],
12
+ "sideEffects": [
13
+ "**/*.css"
14
+ ],
15
+ "svelte": "./dist/index.js",
16
+ "types": "./dist/index.d.ts",
17
+ "type": "module",
18
+ "main": "./dist/index.js",
19
+ "bin": {
20
+ "neurolink": "./dist/cli/index.js"
21
+ },
22
+ "exports": {
23
+ ".": {
24
+ "types": "./dist/index.d.ts",
25
+ "svelte": "./dist/index.js",
26
+ "import": "./dist/index.js",
27
+ "default": "./dist/index.js"
28
+ }
29
+ },
30
+ "peerDependencies": {
31
+ "@ai-sdk/amazon-bedrock": "^1.0.0",
32
+ "@ai-sdk/google-vertex": "^2.2.0",
33
+ "@ai-sdk/openai": "^1.0.0",
34
+ "ai": "^4.0.0",
35
+ "zod": "^3.22.0"
36
+ },
37
+ "dependencies": {
38
+ "chalk": "^5.3.0",
39
+ "dotenv": "^16.5.0",
40
+ "inquirer": "^9.2.15",
41
+ "ora": "^7.0.1",
42
+ "playwright": "^1.52.0",
43
+ "yargs": "^17.7.2",
44
+ "zod": "^3.22.0"
45
+ },
46
+ "devDependencies": {
47
+ "@changesets/cli": "^2.26.2",
48
+ "@sveltejs/adapter-auto": "^6.0.0",
49
+ "@sveltejs/kit": "^2.16.0",
50
+ "@sveltejs/package": "^2.0.0",
51
+ "@sveltejs/vite-plugin-svelte": "^5.0.0",
52
+ "@types/inquirer": "^9.0.7",
53
+ "@types/node": "^20.0.0",
54
+ "@types/yargs": "^17.0.33",
55
+ "eslint": "^9.0.0",
56
+ "prettier": "^3.0.0",
57
+ "publint": "^0.3.2",
58
+ "svelte": "^5.0.0",
59
+ "svelte-check": "^4.0.0",
60
+ "tslib": "^2.4.1",
61
+ "typescript": "^5.0.0",
62
+ "vite": "^6.2.6",
63
+ "vitest": "^2.0.0"
64
+ },
65
+ "keywords": [
66
+ "ai",
67
+ "llm",
68
+ "openai",
69
+ "anthropic",
70
+ "google",
71
+ "bedrock",
72
+ "vertex",
73
+ "streaming",
74
+ "tools",
75
+ "neurolink",
76
+ "juspay",
77
+ "svelte"
78
+ ],
79
+ "scripts": {
80
+ "dev": "vite dev",
81
+ "build": "vite build && pnpm run prepack",
82
+ "build:cli": "echo 'Building CLI...' && tsc src/cli/index.ts --outDir dist/cli --target es2022 --module esnext --moduleResolution bundler --allowImportingTsExtensions false --resolveJsonModule --esModuleInterop --allowSyntheticDefaultImports --strict --rootDir src/cli",
83
+ "cli": "node dist/cli/index.js",
84
+ "preview": "vite preview",
85
+ "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
86
+ "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch",
87
+ "test": "vitest",
88
+ "test:run": "vitest run",
89
+ "lint": "prettier --check . && eslint .",
90
+ "format": "prettier --write .",
91
+ "changeset": "changeset",
92
+ "changeset:version": "changeset version && git add --all"
93
+ }
94
+ }