neural-ai-sdk 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +201 -0
- package/dist/index.d.ts +21 -0
- package/dist/index.js +49 -0
- package/dist/models/base-model.d.ts +9 -0
- package/dist/models/base-model.js +15 -0
- package/dist/models/deepseek-model.d.ts +9 -0
- package/dist/models/deepseek-model.js +105 -0
- package/dist/models/google-model.d.ts +10 -0
- package/dist/models/google-model.js +61 -0
- package/dist/models/huggingface-model.d.ts +9 -0
- package/dist/models/huggingface-model.js +72 -0
- package/dist/models/ollama-model.d.ts +9 -0
- package/dist/models/ollama-model.js +74 -0
- package/dist/models/openai-model.d.ts +9 -0
- package/dist/models/openai-model.js +79 -0
- package/dist/types.d.ts +34 -0
- package/dist/types.js +11 -0
- package/dist/utils.d.ts +19 -0
- package/dist/utils.js +44 -0
- package/package.json +66 -0
package/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 NeuralArc

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
package/README.md
ADDED
@@ -0,0 +1,201 @@
# Neural AI SDK

A unified JavaScript/TypeScript SDK for interacting with various AI LLM providers. This SDK allows you to integrate multiple AI models from different organizations through a single consistent interface.

## Supported AI Providers

- OpenAI (GPT models)
- Google (Gemini models)
- DeepSeek
- Ollama (local models)
- HuggingFace

## Installation

```bash
npm install neural-ai-sdk
```

## Usage

### Basic Example

```typescript
import { NeuralAI, AIProvider } from "neural-ai-sdk";

// Create an OpenAI model
const openaiModel = NeuralAI.createModel(AIProvider.OPENAI, {
  apiKey: "your-openai-api-key", // Optional if OPENAI_API_KEY environment variable is set
  model: "gpt-4",
});

// Generate a response
async function generateResponse() {
  const response = await openaiModel.generate({
    prompt: "What is artificial intelligence?",
    systemPrompt: "You are a helpful AI assistant.",
  });

  console.log(response.text);
}

generateResponse();
```

### Environment Variables Support

You can provide API keys and base URLs through environment variables instead of directly in code:

```typescript
// No need to provide API keys in code if they're set as environment variables
const openaiModel = NeuralAI.createModel(AIProvider.OPENAI, {
  model: "gpt-4",
});

const googleModel = NeuralAI.createModel(AIProvider.GOOGLE, {
  model: "gemini-pro",
});
```

Available environment variables:

| Provider    | API Key Variable      | Base URL Variable (optional) |
| ----------- | --------------------- | ---------------------------- |
| OpenAI      | `OPENAI_API_KEY`      | -                            |
| Google      | `GOOGLE_API_KEY`      | -                            |
| DeepSeek    | `DEEPSEEK_API_KEY`    | `DEEPSEEK_BASE_URL`          |
| HuggingFace | `HUGGINGFACE_API_KEY` | `HUGGINGFACE_BASE_URL`       |
| Ollama      | -                     | `OLLAMA_BASE_URL`            |

### Using Streaming

```typescript
import { NeuralAI, AIProvider } from "neural-ai-sdk";

// Create a Google model
const googleModel = NeuralAI.createModel(AIProvider.GOOGLE, {
  apiKey: "your-google-api-key",
  model: "gemini-pro",
});

// Stream a response
async function streamResponse() {
  const stream = googleModel.stream({
    prompt: "Write a short story about AI.",
  });

  for await (const chunk of stream) {
    process.stdout.write(chunk);
  }
}

streamResponse();
```

### Working With Different Providers

```typescript
import { NeuralAI, AIProvider } from "neural-ai-sdk";

// Create Ollama model (for local inference)
const ollamaModel = NeuralAI.createModel(AIProvider.OLLAMA, {
  // baseURL is optional if OLLAMA_BASE_URL environment variable is set
  model: "llama2",
});

// Create HuggingFace model
const huggingfaceModel = NeuralAI.createModel(AIProvider.HUGGINGFACE, {
  // apiKey is optional if HUGGINGFACE_API_KEY environment variable is set
  model: "meta-llama/Llama-2-7b-chat-hf",
});

// Create DeepSeek model
const deepseekModel = NeuralAI.createModel(AIProvider.DEEPSEEK, {
  // apiKey is optional if DEEPSEEK_API_KEY environment variable is set
  model: "deepseek-chat",
});
```

## Environment Configuration

You can set up environment variables by:

1. Creating a `.env` file in your project root
2. Setting environment variables in your deployment platform
3. Setting them in your system environment

Example `.env` file:

```
OPENAI_API_KEY=your_openai_key_here
GOOGLE_API_KEY=your_google_key_here
DEEPSEEK_API_KEY=your_deepseek_key_here
HUGGINGFACE_API_KEY=your_huggingface_key_here
OLLAMA_BASE_URL=http://localhost:11434/api
```

Make sure to load environment variables from your `.env` file using a package like `dotenv`:

```javascript
require("dotenv").config();
```

## Configuration Options

All models accept the following configuration options:

| Option        | Description                                                                     |
| ------------- | ------------------------------------------------------------------------------- |
| `apiKey`      | API key for authentication (optional if set as environment variable)           |
| `baseURL`     | Base URL for the API (optional, uses environment variable or default endpoint) |
| `model`       | The model to use (optional, each provider has a default)                       |
| `temperature` | Controls randomness (0.0 to 1.0)                                                |
| `maxTokens`   | Maximum number of tokens to generate                                           |
| `topP`        | Nucleus sampling parameter                                                      |
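
Taken together, a fully specified model might look like the following sketch; every field is optional and the values are placeholders, not recommendations:

```typescript
import { NeuralAI, AIProvider } from "neural-ai-sdk";

// Placeholder values only; any of these fields can be omitted.
const configuredModel = NeuralAI.createModel(AIProvider.DEEPSEEK, {
  apiKey: "your-deepseek-api-key",        // falls back to DEEPSEEK_API_KEY
  baseURL: "https://api.deepseek.com/v1", // falls back to DEEPSEEK_BASE_URL, then the default
  model: "deepseek-chat",
  temperature: 0.2,
  maxTokens: 256,
  topP: 0.9,
});
```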

## Using Request Options

You can provide options at the time of the request that override the model's default configuration:

```typescript
const response = await openaiModel.generate({
  prompt: "Explain quantum computing",
  options: {
    temperature: 0.7,
    maxTokens: 500,
  },
});
```

## Advanced Usage

### Access Raw API Responses

Each response includes a `raw` property with the full response data from the provider:

```typescript
const response = await openaiModel.generate({
  prompt: "Summarize machine learning",
});

// Access the raw response data
console.log(response.raw);
```

### Response Usage Information

When available, you can access token usage information:

```typescript
const response = await openaiModel.generate({
  prompt: "Explain neural networks",
});

console.log(`Prompt tokens: ${response.usage?.promptTokens}`);
console.log(`Completion tokens: ${response.usage?.completionTokens}`);
console.log(`Total tokens: ${response.usage?.totalTokens}`);
```

## License

MIT
package/dist/index.d.ts
ADDED
@@ -0,0 +1,21 @@
export { AIProvider, type AIModelConfig, type AIModelRequest, type AIModelResponse, type AIModel, } from "./types";
export { OpenAIModel } from "./models/openai-model";
export { GoogleModel } from "./models/google-model";
export { DeepSeekModel } from "./models/deepseek-model";
export { OllamaModel } from "./models/ollama-model";
export { HuggingFaceModel } from "./models/huggingface-model";
import { AIProvider, AIModelConfig } from "./types";
import { OpenAIModel } from "./models/openai-model";
import { GoogleModel } from "./models/google-model";
import { DeepSeekModel } from "./models/deepseek-model";
import { OllamaModel } from "./models/ollama-model";
import { HuggingFaceModel } from "./models/huggingface-model";
export declare class NeuralAI {
    /**
     * Create an AI model instance based on the provider and configuration
     * @param provider The AI provider to use
     * @param config Configuration for the AI model
     * @returns An instance of the specified AI model
     */
    static createModel(provider: AIProvider, config: AIModelConfig): OpenAIModel | GoogleModel | DeepSeekModel | OllamaModel | HuggingFaceModel;
}
package/dist/index.js
ADDED
@@ -0,0 +1,49 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.NeuralAI = exports.HuggingFaceModel = exports.OllamaModel = exports.DeepSeekModel = exports.GoogleModel = exports.OpenAIModel = exports.AIProvider = void 0;
// Type exports
var types_1 = require("./types");
Object.defineProperty(exports, "AIProvider", { enumerable: true, get: function () { return types_1.AIProvider; } });
// Model implementations
var openai_model_1 = require("./models/openai-model");
Object.defineProperty(exports, "OpenAIModel", { enumerable: true, get: function () { return openai_model_1.OpenAIModel; } });
var google_model_1 = require("./models/google-model");
Object.defineProperty(exports, "GoogleModel", { enumerable: true, get: function () { return google_model_1.GoogleModel; } });
var deepseek_model_1 = require("./models/deepseek-model");
Object.defineProperty(exports, "DeepSeekModel", { enumerable: true, get: function () { return deepseek_model_1.DeepSeekModel; } });
var ollama_model_1 = require("./models/ollama-model");
Object.defineProperty(exports, "OllamaModel", { enumerable: true, get: function () { return ollama_model_1.OllamaModel; } });
var huggingface_model_1 = require("./models/huggingface-model");
Object.defineProperty(exports, "HuggingFaceModel", { enumerable: true, get: function () { return huggingface_model_1.HuggingFaceModel; } });
// Factory class for easier model creation
const types_2 = require("./types");
const openai_model_2 = require("./models/openai-model");
const google_model_2 = require("./models/google-model");
const deepseek_model_2 = require("./models/deepseek-model");
const ollama_model_2 = require("./models/ollama-model");
const huggingface_model_2 = require("./models/huggingface-model");
class NeuralAI {
    /**
     * Create an AI model instance based on the provider and configuration
     * @param provider The AI provider to use
     * @param config Configuration for the AI model
     * @returns An instance of the specified AI model
     */
    static createModel(provider, config) {
        switch (provider) {
            case types_2.AIProvider.OPENAI:
                return new openai_model_2.OpenAIModel(config);
            case types_2.AIProvider.GOOGLE:
                return new google_model_2.GoogleModel(config);
            case types_2.AIProvider.DEEPSEEK:
                return new deepseek_model_2.DeepSeekModel(config);
            case types_2.AIProvider.OLLAMA:
                return new ollama_model_2.OllamaModel(config);
            case types_2.AIProvider.HUGGINGFACE:
                return new huggingface_model_2.HuggingFaceModel(config);
            default:
                throw new Error(`Unsupported AI provider: ${provider}`);
        }
    }
}
exports.NeuralAI = NeuralAI;
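
The factory above is the package's convenience entry point; a minimal consumer sketch follows (model names are illustrative and the API keys are assumed to come from environment variables):

```typescript
import { NeuralAI, AIProvider, type AIModel } from "neural-ai-sdk";

async function main() {
  // Each provider comes back as an AIModel, so downstream code can stay provider-agnostic.
  const models: AIModel[] = [
    NeuralAI.createModel(AIProvider.OPENAI, { model: "gpt-4" }),  // needs OPENAI_API_KEY
    NeuralAI.createModel(AIProvider.OLLAMA, { model: "llama2" }), // needs a running Ollama server
  ];

  for (const model of models) {
    const { text } = await model.generate({ prompt: "Say hello." });
    console.log(model.provider, text);
  }
}

main();
```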
package/dist/models/base-model.d.ts
ADDED
@@ -0,0 +1,9 @@
import { AIModel, AIModelConfig, AIModelRequest, AIModelResponse, AIProvider } from "../types";
export declare abstract class BaseModel implements AIModel {
    protected config: AIModelConfig;
    abstract provider: AIProvider;
    constructor(config: AIModelConfig);
    abstract generate(request: AIModelRequest): Promise<AIModelResponse>;
    abstract stream(request: AIModelRequest): AsyncGenerator<string, void, unknown>;
    protected mergeConfig(options?: Partial<AIModelConfig>): AIModelConfig;
}
package/dist/models/base-model.js
ADDED
@@ -0,0 +1,15 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.BaseModel = void 0;
class BaseModel {
    constructor(config) {
        this.config = config;
    }
    mergeConfig(options) {
        return {
            ...this.config,
            ...(options || {}),
        };
    }
}
exports.BaseModel = BaseModel;
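
BaseModel only stores the constructor config and exposes `mergeConfig`, which overlays per-request `options` on top of it. A hypothetical subclass (not part of this package, written as if it sat next to the other models in the source tree) illustrates the contract the five providers follow:

```typescript
// Hypothetical provider used only to illustrate the BaseModel contract.
import { AIModelRequest, AIModelResponse, AIProvider } from "../types";
import { BaseModel } from "./base-model";

export class EchoModel extends BaseModel {
  // Placeholder provider tag; a real provider would add its own AIProvider member.
  readonly provider = AIProvider.OLLAMA;

  async generate(request: AIModelRequest): Promise<AIModelResponse> {
    // Per-request options override the construction-time config.
    const config = this.mergeConfig(request.options);
    return { text: `[${config.model ?? "echo"}] ${request.prompt}`, raw: null };
  }

  async *stream(request: AIModelRequest): AsyncGenerator<string, void, unknown> {
    const { text } = await this.generate(request);
    yield text;
  }
}
```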
package/dist/models/deepseek-model.d.ts
ADDED
@@ -0,0 +1,9 @@
import { AIModelConfig, AIModelRequest, AIModelResponse, AIProvider } from "../types";
import { BaseModel } from "./base-model";
export declare class DeepSeekModel extends BaseModel {
    readonly provider = AIProvider.DEEPSEEK;
    private baseURL;
    constructor(config: AIModelConfig);
    generate(request: AIModelRequest): Promise<AIModelResponse>;
    stream(request: AIModelRequest): AsyncGenerator<string, void, unknown>;
}
package/dist/models/deepseek-model.js
ADDED
@@ -0,0 +1,105 @@
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.DeepSeekModel = void 0;
const axios_1 = __importDefault(require("axios"));
const types_1 = require("../types");
const base_model_1 = require("./base-model");
const utils_1 = require("../utils");
class DeepSeekModel extends base_model_1.BaseModel {
    constructor(config) {
        super(config);
        this.provider = types_1.AIProvider.DEEPSEEK;
        const apiKey = (0, utils_1.getApiKey)(config.apiKey, "DEEPSEEK_API_KEY", "DeepSeek");
        this.baseURL = (0, utils_1.getBaseUrl)(config.baseURL, "DEEPSEEK_BASE_URL", "https://api.deepseek.com/v1");
    }
    async generate(request) {
        const config = this.mergeConfig(request.options);
        const messages = [];
        if (request.systemPrompt) {
            messages.push({
                role: "system",
                content: request.systemPrompt,
            });
        }
        messages.push({
            role: "user",
            content: request.prompt,
        });
        const response = await axios_1.default.post(`${this.baseURL}/chat/completions`, {
            model: config.model || "deepseek-chat",
            messages,
            temperature: config.temperature,
            max_tokens: config.maxTokens,
            top_p: config.topP,
        }, {
            headers: {
                "Content-Type": "application/json",
                Authorization: `Bearer ${config.apiKey ||
                    (0, utils_1.getApiKey)(config.apiKey, "DEEPSEEK_API_KEY", "DeepSeek")}`,
            },
        });
        return {
            text: response.data.choices[0].message.content,
            usage: {
                promptTokens: response.data.usage?.prompt_tokens,
                completionTokens: response.data.usage?.completion_tokens,
                totalTokens: response.data.usage?.total_tokens,
            },
            raw: response.data,
        };
    }
    async *stream(request) {
        const config = this.mergeConfig(request.options);
        const messages = [];
        if (request.systemPrompt) {
            messages.push({
                role: "system",
                content: request.systemPrompt,
            });
        }
        messages.push({
            role: "user",
            content: request.prompt,
        });
        const response = await axios_1.default.post(`${this.baseURL}/chat/completions`, {
            model: config.model || "deepseek-chat",
            messages,
            temperature: config.temperature,
            max_tokens: config.maxTokens,
            top_p: config.topP,
            stream: true,
        }, {
            headers: {
                "Content-Type": "application/json",
                Authorization: `Bearer ${config.apiKey ||
                    (0, utils_1.getApiKey)(config.apiKey, "DEEPSEEK_API_KEY", "DeepSeek")}`,
            },
            responseType: "stream",
        });
        const reader = response.data;
        for await (const chunk of reader) {
            const lines = chunk.toString().split("\n").filter(Boolean);
            for (const line of lines) {
                if (line.startsWith("data: ")) {
                    const data = line.slice(6);
                    if (data === "[DONE]")
                        continue;
                    try {
                        const parsed = JSON.parse(data);
                        const content = parsed.choices[0]?.delta?.content;
                        if (content) {
                            yield content;
                        }
                    }
                    catch (error) {
                        console.error("Error parsing DeepSeek stream data:", error);
                    }
                }
            }
        }
    }
}
exports.DeepSeekModel = DeepSeekModel;
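
The stream method above yields only the parsed `delta.content` strings from each SSE line, so a caller can both print and accumulate them. A small consumer sketch (assumes `DEEPSEEK_API_KEY` is set; the prompt is arbitrary):

```typescript
import { DeepSeekModel } from "neural-ai-sdk";

// Assumes DEEPSEEK_API_KEY is set in the environment.
const deepseek = new DeepSeekModel({ model: "deepseek-chat" });

async function collect(prompt: string): Promise<string> {
  let full = "";
  for await (const chunk of deepseek.stream({ prompt })) {
    process.stdout.write(chunk); // each chunk is one parsed delta.content string
    full += chunk;
  }
  return full;
}

collect("List three common uses of text embeddings.").then((full) =>
  console.log(`\n--- streamed ${full.length} characters ---`)
);
```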
package/dist/models/google-model.d.ts
ADDED
@@ -0,0 +1,10 @@
import { AIModelConfig, AIModelRequest, AIModelResponse, AIProvider } from "../types";
import { BaseModel } from "./base-model";
export declare class GoogleModel extends BaseModel {
    readonly provider = AIProvider.GOOGLE;
    private client;
    constructor(config: AIModelConfig);
    generate(request: AIModelRequest): Promise<AIModelResponse>;
    stream(request: AIModelRequest): AsyncGenerator<string, void, unknown>;
    private formatPrompt;
}
package/dist/models/google-model.js
ADDED
@@ -0,0 +1,61 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.GoogleModel = void 0;
const generative_ai_1 = require("@google/generative-ai");
const types_1 = require("../types");
const base_model_1 = require("./base-model");
const utils_1 = require("../utils");
class GoogleModel extends base_model_1.BaseModel {
    constructor(config) {
        super(config);
        this.provider = types_1.AIProvider.GOOGLE;
        const apiKey = (0, utils_1.getApiKey)(config.apiKey, "GOOGLE_API_KEY", "Google");
        this.client = new generative_ai_1.GoogleGenerativeAI(apiKey);
    }
    async generate(request) {
        const config = this.mergeConfig(request.options);
        const model = this.client.getGenerativeModel({
            model: config.model || "gemini-2.0-flash", // Updated default model
            generationConfig: {
                temperature: config.temperature,
                maxOutputTokens: config.maxTokens,
                topP: config.topP,
            },
        });
        const prompt = this.formatPrompt(request);
        const result = await model.generateContent(prompt);
        const response = result.response;
        return {
            text: response.text(),
            raw: response,
        };
    }
    async *stream(request) {
        const config = this.mergeConfig(request.options);
        const model = this.client.getGenerativeModel({
            model: config.model || "gemini-2.0-flash", // Updated default model
            generationConfig: {
                temperature: config.temperature,
                maxOutputTokens: config.maxTokens,
                topP: config.topP,
            },
        });
        const prompt = this.formatPrompt(request);
        const result = await model.generateContentStream(prompt);
        for await (const chunk of result.stream) {
            const text = chunk.text();
            if (text) {
                yield text;
            }
        }
    }
    formatPrompt(request) {
        const parts = [];
        if (request.systemPrompt) {
            parts.push(request.systemPrompt);
        }
        parts.push(request.prompt);
        return parts;
    }
}
exports.GoogleModel = GoogleModel;
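
Note that `generate` here returns only `text` and `raw`; unlike the OpenAI, DeepSeek, and Ollama models it does not populate `usage`, and the system prompt is simply passed as an extra content part ahead of the user prompt. A short sketch (assumes `GOOGLE_API_KEY` is set; the model name is illustrative):

```typescript
import { NeuralAI, AIProvider } from "neural-ai-sdk";

// Assumes GOOGLE_API_KEY is set; the model name is illustrative.
const gemini = NeuralAI.createModel(AIProvider.GOOGLE, { model: "gemini-2.0-flash" });

async function run() {
  const response = await gemini.generate({
    prompt: "Give one sentence on transformers.",
    systemPrompt: "Answer briefly.", // prepended as a separate part by formatPrompt()
  });
  console.log(response.text);
  // GoogleModel does not fill in usage, so guard any token accounting.
  console.log(response.usage?.totalTokens ?? "usage not reported");
}

run();
```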
package/dist/models/huggingface-model.d.ts
ADDED
@@ -0,0 +1,9 @@
import { AIModelConfig, AIModelRequest, AIModelResponse, AIProvider } from "../types";
import { BaseModel } from "./base-model";
export declare class HuggingFaceModel extends BaseModel {
    readonly provider = AIProvider.HUGGINGFACE;
    private baseURL;
    constructor(config: AIModelConfig);
    generate(request: AIModelRequest): Promise<AIModelResponse>;
    stream(request: AIModelRequest): AsyncGenerator<string, void, unknown>;
}
package/dist/models/huggingface-model.js
ADDED
@@ -0,0 +1,72 @@
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.HuggingFaceModel = void 0;
const axios_1 = __importDefault(require("axios"));
const types_1 = require("../types");
const base_model_1 = require("./base-model");
const utils_1 = require("../utils");
class HuggingFaceModel extends base_model_1.BaseModel {
    constructor(config) {
        super(config);
        this.provider = types_1.AIProvider.HUGGINGFACE;
        const apiKey = (0, utils_1.getApiKey)(config.apiKey, "HUGGINGFACE_API_KEY", "HuggingFace");
        this.baseURL = (0, utils_1.getBaseUrl)(config.baseURL, "HUGGINGFACE_BASE_URL", "https://api-inference.huggingface.co/models");
    }
    async generate(request) {
        const config = this.mergeConfig(request.options);
        const model = config.model || "meta-llama/Llama-2-7b-chat-hf";
        let fullPrompt = request.prompt;
        if (request.systemPrompt) {
            fullPrompt = `${request.systemPrompt}\n\n${fullPrompt}`;
        }
        const payload = {
            inputs: fullPrompt,
            parameters: {
                temperature: config.temperature,
                max_new_tokens: config.maxTokens,
                top_p: config.topP,
                return_full_text: false,
            },
        };
        const response = await axios_1.default.post(`${this.baseURL}/${model}`, payload, {
            headers: {
                Authorization: `Bearer ${config.apiKey ||
                    (0, utils_1.getApiKey)(config.apiKey, "HUGGINGFACE_API_KEY", "HuggingFace")}`,
                "Content-Type": "application/json",
            },
        });
        // HuggingFace can return different formats depending on the model
        let text = "";
        if (Array.isArray(response.data)) {
            text = response.data[0]?.generated_text || "";
        }
        else if (response.data.generated_text) {
            text = response.data.generated_text;
        }
        else {
            text = JSON.stringify(response.data);
        }
        return {
            text,
            raw: response.data,
        };
    }
    async *stream(request) {
        // HuggingFace Inference API doesn't natively support streaming for all models
        // We'll implement a basic chunking on top of the non-streaming API
        const response = await this.generate(request);
        // Simple chunking for demonstration purposes
        const chunkSize = 10;
        const text = response.text;
        for (let i = 0; i < text.length; i += chunkSize) {
            const chunk = text.slice(i, i + chunkSize);
            yield chunk;
            // Add a small delay to simulate streaming
            await new Promise((resolve) => setTimeout(resolve, 10));
        }
    }
}
exports.HuggingFaceModel = HuggingFaceModel;
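
Because the Inference API does not stream for every model, `stream` here is simulated: it makes one `generate` call and re-yields the finished text in roughly 10-character slices with a short delay. Consumers can still use the same `for await` pattern; a sketch (assumes `HUGGINGFACE_API_KEY` is set; the model id is illustrative):

```typescript
import { HuggingFaceModel } from "neural-ai-sdk";

// Assumes HUGGINGFACE_API_KEY is set; the model id is illustrative.
const hf = new HuggingFaceModel({ model: "meta-llama/Llama-2-7b-chat-hf" });

async function run() {
  // Chunks come from a single completed response, not token by token.
  for await (const piece of hf.stream({ prompt: "Define overfitting in one sentence." })) {
    process.stdout.write(piece);
  }
}

run();
```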
package/dist/models/ollama-model.d.ts
ADDED
@@ -0,0 +1,9 @@
import { AIModelConfig, AIModelRequest, AIModelResponse, AIProvider } from "../types";
import { BaseModel } from "./base-model";
export declare class OllamaModel extends BaseModel {
    readonly provider = AIProvider.OLLAMA;
    private baseURL;
    constructor(config: AIModelConfig);
    generate(request: AIModelRequest): Promise<AIModelResponse>;
    stream(request: AIModelRequest): AsyncGenerator<string, void, unknown>;
}
package/dist/models/ollama-model.js
ADDED
@@ -0,0 +1,74 @@
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.OllamaModel = void 0;
const axios_1 = __importDefault(require("axios"));
const types_1 = require("../types");
const base_model_1 = require("./base-model");
const utils_1 = require("../utils");
class OllamaModel extends base_model_1.BaseModel {
    constructor(config) {
        super(config);
        this.provider = types_1.AIProvider.OLLAMA;
        this.baseURL = (0, utils_1.getBaseUrl)(config.baseURL, "OLLAMA_BASE_URL", "http://localhost:11434/api");
    }
    async generate(request) {
        const config = this.mergeConfig(request.options);
        let prompt = request.prompt;
        // Add system prompt if provided
        if (request.systemPrompt) {
            prompt = `${request.systemPrompt}\n\n${prompt}`;
        }
        const response = await axios_1.default.post(`${this.baseURL}/generate`, {
            model: config.model || "llama2",
            prompt,
            temperature: config.temperature,
            num_predict: config.maxTokens,
            top_p: config.topP,
        });
        return {
            text: response.data.response,
            usage: {
                promptTokens: response.data.prompt_eval_count,
                completionTokens: response.data.eval_count,
                totalTokens: response.data.prompt_eval_count + response.data.eval_count,
            },
            raw: response.data,
        };
    }
    async *stream(request) {
        const config = this.mergeConfig(request.options);
        let prompt = request.prompt;
        if (request.systemPrompt) {
            prompt = `${request.systemPrompt}\n\n${prompt}`;
        }
        const response = await axios_1.default.post(`${this.baseURL}/generate`, {
            model: config.model || "llama2",
            prompt,
            temperature: config.temperature,
            num_predict: config.maxTokens,
            top_p: config.topP,
            stream: true,
        }, {
            responseType: "stream",
        });
        const reader = response.data;
        for await (const chunk of reader) {
            const lines = chunk.toString().split("\n").filter(Boolean);
            for (const line of lines) {
                try {
                    const parsed = JSON.parse(line);
                    if (parsed.response) {
                        yield parsed.response;
                    }
                }
                catch (error) {
                    console.error("Error parsing Ollama stream data:", error);
                }
            }
        }
    }
}
exports.OllamaModel = OllamaModel;
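
No API key is involved here; the only endpoint configuration is `baseURL`, which falls back to `OLLAMA_BASE_URL` and then to `http://localhost:11434/api`. A sketch pointing at a non-local Ollama host (the address is a placeholder):

```typescript
import { OllamaModel } from "neural-ai-sdk";

// Placeholder host; omit baseURL entirely for a local Ollama install.
const ollama = new OllamaModel({
  baseURL: "http://192.168.1.50:11434/api",
  model: "llama2",
});

async function run() {
  const response = await ollama.generate({ prompt: "Why is the sky blue?" });
  // Ollama's eval counts are mapped onto the usage fields above.
  console.log(response.text);
  console.log(`total tokens: ${response.usage?.totalTokens}`);
}

run();
```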
package/dist/models/openai-model.d.ts
ADDED
@@ -0,0 +1,9 @@
import { AIModelConfig, AIModelRequest, AIModelResponse, AIProvider } from "../types";
import { BaseModel } from "./base-model";
export declare class OpenAIModel extends BaseModel {
    readonly provider = AIProvider.OPENAI;
    private client;
    constructor(config: AIModelConfig);
    generate(request: AIModelRequest): Promise<AIModelResponse>;
    stream(request: AIModelRequest): AsyncGenerator<string, void, unknown>;
}
package/dist/models/openai-model.js
ADDED
@@ -0,0 +1,79 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAIModel = void 0;
const openai_1 = require("openai");
const types_1 = require("../types");
const base_model_1 = require("./base-model");
const utils_1 = require("../utils");
class OpenAIModel extends base_model_1.BaseModel {
    constructor(config) {
        super(config);
        this.provider = types_1.AIProvider.OPENAI;
        const apiKey = (0, utils_1.getApiKey)(config.apiKey, "OPENAI_API_KEY", "OpenAI");
        this.client = new openai_1.OpenAI({
            apiKey: apiKey,
            baseURL: config.baseURL,
        });
    }
    async generate(request) {
        const config = this.mergeConfig(request.options);
        const messages = [];
        // Add system prompt if provided
        if (request.systemPrompt) {
            messages.push({
                role: "system",
                content: request.systemPrompt,
            });
        }
        // Add user prompt
        messages.push({
            role: "user",
            content: request.prompt,
        });
        const response = await this.client.chat.completions.create({
            model: config.model || "gpt-3.5-turbo",
            messages,
            temperature: config.temperature,
            max_tokens: config.maxTokens,
            top_p: config.topP,
        });
        return {
            text: response.choices[0].message.content || "",
            usage: {
                promptTokens: response.usage?.prompt_tokens,
                completionTokens: response.usage?.completion_tokens,
                totalTokens: response.usage?.total_tokens,
            },
            raw: response,
        };
    }
    async *stream(request) {
        const config = this.mergeConfig(request.options);
        const messages = [];
        if (request.systemPrompt) {
            messages.push({
                role: "system",
                content: request.systemPrompt,
            });
        }
        messages.push({
            role: "user",
            content: request.prompt,
        });
        const stream = await this.client.chat.completions.create({
            model: config.model || "gpt-3.5-turbo",
            messages,
            temperature: config.temperature,
            max_tokens: config.maxTokens,
            top_p: config.topP,
            stream: true,
        });
        for await (const chunk of stream) {
            const content = chunk.choices[0]?.delta?.content || "";
            if (content) {
                yield content;
            }
        }
    }
}
exports.OpenAIModel = OpenAIModel;
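
The constructor forwards `baseURL` straight to the OpenAI client, so this model can be pointed at an OpenAI-compatible endpoint rather than the default API host. A sketch (the URL is a placeholder, not something the package defaults to):

```typescript
import { OpenAIModel } from "neural-ai-sdk";

// Placeholder endpoint; any server that speaks the OpenAI chat-completions API should work.
const compatible = new OpenAIModel({
  apiKey: "your-api-key",
  baseURL: "https://example.com/v1",
  model: "gpt-3.5-turbo",
});

compatible
  .generate({ prompt: "Reply with the single word: ready." })
  .then((response) => console.log(response.text));
```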
package/dist/types.d.ts
ADDED
@@ -0,0 +1,34 @@
export interface AIModelConfig {
    apiKey?: string;
    baseURL?: string;
    model?: string;
    temperature?: number;
    maxTokens?: number;
    topP?: number;
}
export declare enum AIProvider {
    OPENAI = "openai",
    GOOGLE = "google",
    DEEPSEEK = "deepseek",
    OLLAMA = "ollama",
    HUGGINGFACE = "huggingface"
}
export interface AIModelResponse {
    text: string;
    usage?: {
        promptTokens?: number;
        completionTokens?: number;
        totalTokens?: number;
    };
    raw?: any;
}
export interface AIModelRequest {
    prompt: string;
    systemPrompt?: string;
    options?: Partial<AIModelConfig>;
}
export interface AIModel {
    provider: AIProvider;
    generate(request: AIModelRequest): Promise<AIModelResponse>;
    stream(request: AIModelRequest): AsyncGenerator<string, void, unknown>;
}
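
These interfaces are what make the SDK provider-agnostic: code written against `AIModel` accepts any of the five implementations. A minimal sketch:

```typescript
import { type AIModel, type AIModelRequest } from "neural-ai-sdk";

// Works with OpenAIModel, GoogleModel, DeepSeekModel, OllamaModel, or HuggingFaceModel.
async function ask(model: AIModel, request: AIModelRequest): Promise<string> {
  const response = await model.generate(request);
  return response.text;
}
```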
package/dist/types.js
ADDED
@@ -0,0 +1,11 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.AIProvider = void 0;
var AIProvider;
(function (AIProvider) {
    AIProvider["OPENAI"] = "openai";
    AIProvider["GOOGLE"] = "google";
    AIProvider["DEEPSEEK"] = "deepseek";
    AIProvider["OLLAMA"] = "ollama";
    AIProvider["HUGGINGFACE"] = "huggingface";
})(AIProvider || (exports.AIProvider = AIProvider = {}));
package/dist/utils.d.ts
ADDED
@@ -0,0 +1,19 @@
/**
 * Utilities for the Neural AI SDK
 */
/**
 * Get an API key from config or environment variables
 * @param configKey The API key from the config object
 * @param envVarName The name of the environment variable to check
 * @param providerName The name of the AI provider (for error messages)
 * @returns The API key if found, throws an error otherwise
 */
export declare function getApiKey(configKey: string | undefined, envVarName: string, providerName: string): string;
/**
 * Get a base URL from config or environment variables
 * @param configUrl The URL from the config object
 * @param envVarName The name of the environment variable to check
 * @param defaultUrl The default URL to use if not provided
 * @returns The base URL
 */
export declare function getBaseUrl(configUrl: string | undefined, envVarName: string, defaultUrl: string): string;
package/dist/utils.js
ADDED
@@ -0,0 +1,44 @@
"use strict";
/**
 * Utilities for the Neural AI SDK
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.getApiKey = getApiKey;
exports.getBaseUrl = getBaseUrl;
/**
 * Get an API key from config or environment variables
 * @param configKey The API key from the config object
 * @param envVarName The name of the environment variable to check
 * @param providerName The name of the AI provider (for error messages)
 * @returns The API key if found, throws an error otherwise
 */
function getApiKey(configKey, envVarName, providerName) {
    // First check if the API key is provided in the config
    if (configKey) {
        return configKey;
    }
    // Then check environment variables
    const envKey = process.env[envVarName];
    if (envKey) {
        return envKey;
    }
    // If no API key is found, throw a helpful error message
    throw new Error(`${providerName} API key is required.\n` +
        `Please provide it via the 'apiKey' option or set the ${envVarName} environment variable.\n` +
        `Example:\n` +
        `- In code: NeuralAI.createModel(AIProvider.${providerName.toUpperCase()}, { apiKey: "your-api-key" })\n` +
        `- In .env: ${envVarName}=your-api-key`);
}
/**
 * Get a base URL from config or environment variables
 * @param configUrl The URL from the config object
 * @param envVarName The name of the environment variable to check
 * @param defaultUrl The default URL to use if not provided
 * @returns The base URL
 */
function getBaseUrl(configUrl, envVarName, defaultUrl) {
    if (configUrl) {
        return configUrl;
    }
    return process.env[envVarName] || defaultUrl;
}
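
Both helpers resolve in the same order: an explicit config value wins, then the environment variable, then either the default URL or a thrown error. A sketch of that order (note the deep import into `dist`, which is an assumption rather than a documented entry point, since these helpers are not re-exported from the package root):

```typescript
// Deep import; getApiKey/getBaseUrl are compiled into dist but not exported from the index.
import { getApiKey, getBaseUrl } from "neural-ai-sdk/dist/utils";

process.env.DEEPSEEK_API_KEY = "env-key";

getApiKey("config-key", "DEEPSEEK_API_KEY", "DeepSeek"); // "config-key" (config wins)
getApiKey(undefined, "DEEPSEEK_API_KEY", "DeepSeek"); // "env-key" (environment fallback)
getBaseUrl(undefined, "UNSET_VAR", "https://api.deepseek.com/v1"); // the default URL
// getApiKey(undefined, "UNSET_VAR", "DeepSeek") would throw the descriptive error above.
```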
package/package.json
ADDED
@@ -0,0 +1,66 @@
{
  "name": "neural-ai-sdk",
  "version": "0.1.1",
  "description": "Unified SDK for interacting with various AI LLM providers",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
  "scripts": {
    "build": "tsc",
    "test": "jest",
    "lint": "eslint src --ext .ts",
    "prepare": "npm run build",
    "example": "ts-node examples/all-providers.ts",
    "clean": "rimraf dist",
    "publish": "npm publish"
  },
  "publishConfig": {
    "access": "public"
  },
  "repository": {
    "type": "git",
    "url": "git+https://github.com/NeuralArc/neural-ai-sdk.git"
  },
  "keywords": [
    "ai",
    "llm",
    "openai",
    "google",
    "deepseek",
    "ollama",
    "huggingface",
    "sdk",
    "api"
  ],
  "files": [
    "dist/**/*",
    "README.md",
    "LICENSE"
  ],
  "author": "NeuralArc",
  "license": "MIT",
  "bugs": {
    "url": "https://github.com/NeuralArc/neural-ai-sdk/issues"
  },
  "homepage": "https://github.com/NeuralArc/neural-ai-sdk#readme",
  "dependencies": {
    "@google/generative-ai": "^0.2.0",
    "axios": "^1.6.0",
    "openai": "^4.28.0"
  },
  "devDependencies": {
    "@types/jest": "^29.5.5",
    "@types/node": "^20.8.3",
    "@typescript-eslint/eslint-plugin": "^6.7.4",
    "@typescript-eslint/parser": "^6.7.4",
    "dotenv": "^16.3.1",
    "eslint": "^8.51.0",
    "jest": "^29.7.0",
    "rimraf": "^5.0.10",
    "ts-jest": "^29.1.1",
    "ts-node": "^10.9.2",
    "typescript": "^5.2.2"
  },
  "peerDependencies": {
    "dotenv": "^16.0.0"
  }
}
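
Since `dotenv` is listed only under `peerDependencies`, projects that load keys from a `.env` file as the README suggests should install it alongside the SDK (for example with `npm install neural-ai-sdk dotenv`); the runtime dependencies pulled in automatically are `@google/generative-ai`, `axios`, and `openai`.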