dank-ai 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/config.js ADDED
@@ -0,0 +1,180 @@
1
+ /**
2
+ * AgentConfig - Configuration management utilities
3
+ */
4
+
5
+ const Joi = require('joi');
6
+ const { SUPPORTED_LLMS } = require('./constants');
7
+
8
/**
 * AgentConfig - static utilities for validating agent configuration
 * (LLM settings, resource limits) and deriving runtime settings
 * (container environment variables, memory sizes in bytes).
 */
class AgentConfig {
  /**
   * Validate an LLM configuration object against the provider-specific
   * Joi schema and apply schema defaults.
   *
   * @param {object} config - Raw LLM config; `config.provider` selects the schema.
   * @returns {object} The validated config with schema defaults filled in.
   * @throws {Error} If `config.provider` has no schema, or validation fails.
   */
  static validateLLMConfig(config) {
    const schemas = {
      openai: Joi.object({
        provider: Joi.string().valid('openai').required(),
        // Empty key is allowed (e.g. when a proxy/baseURL supplies auth).
        apiKey: Joi.string().allow('').default(''),
        model: Joi.string().default('gpt-3.5-turbo'),
        baseURL: Joi.string().uri().optional(),
        temperature: Joi.number().min(0).max(2).default(0.7),
        maxTokens: Joi.number().min(1).default(1000),
        topP: Joi.number().min(0).max(1).optional(),
        frequencyPenalty: Joi.number().min(-2).max(2).optional(),
        presencePenalty: Joi.number().min(-2).max(2).optional()
      }),

      anthropic: Joi.object({
        provider: Joi.string().valid('anthropic').required(),
        apiKey: Joi.string().required(),
        model: Joi.string().default('claude-3-sonnet-20240229'),
        maxTokens: Joi.number().min(1).default(1000),
        temperature: Joi.number().min(0).max(1).default(0.7),
        topP: Joi.number().min(0).max(1).optional(),
        topK: Joi.number().min(0).optional()
      }),

      cohere: Joi.object({
        provider: Joi.string().valid('cohere').required(),
        apiKey: Joi.string().required(),
        model: Joi.string().default('command'),
        temperature: Joi.number().min(0).max(5).default(0.7),
        maxTokens: Joi.number().min(1).default(1000),
        k: Joi.number().min(0).optional(),
        p: Joi.number().min(0).max(1).optional()
      }),

      ollama: Joi.object({
        provider: Joi.string().valid('ollama').required(),
        baseURL: Joi.string().uri().default('http://localhost:11434'),
        model: Joi.string().required(),
        temperature: Joi.number().min(0).max(2).default(0.7),
        numCtx: Joi.number().min(1).optional(),
        numPredict: Joi.number().min(1).default(1000)
      }),

      custom: Joi.object({
        provider: Joi.string().valid('custom').required(),
        baseURL: Joi.string().uri().required(),
        apiKey: Joi.string().optional(),
        model: Joi.string().required(),
        headers: Joi.object().optional(),
        requestFormat: Joi.string().valid('openai', 'anthropic', 'custom').default('openai')
      })
    };

    // NOTE(review): constants.js lists 'huggingface' in SUPPORTED_LLMS, but
    // no schema exists for it here, so such configs are rejected — confirm
    // whether that provider is meant to be supported.
    const schema = schemas[config.provider];
    if (!schema) {
      throw new Error(`Unsupported LLM provider: ${config.provider}`);
    }

    const { error, value } = schema.validate(config);
    if (error) {
      throw new Error(`Invalid LLM configuration: ${error.message}`);
    }

    return value;
  }

  /**
   * Validate a resource-limits object (memory, cpu, timeouts) and apply
   * defaults.
   *
   * @param {object} resources - Raw resources config.
   * @returns {object} Validated config with defaults filled in.
   * @throws {Error} If validation fails (e.g. malformed memory string).
   */
  static validateResources(resources) {
    const schema = Joi.object({
      // Format matches parseMemory(): digits plus m/M (MiB) or g/G (GiB).
      memory: Joi.string()
        .pattern(/^\d+[mMgG]$/)
        .default('512m')
        .messages({
          'string.pattern.base': 'Memory must be in format like "512m" or "1g"'
        }),
      cpu: Joi.number().min(0.1).max(32).default(1),
      timeout: Joi.number().min(1000).default(30000),
      maxRestarts: Joi.number().min(0).default(3),
      healthCheckInterval: Joi.number().min(1000).default(10000)
    });

    const { error, value } = schema.validate(resources);
    if (error) {
      throw new Error(`Invalid resource configuration: ${error.message}`);
    }

    return value;
  }

  /**
   * Convert a memory size string like "512m" or "1g" to bytes.
   * Units are case-insensitive: m/M = mebibytes, g/G = gibibytes.
   *
   * @param {string} memoryStr - e.g. "512m", "2G".
   * @returns {number} Size in bytes.
   * @throws {Error} If the string does not match /^\d+[mMgG]$/.
   */
  static parseMemory(memoryStr) {
    const match = memoryStr.match(/^(\d+)([mMgG])$/);
    if (!match) {
      throw new Error('Invalid memory format');
    }

    const [, amount, unit] = match;
    const multipliers = {
      m: 1024 * 1024,
      M: 1024 * 1024,
      g: 1024 * 1024 * 1024,
      G: 1024 * 1024 * 1024
    };

    // Explicit radix; the regex above already guarantees decimal digits.
    return Number.parseInt(amount, 10) * multipliers[unit];
  }

  /**
   * Generate the environment-variable map for an agent container from the
   * agent's configuration. All values are strings (numbers/booleans are
   * stringified); user-supplied `config.environment` entries can override
   * the base variables via the spread below.
   *
   * @param {object} agent - Agent with `name`, `id`, and `config`
   *   (`llm`, `prompt`, `environment`, optional `http`, `communication`,
   *   `docker` sections).
   * @returns {Object<string, string>} Environment variables for the container.
   */
  static generateContainerEnv(agent) {
    const env = {
      AGENT_NAME: agent.name,
      AGENT_ID: agent.id,
      LLM_PROVIDER: agent.config.llm?.provider || 'openai',
      LLM_MODEL: agent.config.llm?.model || 'gpt-3.5-turbo',
      AGENT_PROMPT: agent.config.prompt,
      NODE_ENV: process.env.NODE_ENV || 'production',
      ...agent.config.environment
    };

    // Add LLM-specific environment variables
    if (agent.config.llm?.apiKey) {
      env.LLM_API_KEY = agent.config.llm.apiKey;
    }
    if (agent.config.llm?.baseURL) {
      env.LLM_BASE_URL = agent.config.llm.baseURL;
    }

    // Add HTTP server environment variables.
    // NOTE(review): assumes http.port, http.host and http.cors are always
    // present when http.enabled is true (otherwise .toString() throws) —
    // confirm this is validated upstream.
    if (agent.config.http && agent.config.http.enabled) {
      env.HTTP_ENABLED = 'true';
      env.HTTP_PORT = agent.config.http.port.toString();
      env.HTTP_HOST = agent.config.http.host;
      env.HTTP_CORS = agent.config.http.cors.toString();

      if (agent.config.http.rateLimit) {
        env.HTTP_RATE_LIMIT = 'true';
        env.HTTP_RATE_LIMIT_WINDOW = agent.config.http.rateLimit.windowMs?.toString() || '900000';
        env.HTTP_RATE_LIMIT_MAX = agent.config.http.rateLimit.max?.toString() || '100';
        env.HTTP_RATE_LIMIT_MESSAGE = agent.config.http.rateLimit.message || 'Too many requests';
      }
    }

    // Add direct prompting environment variables (explicitly 'false' when
    // the feature is absent or disabled).
    if (agent.config.communication?.directPrompting?.enabled) {
      env.DIRECT_PROMPTING_ENABLED = 'true';
      env.DIRECT_PROMPTING_PROTOCOL = agent.config.communication.directPrompting.protocol || 'websocket';
      env.DIRECT_PROMPTING_MAX_CONNECTIONS = agent.config.communication.directPrompting.maxConnections?.toString() || '100';
      env.DIRECT_PROMPTING_AUTHENTICATION = agent.config.communication.directPrompting.authentication?.toString() || 'false';
      env.DIRECT_PROMPTING_TIMEOUT = agent.config.communication.directPrompting.timeout?.toString() || '30000';
    } else {
      env.DIRECT_PROMPTING_ENABLED = 'false';
    }

    // Add main Docker port
    if (agent.config.docker?.port) {
      env.DOCKER_PORT = agent.config.docker.port.toString();
    }

    return env;
  }
}
179
// Named export so callers can `const { AgentConfig } = require('./config')`.
module.exports = { AgentConfig };
@@ -0,0 +1,58 @@
1
+ /**
2
+ * Constants and default configurations
3
+ */
4
+
5
/**
 * Identifiers of LLM providers the framework recognizes.
 * NOTE(review): 'huggingface' is listed here but config.js defines no
 * validation schema for it — confirm whether it is actually supported.
 */
const SUPPORTED_LLMS = ['openai', 'anthropic', 'cohere', 'huggingface', 'ollama', 'custom'];
13
+
14
/**
 * Baseline agent configuration applied when the user supplies no overrides.
 */
const DEFAULT_CONFIG = {
  // Default LLM backend and sampling parameters.
  llm: { provider: 'openai', model: 'gpt-3.5-turbo', temperature: 0.7, maxTokens: 1000 },
  // Default system prompt.
  prompt: 'You are a helpful AI assistant.',
  // Default container resource limits (memory string parses via parseMemory).
  resources: { memory: '512m', cpu: 1, timeout: 30000 },
  // Extra environment variables and user-defined settings start empty.
  environment: {},
  custom: {}
};
30
+
31
/**
 * Docker-related settings: image naming, network/volume names, and
 * in-container filesystem layout.
 * NOTE(review): baseImage and baseImagePrefix hold the same value —
 * confirm both keys are needed by consumers before consolidating.
 */
const DOCKER_CONFIG = {
  baseImage: 'deltadarkly/dank-agent-base',
  baseImagePrefix: 'deltadarkly/dank-agent-base',
  defaultTag: 'latest',
  networkName: 'dank-network',
  volumeName: 'dank-volume',
  workDir: '/app',                  // container working directory
  codeDropPath: '/app/agent-code',  // where agent code is mounted/copied
  entrypoint: '/app/entrypoint.js', // container entry script
  defaultPort: 3000,
  healthCheckPort: 3001
};
43
+
44
/**
 * Canonical agent event names; use these constants instead of raw strings
 * when emitting or subscribing to agent events.
 */
const AGENT_EVENTS = {
  OUTPUT: 'output',
  ERROR: 'error',
  START: 'start',
  STOP: 'stop',
  HEARTBEAT: 'heartbeat',
  CUSTOM: 'custom'
};
52
+
53
// Public API of the constants module.
module.exports = {
  SUPPORTED_LLMS,
  DEFAULT_CONFIG,
  DOCKER_CONFIG,
  AGENT_EVENTS
};