millas 0.2.12-beta-2 → 0.2.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +3 -2
- package/src/admin/Admin.js +122 -38
- package/src/admin/ViewContext.js +12 -3
- package/src/admin/resources/AdminResource.js +10 -0
- package/src/admin/static/admin.css +95 -14
- package/src/admin/views/layouts/base.njk +23 -34
- package/src/admin/views/pages/detail.njk +16 -5
- package/src/admin/views/pages/error.njk +65 -0
- package/src/admin/views/pages/list.njk +127 -2
- package/src/admin/views/partials/form-scripts.njk +7 -3
- package/src/admin/views/partials/form-widget.njk +2 -1
- package/src/admin/views/partials/icons.njk +64 -0
- package/src/ai/AIManager.js +954 -0
- package/src/ai/AITokenBudget.js +250 -0
- package/src/ai/PromptGuard.js +216 -0
- package/src/ai/agents.js +218 -0
- package/src/ai/conversation.js +213 -0
- package/src/ai/drivers.js +734 -0
- package/src/ai/files.js +249 -0
- package/src/ai/media.js +303 -0
- package/src/ai/pricing.js +152 -0
- package/src/ai/provider_tools.js +114 -0
- package/src/ai/types.js +356 -0
- package/src/commands/createsuperuser.js +17 -4
- package/src/commands/serve.js +2 -4
- package/src/container/AppInitializer.js +39 -15
- package/src/container/Application.js +31 -1
- package/src/core/foundation.js +1 -1
- package/src/errors/HttpError.js +32 -16
- package/src/facades/AI.js +411 -0
- package/src/facades/Hash.js +67 -0
- package/src/facades/Process.js +144 -0
- package/src/hashing/Hash.js +262 -0
- package/src/http/HtmlEscape.js +162 -0
- package/src/http/MillasRequest.js +63 -7
- package/src/http/MillasResponse.js +70 -4
- package/src/http/ResponseDispatcher.js +21 -27
- package/src/http/SafeFilePath.js +195 -0
- package/src/http/SafeRedirect.js +62 -0
- package/src/http/SecurityBootstrap.js +70 -0
- package/src/http/helpers.js +40 -125
- package/src/http/index.js +10 -1
- package/src/http/middleware/CsrfMiddleware.js +258 -0
- package/src/http/middleware/RateLimiter.js +314 -0
- package/src/http/middleware/SecurityHeaders.js +281 -0
- package/src/i18n/Translator.js +10 -2
- package/src/logger/LogRedactor.js +247 -0
- package/src/logger/Logger.js +1 -1
- package/src/logger/formatters/JsonFormatter.js +11 -4
- package/src/logger/formatters/PrettyFormatter.js +3 -1
- package/src/logger/formatters/SimpleFormatter.js +14 -3
- package/src/middleware/ThrottleMiddleware.js +27 -4
- package/src/process/Process.js +333 -0
- package/src/router/MiddlewareRegistry.js +27 -2
- package/src/scaffold/templates.js +3 -0
- package/src/validation/Validator.js +348 -607
- package/src/admin.zip +0 -0
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
'use strict';

// ─────────────────────────────────────────────────────────────────────────────
// Token pricing per model — USD per 1M tokens
// Updated: 2025. Check provider pricing pages for latest rates.
// ─────────────────────────────────────────────────────────────────────────────

const PRICING = {
  // ── Anthropic ───────────────────────────────────────────────────────────────
  'claude-opus-4-5': { input: 15.00, output: 75.00 },
  'claude-sonnet-4-20250514': { input: 3.00, output: 15.00 },
  'claude-sonnet-4-5': { input: 3.00, output: 15.00 },
  'claude-haiku-4-5-20251001': { input: 0.80, output: 4.00 },
  'claude-3-7-sonnet-20250219': { input: 3.00, output: 15.00 },
  'claude-3-5-sonnet-20241022': { input: 3.00, output: 15.00 },
  'claude-3-5-haiku-20241022': { input: 0.80, output: 4.00 },
  'claude-3-opus-20240229': { input: 15.00, output: 75.00 },

  // ── OpenAI ──────────────────────────────────────────────────────────────────
  'gpt-4o': { input: 2.50, output: 10.00 },
  'gpt-4o-mini': { input: 0.15, output: 0.60 },
  'gpt-4-turbo': { input: 10.00, output: 30.00 },
  'gpt-4': { input: 30.00, output: 60.00 },
  'gpt-3.5-turbo': { input: 0.50, output: 1.50 },
  'o1': { input: 15.00, output: 60.00 },
  'o1-mini': { input: 3.00, output: 12.00 },
  'o3-mini': { input: 1.10, output: 4.40 },

  // ── Gemini ──────────────────────────────────────────────────────────────────
  'gemini-2.5-pro': { input: 1.25, output: 10.00 },
  'gemini-2.5-flash': { input: 0.15, output: 0.60 },
  'gemini-2.0-flash': { input: 0.10, output: 0.40 },
  'gemini-2.0-flash-lite': { input: 0.075, output: 0.30 },
  'gemini-1.5-pro': { input: 1.25, output: 5.00 },
  'gemini-1.5-flash': { input: 0.075, output: 0.30 },

  // ── Groq ────────────────────────────────────────────────────────────────────
  'llama-3.3-70b-versatile': { input: 0.59, output: 0.79 },
  'llama-3.1-70b-versatile': { input: 0.59, output: 0.79 },
  'llama-3.1-8b-instant': { input: 0.05, output: 0.08 },
  'mixtral-8x7b-32768': { input: 0.24, output: 0.24 },

  // ── Mistral ──────────────────────────────────────────────────────────────────
  'mistral-large-latest': { input: 2.00, output: 6.00 },
  'mistral-small-latest': { input: 0.20, output: 0.60 },
  'open-mistral-7b': { input: 0.25, output: 0.25 },

  // ── DeepSeek ─────────────────────────────────────────────────────────────────
  'deepseek-chat': { input: 0.27, output: 1.10 },
  'deepseek-reasoner': { input: 0.55, output: 2.19 },

  // ── xAI ─────────────────────────────────────────────────────────────────────
  'grok-2': { input: 2.00, output: 10.00 },
  'grok-2-mini': { input: 0.20, output: 1.00 },
};

// ─────────────────────────────────────────────────────────────────────────────
// CostCalculator
// ─────────────────────────────────────────────────────────────────────────────

class CostCalculator {
  /**
   * Calculate cost for a completed response.
   *
   *   const cost = CostCalculator.forResponse(response);
   *   cost.input     // 0.0003
   *   cost.output    // 0.0015
   *   cost.total     // 0.0018
   *   cost.currency  // 'USD'
   *   cost.formatted // '$0.0018'
   *
   * @param {AIResponse} response — must expose model, inputTokens, outputTokens
   * @returns {{ input, output, total, currency, formatted } | null} null when
   *   no pricing data exists for the response's model
   */
  static forResponse(response) {
    return CostCalculator.calculate(
      response.model,
      response.inputTokens,
      response.outputTokens
    );
  }

  /**
   * Calculate cost given a model id and token counts.
   *
   * @param {string} model
   * @param {number} inputTokens
   * @param {number} outputTokens
   * @returns {{ input, output, total, currency, formatted } | null}
   */
  static calculate(model, inputTokens, outputTokens) {
    const pricing = CostCalculator._lookup(model);
    if (!pricing) return null;

    // Rates are quoted per 1M tokens.
    const input = (inputTokens / 1_000_000) * pricing.input;
    const output = (outputTokens / 1_000_000) * pricing.output;
    const total = input + output;

    return {
      // Round to micro-dollar precision; parseFloat strips trailing zeros.
      input: parseFloat(input.toFixed(6)),
      output: parseFloat(output.toFixed(6)),
      total: parseFloat(total.toFixed(6)),
      currency: 'USD',
      formatted: `$${total.toFixed(4)}`,
    };
  }

  /**
   * Estimate cost for a prompt before sending it.
   * Uses a rough character-to-token ratio (1 token ≈ 4 chars).
   *
   *   const est = AI.estimateCost('My long prompt here...', 'claude-sonnet-4-20250514');
   *   est.estimated // { input: 0.00003, output: 0.00015, total: 0.00018 }
   *   est.note      // 'Estimate only. Output tokens are approximate.'
   *
   * @param {string} prompt
   * @param {string} model
   * @param {number} [expectedOutputTokens=500]
   * @returns {{ estimated, inputTokens, outputTokens, note } | { estimated: null, note }}
   */
  static estimate(prompt, model, expectedOutputTokens = 500) {
    const inputTokens = Math.ceil(prompt.length / 4);
    const cost = CostCalculator.calculate(model, inputTokens, expectedOutputTokens);
    if (!cost) return { estimated: null, note: `No pricing data for model: ${model}` };
    return {
      estimated: cost,
      inputTokens,
      outputTokens: expectedOutputTokens,
      note: 'Estimate only. Output tokens are approximate.',
    };
  }

  /**
   * Resolve pricing for a model id.
   *
   * Exact matches win. Otherwise the LONGEST table key related to the model
   * by prefix wins, covering both versioned ids
   * ('claude-3-5-sonnet-20241022-v2' → 'claude-3-5-sonnet-20241022') and
   * truncated ids ('claude-3-5-sonnet' → 'claude-3-5-sonnet-20241022').
   *
   * Fix: the previous loop returned the FIRST key matching
   * `model.startsWith(key) || key.startsWith(model.split('-').slice(0, 4).join('-'))`,
   * so the result depended on PRICING key insertion order, and the 4-segment
   * truncation could match an arbitrary (wrong) entry for short model ids.
   *
   * @param {string} model
   * @returns {{ input: number, output: number } | null}
   */
  static _lookup(model) {
    if (!model) return null;
    // Exact match
    if (PRICING[model]) return PRICING[model];
    // Prefix match — longest related key is the most specific candidate.
    let best = null;
    for (const key of Object.keys(PRICING)) {
      if (model.startsWith(key) || key.startsWith(model)) {
        if (best === null || key.length > best.length) best = key;
      }
    }
    return best ? PRICING[best] : null;
  }

  /** Check if pricing data exists for a model. */
  static hasPricing(model) { return !!CostCalculator._lookup(model); }

  /** List all models with known pricing. */
  static supportedModels() { return Object.keys(PRICING); }
}
|
|
151
|
+
|
|
152
|
+
module.exports = { PRICING, CostCalculator };
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
4
|
+
// Provider-native tools — executed by the AI provider, not your app
|
|
5
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* WebSearch — let the model search the web in real-time.
|
|
9
|
+
* Supported: Anthropic, OpenAI, Gemini
|
|
10
|
+
*
|
|
11
|
+
* AI.tools([new WebSearch()]).generate('What happened in tech today?')
|
|
12
|
+
* AI.tools([new WebSearch().max(5).allow(['techcrunch.com'])]).generate('...')
|
|
13
|
+
*/
|
|
14
|
+
/**
 * WebSearch — provider-native tool letting the model search the web in
 * real-time. Executed by the AI provider, not your app.
 * Supported: Anthropic, OpenAI, Gemini.
 *
 *   AI.tools([new WebSearch()]).generate('What happened in tech today?')
 *   AI.tools([new WebSearch().max(5).allow(['techcrunch.com'])]).generate('...')
 */
class WebSearch {
  constructor() {
    this._max = null;        // max searches the model may run
    this._domains = [];      // allow-list of result domains
    this._location = null;   // { city, region, country } bias
    this._isProvider = true; // marks this as provider-executed
    this.name = 'web_search';
    this.description = 'Search the web for real-time information.';
  }

  /** Max number of searches the model may perform. */
  max(n) { this._max = n; return this; }

  /** Restrict results to specific domains. */
  allow(domains) { this._domains = domains; return this; }

  /** Bias results toward a location. */
  location({ city, region, country } = {}) {
    this._location = { city, region, country };
    return this;
  }

  /**
   * Translate this tool into the given provider's wire schema.
   * @param {string} provider — 'anthropic' | 'openai' | 'gemini'
   * @returns {object|null} null when the provider has no native web search
   */
  toProviderSchema(provider) {
    if (provider === 'anthropic') {
      const tool = { type: 'web_search_20250305', name: 'web_search' };
      if (this._max) tool.max_uses = this._max;
      if (this._domains?.length) tool.allowed_domains = this._domains;
      return tool;
    }
    if (provider === 'openai') {
      const tool = { type: 'web_search_preview' };
      // Fix: user_location must be driven by _location. Previously it was
      // gated on _domains (and search_context_size on _location), so a
      // configured location bias was silently dropped. OpenAI's
      // web_search_preview has no domain allow-list; _domains is ignored here.
      if (this._location) tool.user_location = this._location;
      return tool;
    }
    if (provider === 'gemini') {
      return { google_search: {} };
    }
    return null;
  }
}
|
|
55
|
+
|
|
56
|
+
/**
|
|
57
|
+
* WebFetch — let the model fetch and read web pages.
|
|
58
|
+
* Supported: Anthropic, Gemini
|
|
59
|
+
*
|
|
60
|
+
* AI.tools([new WebFetch()]).generate('Summarize https://example.com/page')
|
|
61
|
+
*/
|
|
62
|
+
/**
 * WebFetch — provider-native tool letting the model fetch and read web pages.
 * Executed by the AI provider, not your app.
 * Supported: Anthropic, Gemini.
 *
 *   AI.tools([new WebFetch()]).generate('Summarize https://example.com/page')
 */
class WebFetch {
  constructor() {
    this._max = null;        // max fetches the model may run
    this._domains = [];      // allow-list of fetchable domains
    this._isProvider = true; // marks this as provider-executed
    this.name = 'web_fetch';
    this.description = 'Fetch and read the content of web pages.';
  }

  /** Max number of fetches the model may perform. */
  max(n) { this._max = n; return this; }

  /** Restrict fetches to specific domains. */
  allow(domains) { this._domains = domains; return this; }

  /**
   * Translate this tool into the given provider's wire schema.
   * @param {string} provider — 'anthropic' | 'gemini'
   * @returns {object|null} null when the provider has no native web fetch
   */
  toProviderSchema(provider) {
    if (provider === 'anthropic') {
      // Fix: previously emitted the WebSearch schema
      // ({ type: 'web_search_20250305', name: 'web_search' } — copy-paste),
      // so this tool registered as a second search tool and never fetched.
      // Use Anthropic's web fetch tool type instead.
      const tool = { type: 'web_fetch_20250910', name: 'web_fetch' };
      if (this._max) tool.max_uses = this._max;
      if (this._domains?.length) tool.allowed_domains = this._domains;
      return tool;
    }
    if (provider === 'gemini') return { url_context: {} };
    return null;
  }
}
|
|
85
|
+
|
|
86
|
+
/**
|
|
87
|
+
* FileSearch — search through files in vector stores.
|
|
88
|
+
* Supported: OpenAI, Gemini
|
|
89
|
+
*
|
|
90
|
+
* AI.tools([new FileSearch({ stores: ['store_abc'] })]).generate('...')
|
|
91
|
+
*/
|
|
92
|
+
/**
 * FileSearch — provider-native tool searching files in vector stores.
 * Executed by the AI provider, not your app.
 * Supported: OpenAI, Gemini.
 *
 *   AI.tools([new FileSearch({ stores: ['store_abc'] })]).generate('...')
 *
 * @param {object} [opts]
 * @param {string[]} [opts.stores] — vector store / datastore ids
 * @param {object|null} [opts.where] — optional metadata filters (OpenAI only)
 */
class FileSearch {
  constructor({ stores = [], where = null } = {}) {
    this._stores = stores;
    this._where = where;
    this._isProvider = true; // marks this as provider-executed
    this.name = 'file_search';
    this.description = 'Search through files in vector stores.';
  }

  /**
   * Translate this tool into the given provider's wire schema.
   * @param {string} provider — 'openai' | 'gemini'
   * @returns {object|null} null when unsupported or no store is configured
   */
  toProviderSchema(provider) {
    if (provider === 'openai') {
      const tool = { type: 'file_search', vector_store_ids: this._stores };
      if (this._where) tool.filters = this._where;
      return tool;
    }
    if (provider === 'gemini') {
      // Fix: guard against an empty store list — previously this emitted
      // { datastore: undefined }, an invalid retrieval config.
      if (!this._stores.length) return null;
      // Gemini retrieval accepts a single datastore; extras are ignored.
      return { retrieval: { vertex_ai_search: { datastore: this._stores[0] } } };
    }
    return null;
  }
}
|
|
113
|
+
|
|
114
|
+
module.exports = { WebSearch, WebFetch, FileSearch };
|
package/src/ai/types.js
ADDED
|
@@ -0,0 +1,356 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
4
|
+
// AIMessage — a single message in a conversation
|
|
5
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
6
|
+
|
|
7
|
+
/**
 * AIMessage — a single message in a conversation.
 */
class AIMessage {
  /**
   * @param {'user'|'assistant'|'system'|'tool'} role
   * @param {string|Array} content — plain text or an array of content parts
   * @param {object} [meta] — tool_call_id, name, usage, etc.
   */
  constructor(role, content, meta = {}) {
    this.role = role;
    this.content = content;
    this.meta = meta;
  }

  /** Shorthand factory for a user message. */
  static user(content) {
    return new AIMessage('user', content);
  }

  /** Shorthand factory for an assistant message. */
  static assistant(content) {
    return new AIMessage('assistant', content);
  }

  /** Shorthand factory for a system message. */
  static system(content) {
    return new AIMessage('system', content);
  }

  /** Build a tool-result message linked to a specific tool call. */
  static tool(id, name, content) {
    return new AIMessage('tool', content, { tool_call_id: id, name });
  }

  /** Plain-object form for providers; includes tool linkage when present. */
  toJSON() {
    const json = { role: this.role, content: this.content };
    const { tool_call_id, name } = this.meta;
    if (tool_call_id) {
      json.tool_call_id = tool_call_id;
      json.name = name;
    }
    return json;
  }
}
|
|
35
|
+
|
|
36
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
37
|
+
// AIResponse — structured result from any provider
|
|
38
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
39
|
+
|
|
40
|
+
/**
 * AIResponse — structured result from any provider.
 */
class AIResponse {
  constructor({
    text, role = 'assistant', model, provider,
    inputTokens = 0, outputTokens = 0,
    toolCalls = [], finishReason = 'stop',
    raw = null,
  }) {
    this.text = text || '';
    this.role = role;
    this.model = model;
    this.provider = provider;
    this.inputTokens = inputTokens;
    this.outputTokens = outputTokens;
    this.totalTokens = inputTokens + outputTokens;
    this.toolCalls = toolCalls;       // [{ id, name, arguments }]
    this.finishReason = finishReason;
    this.raw = raw;                   // original provider response
  }

  /** True when the model wants to call tools. */
  get hasToolCalls() {
    return this.toolCalls.length !== 0;
  }

  /** True when the model stopped naturally. */
  get isComplete() {
    return this.finishReason === 'stop';
  }

  /** True when the model hit a token limit. */
  get isTokenLimited() {
    return this.finishReason === 'length';
  }

  /** Cast to an assistant AIMessage suitable for appending to a thread. */
  toMessage() {
    if (!this.hasToolCalls) {
      return new AIMessage('assistant', this.text);
    }
    const parts = [{ type: 'text', text: this.text }];
    for (const call of this.toolCalls) {
      parts.push({ type: 'tool_use', id: call.id, name: call.name, input: call.arguments });
    }
    return new AIMessage('assistant', parts);
  }

  toString() {
    return this.text;
  }
}
|
|
80
|
+
|
|
81
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
82
|
+
// AIStreamEvent — typed events emitted during streaming
|
|
83
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
84
|
+
|
|
85
|
+
/**
 * AIStreamEvent — typed events emitted during streaming.
 * Each factory tags the payload with its event type.
 */
class AIStreamEvent {
  constructor(type, data) {
    this.type = type;
    this.data = data;
  }

  /** A chunk of generated text. */
  static delta(text) {
    return new AIStreamEvent('delta', { text });
  }

  /** A chunk of model reasoning/thinking text. */
  static thinking(text) {
    return new AIStreamEvent('thinking', { text });
  }

  /** The model requested a tool invocation. */
  static toolCall(tc) {
    return new AIStreamEvent('tool_call', tc);
  }

  /** The stream finished; carries the final response. */
  static complete(response) {
    return new AIStreamEvent('complete', response);
  }

  /** The stream failed; carries the error. */
  static error(err) {
    return new AIStreamEvent('error', { error: err });
  }
}
|
|
94
|
+
|
|
95
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
96
|
+
// Tool — define a callable tool the model can invoke
|
|
97
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
98
|
+
|
|
99
|
+
/**
 * Tool — define a callable tool the model can invoke.
 */
class Tool {
  /**
   * @param {string} name
   * @param {string} description
   * @param {object} schema — JSON Schema object for parameters
   * @param {function} handler — async (args) => result
   */
  constructor(name, description, schema, handler) {
    this.name = name;
    this.description = description;
    this.schema = schema;
    this.handler = handler;
  }

  /**
   * Fluent factory:
   *
   *   Tool.define('get_weather')
   *     .description('Get the weather for a city')
   *     .parameters({
   *       type: 'object',
   *       properties: {
   *         city: { type: 'string', description: 'City name' },
   *         units: { type: 'string', enum: ['celsius', 'fahrenheit'] },
   *       },
   *       required: ['city'],
   *     })
   *     .handle(async ({ city, units }) => {
   *       return await WeatherService.get(city, units);
   *     })
   */
  static define(name) {
    return new ToolBuilder(name);
  }

  /** Anthropic-style tool descriptor. */
  toProviderSchema() {
    const { name, description, schema } = this;
    return { name, description, input_schema: schema };
  }

  /** OpenAI function-calling tool descriptor. */
  toOpenAISchema() {
    const { name, description, schema } = this;
    return {
      type: 'function',
      function: { name, description, parameters: schema },
    };
  }
}
|
|
143
|
+
|
|
144
|
+
/** Fluent builder behind Tool.define(); each setter returns `this`. */
class ToolBuilder {
  constructor(name) {
    this._name = name;
    this._description = '';
    this._schema = { type: 'object', properties: {}, required: [] };
    this._handler = null;
  }

  /** Human-readable description shown to the model. */
  description(d) {
    this._description = d;
    return this;
  }

  /** Full JSON Schema for the tool's parameters. */
  parameters(s) {
    this._schema = s;
    return this;
  }

  /** Async handler invoked with the parsed arguments. */
  handle(fn) {
    this._handler = fn;
    return this;
  }

  /** Materialise the configured Tool. */
  build() {
    return new Tool(this._name, this._description, this._schema, this._handler);
  }
}
|
|
158
|
+
|
|
159
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
160
|
+
// Thread — conversation memory manager
|
|
161
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
162
|
+
|
|
163
|
+
/**
 * Thread — conversation memory manager.
 *
 * Collects messages, optionally keeps only a sliding window of the most
 * recent ones, and can collapse evicted history into a system-message
 * summary via a user-supplied summariser.
 */
class Thread {
  constructor(systemPrompt = null) {
    this._messages = [];
    this._systemPrompt = systemPrompt;
    this._maxMessages = null; // null = unlimited
    this._summaryFn = null;   // async (messages) => summaryString
  }

  /** Set a new or updated system prompt. */
  system(prompt) {
    this._systemPrompt = prompt;
    return this;
  }

  /** Limit history to the last N messages (sliding window). */
  limit(n) {
    this._maxMessages = n;
    return this;
  }

  /**
   * Provide a summarisation function. When the thread exceeds limit(),
   * older messages are collapsed into a summary instead of dropped.
   *
   *   thread.summariseWith(async (msgs) => {
   *     const res = await AI.text(`Summarise this: ${msgs.map(m=>m.content).join('\n')}`);
   *     return res.text;
   *   });
   */
  summariseWith(fn) {
    this._summaryFn = fn;
    return this;
  }

  /** Append a message; returns the thread for chaining. */
  add(message) {
    this._messages.push(message);
    return this;
  }

  addUser(content) { return this.add(AIMessage.user(content)); }
  addAssistant(content) { return this.add(AIMessage.assistant(content)); }

  /** Messages formatted for the provider, respecting limit(). */
  async toArray() {
    const all = this._messages.slice();
    const cap = this._maxMessages;

    if (!cap || all.length <= cap) {
      return all.map((m) => m.toJSON());
    }

    const evicted = all.slice(0, all.length - cap);
    const kept = all.slice(all.length - cap);

    if (this._summaryFn) {
      const summary = await this._summaryFn(evicted);
      kept.unshift(AIMessage.system(`Earlier conversation summary: ${summary}`));
    }

    return kept.map((m) => m.toJSON());
  }

  get length() {
    return this._messages.length;
  }

  /** Drop all messages. */
  clear() {
    this._messages = [];
    return this;
  }

  /** Text of the most recent assistant message, or null when none exists. */
  get lastReply() {
    for (let i = this._messages.length - 1; i >= 0; i -= 1) {
      const msg = this._messages[i];
      if (msg.role !== 'assistant') continue;
      if (typeof msg.content === 'string') return msg.content;
      return msg.content?.[0]?.text || '';
    }
    return null;
  }
}
|
|
223
|
+
|
|
224
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
225
|
+
// Prompt — template with variable substitution
|
|
226
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
227
|
+
|
|
228
|
+
/**
 * Prompt — template with {{variable}} substitution.
 *
 *   Prompt.make('Summarise this in {{language}}: {{text}}')
 *     .with({ language: 'French', text: article })
 *     .toString()
 */
class Prompt {
  /** @param {string} template — uses {{variable}} syntax */
  constructor(template) {
    this._template = template;
    this._vars = {};
  }

  static make(template) {
    return new Prompt(template);
  }

  /** Merge variables into the substitution map. */
  with(vars) {
    Object.assign(this._vars, vars);
    return this;
  }

  /** Render the template; unknown placeholders are left untouched. */
  toString() {
    return this._template.replace(/\{\{(\w+)\}\}/g, (placeholder, key) => {
      if (Object.prototype.hasOwnProperty.call(this._vars, key)) {
        return String(this._vars[key]);
      }
      return placeholder;
    });
  }
}
|
|
251
|
+
|
|
252
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
253
|
+
// Schema — structured output enforcement
|
|
254
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
255
|
+
|
|
256
|
+
/**
 * Schema — structured output enforcement.
 *
 * Define the expected output structure. The AI manager will:
 *   1. Inject the schema into the prompt
 *   2. Parse and validate the response JSON
 *   3. Retry once if parsing fails
 *
 *   Schema.define({
 *     name: { type: 'string' },
 *     confidence: { type: 'number', min: 0, max: 1 },
 *     tags: { type: 'array' },
 *   })
 */
class Schema {
  constructor(shape) {
    this._shape = shape;
  }

  static define(shape) {
    return new Schema(shape);
  }

  /** JSON Schema representation passed to the provider. */
  toJSONSchema() {
    const properties = {};
    const required = [];

    for (const [field, spec] of Object.entries(this._shape)) {
      const prop = { type: spec.type, description: spec.description || '' };
      if (spec.enum) prop.enum = spec.enum;
      if (spec.items) prop.items = spec.items;
      properties[field] = prop;
      // Fields are required unless explicitly opted out.
      if (spec.required !== false) required.push(field);
    }

    return { type: 'object', properties, required };
  }

  /**
   * Validate and cast a parsed object against the shape.
   * Missing optional fields fall back to `default` (or null); numbers are
   * coerced and range-checked. All problems are collected and reported in
   * one AIStructuredOutputError.
   */
  validate(obj) {
    const out = {};
    const problems = [];

    for (const [field, spec] of Object.entries(this._shape)) {
      const value = obj[field];

      if (value === undefined || value === null) {
        if (spec.required !== false) problems.push(`Missing field: ${field}`);
        out[field] = spec.default ?? null;
        continue;
      }

      if (spec.type !== 'number') {
        out[field] = value;
        continue;
      }

      const num = Number(value);
      if (Number.isNaN(num)) {
        problems.push(`${field}: expected number`);
        continue;
      }
      if (spec.min !== undefined && num < spec.min) problems.push(`${field}: below min ${spec.min}`);
      if (spec.max !== undefined && num > spec.max) problems.push(`${field}: above max ${spec.max}`);
      out[field] = num;
    }

    if (problems.length) throw new AIStructuredOutputError(problems.join('; '), obj);
    return out;
  }
}
|
|
313
|
+
|
|
314
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
315
|
+
// Error types
|
|
316
|
+
// ─────────────────────────────────────────────────────────────────────────────
|
|
317
|
+
|
|
318
|
+
/** Base class for all AI subsystem errors. */
class AIError extends Error {
  /**
   * @param {string} message
   * @param {string} provider — which driver raised the error
   * @param {Error|null} [cause] — underlying error, if any
   */
  constructor(message, provider, cause = null) {
    super(message);
    this.name = 'AIError';
    this.provider = provider;
    this.cause = cause;
  }
}

/** Raised when a provider rejects a request due to rate limiting. */
class AIRateLimitError extends AIError {
  constructor(provider, retryAfter = null) {
    super(`Rate limit exceeded on provider "${provider}"`, provider);
    this.name = 'AIRateLimitError';
    // Retry delay when the provider supplies one; null otherwise.
    this.retryAfter = retryAfter;
  }
}

/** Raised when structured output fails schema validation. */
class AIStructuredOutputError extends Error {
  constructor(message, raw) {
    super(`Structured output validation failed: ${message}`);
    this.name = 'AIStructuredOutputError';
    // The unvalidated parsed object, kept for inspection/retry.
    this.raw = raw;
  }
}

/** Raised for provider-side failures; carries the HTTP status when known. */
class AIProviderError extends AIError {
  constructor(provider, message, statusCode = null) {
    super(message, provider);
    this.name = 'AIProviderError';
    this.statusCode = statusCode;
  }
}
|
|
350
|
+
|
|
351
|
+
module.exports = {
|
|
352
|
+
AIMessage, AIResponse, AIStreamEvent,
|
|
353
|
+
Tool, ToolBuilder,
|
|
354
|
+
Thread, Prompt, Schema,
|
|
355
|
+
AIError, AIRateLimitError, AIStructuredOutputError, AIProviderError,
|
|
356
|
+
};
|
|
@@ -171,6 +171,7 @@ module.exports = function (program) {
|
|
|
171
171
|
* Also boots the DB connection and verifies the resolved model's table exists,
|
|
172
172
|
* giving a clear error if migrations haven't been run yet.
|
|
173
173
|
*/
|
|
174
|
+
|
|
174
175
|
async function resolveUserModel() {
|
|
175
176
|
const cwd = process.cwd();
|
|
176
177
|
const configPath = path.join(cwd, 'config/database.js');
|
|
@@ -178,9 +179,16 @@ async function resolveUserModel() {
|
|
|
178
179
|
throw new Error('config/database.js not found. Are you inside a Millas project?');
|
|
179
180
|
}
|
|
180
181
|
|
|
181
|
-
//
|
|
182
|
-
|
|
183
|
-
|
|
182
|
+
// Always require DatabaseManager from the project-local node_modules.
|
|
183
|
+
// This ensures the same singleton is shared with the project's models,
|
|
184
|
+
// avoiding the "not configured" error when millas is installed globally.
|
|
185
|
+
const dbConfig = require(configPath);
|
|
186
|
+
let DatabaseManager;
|
|
187
|
+
try {
|
|
188
|
+
DatabaseManager = require(path.join(cwd, 'node_modules/millas/src/orm/drivers/DatabaseManager'));
|
|
189
|
+
} catch {
|
|
190
|
+
DatabaseManager = require('../orm/drivers/DatabaseManager');
|
|
191
|
+
}
|
|
184
192
|
DatabaseManager.configure(dbConfig);
|
|
185
193
|
const db = DatabaseManager.connection();
|
|
186
194
|
|
|
@@ -208,8 +216,13 @@ async function resolveUserModel() {
|
|
|
208
216
|
throw new Error(`Could not load app/models/index.js: ${err.message}`);
|
|
209
217
|
}
|
|
210
218
|
} else {
|
|
219
|
+
// -- Step 2: try app/models/User.js --
|
|
220
|
+
try {
|
|
221
|
+
User = require(path.join(cwd, 'app/models/User'));
|
|
222
|
+
} catch {
|
|
223
|
+
// -- Step 3: abstract AuthUser fallback --
|
|
211
224
|
User = require('../auth/AuthUser');
|
|
212
|
-
|
|
225
|
+
}
|
|
213
226
|
}
|
|
214
227
|
|
|
215
228
|
// -- Verify the model's table exists (uses the model's own table name) --
|
package/src/commands/serve.js
CHANGED
|
@@ -121,13 +121,11 @@ class HotReloader {
|
|
|
121
121
|
}
|
|
122
122
|
|
|
123
123
|
_restart(changedFile) {
|
|
124
|
-
|
|
125
|
-
? `\x1b]8;;file://${changedFile}\x07${path.relative(process.cwd(), changedFile)}\x1b]8;;\x07`
|
|
126
|
-
: '';
|
|
124
|
+
|
|
127
125
|
console.warn(
|
|
128
126
|
chalk.yellow('↺') + ' ' +
|
|
129
127
|
chalk.white('Reloading') +
|
|
130
|
-
(
|
|
128
|
+
(changedFile ? chalk.blueBright(' ' + changedFile) : '')
|
|
131
129
|
);
|
|
132
130
|
|
|
133
131
|
this._restarts++;
|