@hotmeshio/long-tail 0.1.0 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/build/routes/oauth.js +4 -4
- package/build/services/db/migrate.js +6 -1
- package/build/services/oauth/providers/github.js +44 -3
- package/build/services/oauth/providers/google.js +45 -3
- package/build/services/oauth/providers/microsoft.js +45 -3
- package/build/services/oauth/providers/types.d.ts +1 -1
- package/package.json +4 -3
- package/services/db/README.md +8 -0
- package/services/db/schemas/001_schema.sql +307 -0
- package/services/db/schemas/002_seed.sql +67 -0
- package/services/db/schemas/003_workflow_discovery.sql +39 -0
- package/services/db/schemas/004_query_router.sql +38 -0
- package/services/db/schemas/005_triage_router.sql +37 -0
- package/services/db/schemas/006_oauth.sql +50 -0
- package/services/db/schemas/007_security.sql +27 -0
- package/services/db/schemas/008_bot_accounts.sql +30 -0
- package/services/db/schemas/009_audit_trail.sql +7 -0
- package/services/db/schemas/010_credential_providers.sql +4 -0
- package/services/db/schemas/011_system_workflow_configs.sql +23 -0
- package/services/db/schemas/012_drop_modality.sql +6 -0
- package/services/db/schemas/013_execute_as.sql +9 -0
- package/services/db/schemas/014_ephemeral_credentials.sql +16 -0
package/build/routes/oauth.js
CHANGED
|
@@ -51,7 +51,7 @@ router.get('/connections', auth_1.requireAuth, async (req, res) => {
|
|
|
51
51
|
* GET /api/auth/oauth/:provider
|
|
52
52
|
* Initiate the OAuth flow — redirects the browser to the provider.
|
|
53
53
|
*/
|
|
54
|
-
router.get('/:provider', (req, res) => {
|
|
54
|
+
router.get('/:provider', async (req, res) => {
|
|
55
55
|
const { provider } = req.params;
|
|
56
56
|
const handler = (0, oauth_1.getProvider)(provider);
|
|
57
57
|
if (!handler) {
|
|
@@ -65,7 +65,7 @@ router.get('/:provider', (req, res) => {
|
|
|
65
65
|
if (!handler.config.redirectUri) {
|
|
66
66
|
handler.config.redirectUri = `${baseUrl}/api/auth/oauth/${provider}/callback`;
|
|
67
67
|
}
|
|
68
|
-
const url = handler.createAuthorizationURL(state, codeVerifier);
|
|
68
|
+
const url = await handler.createAuthorizationURL(state, codeVerifier);
|
|
69
69
|
res.redirect(url.toString());
|
|
70
70
|
});
|
|
71
71
|
/**
|
|
@@ -178,7 +178,7 @@ router.get('/connect/:provider', (req, res, next) => {
|
|
|
178
178
|
req.headers.authorization = `Bearer ${token}`;
|
|
179
179
|
}
|
|
180
180
|
next();
|
|
181
|
-
}, auth_1.requireAuth, (req, res) => {
|
|
181
|
+
}, auth_1.requireAuth, async (req, res) => {
|
|
182
182
|
const userId = req.auth.userId;
|
|
183
183
|
const provider = req.params.provider;
|
|
184
184
|
const handler = (0, oauth_1.getProvider)(provider);
|
|
@@ -197,7 +197,7 @@ router.get('/connect/:provider', (req, res, next) => {
|
|
|
197
197
|
if (!handler.config.redirectUri) {
|
|
198
198
|
handler.config.redirectUri = `${baseUrl}/api/auth/oauth/${provider}/callback`;
|
|
199
199
|
}
|
|
200
|
-
const url = handler.createAuthorizationURL(state, codeVerifier);
|
|
200
|
+
const url = await handler.createAuthorizationURL(state, codeVerifier);
|
|
201
201
|
res.redirect(url.toString());
|
|
202
202
|
});
|
|
203
203
|
/**
|
|
@@ -38,7 +38,12 @@ const fs = __importStar(require("fs"));
|
|
|
38
38
|
const path = __importStar(require("path"));
|
|
39
39
|
const index_1 = require("./index");
|
|
40
40
|
const logger_1 = require("../logger");
|
|
41
|
-
|
|
41
|
+
// In dev: __dirname = services/db → schemas is ./schemas
|
|
42
|
+
// In built/published: __dirname = build/services/db → schemas is ../../../services/db/schemas
|
|
43
|
+
const devPath = path.join(__dirname, 'schemas');
|
|
44
|
+
const SCHEMAS_DIR = fs.existsSync(devPath)
|
|
45
|
+
? devPath
|
|
46
|
+
: path.join(__dirname, '..', '..', '..', 'services', 'db', 'schemas');
|
|
42
47
|
async function migrate() {
|
|
43
48
|
const pool = (0, index_1.getPool)();
|
|
44
49
|
// ensure migration tracking table
|
|
@@ -1,17 +1,58 @@
|
|
|
1
1
|
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
15
|
+
}) : function(o, v) {
|
|
16
|
+
o["default"] = v;
|
|
17
|
+
});
|
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
19
|
+
var ownKeys = function(o) {
|
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
21
|
+
var ar = [];
|
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
23
|
+
return ar;
|
|
24
|
+
};
|
|
25
|
+
return ownKeys(o);
|
|
26
|
+
};
|
|
27
|
+
return function (mod) {
|
|
28
|
+
if (mod && mod.__esModule) return mod;
|
|
29
|
+
var result = {};
|
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
31
|
+
__setModuleDefault(result, mod);
|
|
32
|
+
return result;
|
|
33
|
+
};
|
|
34
|
+
})();
|
|
2
35
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
36
|
exports.createGitHubHandler = createGitHubHandler;
|
|
4
|
-
const arctic_1 = require("arctic");
|
|
5
37
|
function createGitHubHandler(cfg) {
|
|
6
38
|
const redirectUri = cfg.redirectUri || null;
|
|
7
|
-
|
|
39
|
+
let _github;
|
|
40
|
+
async function getClient() {
|
|
41
|
+
if (!_github) {
|
|
42
|
+
const { GitHub } = await Promise.resolve().then(() => __importStar(require('arctic')));
|
|
43
|
+
_github = new GitHub(cfg.clientId, cfg.clientSecret, redirectUri);
|
|
44
|
+
}
|
|
45
|
+
return _github;
|
|
46
|
+
}
|
|
8
47
|
return {
|
|
9
48
|
config: cfg,
|
|
10
|
-
createAuthorizationURL(state, _codeVerifier) {
|
|
49
|
+
async createAuthorizationURL(state, _codeVerifier) {
|
|
50
|
+
const github = await getClient();
|
|
11
51
|
const scopes = cfg.scopes.length > 0 ? cfg.scopes : ['read:user', 'user:email'];
|
|
12
52
|
return github.createAuthorizationURL(state, scopes);
|
|
13
53
|
},
|
|
14
54
|
async validateAuthorizationCode(code, _codeVerifier) {
|
|
55
|
+
const github = await getClient();
|
|
15
56
|
const tokens = await github.validateAuthorizationCode(code);
|
|
16
57
|
return {
|
|
17
58
|
accessToken: tokens.accessToken(),
|
|
@@ -1,17 +1,58 @@
|
|
|
1
1
|
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
15
|
+
}) : function(o, v) {
|
|
16
|
+
o["default"] = v;
|
|
17
|
+
});
|
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
19
|
+
var ownKeys = function(o) {
|
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
21
|
+
var ar = [];
|
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
23
|
+
return ar;
|
|
24
|
+
};
|
|
25
|
+
return ownKeys(o);
|
|
26
|
+
};
|
|
27
|
+
return function (mod) {
|
|
28
|
+
if (mod && mod.__esModule) return mod;
|
|
29
|
+
var result = {};
|
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
31
|
+
__setModuleDefault(result, mod);
|
|
32
|
+
return result;
|
|
33
|
+
};
|
|
34
|
+
})();
|
|
2
35
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
36
|
exports.createGoogleHandler = createGoogleHandler;
|
|
4
|
-
const arctic_1 = require("arctic");
|
|
5
37
|
function createGoogleHandler(cfg) {
|
|
6
38
|
const redirectUri = cfg.redirectUri || '';
|
|
7
|
-
|
|
39
|
+
let _google;
|
|
40
|
+
async function getClient() {
|
|
41
|
+
if (!_google) {
|
|
42
|
+
const { Google } = await Promise.resolve().then(() => __importStar(require('arctic')));
|
|
43
|
+
_google = new Google(cfg.clientId, cfg.clientSecret, redirectUri);
|
|
44
|
+
}
|
|
45
|
+
return _google;
|
|
46
|
+
}
|
|
8
47
|
return {
|
|
9
48
|
config: cfg,
|
|
10
|
-
createAuthorizationURL(state, codeVerifier) {
|
|
49
|
+
async createAuthorizationURL(state, codeVerifier) {
|
|
50
|
+
const google = await getClient();
|
|
11
51
|
const scopes = cfg.scopes.length > 0 ? cfg.scopes : ['openid', 'email', 'profile'];
|
|
12
52
|
return google.createAuthorizationURL(state, codeVerifier, scopes);
|
|
13
53
|
},
|
|
14
54
|
async validateAuthorizationCode(code, codeVerifier) {
|
|
55
|
+
const google = await getClient();
|
|
15
56
|
const tokens = await google.validateAuthorizationCode(code, codeVerifier);
|
|
16
57
|
return {
|
|
17
58
|
accessToken: tokens.accessToken(),
|
|
@@ -20,6 +61,7 @@ function createGoogleHandler(cfg) {
|
|
|
20
61
|
};
|
|
21
62
|
},
|
|
22
63
|
async refreshAccessToken(refreshToken) {
|
|
64
|
+
const google = await getClient();
|
|
23
65
|
const tokens = await google.refreshAccessToken(refreshToken);
|
|
24
66
|
return {
|
|
25
67
|
accessToken: tokens.accessToken(),
|
|
@@ -1,18 +1,59 @@
|
|
|
1
1
|
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
15
|
+
}) : function(o, v) {
|
|
16
|
+
o["default"] = v;
|
|
17
|
+
});
|
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
19
|
+
var ownKeys = function(o) {
|
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
21
|
+
var ar = [];
|
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
23
|
+
return ar;
|
|
24
|
+
};
|
|
25
|
+
return ownKeys(o);
|
|
26
|
+
};
|
|
27
|
+
return function (mod) {
|
|
28
|
+
if (mod && mod.__esModule) return mod;
|
|
29
|
+
var result = {};
|
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
31
|
+
__setModuleDefault(result, mod);
|
|
32
|
+
return result;
|
|
33
|
+
};
|
|
34
|
+
})();
|
|
2
35
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
36
|
exports.createMicrosoftHandler = createMicrosoftHandler;
|
|
4
|
-
const arctic_1 = require("arctic");
|
|
5
37
|
function createMicrosoftHandler(cfg) {
|
|
6
38
|
const redirectUri = cfg.redirectUri || '';
|
|
7
39
|
const tenantId = process.env.OAUTH_MICROSOFT_TENANT_ID || 'common';
|
|
8
|
-
|
|
40
|
+
let _ms;
|
|
41
|
+
async function getClient() {
|
|
42
|
+
if (!_ms) {
|
|
43
|
+
const { MicrosoftEntraId } = await Promise.resolve().then(() => __importStar(require('arctic')));
|
|
44
|
+
_ms = new MicrosoftEntraId(tenantId, cfg.clientId, cfg.clientSecret, redirectUri);
|
|
45
|
+
}
|
|
46
|
+
return _ms;
|
|
47
|
+
}
|
|
9
48
|
return {
|
|
10
49
|
config: cfg,
|
|
11
|
-
createAuthorizationURL(state, codeVerifier) {
|
|
50
|
+
async createAuthorizationURL(state, codeVerifier) {
|
|
51
|
+
const ms = await getClient();
|
|
12
52
|
const scopes = cfg.scopes.length > 0 ? cfg.scopes : ['openid', 'email', 'profile'];
|
|
13
53
|
return ms.createAuthorizationURL(state, codeVerifier, scopes);
|
|
14
54
|
},
|
|
15
55
|
async validateAuthorizationCode(code, codeVerifier) {
|
|
56
|
+
const ms = await getClient();
|
|
16
57
|
const tokens = await ms.validateAuthorizationCode(code, codeVerifier);
|
|
17
58
|
return {
|
|
18
59
|
accessToken: tokens.accessToken(),
|
|
@@ -21,6 +62,7 @@ function createMicrosoftHandler(cfg) {
|
|
|
21
62
|
};
|
|
22
63
|
},
|
|
23
64
|
async refreshAccessToken(refreshToken) {
|
|
65
|
+
const ms = await getClient();
|
|
24
66
|
const scopes = cfg.scopes.length > 0 ? cfg.scopes : ['openid', 'email', 'profile'];
|
|
25
67
|
const tokens = await ms.refreshAccessToken(refreshToken, scopes);
|
|
26
68
|
return {
|
|
@@ -6,7 +6,7 @@ export interface OAuthTokens {
|
|
|
6
6
|
}
|
|
7
7
|
export interface ProviderHandler {
|
|
8
8
|
config: LTOAuthProviderConfig;
|
|
9
|
-
createAuthorizationURL(state: string, codeVerifier: string): URL
|
|
9
|
+
createAuthorizationURL(state: string, codeVerifier: string): URL | Promise<URL>;
|
|
10
10
|
validateAuthorizationCode(code: string, codeVerifier: string): Promise<OAuthTokens>;
|
|
11
11
|
refreshAccessToken(refreshToken: string): Promise<OAuthTokens>;
|
|
12
12
|
fetchUserInfo(accessToken: string): Promise<LTOAuthUserInfo>;
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@hotmeshio/long-tail",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.2",
|
|
4
4
|
"description": "Long Tail Workflows — Durable AI workflows with human-in-the-loop escalation. Powered by PostgreSQL.",
|
|
5
5
|
"main": "./build/index.js",
|
|
6
6
|
"types": "./build/index.d.ts",
|
|
@@ -10,6 +10,7 @@
|
|
|
10
10
|
},
|
|
11
11
|
"files": [
|
|
12
12
|
"build/",
|
|
13
|
+
"services/db/schemas/",
|
|
13
14
|
"dashboard/dist/",
|
|
14
15
|
"LICENSE",
|
|
15
16
|
"README.md"
|
|
@@ -57,7 +58,7 @@
|
|
|
57
58
|
"author": "luke.birdeau@gmail.com",
|
|
58
59
|
"license": "SEE LICENSE IN LICENSE",
|
|
59
60
|
"dependencies": {
|
|
60
|
-
"@anthropic-ai/sdk": "^0.
|
|
61
|
+
"@anthropic-ai/sdk": "^0.82.0",
|
|
61
62
|
"@aws-sdk/client-s3": "^3.1017.0",
|
|
62
63
|
"@hotmeshio/hotmesh": "^0.13.0",
|
|
63
64
|
"@modelcontextprotocol/sdk": "^1.27.1",
|
|
@@ -91,6 +92,6 @@
|
|
|
91
92
|
"ts-node": "^10.9.1",
|
|
92
93
|
"ts-node-dev": "^2.0.0",
|
|
93
94
|
"typescript": "^5.0.4",
|
|
94
|
-
"vitest": "^
|
|
95
|
+
"vitest": "^4.1.2"
|
|
95
96
|
}
|
|
96
97
|
}
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
PostgreSQL connection pool and migration runner. Provides the shared `pg.Pool` singleton used by all services and a sequential SQL migration system.
|
|
2
|
+
|
|
3
|
+
Key files:
|
|
4
|
+
- `index.ts` — `getPool()` (lazy singleton) and `closePool()` for the shared `pg.Pool`
|
|
5
|
+
- `migrate.ts` — Reads `.sql` files from `schemas/`, tracks applied migrations in `lt_migrations`, applies new ones in sort order
|
|
6
|
+
- `schemas/` — Numbered SQL migration files (001_schema.sql, 002_seed.sql, etc.)
|
|
7
|
+
|
|
8
|
+
The `migrate.ts` file contains inline SQL for creating the `lt_migrations` tracking table and querying/inserting migration records. This is intentional — the migration runner bootstraps itself before any `sql.ts` infrastructure exists, so externalizing these queries would add complexity without benefit.
|
|
@@ -0,0 +1,307 @@
|
|
|
1
|
+
-- Long Tail Workflows: Schema
|
|
2
|
+
-- Tasks track workflow executions; escalations track human interventions.
|
|
3
|
+
|
|
4
|
+
-- ─── updated_at trigger ─────────────────────────────────────────────────────
|
|
5
|
+
|
|
6
|
+
CREATE OR REPLACE FUNCTION lt_set_updated_at()
|
|
7
|
+
RETURNS TRIGGER AS $$
|
|
8
|
+
BEGIN
|
|
9
|
+
NEW.updated_at = NOW();
|
|
10
|
+
RETURN NEW;
|
|
11
|
+
END;
|
|
12
|
+
$$ LANGUAGE plpgsql;
|
|
13
|
+
|
|
14
|
+
-- ─── lt_roles (canonical role registry) ────────────────────────────────────
|
|
15
|
+
|
|
16
|
+
CREATE TABLE IF NOT EXISTS lt_roles (
|
|
17
|
+
role TEXT PRIMARY KEY,
|
|
18
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
19
|
+
);
|
|
20
|
+
|
|
21
|
+
INSERT INTO lt_roles (role) VALUES
|
|
22
|
+
('reviewer'),
|
|
23
|
+
('engineer'),
|
|
24
|
+
('admin'),
|
|
25
|
+
('superadmin')
|
|
26
|
+
ON CONFLICT DO NOTHING;
|
|
27
|
+
|
|
28
|
+
-- ─── lt_tasks ────────────────────────────────────────────────────────────────
|
|
29
|
+
|
|
30
|
+
CREATE TABLE IF NOT EXISTS lt_tasks (
|
|
31
|
+
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
|
32
|
+
workflow_id TEXT NOT NULL,
|
|
33
|
+
workflow_type TEXT NOT NULL,
|
|
34
|
+
lt_type TEXT NOT NULL,
|
|
35
|
+
task_queue TEXT,
|
|
36
|
+
status TEXT NOT NULL DEFAULT 'pending',
|
|
37
|
+
priority INTEGER NOT NULL DEFAULT 2,
|
|
38
|
+
signal_id TEXT NOT NULL,
|
|
39
|
+
parent_workflow_id TEXT NOT NULL,
|
|
40
|
+
origin_id TEXT,
|
|
41
|
+
parent_id TEXT,
|
|
42
|
+
started_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
43
|
+
completed_at TIMESTAMPTZ,
|
|
44
|
+
envelope TEXT NOT NULL,
|
|
45
|
+
metadata JSONB,
|
|
46
|
+
error TEXT,
|
|
47
|
+
milestones JSONB NOT NULL DEFAULT '[]'::JSONB,
|
|
48
|
+
data TEXT,
|
|
49
|
+
trace_id TEXT,
|
|
50
|
+
span_id TEXT,
|
|
51
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
52
|
+
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
53
|
+
);
|
|
54
|
+
|
|
55
|
+
CREATE INDEX IF NOT EXISTS idx_lt_tasks_status_type ON lt_tasks (status, workflow_type, created_at DESC);
|
|
56
|
+
CREATE INDEX IF NOT EXISTS idx_lt_tasks_parent ON lt_tasks (parent_workflow_id, created_at DESC);
|
|
57
|
+
CREATE INDEX IF NOT EXISTS idx_lt_tasks_lt_type ON lt_tasks (lt_type, status, created_at DESC);
|
|
58
|
+
CREATE INDEX IF NOT EXISTS idx_lt_tasks_completed ON lt_tasks (completed_at, status);
|
|
59
|
+
CREATE INDEX IF NOT EXISTS idx_lt_tasks_signal ON lt_tasks (signal_id);
|
|
60
|
+
CREATE INDEX IF NOT EXISTS idx_lt_tasks_origin ON lt_tasks (origin_id, created_at DESC);
|
|
61
|
+
CREATE INDEX IF NOT EXISTS idx_lt_tasks_workflow_id ON lt_tasks (workflow_id);
|
|
62
|
+
CREATE INDEX IF NOT EXISTS idx_lt_tasks_origin_id ON lt_tasks (origin_id) WHERE origin_id IS NOT NULL;
|
|
63
|
+
CREATE INDEX IF NOT EXISTS idx_lt_tasks_trace ON lt_tasks (trace_id) WHERE trace_id IS NOT NULL;
|
|
64
|
+
|
|
65
|
+
CREATE OR REPLACE TRIGGER trg_lt_tasks_updated_at
|
|
66
|
+
BEFORE UPDATE ON lt_tasks
|
|
67
|
+
FOR EACH ROW EXECUTE FUNCTION lt_set_updated_at();
|
|
68
|
+
|
|
69
|
+
-- ─── lt_escalations ─────────────────────────────────────────────────────────
|
|
70
|
+
|
|
71
|
+
CREATE TABLE IF NOT EXISTS lt_escalations (
|
|
72
|
+
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
|
73
|
+
type TEXT NOT NULL,
|
|
74
|
+
subtype TEXT NOT NULL,
|
|
75
|
+
description TEXT,
|
|
76
|
+
status TEXT NOT NULL DEFAULT 'pending',
|
|
77
|
+
priority INTEGER NOT NULL DEFAULT 2,
|
|
78
|
+
task_id UUID REFERENCES lt_tasks(id),
|
|
79
|
+
origin_id TEXT,
|
|
80
|
+
parent_id TEXT,
|
|
81
|
+
workflow_id TEXT,
|
|
82
|
+
task_queue TEXT,
|
|
83
|
+
workflow_type TEXT,
|
|
84
|
+
role TEXT NOT NULL REFERENCES lt_roles(role),
|
|
85
|
+
assigned_to TEXT,
|
|
86
|
+
assigned_until TIMESTAMPTZ,
|
|
87
|
+
resolved_at TIMESTAMPTZ,
|
|
88
|
+
claimed_at TIMESTAMPTZ,
|
|
89
|
+
envelope TEXT NOT NULL,
|
|
90
|
+
metadata JSONB,
|
|
91
|
+
escalation_payload TEXT,
|
|
92
|
+
resolver_payload TEXT,
|
|
93
|
+
trace_id TEXT,
|
|
94
|
+
span_id TEXT,
|
|
95
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
96
|
+
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
97
|
+
);
|
|
98
|
+
|
|
99
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_available ON lt_escalations (status, role, assigned_until, created_at DESC);
|
|
100
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_available_v2 ON lt_escalations (role, priority, created_at DESC) WHERE status = 'pending';
|
|
101
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_assigned ON lt_escalations (assigned_to, assigned_until, created_at DESC);
|
|
102
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_expiry ON lt_escalations (assigned_until, assigned_to);
|
|
103
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_role_type ON lt_escalations (role, status, type, created_at DESC);
|
|
104
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_role_subtype ON lt_escalations (role, status, type, subtype, created_at DESC);
|
|
105
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_status ON lt_escalations (status, created_at DESC);
|
|
106
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_task ON lt_escalations (task_id);
|
|
107
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_origin ON lt_escalations (origin_id, created_at DESC);
|
|
108
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_workflow ON lt_escalations (workflow_id);
|
|
109
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_type ON lt_escalations (type);
|
|
110
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_pending_sort ON lt_escalations (priority ASC, created_at ASC) WHERE status = 'pending';
|
|
111
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_origin_id ON lt_escalations (origin_id) WHERE origin_id IS NOT NULL;
|
|
112
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_trace ON lt_escalations (trace_id) WHERE trace_id IS NOT NULL;
|
|
113
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_created_desc ON lt_escalations (created_at DESC);
|
|
114
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_updated_desc ON lt_escalations (updated_at DESC);
|
|
115
|
+
CREATE INDEX IF NOT EXISTS idx_lt_escalations_priority_desc ON lt_escalations (priority DESC, created_at DESC);
|
|
116
|
+
|
|
117
|
+
CREATE OR REPLACE TRIGGER trg_lt_escalations_updated_at
|
|
118
|
+
BEFORE UPDATE ON lt_escalations
|
|
119
|
+
FOR EACH ROW EXECUTE FUNCTION lt_set_updated_at();
|
|
120
|
+
|
|
121
|
+
-- ─── lt_users ────────────────────────────────────────────────────────────────
|
|
122
|
+
|
|
123
|
+
CREATE TABLE IF NOT EXISTS lt_users (
|
|
124
|
+
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
|
125
|
+
external_id TEXT UNIQUE NOT NULL,
|
|
126
|
+
email TEXT,
|
|
127
|
+
display_name TEXT,
|
|
128
|
+
password_hash TEXT,
|
|
129
|
+
status TEXT NOT NULL DEFAULT 'active' CHECK (status IN ('active', 'inactive', 'suspended')),
|
|
130
|
+
metadata JSONB,
|
|
131
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
132
|
+
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
133
|
+
);
|
|
134
|
+
|
|
135
|
+
CREATE TRIGGER lt_users_updated_at
|
|
136
|
+
BEFORE UPDATE ON lt_users
|
|
137
|
+
FOR EACH ROW EXECUTE FUNCTION lt_set_updated_at();
|
|
138
|
+
|
|
139
|
+
CREATE INDEX IF NOT EXISTS idx_lt_users_status ON lt_users (status);
|
|
140
|
+
|
|
141
|
+
-- ─── lt_user_roles ───────────────────────────────────────────────────────────
|
|
142
|
+
|
|
143
|
+
CREATE TABLE IF NOT EXISTS lt_user_roles (
|
|
144
|
+
user_id UUID NOT NULL REFERENCES lt_users(id) ON DELETE CASCADE,
|
|
145
|
+
role TEXT NOT NULL REFERENCES lt_roles(role),
|
|
146
|
+
type TEXT NOT NULL DEFAULT 'member' CHECK (type IN ('superadmin', 'admin', 'member')),
|
|
147
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
148
|
+
PRIMARY KEY (user_id, role)
|
|
149
|
+
);
|
|
150
|
+
|
|
151
|
+
CREATE INDEX IF NOT EXISTS idx_lt_user_roles_type ON lt_user_roles (type);
|
|
152
|
+
CREATE INDEX IF NOT EXISTS idx_lt_user_roles_user_id ON lt_user_roles (user_id);
|
|
153
|
+
|
|
154
|
+
-- ─── lt_config_workflows ────────────────────────────────────────────────────
|
|
155
|
+
|
|
156
|
+
CREATE TABLE IF NOT EXISTS lt_config_workflows (
|
|
157
|
+
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
|
158
|
+
workflow_type TEXT UNIQUE NOT NULL,
|
|
159
|
+
invocable BOOLEAN NOT NULL DEFAULT false,
|
|
160
|
+
task_queue TEXT,
|
|
161
|
+
default_role TEXT NOT NULL DEFAULT 'reviewer' REFERENCES lt_roles(role),
|
|
162
|
+
description TEXT,
|
|
163
|
+
consumes TEXT[] NOT NULL DEFAULT '{}',
|
|
164
|
+
tool_tags TEXT[] NOT NULL DEFAULT '{}',
|
|
165
|
+
envelope_schema JSONB,
|
|
166
|
+
resolver_schema JSONB,
|
|
167
|
+
cron_schedule TEXT,
|
|
168
|
+
execute_as TEXT,
|
|
169
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
170
|
+
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
171
|
+
);
|
|
172
|
+
|
|
173
|
+
CREATE TRIGGER lt_config_workflows_updated_at
|
|
174
|
+
BEFORE UPDATE ON lt_config_workflows
|
|
175
|
+
FOR EACH ROW EXECUTE FUNCTION lt_set_updated_at();
|
|
176
|
+
|
|
177
|
+
CREATE INDEX IF NOT EXISTS idx_config_workflows_tool_tags
|
|
178
|
+
ON lt_config_workflows USING GIN (tool_tags);
|
|
179
|
+
|
|
180
|
+
CREATE TABLE IF NOT EXISTS lt_config_roles (
|
|
181
|
+
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
|
182
|
+
workflow_type TEXT NOT NULL REFERENCES lt_config_workflows(workflow_type) ON DELETE CASCADE,
|
|
183
|
+
role TEXT NOT NULL REFERENCES lt_roles(role),
|
|
184
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
185
|
+
UNIQUE(workflow_type, role)
|
|
186
|
+
);
|
|
187
|
+
|
|
188
|
+
CREATE TABLE IF NOT EXISTS lt_config_invocation_roles (
|
|
189
|
+
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
|
190
|
+
workflow_type TEXT NOT NULL REFERENCES lt_config_workflows(workflow_type) ON DELETE CASCADE,
|
|
191
|
+
role TEXT NOT NULL REFERENCES lt_roles(role),
|
|
192
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
193
|
+
UNIQUE(workflow_type, role)
|
|
194
|
+
);
|
|
195
|
+
|
|
196
|
+
-- ─── lt_mcp_servers ─────────────────────────────────────────────────────────
|
|
197
|
+
|
|
198
|
+
CREATE TABLE IF NOT EXISTS lt_mcp_servers (
|
|
199
|
+
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
|
200
|
+
name TEXT UNIQUE NOT NULL,
|
|
201
|
+
description TEXT,
|
|
202
|
+
transport_type TEXT NOT NULL CHECK (transport_type IN ('stdio', 'sse')),
|
|
203
|
+
transport_config JSONB NOT NULL DEFAULT '{}'::JSONB,
|
|
204
|
+
auto_connect BOOLEAN NOT NULL DEFAULT false,
|
|
205
|
+
tool_manifest JSONB,
|
|
206
|
+
status TEXT NOT NULL DEFAULT 'registered'
|
|
207
|
+
CHECK (status IN ('registered', 'connected', 'error', 'disconnected')),
|
|
208
|
+
last_connected_at TIMESTAMPTZ,
|
|
209
|
+
metadata JSONB,
|
|
210
|
+
tags TEXT[] NOT NULL DEFAULT '{}',
|
|
211
|
+
compile_hints TEXT,
|
|
212
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
213
|
+
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
214
|
+
);
|
|
215
|
+
|
|
216
|
+
CREATE INDEX IF NOT EXISTS idx_lt_mcp_servers_name ON lt_mcp_servers (name);
|
|
217
|
+
CREATE INDEX IF NOT EXISTS idx_lt_mcp_servers_status ON lt_mcp_servers (status);
|
|
218
|
+
CREATE INDEX IF NOT EXISTS idx_lt_mcp_servers_auto_connect ON lt_mcp_servers (auto_connect) WHERE auto_connect = true;
|
|
219
|
+
CREATE INDEX IF NOT EXISTS idx_lt_mcp_servers_tags ON lt_mcp_servers USING GIN (tags);
|
|
220
|
+
|
|
221
|
+
CREATE OR REPLACE TRIGGER trg_lt_mcp_servers_updated_at
|
|
222
|
+
BEFORE UPDATE ON lt_mcp_servers
|
|
223
|
+
FOR EACH ROW EXECUTE FUNCTION lt_set_updated_at();
|
|
224
|
+
|
|
225
|
+
-- ─── lt_config_role_escalations ─────────────────────────────────────────────
|
|
226
|
+
|
|
227
|
+
CREATE TABLE IF NOT EXISTS lt_config_role_escalations (
|
|
228
|
+
source_role TEXT NOT NULL REFERENCES lt_roles(role),
|
|
229
|
+
target_role TEXT NOT NULL REFERENCES lt_roles(role),
|
|
230
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
231
|
+
PRIMARY KEY (source_role, target_role)
|
|
232
|
+
);
|
|
233
|
+
|
|
234
|
+
CREATE INDEX IF NOT EXISTS idx_lt_config_role_escalations_source
|
|
235
|
+
ON lt_config_role_escalations (source_role);
|
|
236
|
+
|
|
237
|
+
-- ─── lt_yaml_workflows ──────────────────────────────────────────────────────
|
|
238
|
+
|
|
239
|
+
CREATE TABLE IF NOT EXISTS lt_yaml_workflows (
|
|
240
|
+
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
|
241
|
+
name TEXT UNIQUE NOT NULL,
|
|
242
|
+
description TEXT,
|
|
243
|
+
app_id TEXT NOT NULL,
|
|
244
|
+
app_version TEXT NOT NULL DEFAULT '1',
|
|
245
|
+
source_workflow_id TEXT,
|
|
246
|
+
source_workflow_type TEXT,
|
|
247
|
+
yaml_content TEXT NOT NULL,
|
|
248
|
+
graph_topic TEXT NOT NULL,
|
|
249
|
+
input_schema JSONB NOT NULL DEFAULT '{}'::JSONB,
|
|
250
|
+
output_schema JSONB NOT NULL DEFAULT '{}'::JSONB,
|
|
251
|
+
activity_manifest JSONB NOT NULL DEFAULT '[]'::JSONB,
|
|
252
|
+
status TEXT NOT NULL DEFAULT 'draft'
|
|
253
|
+
CHECK (status IN ('draft', 'deployed', 'active', 'archived')),
|
|
254
|
+
deployed_at TIMESTAMPTZ,
|
|
255
|
+
activated_at TIMESTAMPTZ,
|
|
256
|
+
content_version INTEGER NOT NULL DEFAULT 1,
|
|
257
|
+
deployed_content_version INTEGER,
|
|
258
|
+
tags TEXT[] NOT NULL DEFAULT '{}',
|
|
259
|
+
input_field_meta JSONB NOT NULL DEFAULT '[]'::JSONB,
|
|
260
|
+
metadata JSONB,
|
|
261
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
262
|
+
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
263
|
+
);
|
|
264
|
+
|
|
265
|
+
CREATE INDEX IF NOT EXISTS idx_lt_yaml_workflows_status ON lt_yaml_workflows (status);
|
|
266
|
+
CREATE INDEX IF NOT EXISTS idx_lt_yaml_workflows_app_id ON lt_yaml_workflows (app_id);
|
|
267
|
+
CREATE INDEX IF NOT EXISTS idx_lt_yaml_workflows_tags ON lt_yaml_workflows USING GIN (tags);
|
|
268
|
+
|
|
269
|
+
CREATE OR REPLACE TRIGGER trg_lt_yaml_workflows_updated_at
|
|
270
|
+
BEFORE UPDATE ON lt_yaml_workflows
|
|
271
|
+
FOR EACH ROW EXECUTE FUNCTION lt_set_updated_at();
|
|
272
|
+
|
|
273
|
+
-- ─── lt_yaml_workflow_versions ──────────────────────────────────────────────
|
|
274
|
+
|
|
275
|
+
CREATE TABLE IF NOT EXISTS lt_yaml_workflow_versions (
|
|
276
|
+
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
|
277
|
+
workflow_id UUID NOT NULL REFERENCES lt_yaml_workflows(id) ON DELETE CASCADE,
|
|
278
|
+
version INTEGER NOT NULL,
|
|
279
|
+
yaml_content TEXT NOT NULL,
|
|
280
|
+
activity_manifest JSONB NOT NULL DEFAULT '[]'::JSONB,
|
|
281
|
+
input_schema JSONB NOT NULL DEFAULT '{}'::JSONB,
|
|
282
|
+
output_schema JSONB NOT NULL DEFAULT '{}'::JSONB,
|
|
283
|
+
input_field_meta JSONB NOT NULL DEFAULT '[]'::JSONB,
|
|
284
|
+
change_summary TEXT,
|
|
285
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
286
|
+
UNIQUE (workflow_id, version)
|
|
287
|
+
);
|
|
288
|
+
|
|
289
|
+
CREATE INDEX IF NOT EXISTS idx_lt_yaml_wf_versions_workflow
|
|
290
|
+
ON lt_yaml_workflow_versions (workflow_id, version DESC);
|
|
291
|
+
|
|
292
|
+
-- ─── lt_namespaces ──────────────────────────────────────────────────────────
|
|
293
|
+
|
|
294
|
+
CREATE TABLE IF NOT EXISTS lt_namespaces (
|
|
295
|
+
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
|
296
|
+
name TEXT UNIQUE NOT NULL,
|
|
297
|
+
description TEXT,
|
|
298
|
+
schema_name TEXT NOT NULL,
|
|
299
|
+
is_default BOOLEAN NOT NULL DEFAULT false,
|
|
300
|
+
metadata JSONB,
|
|
301
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
302
|
+
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
303
|
+
);
|
|
304
|
+
|
|
305
|
+
INSERT INTO lt_namespaces (name, schema_name, is_default, description)
|
|
306
|
+
VALUES ('longtail', 'longtail', true, 'Default Long Tail namespace')
|
|
307
|
+
ON CONFLICT (name) DO NOTHING;
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
-- Seed data: workflow configs, MCP servers, escalation chains.
-- All INSERTs are idempotent via ON CONFLICT ... DO NOTHING, so this file
-- is safe to re-run.

-- ─── MCP servers ────────────────────────────────────────────────────────────

INSERT INTO lt_mcp_servers (name, description, transport_type, transport_config, auto_connect, status)
VALUES (
  'long-tail-db-query',
  'Built-in read-only query server for tasks, escalations, processes, and system health',
  'stdio',
  '{"builtin": true}'::jsonb,
  false,
  -- Seeded as already-connected because it is in-process ("builtin": true).
  'connected'
)
ON CONFLICT (name) DO NOTHING;

-- ─── Escalation chains ─────────────────────────────────────────────────────
-- Directed source_role -> target_role edges an escalation may follow.

INSERT INTO lt_config_role_escalations (source_role, target_role) VALUES
  ('reviewer', 'engineer'),
  ('reviewer', 'admin'),
  ('engineer', 'admin'),
  ('engineer', 'superadmin'),
  ('admin', 'engineer'),
  ('admin', 'superadmin')
ON CONFLICT DO NOTHING;

-- ─── Example workflows (all directly invocable) ────────────────────────────
-- envelope_schema is the sample invocation payload shown in the dashboard;
-- resolver_schema is the sample manual-resolution payload (NULL = none).

INSERT INTO lt_config_workflows
  (workflow_type, task_queue, default_role, invocable, description, tool_tags, envelope_schema, resolver_schema)
VALUES
  -- Review content
  ('reviewContent', 'long-tail-examples', 'reviewer', true,
   'Content review — AI-powered moderation with human escalation for low-confidence results',
   ARRAY['document-processing', 'vision', 'ocr', 'translation'],
   '{"data": {"contentId": "article-001", "content": "Content to review...", "contentType": "article"}, "metadata": {"source": "dashboard"}}'::jsonb,
   '{"approved": true, "analysis": {"confidence": 0.95, "flags": [], "summary": "Manually reviewed and approved."}}'::jsonb),

  -- Verify document
  ('verifyDocument', 'long-tail-examples', 'reviewer', true,
   'Document verification — AI Vision analyzes identity documents',
   ARRAY['document-processing', 'vision', 'ocr', 'translation'],
   '{"data": {"documentId": "doc-001", "documentUrl": "https://example.com/doc.jpg", "documentType": "drivers_license", "memberId": "member-12345"}, "metadata": {"source": "dashboard"}}'::jsonb,
   '{"memberId": "", "extractedInfo": {}, "validationResult": "match", "confidence": 1.0}'::jsonb),

  -- Process claim
  ('processClaim', 'long-tail-examples', 'reviewer', true,
   'Insurance claim processing — document analysis, validation, and human review',
   ARRAY['document-processing', 'vision', 'database', 'query'],
   '{"data": {"claimId": "CLM-2024-001", "claimantId": "POL-5551234", "claimType": "auto_collision", "amount": 12500, "documents": ["incident_report.pdf", "photo_evidence.jpg"]}, "metadata": {"source": "dashboard"}}'::jsonb,
   '{"approved": true, "analysis": {"confidence": 0.92, "flags": [], "summary": "Documents reviewed and verified."}, "status": "resolved"}'::jsonb),

  -- Kitchen sink
  ('kitchenSink', 'long-tail-examples', 'reviewer', true,
   'Kitchen sink — demonstrates sleep, signals, parallel activities, escalation, and every durable primitive',
   -- '{}' is the empty-array literal for the tool_tags array column.
   '{}',
   '{"data": {"name": "World", "mode": "full"}, "metadata": {"source": "dashboard"}}'::jsonb,
   NULL)
ON CONFLICT (workflow_type) DO NOTHING;

-- ─── Assign roles to all workflows ──────────────────────────────────────────
-- Grant reviewer/engineer/admin on every seeded workflow (cross product via unnest).

INSERT INTO lt_config_roles (workflow_type, role)
SELECT workflow_type, unnest(ARRAY['reviewer', 'engineer', 'admin'])
FROM lt_config_workflows
ON CONFLICT (workflow_type, role) DO NOTHING;
|
|
67
|
+
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
-- Workflow discovery: full-text search, original prompt, and category.

-- Original prompt that spawned this workflow (richest semantic signal)
ALTER TABLE lt_yaml_workflows ADD COLUMN IF NOT EXISTS original_prompt TEXT;

-- Capability category derived from tool usage patterns
ALTER TABLE lt_yaml_workflows ADD COLUMN IF NOT EXISTS category TEXT;

-- Full-text search vector, auto-maintained by trigger
ALTER TABLE lt_yaml_workflows ADD COLUMN IF NOT EXISTS search_vector TSVECTOR;

-- GIN index on search_vector for fast full-text search
CREATE INDEX IF NOT EXISTS idx_lt_yaml_workflows_search
  ON lt_yaml_workflows USING GIN (search_vector);

-- Partial index on category for filtered queries (NULL categories excluded)
CREATE INDEX IF NOT EXISTS idx_lt_yaml_workflows_category
  ON lt_yaml_workflows (category) WHERE category IS NOT NULL;

-- Trigger: rebuild search_vector from name, description, tags, original_prompt, category.
-- Weights rank matches: name/prompt (A) > description (B) > category/tags (C).
CREATE OR REPLACE FUNCTION lt_yaml_workflows_search_vector_update()
RETURNS TRIGGER AS $$
BEGIN
  NEW.search_vector :=
    setweight(to_tsvector('english', coalesce(NEW.name, '')), 'A') ||
    setweight(to_tsvector('english', coalesce(NEW.original_prompt, '')), 'A') ||
    setweight(to_tsvector('english', coalesce(NEW.description, '')), 'B') ||
    setweight(to_tsvector('english', coalesce(NEW.category, '')), 'C') ||
    setweight(to_tsvector('english', coalesce(array_to_string(NEW.tags, ' '), '')), 'C');
  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE TRIGGER trg_lt_yaml_workflows_search_vector
  BEFORE INSERT OR UPDATE ON lt_yaml_workflows
  FOR EACH ROW EXECUTE FUNCTION lt_yaml_workflows_search_vector_update();

-- Backfill search_vector for existing rows by forcing an UPDATE so the
-- trigger above fires. Side effect: every row's updated_at is bumped to NOW()
-- (both by this SET and by trg_lt_yaml_workflows_updated_at).
UPDATE lt_yaml_workflows SET updated_at = NOW();
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
-- Split mcpQuery into router + dynamic + deterministic workflows.
-- mcpQueryRouter is the new entry point (orchestrator).
-- mcpQuery becomes dynamic-only (leaf).
-- mcpDeterministic invokes compiled YAML workflows (leaf).

-- Update existing mcpQuery: no longer directly invocable (called via router).
-- NOTE(review): on a fresh database this UPDATE matches zero rows — the row is
-- seeded later by 011_system_workflow_configs.sql.
UPDATE lt_config_workflows
SET invocable = false,
    description = 'Dynamic MCP tool orchestration — LLM agentic loop with raw MCP tools'
WHERE workflow_type = 'mcpQuery';

-- Add mcpQueryRouter (orchestrator — the new entry point)
INSERT INTO lt_config_workflows
  (workflow_type, task_queue, default_role, invocable, description, tool_tags, envelope_schema)
VALUES
  ('mcpQueryRouter', 'long-tail-system', 'engineer', true,
   'Do anything with tools — browser automation, file operations, HTTP requests, database queries, document processing, and more',
   '{}',
   '{"data": {"prompt": "Describe what you want to accomplish using available tools..."}, "metadata": {"source": "dashboard"}}'::jsonb)
ON CONFLICT (workflow_type) DO NOTHING;

-- Add mcpDeterministic (leaf — invokes compiled YAML workflows)
INSERT INTO lt_config_workflows
  (workflow_type, task_queue, default_role, invocable, description, tool_tags)
VALUES
  ('mcpDeterministic', 'long-tail-system', 'engineer', false,
   'Deterministic execution — invokes matched compiled YAML workflows with extracted inputs',
   '{}')
ON CONFLICT (workflow_type) DO NOTHING;

-- Assign roles (idempotent per (workflow_type, role))
INSERT INTO lt_config_roles (workflow_type, role)
SELECT 'mcpQueryRouter', unnest(ARRAY['reviewer', 'engineer', 'admin'])
ON CONFLICT (workflow_type, role) DO NOTHING;

INSERT INTO lt_config_roles (workflow_type, role)
SELECT 'mcpDeterministic', unnest(ARRAY['reviewer', 'engineer', 'admin'])
ON CONFLICT (workflow_type, role) DO NOTHING;
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
-- Split mcpTriage into router + dynamic + deterministic workflows.
-- mcpTriageRouter is the new entry point (orchestrator).
-- mcpTriage becomes dynamic-only (leaf).
-- mcpTriageDeterministic invokes compiled YAML workflows (leaf).

-- Update existing mcpTriage: no longer directly invocable (called via router).
-- NOTE(review): on a fresh database this UPDATE matches zero rows — the row is
-- seeded later by 011_system_workflow_configs.sql.
UPDATE lt_config_workflows
SET invocable = false,
    description = 'Dynamic MCP triage — LLM agentic loop for escalation remediation'
WHERE workflow_type = 'mcpTriage';

-- Add mcpTriageRouter (orchestrator — the new entry point for triage).
-- NOTE(review): inserted with invocable = false, unlike mcpQueryRouter in 004
-- which is invocable = true. Presumably triage is only started by the system
-- (escalation handling), never directly from the dashboard — confirm intent.
INSERT INTO lt_config_workflows
  (workflow_type, task_queue, default_role, invocable, description, tool_tags)
VALUES
  ('mcpTriageRouter', 'long-tail-system', 'engineer', false,
   'Triage router — discovers compiled workflows for remediation, routes to deterministic or dynamic triage',
   '{}')
ON CONFLICT (workflow_type) DO NOTHING;

-- Add mcpTriageDeterministic (leaf — invokes compiled triage workflows)
INSERT INTO lt_config_workflows
  (workflow_type, task_queue, default_role, invocable, description, tool_tags)
VALUES
  ('mcpTriageDeterministic', 'long-tail-system', 'engineer', false,
   'Deterministic triage — invokes matched compiled workflows for escalation remediation',
   '{}')
ON CONFLICT (workflow_type) DO NOTHING;

-- Assign roles (idempotent per (workflow_type, role))
INSERT INTO lt_config_roles (workflow_type, role)
SELECT 'mcpTriageRouter', unnest(ARRAY['reviewer', 'engineer', 'admin'])
ON CONFLICT (workflow_type, role) DO NOTHING;

INSERT INTO lt_config_roles (workflow_type, role)
SELECT 'mcpTriageDeterministic', unnest(ARRAY['reviewer', 'engineer', 'admin'])
ON CONFLICT (workflow_type, role) DO NOTHING;
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
-- ── OAuth token storage ─────────────────────────────────────────────────────
-- Encrypted per-user, per-provider OAuth tokens for identity and resource OAuth.

CREATE TABLE IF NOT EXISTS lt_oauth_tokens (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  user_id UUID NOT NULL REFERENCES lt_users(id) ON DELETE CASCADE,
  provider TEXT NOT NULL,
  -- Distinguishes multiple credentials per (user, provider).
  label TEXT NOT NULL DEFAULT 'default',
  -- Tokens are stored encrypted (_enc suffix); plaintext never hits the DB.
  access_token_enc TEXT NOT NULL,
  refresh_token_enc TEXT,
  token_type TEXT NOT NULL DEFAULT 'bearer',
  scopes TEXT[] NOT NULL DEFAULT '{}',
  expires_at TIMESTAMPTZ,
  provider_user_id TEXT NOT NULL,
  provider_email TEXT,
  metadata JSONB,
  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  UNIQUE (user_id, provider, label)
);

-- Migration: add label column for multiple credentials per provider per user.
-- Existing rows get 'default'. The unique constraint moves from (user_id, provider)
-- to (user_id, provider, label).
-- (No-op for fresh installs where CREATE TABLE above already includes it.)
ALTER TABLE lt_oauth_tokens ADD COLUMN IF NOT EXISTS label TEXT NOT NULL DEFAULT 'default';

-- Drop old unique constraint if it exists (safe no-op if already migrated)
DO $$ BEGIN
  ALTER TABLE lt_oauth_tokens DROP CONSTRAINT IF EXISTS lt_oauth_tokens_user_id_provider_key;
EXCEPTION WHEN undefined_object THEN NULL;
END $$;

-- Create the new composite unique constraint (idempotent via IF NOT EXISTS on
-- index). On fresh installs the table-level UNIQUE above already created an
-- index with this default name, so this is a no-op; it only matters for
-- databases migrated from the pre-label schema.
CREATE UNIQUE INDEX IF NOT EXISTS lt_oauth_tokens_user_id_provider_label_key
  ON lt_oauth_tokens (user_id, provider, label);

CREATE INDEX IF NOT EXISTS idx_lt_oauth_tokens_provider
  ON lt_oauth_tokens (provider, user_id);

CREATE OR REPLACE TRIGGER trg_lt_oauth_tokens_updated_at
  BEFORE UPDATE ON lt_oauth_tokens
  FOR EACH ROW EXECUTE FUNCTION lt_set_updated_at();

-- ── Identity link columns on lt_users ──────────────────────────────────────
ALTER TABLE lt_users ADD COLUMN IF NOT EXISTS oauth_provider TEXT;
ALTER TABLE lt_users ADD COLUMN IF NOT EXISTS oauth_provider_id TEXT;

-- Partial index: only users who actually signed in via OAuth.
CREATE INDEX IF NOT EXISTS idx_lt_users_oauth
  ON lt_users (oauth_provider, oauth_provider_id)
  WHERE oauth_provider IS NOT NULL;
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
-- ── Service tokens for external MCP servers ─────────────────────────────────
-- Only a hash of the token is stored; the plaintext is shown once at creation.
CREATE TABLE IF NOT EXISTS lt_service_tokens (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  name TEXT UNIQUE NOT NULL,
  token_hash TEXT NOT NULL,
  -- Optional binding to one MCP server; token dies with the server row.
  server_id UUID REFERENCES lt_mcp_servers(id) ON DELETE CASCADE,
  scopes TEXT[] NOT NULL DEFAULT '{}',
  expires_at TIMESTAMPTZ,
  last_used_at TIMESTAMPTZ,
  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_lt_service_tokens_server
  ON lt_service_tokens (server_id);

CREATE OR REPLACE TRIGGER trg_lt_service_tokens_updated_at
  BEFORE UPDATE ON lt_service_tokens
  FOR EACH ROW EXECUTE FUNCTION lt_set_updated_at();

-- ── Audit: who initiated escalations ────────────────────────────────────────
-- Nullable: pre-existing escalations and system-initiated ones have no user.
ALTER TABLE lt_escalations ADD COLUMN IF NOT EXISTS created_by UUID REFERENCES lt_users(id);
CREATE INDEX IF NOT EXISTS idx_lt_escalations_created_by
  ON lt_escalations (created_by) WHERE created_by IS NOT NULL;

-- ── Scope declarations for MCP servers ──────────────────────────────────────
ALTER TABLE lt_mcp_servers ADD COLUMN IF NOT EXISTS required_scopes TEXT[] NOT NULL DEFAULT '{}';
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
-- 008_bot_accounts.sql
-- Bot/service account support for universal IAM.
-- Bots live in lt_users (account_type = 'bot') and authenticate via API keys.

-- Add account_type column to lt_users to distinguish human vs bot accounts.
ALTER TABLE lt_users ADD COLUMN IF NOT EXISTS account_type TEXT NOT NULL DEFAULT 'user';

-- Apply check constraint (idempotent: ADD CONSTRAINT has no IF NOT EXISTS, so
-- swallow the duplicate_object error on re-run).
DO $$ BEGIN
  ALTER TABLE lt_users ADD CONSTRAINT lt_users_account_type_check
    CHECK (account_type IN ('user', 'bot'));
EXCEPTION
  WHEN duplicate_object THEN NULL;
END $$;

-- Bot API keys — similar to lt_service_tokens but scoped to a user (bot) account.
-- Only the hash is stored; key names are unique per bot, not globally.
CREATE TABLE IF NOT EXISTS lt_bot_api_keys (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  name TEXT NOT NULL,
  user_id UUID NOT NULL REFERENCES lt_users(id) ON DELETE CASCADE,
  key_hash TEXT NOT NULL,
  scopes TEXT[] NOT NULL DEFAULT '{}',
  expires_at TIMESTAMPTZ,
  last_used_at TIMESTAMPTZ,
  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  UNIQUE (user_id, name)
);

CREATE INDEX IF NOT EXISTS idx_bot_api_keys_user_id ON lt_bot_api_keys (user_id);
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
-- 009_audit_trail.sql
-- Add IAM audit columns to lt_tasks for identity traceability.

-- Who started the task; SET NULL keeps the task row if the user is deleted.
ALTER TABLE lt_tasks ADD COLUMN IF NOT EXISTS initiated_by UUID REFERENCES lt_users(id) ON DELETE SET NULL;
-- NOTE(review): no CHECK constraint here, unlike lt_users.account_type in 008 —
-- presumably values are 'user'/'bot'/etc.; confirm and consider constraining.
ALTER TABLE lt_tasks ADD COLUMN IF NOT EXISTS principal_type TEXT DEFAULT 'user';

CREATE INDEX IF NOT EXISTS idx_lt_tasks_initiated_by ON lt_tasks (initiated_by) WHERE initiated_by IS NOT NULL;
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
-- Ensure all system leaf workflows have config entries.
-- Migrations 004/005 tried to UPDATE these but they were never seeded —
-- the interceptor needs config entries to wrap workflows with lifecycle events.
-- Descriptions match what 004/005 would have set via UPDATE, so the end state
-- is the same regardless of whether the rows existed earlier.

INSERT INTO lt_config_workflows
  (workflow_type, task_queue, default_role, invocable, description, tool_tags)
VALUES
  ('mcpQuery', 'long-tail-system', 'engineer', false,
   'Dynamic MCP tool orchestration — LLM agentic loop with raw MCP tools',
   '{}'),
  ('mcpTriage', 'long-tail-system', 'engineer', false,
   'Dynamic MCP triage — LLM agentic loop for escalation remediation',
   '{}')
ON CONFLICT (workflow_type) DO NOTHING;

-- Assign roles (idempotent per (workflow_type, role))
INSERT INTO lt_config_roles (workflow_type, role)
SELECT 'mcpQuery', unnest(ARRAY['reviewer', 'engineer', 'admin'])
ON CONFLICT (workflow_type, role) DO NOTHING;

INSERT INTO lt_config_roles (workflow_type, role)
SELECT 'mcpTriage', unnest(ARRAY['reviewer', 'engineer', 'admin'])
ON CONFLICT (workflow_type, role) DO NOTHING;
|
|
@@ -0,0 +1,6 @@
|
|
|
1
|
+
-- Remove delivery modality — the concept was never used for actual routing.
-- Alpha cleanup: drop from config, escalations, and tasks tables.
-- IF EXISTS keeps this idempotent and safe on fresh installs where the
-- columns were never created.

ALTER TABLE lt_config_workflows DROP COLUMN IF EXISTS default_modality;
ALTER TABLE lt_escalations DROP COLUMN IF EXISTS modality;
ALTER TABLE lt_tasks DROP COLUMN IF EXISTS modality;
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
-- Add execute_as to workflow configs: proxy invocation identity.
-- When set, workflows run as the named bot instead of the invoking user.
-- NULL (the default) means "run as the invoker".

ALTER TABLE lt_config_workflows ADD COLUMN IF NOT EXISTS execute_as TEXT;

-- Add executing_as to tasks: records the actual executing principal
-- (may differ from initiated_by when proxy invocation is used).

ALTER TABLE lt_tasks ADD COLUMN IF NOT EXISTS executing_as TEXT;
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
-- Ephemeral credential store for sensitive fields in waitFor signal payloads.
-- Supports max_uses (0 = unlimited) and TTL-based expiry.

CREATE TABLE IF NOT EXISTS lt_ephemeral_credentials (
  -- The token doubles as the primary key; holders redeem it to read `value`.
  token UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  -- Raw bytes; presumably encrypted by the application layer — confirm,
  -- since unlike lt_oauth_tokens there is no *_enc naming hint here.
  value BYTEA NOT NULL,
  label TEXT,
  max_uses INTEGER NOT NULL DEFAULT 0,   -- 0 = unlimited redemptions
  use_count INTEGER NOT NULL DEFAULT 0,
  expires_at TIMESTAMPTZ,                -- NULL = never expires
  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Partial index to support sweeping expired credentials.
CREATE INDEX IF NOT EXISTS idx_lt_ephemeral_expiry
  ON lt_ephemeral_credentials (expires_at)
  WHERE expires_at IS NOT NULL;
|