agent-relay 1.3.0 → 1.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.trajectories/active/traj_3yx9dy148mge.json +42 -0
- package/.trajectories/completed/2026-01/traj_1g7yx6qtg4ai.json +49 -0
- package/.trajectories/completed/2026-01/traj_1g7yx6qtg4ai.md +31 -0
- package/.trajectories/completed/2026-01/traj_4qwd4zmhfwp4.json +49 -0
- package/.trajectories/completed/2026-01/traj_4qwd4zmhfwp4.md +31 -0
- package/.trajectories/completed/2026-01/traj_6unwwmgyj5sq.json +109 -0
- package/.trajectories/completed/2026-01/traj_a0tqx8biw9c4.json +49 -0
- package/.trajectories/completed/2026-01/traj_a0tqx8biw9c4.md +31 -0
- package/.trajectories/completed/2026-01/traj_ax8uungxz2qh.json +66 -0
- package/.trajectories/completed/2026-01/traj_ax8uungxz2qh.md +36 -0
- package/.trajectories/completed/2026-01/traj_c9izbh2snpzf.json +49 -0
- package/.trajectories/completed/2026-01/traj_c9izbh2snpzf.md +31 -0
- package/.trajectories/completed/2026-01/traj_cpn70dw066nt.json +65 -0
- package/.trajectories/completed/2026-01/traj_cpn70dw066nt.md +37 -0
- package/.trajectories/completed/2026-01/traj_erglv2f8t9eh.json +36 -0
- package/.trajectories/completed/2026-01/traj_erglv2f8t9eh.md +21 -0
- package/.trajectories/completed/2026-01/traj_he75f24d1xfm.json +101 -0
- package/.trajectories/completed/2026-01/traj_he75f24d1xfm.md +52 -0
- package/.trajectories/completed/2026-01/traj_lgtodco7dp1n.json +61 -0
- package/.trajectories/completed/2026-01/traj_lgtodco7dp1n.md +36 -0
- package/.trajectories/completed/2026-01/traj_oszg9flv74pk.json +73 -0
- package/.trajectories/completed/2026-01/traj_oszg9flv74pk.md +41 -0
- package/.trajectories/completed/2026-01/traj_pulomd3y8cvj.json +77 -0
- package/.trajectories/completed/2026-01/traj_pulomd3y8cvj.md +42 -0
- package/.trajectories/completed/2026-01/traj_rsavt0jipi3c.json +109 -0
- package/.trajectories/completed/2026-01/traj_rsavt0jipi3c.md +56 -0
- package/.trajectories/completed/2026-01/traj_x721m1j9rzup.json +113 -0
- package/.trajectories/completed/2026-01/traj_x721m1j9rzup.md +57 -0
- package/.trajectories/completed/2026-01/traj_xjqvmep5ed3h.json +61 -0
- package/.trajectories/completed/2026-01/traj_xjqvmep5ed3h.md +36 -0
- package/.trajectories/completed/2026-01/traj_y7n6hfbf7dmg.json +49 -0
- package/.trajectories/completed/2026-01/traj_y7n6hfbf7dmg.md +31 -0
- package/.trajectories/completed/2026-01/traj_yvfkwnkdiso2.json +49 -0
- package/.trajectories/completed/2026-01/traj_yvfkwnkdiso2.md +31 -0
- package/.trajectories/index.json +140 -1
- package/TRAIL_GIT_AUTH_FIX.md +113 -0
- package/deploy/workspace/codex.config.toml +1 -1
- package/deploy/workspace/entrypoint.sh +20 -79
- package/deploy/workspace/gh-relay +156 -0
- package/deploy/workspace/git-credential-relay +5 -1
- package/dist/bridge/multi-project-client.js +13 -10
- package/dist/bridge/spawner.d.ts +2 -0
- package/dist/bridge/spawner.js +19 -1
- package/dist/bridge/types.d.ts +2 -0
- package/dist/cli/index.d.ts +1 -1
- package/dist/cli/index.js +115 -69
- package/dist/cloud/api/admin.js +16 -3
- package/dist/cloud/api/codex-auth-helper.js +28 -8
- package/dist/cloud/api/consensus.d.ts +13 -0
- package/dist/cloud/api/consensus.js +259 -0
- package/dist/cloud/api/daemons.js +205 -1
- package/dist/cloud/api/git.js +37 -7
- package/dist/cloud/api/onboarding.js +4 -1
- package/dist/cloud/api/provider-env.d.ts +5 -0
- package/dist/cloud/api/provider-env.js +27 -0
- package/dist/cloud/api/providers.js +2 -0
- package/dist/cloud/api/test-helpers.js +130 -0
- package/dist/cloud/api/workspaces.js +38 -3
- package/dist/cloud/db/bulk-ingest.d.ts +88 -0
- package/dist/cloud/db/bulk-ingest.js +268 -0
- package/dist/cloud/db/drizzle.d.ts +33 -0
- package/dist/cloud/db/drizzle.js +174 -2
- package/dist/cloud/db/index.d.ts +24 -5
- package/dist/cloud/db/index.js +19 -4
- package/dist/cloud/db/schema.d.ts +397 -3
- package/dist/cloud/db/schema.js +75 -1
- package/dist/cloud/provisioner/index.d.ts +8 -0
- package/dist/cloud/provisioner/index.js +256 -50
- package/dist/cloud/server.js +47 -3
- package/dist/cloud/services/index.d.ts +1 -0
- package/dist/cloud/services/index.js +2 -0
- package/dist/cloud/services/nango.d.ts +3 -4
- package/dist/cloud/services/nango.js +11 -33
- package/dist/cloud/services/workspace-keepalive.d.ts +76 -0
- package/dist/cloud/services/workspace-keepalive.js +234 -0
- package/dist/config/relay-config.d.ts +23 -0
- package/dist/config/relay-config.js +23 -0
- package/dist/daemon/agent-manager.d.ts +20 -1
- package/dist/daemon/agent-manager.js +47 -0
- package/dist/daemon/agent-registry.js +4 -4
- package/dist/daemon/agent-signing.d.ts +158 -0
- package/dist/daemon/agent-signing.js +523 -0
- package/dist/daemon/api.js +18 -1
- package/dist/daemon/cli-auth.d.ts +4 -1
- package/dist/daemon/cli-auth.js +55 -11
- package/dist/daemon/cloud-sync.d.ts +47 -1
- package/dist/daemon/cloud-sync.js +152 -3
- package/dist/daemon/connection.d.ts +28 -0
- package/dist/daemon/connection.js +98 -15
- package/dist/daemon/consensus-integration.d.ts +167 -0
- package/dist/daemon/consensus-integration.js +371 -0
- package/dist/daemon/consensus.d.ts +271 -0
- package/dist/daemon/consensus.js +632 -0
- package/dist/daemon/delivery-tracker.d.ts +34 -0
- package/dist/daemon/delivery-tracker.js +104 -0
- package/dist/daemon/enhanced-features.d.ts +118 -0
- package/dist/daemon/enhanced-features.js +178 -0
- package/dist/daemon/index.d.ts +4 -0
- package/dist/daemon/index.js +5 -0
- package/dist/daemon/rate-limiter.d.ts +68 -0
- package/dist/daemon/rate-limiter.js +130 -0
- package/dist/daemon/router.d.ts +18 -11
- package/dist/daemon/router.js +55 -111
- package/dist/daemon/server.d.ts +13 -1
- package/dist/daemon/server.js +71 -9
- package/dist/daemon/sync-queue.d.ts +116 -0
- package/dist/daemon/sync-queue.js +361 -0
- package/dist/health-worker-manager.d.ts +62 -0
- package/dist/health-worker-manager.js +144 -0
- package/dist/health-worker.d.ts +9 -0
- package/dist/health-worker.js +79 -0
- package/dist/index.d.ts +2 -1
- package/dist/index.js +5 -1
- package/dist/memory/context-compaction.d.ts +156 -0
- package/dist/memory/context-compaction.js +453 -0
- package/dist/memory/index.d.ts +1 -0
- package/dist/memory/index.js +1 -0
- package/dist/protocol/channels.js +4 -4
- package/dist/protocol/framing.d.ts +72 -10
- package/dist/protocol/framing.js +194 -25
- package/dist/storage/adapter.d.ts +8 -1
- package/dist/storage/adapter.js +11 -0
- package/dist/storage/batched-sqlite-adapter.d.ts +71 -0
- package/dist/storage/batched-sqlite-adapter.js +183 -0
- package/dist/storage/dead-letter-queue.d.ts +196 -0
- package/dist/storage/dead-letter-queue.js +427 -0
- package/dist/storage/dlq-adapter.d.ts +195 -0
- package/dist/storage/dlq-adapter.js +664 -0
- package/dist/trajectory/config.d.ts +32 -14
- package/dist/trajectory/config.js +38 -16
- package/dist/trajectory/integration.js +217 -64
- package/dist/utils/git-remote.d.ts +47 -0
- package/dist/utils/git-remote.js +125 -0
- package/dist/utils/id-generator.d.ts +35 -0
- package/dist/utils/id-generator.js +60 -0
- package/dist/utils/index.d.ts +1 -0
- package/dist/utils/index.js +1 -0
- package/dist/utils/precompiled-patterns.d.ts +110 -0
- package/dist/utils/precompiled-patterns.js +322 -0
- package/dist/wrapper/auth-detection.js +1 -1
- package/dist/wrapper/base-wrapper.d.ts +36 -0
- package/dist/wrapper/base-wrapper.js +48 -2
- package/dist/wrapper/client.d.ts +14 -4
- package/dist/wrapper/client.js +84 -31
- package/dist/wrapper/idle-detector.d.ts +102 -0
- package/dist/wrapper/idle-detector.js +279 -0
- package/dist/wrapper/parser.d.ts +4 -0
- package/dist/wrapper/parser.js +19 -1
- package/dist/wrapper/pty-wrapper.d.ts +7 -1
- package/dist/wrapper/pty-wrapper.js +51 -27
- package/dist/wrapper/tmux-wrapper.d.ts +12 -1
- package/dist/wrapper/tmux-wrapper.js +65 -17
- package/package.json +5 -5
- package/scripts/run-migrations.js +43 -0
- package/scripts/verify-schema.js +134 -0
- package/tests/benchmarks/protocol.bench.ts +310 -0
- package/dist/dashboard/out/404.html +0 -1
- package/dist/dashboard/out/_next/static/T1tgCqVWHFIkV7ClEtzD7/_buildManifest.js +0 -1
- package/dist/dashboard/out/_next/static/T1tgCqVWHFIkV7ClEtzD7/_ssgManifest.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/116-2502180def231162.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/117-f7b8ab0809342e77.js +0 -2
- package/dist/dashboard/out/_next/static/chunks/282-980c2eb8fff20123.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/532-bace199897eeab37.js +0 -9
- package/dist/dashboard/out/_next/static/chunks/648-5cc6e1921389a58a.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/766-b54f0853794b78c3.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/83-b51836037078006c.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/891-6cd50de1224f70bb.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/899-bb19a9b3d9b39ea6.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/app/_not-found/page-53b8a69f76db17d0.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/app/app/onboarding/page-8939b0fc700f7eca.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/app/app/page-5af1b6b439858aa6.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/app/connect-repos/page-f45ecbc3e06134fc.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/app/history/page-8c8bed33beb2bf1c.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/app/layout-2433bb48965f4333.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/app/login/page-16f3b49e55b1e0ed.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/app/metrics/page-ac39dc0cc3c26fa7.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/app/page-4a5938c18a11a654.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/app/pricing/page-982a7000fee44014.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/app/providers/page-ac3a6ac433fd6001.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/app/providers/setup/[provider]/page-09f9caae98a18c09.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/app/signup/page-547dd0ca55ecd0ba.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/e868780c-48e5f147c90a3a41.js +0 -18
- package/dist/dashboard/out/_next/static/chunks/fd9d1056-609918ca7b6280bb.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/framework-f66176bb897dc684.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/main-2ee6beb2ae96d210.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/main-app-5d692157a8eb1fd9.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/pages/_app-72b849fbd24ac258.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/pages/_error-7ba65e1336b92748.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/polyfills-42372ed130431b0a.js +0 -1
- package/dist/dashboard/out/_next/static/chunks/webpack-1cdd8ed57114d5e1.js +0 -1
- package/dist/dashboard/out/_next/static/css/85d2af9c7ac74d62.css +0 -1
- package/dist/dashboard/out/_next/static/css/fe4b28883eeff359.css +0 -1
- package/dist/dashboard/out/alt-logos/agent-relay-logo-128.png +0 -0
- package/dist/dashboard/out/alt-logos/agent-relay-logo-256.png +0 -0
- package/dist/dashboard/out/alt-logos/agent-relay-logo-32.png +0 -0
- package/dist/dashboard/out/alt-logos/agent-relay-logo-512.png +0 -0
- package/dist/dashboard/out/alt-logos/agent-relay-logo-64.png +0 -0
- package/dist/dashboard/out/alt-logos/agent-relay-logo.svg +0 -45
- package/dist/dashboard/out/alt-logos/logo.svg +0 -38
- package/dist/dashboard/out/alt-logos/monogram-logo-128.png +0 -0
- package/dist/dashboard/out/alt-logos/monogram-logo-256.png +0 -0
- package/dist/dashboard/out/alt-logos/monogram-logo-32.png +0 -0
- package/dist/dashboard/out/alt-logos/monogram-logo-512.png +0 -0
- package/dist/dashboard/out/alt-logos/monogram-logo-64.png +0 -0
- package/dist/dashboard/out/alt-logos/monogram-logo.svg +0 -38
- package/dist/dashboard/out/app/onboarding.html +0 -1
- package/dist/dashboard/out/app/onboarding.txt +0 -7
- package/dist/dashboard/out/app.html +0 -1
- package/dist/dashboard/out/app.txt +0 -7
- package/dist/dashboard/out/apple-icon.png +0 -0
- package/dist/dashboard/out/connect-repos.html +0 -1
- package/dist/dashboard/out/connect-repos.txt +0 -7
- package/dist/dashboard/out/history.html +0 -1
- package/dist/dashboard/out/history.txt +0 -7
- package/dist/dashboard/out/index.html +0 -1
- package/dist/dashboard/out/index.txt +0 -7
- package/dist/dashboard/out/login.html +0 -6
- package/dist/dashboard/out/login.txt +0 -7
- package/dist/dashboard/out/metrics.html +0 -1
- package/dist/dashboard/out/metrics.txt +0 -7
- package/dist/dashboard/out/pricing.html +0 -13
- package/dist/dashboard/out/pricing.txt +0 -7
- package/dist/dashboard/out/providers/setup/claude.html +0 -1
- package/dist/dashboard/out/providers/setup/claude.txt +0 -8
- package/dist/dashboard/out/providers/setup/codex.html +0 -1
- package/dist/dashboard/out/providers/setup/codex.txt +0 -8
- package/dist/dashboard/out/providers.html +0 -1
- package/dist/dashboard/out/providers.txt +0 -7
- package/dist/dashboard/out/signup.html +0 -6
- package/dist/dashboard/out/signup.txt +0 -7
- package/dist/dashboard-server/metrics.d.ts +0 -105
- package/dist/dashboard-server/metrics.js +0 -193
- package/dist/dashboard-server/needs-attention.d.ts +0 -24
- package/dist/dashboard-server/needs-attention.js +0 -78
- package/dist/dashboard-server/server.d.ts +0 -15
- package/dist/dashboard-server/server.js +0 -3776
- package/dist/dashboard-server/start.d.ts +0 -6
- package/dist/dashboard-server/start.js +0 -13
- package/dist/dashboard-server/user-bridge.d.ts +0 -103
- package/dist/dashboard-server/user-bridge.js +0 -189
|
@@ -108,6 +108,136 @@ testHelpersRouter.post('/create-daemon', async (req, res) => {
|
|
|
108
108
|
res.status(500).json({ error: 'Failed to create test daemon' });
|
|
109
109
|
}
|
|
110
110
|
});
|
|
111
|
+
/**
|
|
112
|
+
* POST /api/test/create-workspace
|
|
113
|
+
* Creates a test workspace for integration tests with optional linked repository
|
|
114
|
+
*/
|
|
115
|
+
testHelpersRouter.post('/create-workspace', async (req, res) => {
|
|
116
|
+
if (!isTestMode) {
|
|
117
|
+
return res.status(403).json({ error: 'Test endpoints disabled in production' });
|
|
118
|
+
}
|
|
119
|
+
try {
|
|
120
|
+
const { name, repoFullName, userId: providedUserId } = req.body;
|
|
121
|
+
const db = getDb();
|
|
122
|
+
// Use provided userId, session userId, or create a test user
|
|
123
|
+
let targetUserId = providedUserId || req.session.userId;
|
|
124
|
+
if (!targetUserId) {
|
|
125
|
+
// Create a test user if none exists
|
|
126
|
+
const testId = `test-ws-${randomUUID()}`;
|
|
127
|
+
const [newUser] = await db.insert(users).values({
|
|
128
|
+
email: `${testId}@test.local`,
|
|
129
|
+
githubId: testId,
|
|
130
|
+
githubUsername: 'workspace-test-user',
|
|
131
|
+
avatarUrl: null,
|
|
132
|
+
plan: 'free',
|
|
133
|
+
}).returning();
|
|
134
|
+
targetUserId = newUser.id;
|
|
135
|
+
}
|
|
136
|
+
const targetRepoFullName = repoFullName || `test-org/test-repo-${Date.now()}`;
|
|
137
|
+
// Create workspace
|
|
138
|
+
const [workspace] = await db.insert(workspaces).values({
|
|
139
|
+
userId: targetUserId,
|
|
140
|
+
name: name || `Test Workspace ${Date.now()}`,
|
|
141
|
+
status: 'running',
|
|
142
|
+
publicUrl: 'http://localhost:3889',
|
|
143
|
+
computeProvider: 'docker',
|
|
144
|
+
computeId: `test-${randomUUID().slice(0, 8)}`,
|
|
145
|
+
config: {
|
|
146
|
+
providers: ['anthropic'],
|
|
147
|
+
repositories: [targetRepoFullName],
|
|
148
|
+
supervisorEnabled: true,
|
|
149
|
+
maxAgents: 10,
|
|
150
|
+
},
|
|
151
|
+
}).returning();
|
|
152
|
+
// Create a linked repository for workspace lookup via repoFullName
|
|
153
|
+
const [repo] = await db.insert(repositories).values({
|
|
154
|
+
userId: targetUserId,
|
|
155
|
+
workspaceId: workspace.id,
|
|
156
|
+
githubId: Math.floor(Math.random() * 1000000),
|
|
157
|
+
githubFullName: targetRepoFullName,
|
|
158
|
+
isPrivate: false,
|
|
159
|
+
defaultBranch: 'main',
|
|
160
|
+
syncStatus: 'synced',
|
|
161
|
+
nangoConnectionId: `mock-${randomUUID().slice(0, 8)}`,
|
|
162
|
+
lastSyncedAt: new Date(),
|
|
163
|
+
}).returning();
|
|
164
|
+
res.json({
|
|
165
|
+
workspaceId: workspace.id,
|
|
166
|
+
name: workspace.name,
|
|
167
|
+
repoFullName: repo.githubFullName,
|
|
168
|
+
repoId: repo.id,
|
|
169
|
+
userId: targetUserId,
|
|
170
|
+
});
|
|
171
|
+
}
|
|
172
|
+
catch (error) {
|
|
173
|
+
console.error('Error creating test workspace:', error);
|
|
174
|
+
res.status(500).json({ error: 'Failed to create test workspace' });
|
|
175
|
+
}
|
|
176
|
+
});
|
|
177
|
+
/**
|
|
178
|
+
* POST /api/test/create-daemon-with-workspace
|
|
179
|
+
* Creates a test daemon linked to a workspace
|
|
180
|
+
*/
|
|
181
|
+
testHelpersRouter.post('/create-daemon-with-workspace', async (req, res) => {
|
|
182
|
+
if (!isTestMode) {
|
|
183
|
+
return res.status(403).json({ error: 'Test endpoints disabled in production' });
|
|
184
|
+
}
|
|
185
|
+
try {
|
|
186
|
+
const { name, machineId, workspaceId, userId: providedUserId } = req.body;
|
|
187
|
+
if (!name) {
|
|
188
|
+
return res.status(400).json({ error: 'name is required' });
|
|
189
|
+
}
|
|
190
|
+
const db = getDb();
|
|
191
|
+
// Get or create test user
|
|
192
|
+
let targetUserId = providedUserId;
|
|
193
|
+
if (!targetUserId) {
|
|
194
|
+
const existingUsers = await db.select().from(users).limit(1);
|
|
195
|
+
if (existingUsers.length > 0) {
|
|
196
|
+
targetUserId = existingUsers[0].id;
|
|
197
|
+
}
|
|
198
|
+
else {
|
|
199
|
+
const testId = `test-daemon-${randomUUID()}`;
|
|
200
|
+
const [newUser] = await db.insert(users).values({
|
|
201
|
+
email: `${testId}@test.local`,
|
|
202
|
+
githubId: testId,
|
|
203
|
+
githubUsername: 'daemon-test-user',
|
|
204
|
+
avatarUrl: null,
|
|
205
|
+
plan: 'free',
|
|
206
|
+
}).returning();
|
|
207
|
+
targetUserId = newUser.id;
|
|
208
|
+
}
|
|
209
|
+
}
|
|
210
|
+
// Generate API key
|
|
211
|
+
const apiKey = `ar_live_${randomBytes(32).toString('hex')}`;
|
|
212
|
+
const apiKeyHash = createHash('sha256').update(apiKey).digest('hex');
|
|
213
|
+
// Create daemon with optional workspace link
|
|
214
|
+
const [daemon] = await db.insert(linkedDaemons).values({
|
|
215
|
+
userId: targetUserId,
|
|
216
|
+
workspaceId: workspaceId || null,
|
|
217
|
+
name,
|
|
218
|
+
machineId: machineId || randomUUID(),
|
|
219
|
+
apiKeyHash,
|
|
220
|
+
status: 'online',
|
|
221
|
+
metadata: {
|
|
222
|
+
hostname: 'test-host',
|
|
223
|
+
platform: 'linux',
|
|
224
|
+
version: '1.0.0-test',
|
|
225
|
+
},
|
|
226
|
+
}).returning();
|
|
227
|
+
res.json({
|
|
228
|
+
daemonId: daemon.id,
|
|
229
|
+
apiKey,
|
|
230
|
+
name: daemon.name,
|
|
231
|
+
machineId: daemon.machineId,
|
|
232
|
+
workspaceId: daemon.workspaceId,
|
|
233
|
+
userId: targetUserId,
|
|
234
|
+
});
|
|
235
|
+
}
|
|
236
|
+
catch (error) {
|
|
237
|
+
console.error('Error creating test daemon:', error);
|
|
238
|
+
res.status(500).json({ error: 'Failed to create test daemon' });
|
|
239
|
+
}
|
|
240
|
+
});
|
|
111
241
|
/**
|
|
112
242
|
* DELETE /api/test/cleanup
|
|
113
243
|
* Cleans up test data
|
|
@@ -891,10 +891,41 @@ workspacesRouter.post('/:id/repos', async (req, res) => {
|
|
|
891
891
|
if (workspace.userId !== userId) {
|
|
892
892
|
return res.status(403).json({ error: 'Unauthorized' });
|
|
893
893
|
}
|
|
894
|
-
|
|
894
|
+
const reposToAssign = [];
|
|
895
|
+
const repoFullNames = [];
|
|
895
896
|
for (const repoId of repositoryIds) {
|
|
897
|
+
const repo = await db.repositories.findById(repoId);
|
|
898
|
+
if (!repo || repo.userId !== userId) {
|
|
899
|
+
return res.status(404).json({ error: 'Repository not found' });
|
|
900
|
+
}
|
|
901
|
+
if (repo.workspaceId && repo.workspaceId !== id) {
|
|
902
|
+
return res.status(409).json({
|
|
903
|
+
error: 'Repository already linked to another workspace',
|
|
904
|
+
workspaceId: repo.workspaceId,
|
|
905
|
+
});
|
|
906
|
+
}
|
|
907
|
+
if (!repo.installationId) {
|
|
908
|
+
return res.status(400).json({
|
|
909
|
+
error: 'Repository not authorized via GitHub App',
|
|
910
|
+
message: 'Install the GitHub App for this repository before adding it to a workspace.',
|
|
911
|
+
});
|
|
912
|
+
}
|
|
913
|
+
reposToAssign.push(repo.id);
|
|
914
|
+
repoFullNames.push(repo.githubFullName);
|
|
915
|
+
}
|
|
916
|
+
// Assign repositories to workspace
|
|
917
|
+
for (const repoId of reposToAssign) {
|
|
896
918
|
await db.repositories.assignToWorkspace(repoId, id);
|
|
897
919
|
}
|
|
920
|
+
// Update workspace config repositories list
|
|
921
|
+
const existingRepos = workspace.config.repositories ?? [];
|
|
922
|
+
const updatedRepos = Array.from(new Set([...existingRepos, ...repoFullNames]));
|
|
923
|
+
if (updatedRepos.length !== existingRepos.length) {
|
|
924
|
+
await db.workspaces.updateConfig(id, {
|
|
925
|
+
...workspace.config,
|
|
926
|
+
repositories: updatedRepos,
|
|
927
|
+
});
|
|
928
|
+
}
|
|
898
929
|
res.json({ success: true, message: 'Repositories added' });
|
|
899
930
|
}
|
|
900
931
|
catch (error) {
|
|
@@ -1456,7 +1487,10 @@ workspacesRouter.all('/:id/proxy/{*proxyPath}', async (req, res) => {
|
|
|
1456
1487
|
// The workspace port is 3888 inside the container
|
|
1457
1488
|
targetBaseUrl = `http://${workspace.computeId}:3888`;
|
|
1458
1489
|
}
|
|
1459
|
-
|
|
1490
|
+
// Preserve query string when proxying - this is critical for API calls like
|
|
1491
|
+
// /trajectory/steps?trajectoryId=xxx which need the query params forwarded
|
|
1492
|
+
const queryString = req.url.includes('?') ? req.url.substring(req.url.indexOf('?')) : '';
|
|
1493
|
+
const targetUrl = `${targetBaseUrl}/api/${proxyPath}${queryString}`;
|
|
1460
1494
|
console.log(`[workspace-proxy] ${req.method} ${targetUrl}`);
|
|
1461
1495
|
// Store targetUrl for error handling
|
|
1462
1496
|
req._proxyTargetUrl = targetUrl;
|
|
@@ -1534,7 +1568,7 @@ workspacesRouter.all('/:id/proxy/{*proxyPath}', async (req, res) => {
|
|
|
1534
1568
|
workspacesRouter.post('/:id/agents', async (req, res) => {
|
|
1535
1569
|
const userId = req.session.userId;
|
|
1536
1570
|
const { id } = req.params;
|
|
1537
|
-
const { name, provider, task, temporary, interactive } = req.body;
|
|
1571
|
+
const { name, provider, task, temporary: _temporary, interactive } = req.body;
|
|
1538
1572
|
if (!userId) {
|
|
1539
1573
|
return res.status(401).json({ error: 'Not authenticated' });
|
|
1540
1574
|
}
|
|
@@ -1571,6 +1605,7 @@ workspacesRouter.post('/:id/agents', async (req, res) => {
|
|
|
1571
1605
|
cli: provider || 'claude', // Map provider to cli
|
|
1572
1606
|
task: task || '', // Empty task = interactive mode, user responds to prompts
|
|
1573
1607
|
interactive: interactive ?? true, // Default to interactive for setup flows
|
|
1608
|
+
userId,
|
|
1574
1609
|
}),
|
|
1575
1610
|
signal: AbortSignal.timeout(30000),
|
|
1576
1611
|
});
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Bulk Ingest Utilities
|
|
3
|
+
*
|
|
4
|
+
* Optimized bulk insert operations for high-volume message sync.
|
|
5
|
+
* Uses raw SQL for performance instead of ORM-generated queries.
|
|
6
|
+
*
|
|
7
|
+
* Key optimizations:
|
|
8
|
+
* - Multi-row INSERT with VALUES for batches
|
|
9
|
+
* - Streaming COPY for very large batches (>1000 rows)
|
|
10
|
+
* - Proper JSONB serialization
|
|
11
|
+
* - Connection reuse via pool
|
|
12
|
+
* - Chunk processing for memory efficiency
|
|
13
|
+
*/
|
|
14
|
+
import { Pool } from 'pg';
|
|
15
|
+
import type { NewAgentMessage } from './schema.js';
|
|
16
|
+
export interface PoolConfig {
|
|
17
|
+
connectionString: string;
|
|
18
|
+
/** Maximum number of connections in pool (default: 20) */
|
|
19
|
+
max?: number;
|
|
20
|
+
/** How long a client can be idle before being closed (default: 30000) */
|
|
21
|
+
idleTimeoutMillis?: number;
|
|
22
|
+
/** Max time to wait for a connection (default: 10000) */
|
|
23
|
+
connectionTimeoutMillis?: number;
|
|
24
|
+
/** Enable SSL (default: based on connection string) */
|
|
25
|
+
ssl?: boolean | {
|
|
26
|
+
rejectUnauthorized: boolean;
|
|
27
|
+
};
|
|
28
|
+
}
|
|
29
|
+
export declare const DEFAULT_POOL_CONFIG: Partial<PoolConfig>;
|
|
30
|
+
/**
|
|
31
|
+
* Result of a bulk insert operation
|
|
32
|
+
*/
|
|
33
|
+
export interface BulkInsertResult {
|
|
34
|
+
inserted: number;
|
|
35
|
+
duplicates: number;
|
|
36
|
+
errors: number;
|
|
37
|
+
durationMs: number;
|
|
38
|
+
}
|
|
39
|
+
/**
|
|
40
|
+
* Bulk insert messages using optimized multi-row INSERT.
|
|
41
|
+
*
|
|
42
|
+
* Uses ON CONFLICT DO NOTHING for deduplication.
|
|
43
|
+
* Much faster than individual inserts for batches.
|
|
44
|
+
*
|
|
45
|
+
* @param pool Database connection pool
|
|
46
|
+
* @param messages Messages to insert
|
|
47
|
+
* @param chunkSize Number of rows per INSERT statement (default: 100)
|
|
48
|
+
*/
|
|
49
|
+
export declare function bulkInsertMessages(pool: Pool, messages: NewAgentMessage[], chunkSize?: number): Promise<BulkInsertResult>;
|
|
50
|
+
/**
|
|
51
|
+
* Streaming bulk insert using staging table for very large batches.
|
|
52
|
+
*
|
|
53
|
+
* Uses chunked multi-row INSERT into a temp staging table,
|
|
54
|
+
* then a single INSERT SELECT for deduplication.
|
|
55
|
+
* This avoids holding all data in memory and is efficient for large batches.
|
|
56
|
+
*
|
|
57
|
+
* @param pool Database connection pool
|
|
58
|
+
* @param messages Messages to insert
|
|
59
|
+
*/
|
|
60
|
+
export declare function streamingBulkInsert(pool: Pool, messages: NewAgentMessage[]): Promise<BulkInsertResult>;
|
|
61
|
+
/**
|
|
62
|
+
* Optimized bulk insert that chooses strategy based on batch size.
|
|
63
|
+
*
|
|
64
|
+
* - Small batches (<100): Use regular ORM insert
|
|
65
|
+
* - Medium batches (100-1000): Use multi-row INSERT
|
|
66
|
+
* - Large batches (>1000): Use streaming COPY
|
|
67
|
+
*
|
|
68
|
+
* @param pool Database connection pool
|
|
69
|
+
* @param messages Messages to insert
|
|
70
|
+
*/
|
|
71
|
+
export declare function optimizedBulkInsert(pool: Pool, messages: NewAgentMessage[]): Promise<BulkInsertResult>;
|
|
72
|
+
/**
|
|
73
|
+
* Get pool statistics for monitoring.
|
|
74
|
+
*/
|
|
75
|
+
export declare function getPoolStats(pool: Pool): {
|
|
76
|
+
total: number;
|
|
77
|
+
idle: number;
|
|
78
|
+
waiting: number;
|
|
79
|
+
};
|
|
80
|
+
/**
|
|
81
|
+
* Health check for the connection pool.
|
|
82
|
+
*/
|
|
83
|
+
export declare function checkPoolHealth(pool: Pool): Promise<{
|
|
84
|
+
healthy: boolean;
|
|
85
|
+
latencyMs: number;
|
|
86
|
+
error?: string;
|
|
87
|
+
}>;
|
|
88
|
+
//# sourceMappingURL=bulk-ingest.d.ts.map
|
|
@@ -0,0 +1,268 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Bulk Ingest Utilities
|
|
3
|
+
*
|
|
4
|
+
* Optimized bulk insert operations for high-volume message sync.
|
|
5
|
+
* Uses raw SQL for performance instead of ORM-generated queries.
|
|
6
|
+
*
|
|
7
|
+
* Key optimizations:
|
|
8
|
+
* - Multi-row INSERT with VALUES for batches
|
|
9
|
+
* - Streaming COPY for very large batches (>1000 rows)
|
|
10
|
+
* - Proper JSONB serialization
|
|
11
|
+
* - Connection reuse via pool
|
|
12
|
+
* - Chunk processing for memory efficiency
|
|
13
|
+
*/
|
|
14
|
+
export const DEFAULT_POOL_CONFIG = {
|
|
15
|
+
max: 20,
|
|
16
|
+
idleTimeoutMillis: 30000,
|
|
17
|
+
connectionTimeoutMillis: 10000,
|
|
18
|
+
};
|
|
19
|
+
/**
|
|
20
|
+
* Bulk insert messages using optimized multi-row INSERT.
|
|
21
|
+
*
|
|
22
|
+
* Uses ON CONFLICT DO NOTHING for deduplication.
|
|
23
|
+
* Much faster than individual inserts for batches.
|
|
24
|
+
*
|
|
25
|
+
* @param pool Database connection pool
|
|
26
|
+
* @param messages Messages to insert
|
|
27
|
+
* @param chunkSize Number of rows per INSERT statement (default: 100)
|
|
28
|
+
*/
|
|
29
|
+
export async function bulkInsertMessages(pool, messages, chunkSize = 100) {
|
|
30
|
+
if (messages.length === 0) {
|
|
31
|
+
return { inserted: 0, duplicates: 0, errors: 0, durationMs: 0 };
|
|
32
|
+
}
|
|
33
|
+
const startTime = Date.now();
|
|
34
|
+
let totalInserted = 0;
|
|
35
|
+
let totalErrors = 0;
|
|
36
|
+
// Process in chunks to avoid query size limits
|
|
37
|
+
for (let i = 0; i < messages.length; i += chunkSize) {
|
|
38
|
+
const chunk = messages.slice(i, i + chunkSize);
|
|
39
|
+
const result = await insertMessageChunk(pool, chunk);
|
|
40
|
+
totalInserted += result.inserted;
|
|
41
|
+
totalErrors += result.errors;
|
|
42
|
+
}
|
|
43
|
+
return {
|
|
44
|
+
inserted: totalInserted,
|
|
45
|
+
duplicates: messages.length - totalInserted - totalErrors,
|
|
46
|
+
errors: totalErrors,
|
|
47
|
+
durationMs: Date.now() - startTime,
|
|
48
|
+
};
|
|
49
|
+
}
|
|
50
|
+
/**
|
|
51
|
+
* Insert a chunk of messages with multi-row VALUES.
|
|
52
|
+
*/
|
|
53
|
+
async function insertMessageChunk(pool, messages) {
|
|
54
|
+
if (messages.length === 0) {
|
|
55
|
+
return { inserted: 0, errors: 0 };
|
|
56
|
+
}
|
|
57
|
+
// Build parameterized multi-row INSERT
|
|
58
|
+
const columns = [
|
|
59
|
+
'workspace_id',
|
|
60
|
+
'daemon_id',
|
|
61
|
+
'original_id',
|
|
62
|
+
'from_agent',
|
|
63
|
+
'to_agent',
|
|
64
|
+
'body',
|
|
65
|
+
'kind',
|
|
66
|
+
'topic',
|
|
67
|
+
'thread',
|
|
68
|
+
'channel',
|
|
69
|
+
'is_broadcast',
|
|
70
|
+
'is_urgent',
|
|
71
|
+
'data',
|
|
72
|
+
'payload_meta',
|
|
73
|
+
'message_ts',
|
|
74
|
+
'expires_at',
|
|
75
|
+
];
|
|
76
|
+
const values = [];
|
|
77
|
+
const placeholders = [];
|
|
78
|
+
for (let i = 0; i < messages.length; i++) {
|
|
79
|
+
const msg = messages[i];
|
|
80
|
+
const offset = i * columns.length;
|
|
81
|
+
const rowPlaceholders = columns.map((_, j) => `$${offset + j + 1}`);
|
|
82
|
+
placeholders.push(`(${rowPlaceholders.join(', ')})`);
|
|
83
|
+
values.push(msg.workspaceId, msg.daemonId ?? null, msg.originalId, msg.fromAgent, msg.toAgent, msg.body, msg.kind ?? 'message', msg.topic ?? null, msg.thread ?? null, msg.channel ?? null, msg.isBroadcast ?? false, msg.isUrgent ?? false, msg.data ? JSON.stringify(msg.data) : null, msg.payloadMeta ? JSON.stringify(msg.payloadMeta) : null, msg.messageTs, msg.expiresAt ?? null);
|
|
84
|
+
}
|
|
85
|
+
const query = `
|
|
86
|
+
INSERT INTO agent_messages (${columns.join(', ')})
|
|
87
|
+
VALUES ${placeholders.join(', ')}
|
|
88
|
+
ON CONFLICT (workspace_id, original_id) DO NOTHING
|
|
89
|
+
`;
|
|
90
|
+
try {
|
|
91
|
+
const result = await pool.query(query, values);
|
|
92
|
+
return { inserted: result.rowCount ?? 0, errors: 0 };
|
|
93
|
+
}
|
|
94
|
+
catch (err) {
|
|
95
|
+
console.error('[bulk-ingest] Chunk insert failed:', err);
|
|
96
|
+
return { inserted: 0, errors: messages.length };
|
|
97
|
+
}
|
|
98
|
+
}
|
|
99
|
+
/**
 * Streaming bulk insert using a temp staging table for very large batches.
 *
 * Strategy: chunked multi-row INSERTs into a TEMP table (created with
 * ON COMMIT DROP), then a single INSERT ... SELECT into `agent_messages`
 * with ON CONFLICT DO NOTHING for deduplication. The whole operation runs
 * in one transaction on a dedicated client checked out from the pool.
 *
 * @param pool Database connection pool (pg.Pool-compatible: `connect()`).
 * @param messages Messages to insert; returns immediately for an empty array.
 * @returns `{ inserted, duplicates, errors, durationMs }` — on failure the
 *          transaction is rolled back and `errors` equals `messages.length`.
 */
export async function streamingBulkInsert(pool, messages) {
    if (messages.length === 0) {
        return { inserted: 0, duplicates: 0, errors: 0, durationMs: 0 };
    }
    const startTime = Date.now();
    const client = await pool.connect();
    try {
        // Start transaction — staging + final insert must be atomic.
        await client.query('BEGIN');
        // Create temp staging table; ON COMMIT DROP means it vanishes at
        // COMMIT/ROLLBACK, so no explicit cleanup is needed.
        await client.query(`
            CREATE TEMP TABLE _staging_messages (
                workspace_id UUID NOT NULL,
                daemon_id UUID,
                original_id VARCHAR(255) NOT NULL,
                from_agent VARCHAR(255) NOT NULL,
                to_agent VARCHAR(255) NOT NULL,
                body TEXT NOT NULL,
                kind VARCHAR(50) DEFAULT 'message',
                topic VARCHAR(255),
                thread VARCHAR(255),
                channel VARCHAR(255),
                is_broadcast BOOLEAN DEFAULT false,
                is_urgent BOOLEAN DEFAULT false,
                data JSONB,
                payload_meta JSONB,
                message_ts TIMESTAMP NOT NULL,
                expires_at TIMESTAMP
            ) ON COMMIT DROP
        `);
        // Insert into the staging table in chunks: 200 rows x 16 columns =
        // 3200 bind parameters per statement, well under PostgreSQL's limit.
        const chunkSize = 200;
        for (let i = 0; i < messages.length; i += chunkSize) {
            const chunk = messages.slice(i, i + chunkSize);
            await insertStagingChunk(client, chunk);
        }
        // Move staging rows into the main table; rows whose
        // (workspace_id, original_id) already exists are silently skipped.
        const result = await client.query(`
            INSERT INTO agent_messages (
                workspace_id, daemon_id, original_id, from_agent, to_agent, body,
                kind, topic, thread, channel, is_broadcast, is_urgent,
                data, payload_meta, message_ts, expires_at
            )
            SELECT
                workspace_id, daemon_id, original_id, from_agent, to_agent, body,
                kind, topic, thread, channel, is_broadcast, is_urgent,
                data, payload_meta, message_ts, expires_at
            FROM _staging_messages
            ON CONFLICT (workspace_id, original_id) DO NOTHING
        `);
        await client.query('COMMIT');
        return {
            inserted: result.rowCount ?? 0,
            // NOTE: counts both pre-existing rows and duplicates within this
            // batch — anything that did not survive the ON CONFLICT clause.
            duplicates: messages.length - (result.rowCount ?? 0),
            errors: 0,
            durationMs: Date.now() - startTime,
        };
    }
    catch (err) {
        // Best-effort rollback; swallow its own failure (e.g. dead connection)
        // so the original error is what gets reported.
        await client.query('ROLLBACK').catch(() => { });
        console.error('[bulk-ingest] Streaming insert failed:', err);
        return {
            inserted: 0,
            duplicates: 0,
            errors: messages.length,
            durationMs: Date.now() - startTime,
        };
    }
    finally {
        // Always return the client to the pool, success or failure.
        client.release();
    }
}
|
|
181
|
+
/**
|
|
182
|
+
* Insert a chunk of messages into the staging table.
|
|
183
|
+
*/
|
|
184
|
+
async function insertStagingChunk(client, messages) {
|
|
185
|
+
if (messages.length === 0)
|
|
186
|
+
return;
|
|
187
|
+
const columns = [
|
|
188
|
+
'workspace_id',
|
|
189
|
+
'daemon_id',
|
|
190
|
+
'original_id',
|
|
191
|
+
'from_agent',
|
|
192
|
+
'to_agent',
|
|
193
|
+
'body',
|
|
194
|
+
'kind',
|
|
195
|
+
'topic',
|
|
196
|
+
'thread',
|
|
197
|
+
'channel',
|
|
198
|
+
'is_broadcast',
|
|
199
|
+
'is_urgent',
|
|
200
|
+
'data',
|
|
201
|
+
'payload_meta',
|
|
202
|
+
'message_ts',
|
|
203
|
+
'expires_at',
|
|
204
|
+
];
|
|
205
|
+
const values = [];
|
|
206
|
+
const placeholders = [];
|
|
207
|
+
for (let i = 0; i < messages.length; i++) {
|
|
208
|
+
const msg = messages[i];
|
|
209
|
+
const offset = i * columns.length;
|
|
210
|
+
const rowPlaceholders = columns.map((_, j) => `$${offset + j + 1}`);
|
|
211
|
+
placeholders.push(`(${rowPlaceholders.join(', ')})`);
|
|
212
|
+
values.push(msg.workspaceId, msg.daemonId ?? null, msg.originalId, msg.fromAgent, msg.toAgent, msg.body, msg.kind ?? 'message', msg.topic ?? null, msg.thread ?? null, msg.channel ?? null, msg.isBroadcast ?? false, msg.isUrgent ?? false, msg.data ? JSON.stringify(msg.data) : null, msg.payloadMeta ? JSON.stringify(msg.payloadMeta) : null, msg.messageTs, msg.expiresAt ?? null);
|
|
213
|
+
}
|
|
214
|
+
await client.query(`INSERT INTO _staging_messages (${columns.join(', ')}) VALUES ${placeholders.join(', ')}`, values);
|
|
215
|
+
}
|
|
216
|
+
/**
 * Optimized bulk insert that chooses a strategy based on batch size.
 *
 * - Batches of 1–1000 messages: one multi-row INSERT (bulkInsertMessages).
 * - Batches larger than 1000: chunked insert through a temp staging table
 *   plus a deduplicating INSERT ... SELECT (streamingBulkInsert).
 *
 * NOTE(review): earlier comments mentioned an ORM path for small batches
 * and "streaming COPY" for large ones — neither exists here; only the two
 * paths above are implemented. Also confirm both paths return the same
 * result shape (duplicates/durationMs) to callers.
 *
 * @param pool Database connection pool
 * @param messages Messages to insert
 */
export async function optimizedBulkInsert(pool, messages) {
    const count = messages.length;
    if (count === 0) {
        return { inserted: 0, duplicates: 0, errors: 0, durationMs: 0 };
    }
    // For very large batches, use the staging-table path.
    if (count > 1000) {
        return streamingBulkInsert(pool, messages);
    }
    // For everything else, use a single multi-row INSERT.
    return bulkInsertMessages(pool, messages);
}
|
|
238
|
+
/**
 * Snapshot of connection-pool counters for monitoring.
 *
 * @param pool pg.Pool-compatible pool exposing totalCount/idleCount/waitingCount.
 * @returns `{ total, idle, waiting }` client counts.
 */
export function getPoolStats(pool) {
    const { totalCount: total, idleCount: idle, waitingCount: waiting } = pool;
    return { total, idle, waiting };
}
|
|
248
|
+
/**
|
|
249
|
+
* Health check for the connection pool.
|
|
250
|
+
*/
|
|
251
|
+
export async function checkPoolHealth(pool) {
|
|
252
|
+
const start = Date.now();
|
|
253
|
+
try {
|
|
254
|
+
await pool.query('SELECT 1');
|
|
255
|
+
return {
|
|
256
|
+
healthy: true,
|
|
257
|
+
latencyMs: Date.now() - start,
|
|
258
|
+
};
|
|
259
|
+
}
|
|
260
|
+
catch (err) {
|
|
261
|
+
return {
|
|
262
|
+
healthy: false,
|
|
263
|
+
latencyMs: Date.now() - start,
|
|
264
|
+
error: String(err),
|
|
265
|
+
};
|
|
266
|
+
}
|
|
267
|
+
}
|
|
268
|
+
//# sourceMappingURL=bulk-ingest.js.map
|
|
@@ -8,6 +8,11 @@ import { Pool } from 'pg';
|
|
|
8
8
|
import * as schema from './schema.js';
|
|
9
9
|
export type { User, NewUser, GitHubInstallation, NewGitHubInstallation, Credential, NewCredential, Workspace, NewWorkspace, WorkspaceConfig, WorkspaceMember, NewWorkspaceMember, ProjectGroup, NewProjectGroup, CoordinatorAgentConfig, ProjectAgentConfig, Repository, NewRepository, LinkedDaemon, NewLinkedDaemon, Subscription, NewSubscription, UsageRecord, NewUsageRecord, AgentSession, NewAgentSession, AgentSummary, NewAgentSummary, } from './schema.js';
|
|
10
10
|
export * from './schema.js';
|
|
11
|
+
/**
|
|
12
|
+
* Get the raw connection pool for bulk operations.
|
|
13
|
+
* Use this for optimized bulk inserts that bypass the ORM.
|
|
14
|
+
*/
|
|
15
|
+
export declare function getRawPool(): Pool;
|
|
11
16
|
export declare function getDb(): import("drizzle-orm/node-postgres").NodePgDatabase<Record<string, unknown>> & {
|
|
12
17
|
$client: Pool;
|
|
13
18
|
};
|
|
@@ -51,6 +56,7 @@ export interface WorkspaceQueries {
|
|
|
51
56
|
findById(id: string): Promise<schema.Workspace | null>;
|
|
52
57
|
findByUserId(userId: string): Promise<schema.Workspace[]>;
|
|
53
58
|
findByCustomDomain(domain: string): Promise<schema.Workspace | null>;
|
|
59
|
+
findByRepoFullName(repoFullName: string): Promise<schema.Workspace | null>;
|
|
54
60
|
findAll(): Promise<schema.Workspace[]>;
|
|
55
61
|
create(data: schema.NewWorkspace): Promise<schema.Workspace>;
|
|
56
62
|
update(id: string, data: Partial<Pick<schema.Workspace, 'name' | 'config'>>): Promise<void>;
|
|
@@ -101,6 +107,7 @@ export interface DaemonUpdate {
|
|
|
101
107
|
export interface LinkedDaemonQueries {
|
|
102
108
|
findById(id: string): Promise<schema.LinkedDaemon | null>;
|
|
103
109
|
findByUserId(userId: string): Promise<schema.LinkedDaemon[]>;
|
|
110
|
+
findByWorkspaceId(workspaceId: string): Promise<schema.LinkedDaemon[]>;
|
|
104
111
|
findByMachineId(userId: string, machineId: string): Promise<schema.LinkedDaemon | null>;
|
|
105
112
|
findByApiKeyHash(apiKeyHash: string): Promise<schema.LinkedDaemon | null>;
|
|
106
113
|
create(data: schema.NewLinkedDaemon): Promise<schema.LinkedDaemon>;
|
|
@@ -109,6 +116,7 @@ export interface LinkedDaemonQueries {
|
|
|
109
116
|
delete(id: string): Promise<void>;
|
|
110
117
|
markStale(): Promise<number>;
|
|
111
118
|
getAllAgentsForUser(userId: string): Promise<DaemonAgentInfo[]>;
|
|
119
|
+
getAgentsForWorkspace(workspaceId: string): Promise<DaemonAgentInfo[]>;
|
|
112
120
|
getPendingUpdates(id: string): Promise<DaemonUpdate[]>;
|
|
113
121
|
queueUpdate(id: string, update: DaemonUpdate): Promise<void>;
|
|
114
122
|
queueMessage(id: string, message: Record<string, unknown>): Promise<void>;
|
|
@@ -213,6 +221,31 @@ export interface CommentMentionQueries {
|
|
|
213
221
|
markIgnored(id: string): Promise<void>;
|
|
214
222
|
}
|
|
215
223
|
export declare const commentMentionQueries: CommentMentionQueries;
|
|
224
|
+
/**
 * Filter and pagination options for querying stored agent messages.
 * Only `workspaceId` is required; omitted filters are not applied.
 */
export interface MessageQuery {
    /** Workspace whose messages to query. */
    workspaceId: string;
    /** Maximum number of rows to return. */
    limit?: number;
    /** Number of rows to skip (pagination). */
    offset?: number;
    /** Restrict to messages sent by this agent. */
    fromAgent?: string;
    /** Restrict to messages addressed to this agent. */
    toAgent?: string;
    /** Restrict to messages in this thread. */
    thread?: string;
    /** Restrict to messages in this channel. */
    channel?: string;
    /** Lower bound on message timestamp — TODO confirm inclusivity in the implementation. */
    sinceTs?: Date;
    /** Upper bound on message timestamp — TODO confirm inclusivity in the implementation. */
    beforeTs?: Date;
    /** When true, also return messages past their expiry — verify default in the implementation. */
    includeExpired?: boolean;
}
|
|
236
|
+
/**
 * Data-access contract for persisted agent messages.
 */
export interface AgentMessageQueries {
    /** Insert a single message and return the stored row. */
    create(data: schema.NewAgentMessage): Promise<schema.AgentMessage>;
    /** Insert several messages and return the stored rows. */
    createMany(data: schema.NewAgentMessage[]): Promise<schema.AgentMessage[]>;
    /** Look up a message by primary key; resolves null when absent. */
    findById(id: string): Promise<schema.AgentMessage | null>;
    /** Look up by the (workspace, original id) pair used for deduplication; null when absent. */
    findByOriginalId(workspaceId: string, originalId: string): Promise<schema.AgentMessage | null>;
    /** Query messages with the filters/pagination in {@link MessageQuery}. */
    query(params: MessageQuery): Promise<schema.AgentMessage[]>;
    /** Fetch messages not yet indexed (presumably for search indexing — confirm), up to `limit`. */
    getUnindexed(workspaceId: string, limit?: number): Promise<schema.AgentMessage[]>;
    /** Mark the given message ids as indexed. */
    markIndexed(ids: string[]): Promise<void>;
    /** Delete expired messages; resolves the number removed. */
    deleteExpired(): Promise<number>;
    /** Count messages belonging to a workspace. */
    countByWorkspace(workspaceId: string): Promise<number>;
    /** Fetch messages of one thread within a workspace, up to `limit`. */
    getThreadMessages(workspaceId: string, thread: string, limit?: number): Promise<schema.AgentMessage[]>;
}
|
|
248
|
+
export declare const agentMessageQueries: AgentMessageQueries;
|
|
216
249
|
export declare function runMigrations(): Promise<void>;
|
|
217
250
|
export declare function closeDb(): Promise<void>;
|
|
218
251
|
//# sourceMappingURL=drizzle.d.ts.map
|