@kaitranntt/ccs 7.61.0-dev.1 → 7.61.1-dev.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -2
- package/scripts/github/ccs-backlog-sync-lib.mjs +379 -0
- package/scripts/github/ccs-backlog-sync.mjs +6 -0
- package/docker/Dockerfile +0 -85
- package/docker/Dockerfile.integrated +0 -23
- package/docker/README.md +0 -260
- package/docker/docker-compose.integrated.yml +0 -32
- package/docker/docker-compose.yml +0 -53
- package/docker/entrypoint-integrated.sh +0 -9
- package/docker/entrypoint.sh +0 -34
- package/docker/supervisord.conf +0 -43
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@kaitranntt/ccs",
|
|
3
|
-
"version": "7.61.
|
|
3
|
+
"version": "7.61.1-dev.1",
|
|
4
4
|
"description": "Claude Code Switch - Instant profile switching between Claude, GLM, Kimi, and more",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"cli",
|
|
@@ -35,7 +35,6 @@
|
|
|
35
35
|
"lib/",
|
|
36
36
|
"scripts/",
|
|
37
37
|
"config/",
|
|
38
|
-
"docker/",
|
|
39
38
|
".claude/",
|
|
40
39
|
"VERSION",
|
|
41
40
|
"README.md",
|
|
@@ -0,0 +1,379 @@
|
|
|
1
|
+
const REQUIRED_PROJECT_FIELDS = ['Status', 'Priority', 'Follow-up', 'Next review'];
|
|
2
|
+
const DEFAULT_REPO_FULL_NAME = 'kaitranntt/ccs';
|
|
3
|
+
const DEFAULT_CLOSED_LOOKBACK_DAYS = 14;
|
|
4
|
+
const PRIORITY_FOR = { bug: 'P1', default: 'P2', split: 'P3' };
|
|
5
|
+
const FOLLOW_UP_FOR = {
|
|
6
|
+
ready: 'Ready',
|
|
7
|
+
repro: 'Needs repro',
|
|
8
|
+
upstream: 'Blocked upstream',
|
|
9
|
+
split: 'Needs split',
|
|
10
|
+
docs: 'Docs follow-up',
|
|
11
|
+
};
|
|
12
|
+
|
|
13
|
+
const PROJECT_QUERY = `query($owner: String!, $number: Int!, $itemCursor: String) {
|
|
14
|
+
user(login: $owner) {
|
|
15
|
+
projectV2(number: $number) {
|
|
16
|
+
id
|
|
17
|
+
fields(first: 50) { nodes { __typename ... on ProjectV2Field { id name } ... on ProjectV2SingleSelectField { id name options { id name } } } }
|
|
18
|
+
items(first: 100, after: $itemCursor) {
|
|
19
|
+
pageInfo { hasNextPage endCursor }
|
|
20
|
+
nodes { id content { __typename ... on Issue { number id repository { nameWithOwner } } } }
|
|
21
|
+
}
|
|
22
|
+
}
|
|
23
|
+
}
|
|
24
|
+
}`;
|
|
25
|
+
const ADD_ITEM_MUTATION = `mutation($projectId: ID!, $contentId: ID!) {
|
|
26
|
+
addProjectV2ItemById(input: {projectId: $projectId, contentId: $contentId}) { item { id } }
|
|
27
|
+
}`;
|
|
28
|
+
const SET_SINGLE_SELECT_MUTATION = `mutation($projectId: ID!, $itemId: ID!, $fieldId: ID!, $optionId: String!) {
|
|
29
|
+
updateProjectV2ItemFieldValue(input: {
|
|
30
|
+
projectId: $projectId, itemId: $itemId, fieldId: $fieldId, value: { singleSelectOptionId: $optionId }
|
|
31
|
+
}) { projectV2Item { id } }
|
|
32
|
+
}`;
|
|
33
|
+
const SET_DATE_MUTATION = `mutation($projectId: ID!, $itemId: ID!, $fieldId: ID!, $date: Date!) {
|
|
34
|
+
updateProjectV2ItemFieldValue(input: {
|
|
35
|
+
projectId: $projectId, itemId: $itemId, fieldId: $fieldId, value: { date: $date }
|
|
36
|
+
}) { projectV2Item { id } }
|
|
37
|
+
}`;
|
|
38
|
+
const CLEAR_FIELD_MUTATION = `mutation($projectId: ID!, $itemId: ID!, $fieldId: ID!) {
|
|
39
|
+
clearProjectV2ItemFieldValue(input: {projectId: $projectId, itemId: $itemId, fieldId: $fieldId}) { projectV2Item { id } }
|
|
40
|
+
}`;
|
|
41
|
+
|
|
42
|
+
/**
 * Format `now` shifted by `daysFromNow` UTC days as a YYYY-MM-DD string.
 * @param {number} daysFromNow - whole days to add (may be negative)
 * @param {Date} [now] - reference instant; defaults to the current time
 * @returns {string} calendar date in ISO 8601 date-only form
 */
export function isoDate(daysFromNow, now = new Date()) {
  const shifted = new Date(now);
  shifted.setUTCDate(shifted.getUTCDate() + daysFromNow);
  // toISOString is always `YYYY-MM-DDTHH:mm:ss.sssZ`; keep the date part.
  const [calendarDate] = shifted.toISOString().split('T');
  return calendarDate;
}
|
|
47
|
+
|
|
48
|
+
/**
 * Derive the project-board plan (priority, follow-up, next review date, status)
 * for an issue from its labels and open/closed state.
 * @param {Array<{name: string}>} labels - issue label objects
 * @param {string} state - 'open' or 'closed'
 * @param {Date} [now] - reference time for review-date computation
 * @returns {{priority: string, followUp: string, nextReview: string|null, status: string}}
 */
export function classify(labels, state, now = new Date()) {
  const labelNames = new Set(labels.map(({ name }) => name));

  // 'bug' outranks 'needs-split'; everything else is the default priority.
  let priority = PRIORITY_FOR.default;
  if (labelNames.has('bug')) {
    priority = PRIORITY_FOR.bug;
  } else if (labelNames.has('needs-split')) {
    priority = PRIORITY_FOR.split;
  }

  // Closed issues are Done regardless of any other label.
  if (state === 'closed') {
    return { priority, followUp: FOLLOW_UP_FOR.ready, nextReview: null, status: 'Todo' === '' ? '' : 'Done' };
  }

  // First matching label wins; each rule pairs a follow-up with a review horizon in days.
  const followUpRules = [
    ['upstream-blocked', FOLLOW_UP_FOR.upstream, 7],
    ['needs-repro', FOLLOW_UP_FOR.repro, 14],
    ['needs-split', FOLLOW_UP_FOR.split, 14],
    ['docs-gap', FOLLOW_UP_FOR.docs, 7],
  ];
  for (const [label, followUp, horizonDays] of followUpRules) {
    if (labelNames.has(label)) {
      return { priority, followUp, nextReview: isoDate(horizonDays, now), status: 'Todo' };
    }
  }

  // No special labels: ready to work on, no scheduled review.
  return { priority, followUp: FOLLOW_UP_FOR.ready, nextReview: null, status: 'Todo' };
}
|
|
82
|
+
|
|
83
|
+
/**
 * Split an `OWNER/REPO` string into its parts, validating the shape strictly.
 * @param {string} [repoFullName] - e.g. 'kaitranntt/ccs'; defaults to DEFAULT_REPO_FULL_NAME
 * @returns {{repoOwner: string, repoName: string, repoFullName: string}}
 * @throws {Error} when the value is not exactly two non-empty slash-separated segments
 */
export function parseRepoFullName(repoFullName = DEFAULT_REPO_FULL_NAME) {
  const parts = String(repoFullName).split('/');
  // Require exactly two non-empty segments. The previous check only looked at a
  // third destructured element, which silently accepted trailing-slash values
  // like "owner/repo/" (the empty third segment is falsy).
  if (parts.length !== 2 || !parts[0] || !parts[1]) {
    throw new Error(`Invalid GITHUB_REPOSITORY value "${repoFullName}". Expected OWNER/REPO.`);
  }
  const [repoOwner, repoName] = parts;
  return { repoOwner, repoName, repoFullName: `${repoOwner}/${repoName}` };
}
|
|
90
|
+
|
|
91
|
+
/**
 * Extract the rel="next" URL from an RFC 8288 Link header.
 * @param {string|null|undefined} linkHeader - raw Link header value
 * @returns {string|null} the next-page URL, or null when there is none
 */
export function parseNextLink(linkHeader) {
  if (!linkHeader) return null;
  const LINK_SEGMENT = /<([^>]+)>\s*;\s*rel="([^"]+)"/;
  // Each comma-separated segment looks like `<url>; rel="name"`.
  const nextMatch = linkHeader
    .split(',')
    .map((segment) => segment.match(LINK_SEGMENT))
    .find((match) => match?.[2] === 'next');
  return nextMatch ? nextMatch[1] : null;
}
|
|
99
|
+
|
|
100
|
+
/**
 * Read a header value from either a fetch Headers-like object (has .get())
 * or a plain object keyed by header name (exact or lower-case).
 * @param {Headers|Object|null|undefined} headers
 * @param {string} name - header name as requested by the caller
 * @returns {string|null}
 */
function getHeader(headers, name) {
  if (typeof headers?.get === 'function') {
    return headers.get(name);
  }
  const exact = headers?.[name];
  if (exact) return exact;
  return headers?.[name.toLowerCase()] || null;
}
|
|
104
|
+
|
|
105
|
+
/**
 * Produce the full ISO timestamp `days` UTC days before `now`.
 * @param {Date} now - reference instant
 * @param {number} days - whole days to subtract
 * @returns {string} ISO 8601 timestamp of the cutoff boundary
 */
function buildCutoffTimestamp(now, days) {
  const boundary = new Date(now);
  boundary.setUTCDate(boundary.getUTCDate() - days);
  return boundary.toISOString();
}
|
|
110
|
+
|
|
111
|
+
/**
 * Decide whether a closed issue's close time falls inside the lookback window.
 * @param {{state: string, closed_at?: string}} issue - REST issue payload
 * @param {Date} now - reference instant
 * @param {number} days - lookback window length in days
 * @returns {boolean}
 */
function isRecentlyClosed(issue, now, days) {
  if (issue.state !== 'closed') return false;
  if (!issue.closed_at) return false;
  const cutoffMs = Date.parse(buildCutoffTimestamp(now, days));
  return Date.parse(issue.closed_at) >= cutoffMs;
}
|
|
115
|
+
|
|
116
|
+
/**
 * Verify all fields the sync writes to exist on the project, and return
 * them under their role names.
 * @param {Map<string, {id: string, options: Map<string, string>}>} fields - field name -> descriptor
 * @returns {{statusField: Object, priorityField: Object, followUpField: Object, nextReviewField: Object}}
 * @throws {Error} naming every missing required field
 */
export function validateProjectFields(fields) {
  const missing = REQUIRED_PROJECT_FIELDS.filter((name) => !fields.has(name));
  if (missing.length > 0) {
    const suffix = missing.length > 1 ? 's' : '';
    const quoted = missing.map((name) => `"${name}"`).join(', ');
    throw new Error(`Missing required project field${suffix}: ${quoted}`);
  }
  // Map board field names onto the role-specific keys used by the sync.
  return {
    statusField: fields.get('Status'),
    priorityField: fields.get('Priority'),
    followUpField: fields.get('Follow-up'),
    nextReviewField: fields.get('Next review'),
  };
}
|
|
130
|
+
|
|
131
|
+
/**
 * Collect every page of a paginated GitHub REST collection by following
 * rel="next" Link headers until exhausted.
 * @param {string} initialPath - first request path (relative or absolute)
 * @param {Function} githubRequest - async (path) => {body, headers}
 * @returns {Promise<Array>} all items across pages, in order
 * @throws {Error} when a page body is not an array
 */
export async function listGithubCollection(initialPath, githubRequest) {
  const collected = [];
  for (let path = initialPath; path; ) {
    const { body, headers } = await githubRequest(path);
    if (!Array.isArray(body)) throw new Error(`Expected array response for ${path}`);
    collected.push(...body);
    // Follow the pagination chain; null terminates the loop.
    path = parseNextLink(getHeader(headers, 'link'));
  }
  return collected;
}
|
|
142
|
+
|
|
143
|
+
/**
 * Load everything the sync needs from a user-owned ProjectV2 board in one
 * paged pass: the project id, every field (with single-select option ids),
 * and a map from issue number to project-item id for the given repository.
 * @param {{owner: string, projectNumber: number, repoFullName: string, graphqlRequest: Function}} args
 * @returns {Promise<Object>} projectId, itemsByNumber, plus the role-named fields
 *   from validateProjectFields
 * @throws {Error} when the project does not exist or required fields are missing
 */
export async function getProjectContext({ owner, projectNumber, repoFullName, graphqlRequest }) {
  const fields = new Map(); // field name -> { id, options: Map<optionName, optionId> }
  const itemsByNumber = new Map(); // issue number -> project item id
  let projectId = null;
  let itemCursor = null; // items-page cursor; null means "first page" and also ends the loop

  do {
    const data = await graphqlRequest(PROJECT_QUERY, { owner, number: projectNumber, itemCursor });
    const project = data.user?.projectV2;
    if (!project) throw new Error(`Project ${owner}/${projectNumber} not found`);
    projectId = projectId || project.id;

    // Field metadata is repeated on every page; capture it only once.
    if (fields.size === 0) {
      for (const node of project.fields.nodes) {
        if (!node?.name) continue;
        fields.set(node.name, {
          id: node.id,
          // Plain (non-select) fields have no options; store an empty map.
          options: new Map((node.options || []).map((opt) => [opt.name, opt.id])),
        });
      }
    }

    // Only index items whose content is an Issue in the target repository —
    // the project may also track PRs or issues from other repos.
    for (const node of project.items.nodes) {
      if (
        node?.content?.__typename === 'Issue' &&
        node.content.repository.nameWithOwner === repoFullName
      ) {
        itemsByNumber.set(node.content.number, node.id);
      }
    }

    // Advance to the next items page, or stop when this was the last one.
    itemCursor = project.items.pageInfo.hasNextPage ? project.items.pageInfo.endCursor : null;
  } while (itemCursor);

  return { projectId, itemsByNumber, ...validateProjectFields(fields) };
}
|
|
179
|
+
|
|
180
|
+
/**
 * Determine which issues to sync. In a GitHub Actions issue event, only the
 * triggering issue is returned; otherwise all open issues plus issues closed
 * within the lookback window are fetched (PRs excluded in both cases).
 * @param {Object} args
 * @param {string} args.repoOwner
 * @param {string} args.repoName
 * @param {Function} args.githubRequest - async (path) => {body, headers}
 * @param {string} [args.eventPath] - path to the Actions event payload JSON
 * @param {Date} [args.now]
 * @param {number} [args.closedLookbackDays]
 * @returns {Promise<Array>} de-duplicated issue payloads
 */
export async function listIssuesForSync({
  repoOwner,
  repoName,
  githubRequest,
  eventPath,
  now = new Date(),
  closedLookbackDays = DEFAULT_CLOSED_LOOKBACK_DAYS,
}) {
  // Fast path: a single-issue event payload short-circuits the full listing.
  if (eventPath) {
    const event = JSON.parse(
      // Dynamic import keeps fs out of scope unless an event file is used.
      await import('node:fs/promises').then((fs) => fs.readFile(eventPath, 'utf8'))
    );
    // REST issue payloads carry `pull_request` when the "issue" is a PR.
    if (event.issue && !event.issue.pull_request) return [event.issue];
  }

  const openIssues = await listGithubCollection(
    `/repos/${repoOwner}/${repoName}/issues?state=open&per_page=100`,
    githubRequest
  );
  // `since` filters by update time; isRecentlyClosed below re-checks close time.
  const recentlyClosedIssues = await listGithubCollection(
    `/repos/${repoOwner}/${repoName}/issues?state=closed&per_page=100&since=${encodeURIComponent(buildCutoffTimestamp(now, closedLookbackDays))}`,
    githubRequest
  );

  // De-duplicate by issue number; closed entries may overwrite open ones.
  const byNumber = new Map();
  for (const issue of openIssues) {
    if (!issue.pull_request) byNumber.set(issue.number, issue);
  }
  for (const issue of recentlyClosedIssues) {
    if (!issue.pull_request && isRecentlyClosed(issue, now, closedLookbackDays))
      byNumber.set(issue.number, issue);
  }
  return [...byNumber.values()];
}
|
|
214
|
+
|
|
215
|
+
/**
 * Return the project-item id for an issue, adding the issue to the project
 * (and caching the new id) when it is not yet tracked.
 * @param {string} projectId - ProjectV2 node id
 * @param {Map<number, string>} itemsByNumber - issue number -> item id cache (mutated)
 * @param {{number: number, node_id?: string}} issue - REST issue payload
 * @param {Function} graphqlRequest
 * @returns {Promise<string>} the project item id
 * @throws {Error} when the issue payload lacks a GraphQL node_id
 */
async function ensureProjectItem(projectId, itemsByNumber, issue, graphqlRequest) {
  const cached = itemsByNumber.get(issue.number);
  if (cached) return cached;
  // Adding to the project needs the GraphQL node id, not the REST number.
  if (!issue.node_id) throw new Error(`Issue #${issue.number} is missing node_id`);
  const result = await graphqlRequest(ADD_ITEM_MUTATION, { projectId, contentId: issue.node_id });
  const newItemId = result.addProjectV2ItemById.item.id;
  itemsByNumber.set(issue.number, newItemId);
  return newItemId;
}
|
|
224
|
+
|
|
225
|
+
/**
 * Write a single-select field value on a project item, resolving the
 * human-readable option name to its project-specific option id first.
 * @param {string} projectId
 * @param {string} itemId
 * @param {{id: string, options: Map<string, string>}} field
 * @param {string} optionName - option label, e.g. 'Todo' or 'P1'
 * @param {Function} graphqlRequest
 * @throws {Error} when the option does not exist on the field
 */
async function setSingleSelect(projectId, itemId, field, optionName, graphqlRequest) {
  const optionId = field.options.get(optionName);
  if (!optionId) throw new Error(`Missing option "${optionName}" on field ${field.id}`);
  const variables = { projectId, itemId, fieldId: field.id, optionId };
  await graphqlRequest(SET_SINGLE_SELECT_MUTATION, variables);
}
|
|
235
|
+
|
|
236
|
+
/**
 * Write a date field value on a project item, or clear the field when the
 * date is null/empty.
 * @param {string} projectId
 * @param {string} itemId
 * @param {string} fieldId
 * @param {string|null} date - ISO date (YYYY-MM-DD) or null to clear
 * @param {Function} graphqlRequest
 */
async function setDate(projectId, itemId, fieldId, date, graphqlRequest) {
  if (date) {
    await graphqlRequest(SET_DATE_MUTATION, { projectId, itemId, fieldId, date });
  } else {
    // A missing review date means "no scheduled review": clear any old value.
    await graphqlRequest(CLEAR_FIELD_MUTATION, { projectId, itemId, fieldId });
  }
}
|
|
243
|
+
|
|
244
|
+
/**
 * Apply the classified plan (status, priority, follow-up, next review) to
 * every issue's project item. Failures are collected per-issue so one bad
 * issue does not abort the rest; a summary error is thrown at the end if
 * any issue failed.
 * @param {Object} args
 * @param {Array} args.issues - REST issue payloads
 * @param {Object} args.context - result of getProjectContext
 * @param {Function} args.graphqlRequest
 * @param {Console} [args.logger]
 * @param {Date} [args.now]
 * @throws {Error} summarizing all per-issue failures, if any
 */
export async function syncIssues({
  issues,
  context,
  graphqlRequest,
  logger = console,
  now = new Date(),
}) {
  const failures = [];
  for (const issue of issues) {
    try {
      // Never (re-)add a closed issue to the board; only update tracked ones.
      if (issue.state === 'closed' && !context.itemsByNumber.has(issue.number)) {
        logger.log(
          `skipped #${issue.number}: closed issue is not currently tracked in the project`
        );
        continue;
      }
      const itemId = await ensureProjectItem(
        context.projectId,
        context.itemsByNumber,
        issue,
        graphqlRequest
      );
      const plan = classify(issue.labels || [], issue.state, now);
      // Field writes run sequentially; each mutation targets the same item.
      await setSingleSelect(
        context.projectId,
        itemId,
        context.statusField,
        plan.status,
        graphqlRequest
      );
      await setSingleSelect(
        context.projectId,
        itemId,
        context.priorityField,
        plan.priority,
        graphqlRequest
      );
      await setSingleSelect(
        context.projectId,
        itemId,
        context.followUpField,
        plan.followUp,
        graphqlRequest
      );
      // A null nextReview clears the date field (see setDate).
      await setDate(
        context.projectId,
        itemId,
        context.nextReviewField.id,
        plan.nextReview,
        graphqlRequest
      );
      logger.log(
        `synced #${issue.number}: ${plan.status} / ${plan.priority} / ${plan.followUp}${plan.nextReview ? ` / ${plan.nextReview}` : ''}`
      );
    } catch (error) {
      // Record the failure and keep syncing the remaining issues.
      const detail = error instanceof Error ? error.message : String(error);
      failures.push(`#${issue.number} (${detail})`);
      logger.error(`[X] Failed to sync #${issue.number}: ${detail}`);
    }
  }
  if (failures.length > 0)
    throw new Error(`Failed to sync ${failures.length} issue(s): ${failures.join(', ')}`);
}
|
|
307
|
+
|
|
308
|
+
/**
 * Turn a GraphQL error payload into an actionable message, flagging
 * likely token/permission problems with a remediation hint.
 * @param {*} errors - GraphQL `errors` array or the raw response body
 * @returns {string}
 */
function formatGraphqlError(errors) {
  const raw = JSON.stringify(errors);
  const looksLikeAccessProblem = /resource not accessible|insufficient|forbidden|project/i.test(
    raw
  );
  if (looksLikeAccessProblem) {
    return `GitHub Project access failed. Ensure GH_TOKEN or GITHUB_TOKEN has project scope and access to the target project. Raw: ${raw}`;
  }
  return `GitHub GraphQL failed: ${raw}`;
}
|
|
315
|
+
|
|
316
|
+
/**
 * Assemble the sync's runtime configuration from environment variables.
 * @param {Object} [env] - environment map; defaults to process.env
 * @returns {{token: string, owner: string, projectNumber: number, eventPath?: string,
 *   closedLookbackDays: number, repoOwner: string, repoName: string, repoFullName: string}}
 * @throws {Error} when the token is missing or a numeric setting is invalid
 */
function buildRuntimeConfig(env = process.env) {
  const token = env.GH_TOKEN || env.GITHUB_TOKEN;
  if (!token) throw new Error('Missing GH_TOKEN or GITHUB_TOKEN');
  const projectNumber = Number(env.CCS_PROJECT_NUMBER || '3');
  if (!Number.isInteger(projectNumber) || projectNumber <= 0)
    throw new Error('CCS_PROJECT_NUMBER must be a positive integer');
  // Validate the lookback the same way as the project number: the previous
  // code passed Number(...) through unchecked, letting NaN or negative values
  // silently corrupt the closed-issue cutoff computation.
  const closedLookbackDays = Number(
    env.CCS_PROJECT_RECENTLY_CLOSED_DAYS || String(DEFAULT_CLOSED_LOOKBACK_DAYS)
  );
  if (!Number.isInteger(closedLookbackDays) || closedLookbackDays <= 0)
    throw new Error('CCS_PROJECT_RECENTLY_CLOSED_DAYS must be a positive integer');
  return {
    token,
    owner: env.CCS_PROJECT_OWNER || 'kaitranntt',
    projectNumber,
    eventPath: env.GITHUB_EVENT_PATH,
    closedLookbackDays,
    ...parseRepoFullName(env.GITHUB_REPOSITORY || DEFAULT_REPO_FULL_NAME),
  };
}
|
|
333
|
+
|
|
334
|
+
/**
 * Entry point: build config from the environment, construct authenticated
 * REST and GraphQL request helpers, then list issues and sync them onto the
 * project board.
 * @param {Object} [args]
 * @param {Object} [args.env] - environment map; defaults to process.env
 * @param {Console} [args.logger]
 * @param {Function} [args.fetchImpl] - fetch implementation (injectable for tests)
 * @throws {Error} on configuration, REST, GraphQL, or per-issue sync failure
 */
export async function runSync({ env = process.env, logger = console, fetchImpl = fetch } = {}) {
  const config = buildRuntimeConfig(env);
  // REST helper: resolves relative paths against api.github.com and attaches
  // auth + API-version headers (caller-supplied headers win on conflict).
  const githubRequest = async (path, init = {}) => {
    const response = await fetchImpl(
      path.startsWith('http') ? path : `https://api.github.com${path}`,
      {
        ...init,
        headers: {
          Accept: 'application/vnd.github+json',
          Authorization: `Bearer ${config.token}`,
          'X-GitHub-Api-Version': '2022-11-28',
          ...(init.headers || {}),
        },
      }
    );
    // NOTE(review): assumes error responses are JSON too — a non-JSON body
    // (e.g. an HTML 502 page) would make response.json() throw first.
    const body = await response.json();
    if (!response.ok) throw new Error(`GitHub REST ${response.status}: ${JSON.stringify(body)}`);
    return { body, headers: response.headers };
  };
  // GraphQL helper: POSTs the query and surfaces errors via formatGraphqlError.
  const graphqlRequest = async (query, variables = {}) => {
    const response = await fetchImpl('https://api.github.com/graphql', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${config.token}` },
      body: JSON.stringify({ query, variables }),
    });
    const body = await response.json();
    // GraphQL can return 200 with an `errors` array, so check both.
    if (!response.ok || body.errors) throw new Error(formatGraphqlError(body.errors || body));
    return body.data;
  };

  const issues = await listIssuesForSync({
    repoOwner: config.repoOwner,
    repoName: config.repoName,
    githubRequest,
    eventPath: config.eventPath,
    now: new Date(),
    closedLookbackDays: config.closedLookbackDays,
  });
  const context = await getProjectContext({
    owner: config.owner,
    projectNumber: config.projectNumber,
    repoFullName: config.repoFullName,
    graphqlRequest,
  });
  await syncIssues({ issues, context, graphqlRequest, logger, now: new Date() });
}
|
package/docker/Dockerfile
DELETED
|
@@ -1,85 +0,0 @@
|
|
|
1
|
-
# syntax=docker/dockerfile:1
|
|
2
|
-
|
|
3
|
-
# =============================================================================
|
|
4
|
-
# Build stage: compile TypeScript and build UI
|
|
5
|
-
# =============================================================================
|
|
6
|
-
FROM node:20-bookworm-slim AS build
|
|
7
|
-
|
|
8
|
-
SHELL ["/bin/bash", "-lc"]
|
|
9
|
-
|
|
10
|
-
# Pin bun version for reproducible builds
|
|
11
|
-
ARG BUN_VERSION=1.2.21
|
|
12
|
-
ENV BUN_INSTALL=/usr/local/bun
|
|
13
|
-
ENV PATH="$BUN_INSTALL/bin:$PATH"
|
|
14
|
-
|
|
15
|
-
RUN apt-get update \
|
|
16
|
-
&& apt-get install -y --no-install-recommends curl ca-certificates unzip \
|
|
17
|
-
&& rm -rf /var/lib/apt/lists/*
|
|
18
|
-
|
|
19
|
-
# Install specific bun version
|
|
20
|
-
RUN curl -fsSL https://bun.sh/install | bash -s "bun-v${BUN_VERSION}"
|
|
21
|
-
|
|
22
|
-
WORKDIR /app
|
|
23
|
-
|
|
24
|
-
# Dependency install layer (avoid running install scripts inside the image build)
|
|
25
|
-
COPY package.json bun.lock bunfig.toml ./
|
|
26
|
-
COPY ui/package.json ui/bun.lock ./ui/
|
|
27
|
-
|
|
28
|
-
RUN bun install --frozen-lockfile --ignore-scripts \
|
|
29
|
-
&& (cd ui && bun install --frozen-lockfile --ignore-scripts)
|
|
30
|
-
|
|
31
|
-
COPY . .
|
|
32
|
-
|
|
33
|
-
RUN bun run build:all
|
|
34
|
-
|
|
35
|
-
# Validate build artifacts exist
|
|
36
|
-
RUN test -d dist && test -d lib && echo "[OK] Build artifacts validated"
|
|
37
|
-
|
|
38
|
-
# =============================================================================
|
|
39
|
-
# Runtime stage: minimal production image
|
|
40
|
-
# =============================================================================
|
|
41
|
-
FROM node:20-bookworm-slim AS runtime
|
|
42
|
-
|
|
43
|
-
SHELL ["/bin/bash", "-lc"]
|
|
44
|
-
|
|
45
|
-
# Pin bun version for reproducible builds
|
|
46
|
-
ARG BUN_VERSION=1.2.21
|
|
47
|
-
ENV BUN_INSTALL=/usr/local/bun
|
|
48
|
-
ENV PATH="$BUN_INSTALL/bin:/home/node/.opencode/bin:$PATH"
|
|
49
|
-
|
|
50
|
-
RUN apt-get update \
|
|
51
|
-
&& apt-get install -y --no-install-recommends curl ca-certificates unzip \
|
|
52
|
-
&& rm -rf /var/lib/apt/lists/*
|
|
53
|
-
|
|
54
|
-
# Install specific bun version
|
|
55
|
-
RUN curl -fsSL https://bun.sh/install | bash -s "bun-v${BUN_VERSION}"
|
|
56
|
-
|
|
57
|
-
WORKDIR /app
|
|
58
|
-
|
|
59
|
-
COPY package.json bun.lock ./
|
|
60
|
-
RUN bun install --frozen-lockfile --production --ignore-scripts
|
|
61
|
-
|
|
62
|
-
COPY docker/entrypoint.sh /usr/local/bin/ccs-entrypoint
|
|
63
|
-
RUN chmod +x /usr/local/bin/ccs-entrypoint
|
|
64
|
-
|
|
65
|
-
COPY --from=build /app/dist ./dist
|
|
66
|
-
COPY --from=build /app/lib ./lib
|
|
67
|
-
COPY --from=build /app/config ./config
|
|
68
|
-
COPY --from=build /app/scripts ./scripts
|
|
69
|
-
COPY --from=build /app/README.md ./README.md
|
|
70
|
-
COPY --from=build /app/LICENSE ./LICENSE
|
|
71
|
-
|
|
72
|
-
# Install AI CLI tools (using latest - pin versions in production if needed)
|
|
73
|
-
# These are optional tools for docker exec usage
|
|
74
|
-
RUN npm install -g @google/gemini-cli @vibe-kit/grok-cli @anthropic-ai/claude-code \
|
|
75
|
-
&& npm install -g @kaitranntt/ccs --force \
|
|
76
|
-
&& npm cache clean --force \
|
|
77
|
-
&& su -s /bin/bash node -c 'curl -fsSL https://opencode.ai/install | bash -s -- --no-modify-path' \
|
|
78
|
-
&& ln -sf /app/dist/ccs.js /usr/local/bin/ccs
|
|
79
|
-
|
|
80
|
-
ENV CCS_PORT=3000
|
|
81
|
-
EXPOSE 3000 8317
|
|
82
|
-
|
|
83
|
-
ENTRYPOINT ["/usr/local/bin/ccs-entrypoint"]
|
|
84
|
-
|
|
85
|
-
CMD ["bash", "-c", "node dist/ccs.js config --port ${CCS_PORT}"]
|
|
@@ -1,23 +0,0 @@
|
|
|
1
|
-
FROM eceasy/cli-proxy-api:latest
|
|
2
|
-
|
|
3
|
-
ARG CCS_NPM_VERSION=latest
|
|
4
|
-
|
|
5
|
-
RUN apk add --no-cache \
|
|
6
|
-
curl \
|
|
7
|
-
jq \
|
|
8
|
-
nodejs \
|
|
9
|
-
npm \
|
|
10
|
-
supervisor
|
|
11
|
-
|
|
12
|
-
RUN npm install -g @kaitranntt/ccs@${CCS_NPM_VERSION} \
|
|
13
|
-
&& ln -sf /usr/local/lib/node_modules/@kaitranntt/ccs/dist/docker/docker-bootstrap.js /usr/local/bin/ccs-docker-bootstrap
|
|
14
|
-
|
|
15
|
-
COPY supervisord.conf /etc/supervisord.conf
|
|
16
|
-
COPY entrypoint-integrated.sh /entrypoint-integrated.sh
|
|
17
|
-
|
|
18
|
-
RUN chmod +x /entrypoint-integrated.sh \
|
|
19
|
-
&& mkdir -p /var/log/ccs
|
|
20
|
-
|
|
21
|
-
EXPOSE 3000 8085 8317
|
|
22
|
-
|
|
23
|
-
ENTRYPOINT ["/entrypoint-integrated.sh"]
|
package/docker/README.md
DELETED
|
@@ -1,260 +0,0 @@
|
|
|
1
|
-
<div align="center">
|
|
2
|
-
|
|
3
|
-
# CCS Docker Deployment
|
|
4
|
-
|
|
5
|
-

|
|
6
|
-
|
|
7
|
-
### Run CCS in Docker, locally or over SSH.
|
|
8
|
-
Persistent config, restart on reboot.
|
|
9
|
-
|
|
10
|
-
**[Back to README](../README.md)**
|
|
11
|
-
|
|
12
|
-
</div>
|
|
13
|
-
|
|
14
|
-
<br>
|
|
15
|
-
|
|
16
|
-
## Preferred: `ccs docker`
|
|
17
|
-
|
|
18
|
-
The CLI now ships a first-class Docker command suite for the integrated CCS + CLIProxy stack:
|
|
19
|
-
|
|
20
|
-
```bash
|
|
21
|
-
ccs docker up
|
|
22
|
-
ccs docker status
|
|
23
|
-
ccs docker logs --follow
|
|
24
|
-
ccs docker config
|
|
25
|
-
ccs docker update
|
|
26
|
-
ccs docker down
|
|
27
|
-
```
|
|
28
|
-
|
|
29
|
-
Remote deployment stages the bundled Docker assets to `~/.ccs/docker` on the target host:
|
|
30
|
-
|
|
31
|
-
```bash
|
|
32
|
-
ccs docker up --host my-server
|
|
33
|
-
ccs docker --host my-server status
|
|
34
|
-
ccs docker status --host my-server
|
|
35
|
-
ccs docker logs --host my-server --service ccs --follow
|
|
36
|
-
ccs docker config --host my-server
|
|
37
|
-
```
|
|
38
|
-
|
|
39
|
-
Use a single SSH target or SSH config alias for `--host`. If you need custom SSH flags such as a port override, configure them in `~/.ssh/config` and reference the alias from `ccs docker`.
|
|
40
|
-
|
|
41
|
-
The `ccs docker` flow uses the integrated assets in this directory:
|
|
42
|
-
|
|
43
|
-
- `docker/Dockerfile.integrated`
|
|
44
|
-
- `docker/docker-compose.integrated.yml`
|
|
45
|
-
- `docker/supervisord.conf`
|
|
46
|
-
- `docker/entrypoint-integrated.sh`
|
|
47
|
-
|
|
48
|
-
## Prebuilt Image Quick Start
|
|
49
|
-
|
|
50
|
-
This existing image still runs the CCS dashboard and its locally managed CLIProxy inside one
|
|
51
|
-
container. It does not provide the remote staging and in-container self-update flow exposed by
|
|
52
|
-
`ccs docker`.
|
|
53
|
-
|
|
54
|
-
Pull the latest stable release image from GitHub Container Registry:
|
|
55
|
-
|
|
56
|
-
```bash
|
|
57
|
-
docker run -d \
|
|
58
|
-
--name ccs-dashboard \
|
|
59
|
-
--restart unless-stopped \
|
|
60
|
-
-p 3000:3000 \
|
|
61
|
-
-p 8317:8317 \
|
|
62
|
-
-e CCS_PORT=3000 \
|
|
63
|
-
-v ccs_home:/home/node/.ccs \
|
|
64
|
-
ghcr.io/kaitranntt/ccs-dashboard:latest
|
|
65
|
-
```
|
|
66
|
-
|
|
67
|
-
Release-tag images are also published as `ghcr.io/kaitranntt/ccs-dashboard:<version>`.
|
|
68
|
-
|
|
69
|
-
## Prebuilt Image Build Locally
|
|
70
|
-
|
|
71
|
-
```bash
|
|
72
|
-
docker build -f docker/Dockerfile -t ccs-dashboard:latest .
|
|
73
|
-
docker run -d \
|
|
74
|
-
--name ccs-dashboard \
|
|
75
|
-
--restart unless-stopped \
|
|
76
|
-
-p 3000:3000 \
|
|
77
|
-
-p 8317:8317 \
|
|
78
|
-
-e CCS_PORT=3000 \
|
|
79
|
-
-v ccs_home:/home/node/.ccs \
|
|
80
|
-
ccs-dashboard:latest
|
|
81
|
-
```
|
|
82
|
-
|
|
83
|
-
Open `http://localhost:3000` (Dashboard).
|
|
84
|
-
|
|
85
|
-
CCS also starts CLIProxy on `http://localhost:8317` (used by Dashboard features and OAuth providers).
|
|
86
|
-
|
|
87
|
-
## Environment Variables
|
|
88
|
-
|
|
89
|
-
Common CCS environment variables (from the docs):
|
|
90
|
-
|
|
91
|
-
- Docs: [Environment variables](https://docs.ccs.kaitran.ca/getting-started/configuration#environment-variables)
|
|
92
|
-
|
|
93
|
-
- `CCS_CONFIG`: override config file path
|
|
94
|
-
- `CCS_UNIFIED_CONFIG=1`: force unified YAML config loader
|
|
95
|
-
- `CCS_MIGRATE=1`: trigger config migration
|
|
96
|
-
- `CCS_SKIP_MIGRATION=1`: skip migrations
|
|
97
|
-
- `CCS_DEBUG=1`: enable verbose logs
|
|
98
|
-
- `NO_COLOR=1`: disable ANSI colors
|
|
99
|
-
- `CCS_SKIP_PREFLIGHT=1`: skip API key validation checks
|
|
100
|
-
- `CCS_WEBSEARCH_SKIP=1`: skip WebSearch hook integration
|
|
101
|
-
- Proxy: `CCS_PROXY_HOST`, `CCS_PROXY_PORT`, `CCS_PROXY_PROTOCOL`, `CCS_PROXY_AUTH_TOKEN`, `CCS_PROXY_TIMEOUT`, `CCS_PROXY_FALLBACK_ENABLED`, `CCS_ALLOW_SELF_SIGNED`
|
|
102
|
-
|
|
103
|
-
Example (passing env vars to the running container):
|
|
104
|
-
|
|
105
|
-
```bash
|
|
106
|
-
docker run -d \
|
|
107
|
-
--name ccs-dashboard \
|
|
108
|
-
--restart unless-stopped \
|
|
109
|
-
-p 3000:3000 \
|
|
110
|
-
-p 8317:8317 \
|
|
111
|
-
-e CCS_PORT=3000 \
|
|
112
|
-
-e CCS_DEBUG=1 \
|
|
113
|
-
-e NO_COLOR=1 \
|
|
114
|
-
-e CCS_PROXY_HOST="proxy.example.com" \
|
|
115
|
-
-e CCS_PROXY_PORT=443 \
|
|
116
|
-
-e CCS_PROXY_PROTOCOL="https" \
|
|
117
|
-
-v ccs_home:/home/node/.ccs \
|
|
118
|
-
ghcr.io/kaitranntt/ccs-dashboard:latest
|
|
119
|
-
```
|
|
120
|
-
|
|
121
|
-
## Useful Commands
|
|
122
|
-
|
|
123
|
-
```bash
|
|
124
|
-
docker logs -f ccs-dashboard
|
|
125
|
-
docker stop ccs-dashboard
|
|
126
|
-
docker start ccs-dashboard
|
|
127
|
-
docker rm -f ccs-dashboard
|
|
128
|
-
```
|
|
129
|
-
|
|
130
|
-
## Prebuilt Image Docker Compose (Optional)
|
|
131
|
-
|
|
132
|
-
Using the included `docker/docker-compose.yml`:
|
|
133
|
-
|
|
134
|
-
```bash
|
|
135
|
-
docker-compose -f docker/docker-compose.yml up --build -d
|
|
136
|
-
docker-compose -f docker/docker-compose.yml logs -f
|
|
137
|
-
```
|
|
138
|
-
|
|
139
|
-
Stop:
|
|
140
|
-
|
|
141
|
-
```bash
|
|
142
|
-
docker-compose -f docker/docker-compose.yml down
|
|
143
|
-
```
|
|
144
|
-
|
|
145
|
-
For the integrated CCS + CLIProxy stack managed by the CLI, use `ccs docker up` instead.
|
|
146
|
-
|
|
147
|
-
## Persistence
|
|
148
|
-
|
|
149
|
-
- CCS stores data in `/home/node/.ccs` inside the container.
|
|
150
|
-
- The examples use a named volume (`ccs_home`) to persist that data.
|
|
151
|
-
- Compose also persists `/home/node/.claude`, `/home/node/.opencode`, and `/home/node/.grok-cli` via named volumes.
|
|
152
|
-
|
|
153
|
-
## Resource Limits
|
|
154
|
-
|
|
155
|
-
For production deployments, limit container resources:
|
|
156
|
-
|
|
157
|
-
```bash
|
|
158
|
-
docker run -d \
|
|
159
|
-
--name ccs-dashboard \
|
|
160
|
-
--restart unless-stopped \
|
|
161
|
-
--memory=1g \
|
|
162
|
-
--cpus=2 \
|
|
163
|
-
-p 3000:3000 \
|
|
164
|
-
-p 8317:8317 \
|
|
165
|
-
-v ccs_home:/home/node/.ccs \
|
|
166
|
-
ghcr.io/kaitranntt/ccs-dashboard:latest
|
|
167
|
-
```
|
|
168
|
-
|
|
169
|
-
Docker Compose includes default limits (1GB RAM, 2 CPUs). Adjust in `docker-compose.yml` under `deploy.resources`.
|
|
170
|
-
|
|
171
|
-
## Graceful Shutdown
|
|
172
|
-
|
|
173
|
-
CCS handles `SIGTERM` gracefully. When stopping the container:
|
|
174
|
-
|
|
175
|
-
```bash
|
|
176
|
-
docker stop ccs-dashboard # Sends SIGTERM, waits 10s, then SIGKILL
|
|
177
|
-
docker stop -t 30 ccs-dashboard # Wait 30s for graceful shutdown
|
|
178
|
-
```
|
|
179
|
-
|
|
180
|
-
The `init: true` in docker-compose.yml ensures proper signal forwarding.
|
|
181
|
-
|
|
182
|
-
## Troubleshooting
|
|
183
|
-
|
|
184
|
-
### Permission Errors (EACCES)
|
|
185
|
-
|
|
186
|
-
If you see permission errors on startup:
|
|
187
|
-
|
|
188
|
-
```bash
|
|
189
|
-
# Check volume permissions
|
|
190
|
-
docker exec ccs-dashboard ls -la /home/node/.ccs
|
|
191
|
-
|
|
192
|
-
# Fix by recreating volumes
|
|
193
|
-
docker-compose down -v
|
|
194
|
-
docker-compose up -d
|
|
195
|
-
```
|
|
196
|
-
|
|
197
|
-
### Port Already in Use
|
|
198
|
-
|
|
199
|
-
```bash
|
|
200
|
-
# Check what's using the port
|
|
201
|
-
lsof -i :3000
|
|
202
|
-
lsof -i :8317
|
|
203
|
-
|
|
204
|
-
# Use different ports
|
|
205
|
-
docker run -p 4000:3000 -p 9317:8317 ...
|
|
206
|
-
|
|
207
|
-
# Or with compose
|
|
208
|
-
CCS_DASHBOARD_PORT=4000 CCS_CLIPROXY_PORT=9317 docker-compose up -d
|
|
209
|
-
```
|
|
210
|
-
|
|
211
|
-
### Container Keeps Restarting
|
|
212
|
-
|
|
213
|
-
```bash
|
|
214
|
-
# Check logs for errors
|
|
215
|
-
docker logs ccs-dashboard --tail 50
|
|
216
|
-
|
|
217
|
-
# Check container health
|
|
218
|
-
docker inspect ccs-dashboard --format='{{.State.Health.Status}}'
|
|
219
|
-
```
|
|
220
|
-
|
|
221
|
-
### Debug Mode
|
|
222
|
-
|
|
223
|
-
Enable verbose logging:
|
|
224
|
-
|
|
225
|
-
```bash
|
|
226
|
-
docker run -e CCS_DEBUG=1 ...
|
|
227
|
-
```
|
|
228
|
-
|
|
229
|
-
## Examples: Claude + Gemini inside Docker
|
|
230
|
-
|
|
231
|
-
Open a shell inside the running container:
|
|
232
|
-
|
|
233
|
-
```bash
|
|
234
|
-
docker exec -it ccs-dashboard bash
|
|
235
|
-
```
|
|
236
|
-
|
|
237
|
-
Claude (non-interactive / print mode):
|
|
238
|
-
|
|
239
|
-
```bash
|
|
240
|
-
docker exec -it ccs-dashboard claude -p "Hello from Docker"
|
|
241
|
-
```
|
|
242
|
-
|
|
243
|
-
Gemini (one-shot prompt):
|
|
244
|
-
|
|
245
|
-
```bash
|
|
246
|
-
docker exec -it ccs-dashboard gemini "Hello from Docker"
|
|
247
|
-
```
|
|
248
|
-
|
|
249
|
-
If you need to configure credentials, do it according to each CLI's docs:
|
|
250
|
-
|
|
251
|
-
```bash
|
|
252
|
-
docker exec -it ccs-dashboard claude --help
|
|
253
|
-
docker exec -it ccs-dashboard gemini --help
|
|
254
|
-
```
|
|
255
|
-
|
|
256
|
-
## Security Notes
|
|
257
|
-
|
|
258
|
-
- **Secrets**: For sensitive values like `CCS_PROXY_AUTH_TOKEN`, consider using Docker secrets or a `.env` file (not committed to git).
|
|
259
|
-
- **Network**: The container exposes ports 3000 and 8317. In production, use a reverse proxy (nginx, traefik) with TLS.
|
|
260
|
-
- **Updates**: Regularly rebuild the image to get security patches: `docker-compose build --pull`
|
|
@@ -1,32 +0,0 @@
|
|
|
1
|
-
services:
|
|
2
|
-
ccs-cliproxy:
|
|
3
|
-
build:
|
|
4
|
-
context: .
|
|
5
|
-
dockerfile: Dockerfile.integrated
|
|
6
|
-
args:
|
|
7
|
-
CCS_NPM_VERSION: "${CCS_NPM_VERSION:-latest}"
|
|
8
|
-
image: ccs-cliproxy:latest
|
|
9
|
-
container_name: ccs-cliproxy
|
|
10
|
-
restart: unless-stopped
|
|
11
|
-
init: true
|
|
12
|
-
ports:
|
|
13
|
-
- "${CCS_DASHBOARD_PORT:-3000}:3000"
|
|
14
|
-
- "${CCS_CLIPROXY_PORT:-8317}:8317"
|
|
15
|
-
environment:
|
|
16
|
-
CCS_PORT: 3000
|
|
17
|
-
NODE_ENV: production
|
|
18
|
-
NO_COLOR: "${NO_COLOR:-}"
|
|
19
|
-
CCS_DEBUG: "${CCS_DEBUG:-}"
|
|
20
|
-
volumes:
|
|
21
|
-
- ccs_home:/root/.ccs
|
|
22
|
-
- ccs_logs:/var/log/ccs
|
|
23
|
-
healthcheck:
|
|
24
|
-
test: ["CMD-SHELL", "curl -fsS --max-time 2 http://localhost:3000/ >/dev/null && curl -fsS --max-time 2 http://127.0.0.1:8317/ >/dev/null"]
|
|
25
|
-
interval: 10s
|
|
26
|
-
timeout: 3s
|
|
27
|
-
retries: 12
|
|
28
|
-
start_period: 30s
|
|
29
|
-
|
|
30
|
-
volumes:
|
|
31
|
-
ccs_home:
|
|
32
|
-
ccs_logs:
|
|
@@ -1,53 +0,0 @@
|
|
|
1
|
-
# CCS Dashboard Docker Compose
|
|
2
|
-
# See docker/README.md for documentation
|
|
3
|
-
services:
|
|
4
|
-
ccs-dashboard:
|
|
5
|
-
image: ccs-dashboard:latest
|
|
6
|
-
build:
|
|
7
|
-
context: ..
|
|
8
|
-
dockerfile: docker/Dockerfile
|
|
9
|
-
restart: unless-stopped
|
|
10
|
-
init: true
|
|
11
|
-
ports:
|
|
12
|
-
- "${CCS_DASHBOARD_PORT:-3000}:3000"
|
|
13
|
-
- "${CCS_CLIPROXY_PORT:-8317}:8317"
|
|
14
|
-
environment:
|
|
15
|
-
CCS_PORT: 3000
|
|
16
|
-
CCS_DEBUG: "${CCS_DEBUG:-}"
|
|
17
|
-
NO_COLOR: "${NO_COLOR:-}"
|
|
18
|
-
CCS_SKIP_PREFLIGHT: "${CCS_SKIP_PREFLIGHT:-}"
|
|
19
|
-
CCS_WEBSEARCH_SKIP: "${CCS_WEBSEARCH_SKIP:-}"
|
|
20
|
-
CCS_PROXY_HOST: "${CCS_PROXY_HOST:-}"
|
|
21
|
-
CCS_PROXY_PORT: "${CCS_PROXY_PORT:-}"
|
|
22
|
-
CCS_PROXY_PROTOCOL: "${CCS_PROXY_PROTOCOL:-}"
|
|
23
|
-
CCS_PROXY_AUTH_TOKEN: "${CCS_PROXY_AUTH_TOKEN:-}"
|
|
24
|
-
CCS_PROXY_TIMEOUT: "${CCS_PROXY_TIMEOUT:-}"
|
|
25
|
-
CCS_PROXY_FALLBACK_ENABLED: "${CCS_PROXY_FALLBACK_ENABLED:-}"
|
|
26
|
-
CCS_ALLOW_SELF_SIGNED: "${CCS_ALLOW_SELF_SIGNED:-}"
|
|
27
|
-
volumes:
|
|
28
|
-
- ccs_home:/home/node/.ccs
|
|
29
|
-
- claude_home:/home/node/.claude
|
|
30
|
-
- opencode_home:/home/node/.opencode
|
|
31
|
-
- grok_home:/home/node/.grok-cli
|
|
32
|
-
# Healthcheck uses internal ports (3000/8317) which are fixed
|
|
33
|
-
healthcheck:
|
|
34
|
-
test: ["CMD-SHELL", "curl -fsS --max-time 2 http://localhost:3000/ >/dev/null && curl -sS --max-time 2 http://127.0.0.1:8317/ >/dev/null"]
|
|
35
|
-
interval: 10s
|
|
36
|
-
timeout: 3s
|
|
37
|
-
retries: 12
|
|
38
|
-
start_period: 30s
|
|
39
|
-
# Resource limits (adjust based on workload)
|
|
40
|
-
deploy:
|
|
41
|
-
resources:
|
|
42
|
-
limits:
|
|
43
|
-
memory: 1G
|
|
44
|
-
cpus: '2'
|
|
45
|
-
reservations:
|
|
46
|
-
memory: 256M
|
|
47
|
-
cpus: '0.5'
|
|
48
|
-
|
|
49
|
-
volumes:
|
|
50
|
-
ccs_home:
|
|
51
|
-
claude_home:
|
|
52
|
-
opencode_home:
|
|
53
|
-
grok_home:
|
package/docker/entrypoint.sh
DELETED
|
@@ -1,34 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env bash
|
|
2
|
-
set -euo pipefail
|
|
3
|
-
|
|
4
|
-
ccs_home_dir="${CCS_HOME_DIR:-/home/node/.ccs}"
|
|
5
|
-
|
|
6
|
-
mkdir -p "$ccs_home_dir"
|
|
7
|
-
|
|
8
|
-
# Fix volume permissions if running as root
|
|
9
|
-
if [ "$(id -u)" = "0" ]; then
|
|
10
|
-
if ! chown -R node:node "$ccs_home_dir" 2>/dev/null; then
|
|
11
|
-
echo "[!] Warning: Could not change ownership of $ccs_home_dir (read-only volume?)" >&2
|
|
12
|
-
fi
|
|
13
|
-
fi
|
|
14
|
-
|
|
15
|
-
# Show usage if no command provided
|
|
16
|
-
if [ "$#" -eq 0 ]; then
|
|
17
|
-
echo "[X] No command provided" >&2
|
|
18
|
-
echo "" >&2
|
|
19
|
-
echo "Usage: docker run ccs-dashboard <command>" >&2
|
|
20
|
-
echo "" >&2
|
|
21
|
-
echo "Examples:" >&2
|
|
22
|
-
echo " docker run ccs-dashboard node dist/ccs.js config" >&2
|
|
23
|
-
echo " docker run ccs-dashboard ccs --help" >&2
|
|
24
|
-
echo "" >&2
|
|
25
|
-
exit 1
|
|
26
|
-
fi
|
|
27
|
-
|
|
28
|
-
# Drop privileges from root to node user
|
|
29
|
-
if [ "$(id -u)" = "0" ]; then
|
|
30
|
-
cmd="$(printf '%q ' "$@")"
|
|
31
|
-
exec su -s /bin/bash node -c "exec ${cmd}"
|
|
32
|
-
fi
|
|
33
|
-
|
|
34
|
-
exec "$@"
|
package/docker/supervisord.conf
DELETED
|
@@ -1,43 +0,0 @@
|
|
|
1
|
-
[unix_http_server]
|
|
2
|
-
file=/var/run/supervisor.sock
|
|
3
|
-
chmod=0700
|
|
4
|
-
|
|
5
|
-
[supervisord]
|
|
6
|
-
nodaemon=true
|
|
7
|
-
user=root
|
|
8
|
-
logfile=/var/log/supervisord.log
|
|
9
|
-
pidfile=/var/run/supervisord.pid
|
|
10
|
-
loglevel=info
|
|
11
|
-
|
|
12
|
-
[rpcinterface:supervisor]
|
|
13
|
-
supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface
|
|
14
|
-
|
|
15
|
-
[supervisorctl]
|
|
16
|
-
serverurl=unix:///var/run/supervisor.sock
|
|
17
|
-
|
|
18
|
-
[program:cliproxy]
|
|
19
|
-
command=node /usr/local/bin/ccs-docker-bootstrap run-cliproxy
|
|
20
|
-
directory=/root
|
|
21
|
-
autostart=true
|
|
22
|
-
autorestart=true
|
|
23
|
-
startsecs=5
|
|
24
|
-
startretries=3
|
|
25
|
-
stderr_logfile=/var/log/ccs/cliproxy.log
|
|
26
|
-
stderr_logfile_maxbytes=0
|
|
27
|
-
stdout_logfile=/var/log/ccs/cliproxy.log
|
|
28
|
-
stdout_logfile_maxbytes=0
|
|
29
|
-
priority=10
|
|
30
|
-
|
|
31
|
-
[program:ccs-dashboard]
|
|
32
|
-
command=ccs config --host 0.0.0.0 --port 3000
|
|
33
|
-
directory=/root
|
|
34
|
-
autostart=true
|
|
35
|
-
autorestart=true
|
|
36
|
-
startsecs=10
|
|
37
|
-
startretries=3
|
|
38
|
-
stderr_logfile=/var/log/ccs/ccs-dashboard.log
|
|
39
|
-
stderr_logfile_maxbytes=0
|
|
40
|
-
stdout_logfile=/var/log/ccs/ccs-dashboard.log
|
|
41
|
-
stdout_logfile_maxbytes=0
|
|
42
|
-
priority=20
|
|
43
|
-
environment=HOME="/root",NODE_ENV="production"
|