yzcode-cli 1.0.1 → 1.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/assistant/sessionHistory.ts +87 -0
- package/bootstrap/state.ts +1769 -0
- package/bridge/bridgeApi.ts +539 -0
- package/bridge/bridgeConfig.ts +48 -0
- package/bridge/bridgeDebug.ts +135 -0
- package/bridge/bridgeEnabled.ts +202 -0
- package/bridge/bridgeMain.ts +2999 -0
- package/bridge/bridgeMessaging.ts +461 -0
- package/bridge/bridgePermissionCallbacks.ts +43 -0
- package/bridge/bridgePointer.ts +210 -0
- package/bridge/bridgeStatusUtil.ts +163 -0
- package/bridge/bridgeUI.ts +530 -0
- package/bridge/capacityWake.ts +56 -0
- package/bridge/codeSessionApi.ts +168 -0
- package/bridge/createSession.ts +384 -0
- package/bridge/debugUtils.ts +141 -0
- package/bridge/envLessBridgeConfig.ts +165 -0
- package/bridge/flushGate.ts +71 -0
- package/bridge/inboundAttachments.ts +175 -0
- package/bridge/inboundMessages.ts +80 -0
- package/bridge/initReplBridge.ts +569 -0
- package/bridge/jwtUtils.ts +256 -0
- package/bridge/pollConfig.ts +110 -0
- package/bridge/pollConfigDefaults.ts +82 -0
- package/bridge/remoteBridgeCore.ts +1008 -0
- package/bridge/replBridge.ts +2406 -0
- package/bridge/replBridgeHandle.ts +36 -0
- package/bridge/replBridgeTransport.ts +370 -0
- package/bridge/sessionIdCompat.ts +57 -0
- package/bridge/sessionRunner.ts +550 -0
- package/bridge/trustedDevice.ts +210 -0
- package/bridge/types.ts +262 -0
- package/bridge/workSecret.ts +127 -0
- package/buddy/CompanionSprite.tsx +371 -0
- package/buddy/companion.ts +133 -0
- package/buddy/prompt.ts +36 -0
- package/buddy/sprites.ts +514 -0
- package/buddy/types.ts +148 -0
- package/buddy/useBuddyNotification.tsx +98 -0
- package/coordinator/coordinatorMode.ts +369 -0
- package/memdir/findRelevantMemories.ts +141 -0
- package/memdir/memdir.ts +507 -0
- package/memdir/memoryAge.ts +53 -0
- package/memdir/memoryScan.ts +94 -0
- package/memdir/memoryTypes.ts +271 -0
- package/memdir/paths.ts +278 -0
- package/memdir/teamMemPaths.ts +292 -0
- package/memdir/teamMemPrompts.ts +100 -0
- package/migrations/migrateAutoUpdatesToSettings.ts +61 -0
- package/migrations/migrateBypassPermissionsAcceptedToSettings.ts +40 -0
- package/migrations/migrateEnableAllProjectMcpServersToSettings.ts +118 -0
- package/migrations/migrateFennecToOpus.ts +45 -0
- package/migrations/migrateLegacyOpusToCurrent.ts +57 -0
- package/migrations/migrateOpusToOpus1m.ts +43 -0
- package/migrations/migrateReplBridgeEnabledToRemoteControlAtStartup.ts +22 -0
- package/migrations/migrateSonnet1mToSonnet45.ts +48 -0
- package/migrations/migrateSonnet45ToSonnet46.ts +67 -0
- package/migrations/resetAutoModeOptInForDefaultOffer.ts +51 -0
- package/migrations/resetProToOpusDefault.ts +51 -0
- package/native-ts/color-diff/index.ts +999 -0
- package/native-ts/file-index/index.ts +370 -0
- package/native-ts/yoga-layout/enums.ts +134 -0
- package/native-ts/yoga-layout/index.ts +2578 -0
- package/outputStyles/loadOutputStylesDir.ts +98 -0
- package/package.json +22 -5
- package/plugins/builtinPlugins.ts +159 -0
- package/plugins/bundled/index.ts +23 -0
- package/schemas/hooks.ts +222 -0
- package/screens/Doctor.tsx +575 -0
- package/screens/REPL.tsx +5006 -0
- package/screens/ResumeConversation.tsx +399 -0
- package/server/createDirectConnectSession.ts +88 -0
- package/server/directConnectManager.ts +213 -0
- package/server/types.ts +57 -0
- package/skills/bundled/batch.ts +124 -0
- package/skills/bundled/claudeApi.ts +196 -0
- package/skills/bundled/claudeApiContent.ts +75 -0
- package/skills/bundled/claudeInChrome.ts +34 -0
- package/skills/bundled/debug.ts +103 -0
- package/skills/bundled/index.ts +79 -0
- package/skills/bundled/keybindings.ts +339 -0
- package/skills/bundled/loop.ts +92 -0
- package/skills/bundled/loremIpsum.ts +282 -0
- package/skills/bundled/remember.ts +82 -0
- package/skills/bundled/scheduleRemoteAgents.ts +447 -0
- package/skills/bundled/simplify.ts +69 -0
- package/skills/bundled/skillify.ts +197 -0
- package/skills/bundled/stuck.ts +79 -0
- package/skills/bundled/updateConfig.ts +475 -0
- package/skills/bundled/verify/SKILL.md +3 -0
- package/skills/bundled/verify/examples/cli.md +3 -0
- package/skills/bundled/verify/examples/server.md +3 -0
- package/skills/bundled/verify.ts +30 -0
- package/skills/bundled/verifyContent.ts +13 -0
- package/skills/bundledSkills.ts +220 -0
- package/skills/loadSkillsDir.ts +1086 -0
- package/skills/mcpSkillBuilders.ts +44 -0
- package/tasks/DreamTask/DreamTask.ts +157 -0
- package/tasks/InProcessTeammateTask/InProcessTeammateTask.tsx +126 -0
- package/tasks/InProcessTeammateTask/types.ts +121 -0
- package/tasks/LocalAgentTask/LocalAgentTask.tsx +683 -0
- package/tasks/LocalMainSessionTask.ts +479 -0
- package/tasks/LocalShellTask/LocalShellTask.tsx +523 -0
- package/tasks/LocalShellTask/guards.ts +41 -0
- package/tasks/LocalShellTask/killShellTasks.ts +76 -0
- package/tasks/RemoteAgentTask/RemoteAgentTask.tsx +856 -0
- package/tasks/pillLabel.ts +82 -0
- package/tasks/stopTask.ts +100 -0
- package/tasks/types.ts +46 -0
- package/upstreamproxy/relay.ts +455 -0
- package/upstreamproxy/upstreamproxy.ts +285 -0
- package/vim/motions.ts +82 -0
- package/vim/operators.ts +556 -0
- package/vim/textObjects.ts +186 -0
- package/vim/transitions.ts +490 -0
- package/vim/types.ts +199 -0
- package/voice/voiceModeEnabled.ts +54 -0
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
import { DIAMOND_FILLED, DIAMOND_OPEN } from '../constants/figures.js'
|
|
2
|
+
import { count } from '../utils/array.js'
|
|
3
|
+
import type { BackgroundTaskState } from './types.js'
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* Produces the compact footer-pill label for a set of background tasks.
|
|
7
|
+
* Used by both the footer pill and the turn-duration transcript line so the
|
|
8
|
+
* two surfaces agree on terminology.
|
|
9
|
+
*/
|
|
10
|
+
export function getPillLabel(tasks: BackgroundTaskState[]): string {
|
|
11
|
+
const n = tasks.length
|
|
12
|
+
const allSameType = tasks.every(t => t.type === tasks[0]!.type)
|
|
13
|
+
|
|
14
|
+
if (allSameType) {
|
|
15
|
+
switch (tasks[0]!.type) {
|
|
16
|
+
case 'local_bash': {
|
|
17
|
+
const monitors = count(
|
|
18
|
+
tasks,
|
|
19
|
+
t => t.type === 'local_bash' && t.kind === 'monitor',
|
|
20
|
+
)
|
|
21
|
+
const shells = n - monitors
|
|
22
|
+
const parts: string[] = []
|
|
23
|
+
if (shells > 0)
|
|
24
|
+
parts.push(shells === 1 ? '1 shell' : `${shells} shells`)
|
|
25
|
+
if (monitors > 0)
|
|
26
|
+
parts.push(monitors === 1 ? '1 monitor' : `${monitors} monitors`)
|
|
27
|
+
return parts.join(', ')
|
|
28
|
+
}
|
|
29
|
+
case 'in_process_teammate': {
|
|
30
|
+
const teamCount = new Set(
|
|
31
|
+
tasks.map(t =>
|
|
32
|
+
t.type === 'in_process_teammate' ? t.identity.teamName : '',
|
|
33
|
+
),
|
|
34
|
+
).size
|
|
35
|
+
return teamCount === 1 ? '1 team' : `${teamCount} teams`
|
|
36
|
+
}
|
|
37
|
+
case 'local_agent':
|
|
38
|
+
return n === 1 ? '1 local agent' : `${n} local agents`
|
|
39
|
+
case 'remote_agent': {
|
|
40
|
+
const first = tasks[0]!
|
|
41
|
+
// Per design mockup: ◇ open diamond while running/needs-input,
|
|
42
|
+
// ◆ filled once ExitPlanMode is awaiting approval.
|
|
43
|
+
if (n === 1 && first.type === 'remote_agent' && first.isUltraplan) {
|
|
44
|
+
switch (first.ultraplanPhase) {
|
|
45
|
+
case 'plan_ready':
|
|
46
|
+
return `${DIAMOND_FILLED} ultraplan ready`
|
|
47
|
+
case 'needs_input':
|
|
48
|
+
return `${DIAMOND_OPEN} ultraplan needs your input`
|
|
49
|
+
default:
|
|
50
|
+
return `${DIAMOND_OPEN} ultraplan`
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
return n === 1
|
|
54
|
+
? `${DIAMOND_OPEN} 1 cloud session`
|
|
55
|
+
: `${DIAMOND_OPEN} ${n} cloud sessions`
|
|
56
|
+
}
|
|
57
|
+
case 'local_workflow':
|
|
58
|
+
return n === 1 ? '1 background workflow' : `${n} background workflows`
|
|
59
|
+
case 'monitor_mcp':
|
|
60
|
+
return n === 1 ? '1 monitor' : `${n} monitors`
|
|
61
|
+
case 'dream':
|
|
62
|
+
return 'dreaming'
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
return `${n} background ${n === 1 ? 'task' : 'tasks'}`
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
/**
|
|
70
|
+
* True when the pill should show the dimmed " · ↓ to view" call-to-action.
|
|
71
|
+
* Per the state diagram: only the two attention states (needs_input,
|
|
72
|
+
* plan_ready) surface the CTA; plain running shows just the diamond + label.
|
|
73
|
+
*/
|
|
74
|
+
export function pillNeedsCta(tasks: BackgroundTaskState[]): boolean {
|
|
75
|
+
if (tasks.length !== 1) return false
|
|
76
|
+
const t = tasks[0]!
|
|
77
|
+
return (
|
|
78
|
+
t.type === 'remote_agent' &&
|
|
79
|
+
t.isUltraplan === true &&
|
|
80
|
+
t.ultraplanPhase !== undefined
|
|
81
|
+
)
|
|
82
|
+
}
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
// Shared logic for stopping a running task.
|
|
2
|
+
// Used by TaskStopTool (LLM-invoked) and SDK stop_task control request.
|
|
3
|
+
|
|
4
|
+
import type { AppState } from '../state/AppState.js'
|
|
5
|
+
import type { TaskStateBase } from '../Task.js'
|
|
6
|
+
import { getTaskByType } from '../tasks.js'
|
|
7
|
+
import { emitTaskTerminatedSdk } from '../utils/sdkEventQueue.js'
|
|
8
|
+
import { isLocalShellTask } from './LocalShellTask/guards.js'
|
|
9
|
+
|
|
10
|
+
/**
 * Raised when a task cannot be stopped. `code` lets callers branch on the
 * failure reason without parsing the human-readable message.
 */
export class StopTaskError extends Error {
  constructor(
    message: string,
    public readonly code: 'not_found' | 'not_running' | 'unsupported_type',
  ) {
    super(message)
    // Name the error class so logs/stack traces identify it.
    this.name = 'StopTaskError'
  }
}
|
|
19
|
+
|
|
20
|
+
// Accessors stopTask needs to read and update the app state.
type StopTaskContext = {
  // Returns the current app-state snapshot.
  getAppState: () => AppState
  // Functional updater; receives the previous state and returns the next.
  setAppState: (f: (prev: AppState) => AppState) => void
}

// What stopTask reports back about the task it stopped.
type StopTaskResult = {
  taskId: string
  taskType: string
  // Shell tasks report their command line; other task types fall back to
  // their description (see stopTask).
  command: string | undefined
}
|
|
30
|
+
|
|
31
|
+
/**
 * Look up a task by ID, validate it is running, kill it, and mark it as notified.
 *
 * @param taskId - Key into `appState.tasks` identifying the task to stop.
 * @param context - App-state accessors used to find the task and flag it notified.
 * @returns The stopped task's id and type, plus its command (shell tasks) or
 *   description (other task types).
 *
 * Throws {@link StopTaskError} when the task cannot be stopped (not found,
 * not running, or unsupported type). Callers can inspect `error.code` to
 * distinguish the failure reason.
 */
export async function stopTask(
  taskId: string,
  context: StopTaskContext,
): Promise<StopTaskResult> {
  const { getAppState, setAppState } = context
  const appState = getAppState()
  const task = appState.tasks?.[taskId] as TaskStateBase | undefined

  if (!task) {
    throw new StopTaskError(`No task found with ID: ${taskId}`, 'not_found')
  }

  if (task.status !== 'running') {
    throw new StopTaskError(
      `Task ${taskId} is not running (status: ${task.status})`,
      'not_running',
    )
  }

  const taskImpl = getTaskByType(task.type)
  if (!taskImpl) {
    throw new StopTaskError(
      `Unsupported task type: ${task.type}`,
      'unsupported_type',
    )
  }

  await taskImpl.kill(taskId, setAppState)

  // Bash: suppress the "exit code 137" notification (noise). Agent tasks: don't
  // suppress — the AbortError catch sends a notification carrying
  // extractPartialResult(agentMessages), which is the payload not noise.
  if (isLocalShellTask(task)) {
    let suppressed = false
    setAppState(prev => {
      // Re-read from prev, not the snapshot above: kill() may have already
      // updated the task via setAppState.
      const prevTask = prev.tasks[taskId]
      if (!prevTask || prevTask.notified) {
        // Gone or already notified — nothing to suppress.
        return prev
      }
      suppressed = true
      return {
        ...prev,
        tasks: {
          ...prev.tasks,
          [taskId]: { ...prevTask, notified: true },
        },
      }
    })
    // Suppressing the XML notification also suppresses print.ts's parsed
    // task_notification SDK event — emit it directly so SDK consumers see
    // the task close.
    if (suppressed) {
      emitTaskTerminatedSdk(taskId, 'stopped', {
        toolUseId: task.toolUseId,
        summary: task.description,
      })
    }
  }

  const command = isLocalShellTask(task) ? task.command : task.description

  return { taskId, taskType: task.type, command }
}
|
package/tasks/types.ts
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
// Union of all concrete task state types
|
|
2
|
+
// Use this for components that need to work with any task type
|
|
3
|
+
|
|
4
|
+
import type { DreamTaskState } from './DreamTask/DreamTask.js'
|
|
5
|
+
import type { InProcessTeammateTaskState } from './InProcessTeammateTask/types.js'
|
|
6
|
+
import type { LocalAgentTaskState } from './LocalAgentTask/LocalAgentTask.js'
|
|
7
|
+
import type { LocalShellTaskState } from './LocalShellTask/guards.js'
|
|
8
|
+
import type { LocalWorkflowTaskState } from './LocalWorkflowTask/LocalWorkflowTask.js'
|
|
9
|
+
import type { MonitorMcpTaskState } from './MonitorMcpTask/MonitorMcpTask.js'
|
|
10
|
+
import type { RemoteAgentTaskState } from './RemoteAgentTask/RemoteAgentTask.js'
|
|
11
|
+
|
|
12
|
+
// Union of every concrete task state. Use for components that must accept
// any task type.
export type TaskState =
  | LocalShellTaskState
  | LocalAgentTaskState
  | RemoteAgentTaskState
  | InProcessTeammateTaskState
  | LocalWorkflowTaskState
  | MonitorMcpTaskState
  | DreamTaskState

// Task types that can appear in the background tasks indicator.
// NOTE(review): currently identical to TaskState; kept as a separate alias so
// the indicator's accepted set can diverge without touching TaskState callers.
export type BackgroundTaskState =
  | LocalShellTaskState
  | LocalAgentTaskState
  | RemoteAgentTaskState
  | InProcessTeammateTaskState
  | LocalWorkflowTaskState
  | MonitorMcpTaskState
  | DreamTaskState
|
30
|
+
|
|
31
|
+
/**
|
|
32
|
+
* Check if a task should be shown in the background tasks indicator.
|
|
33
|
+
* A task is considered a background task if:
|
|
34
|
+
* 1. It is running or pending
|
|
35
|
+
* 2. It has been explicitly backgrounded (not a foreground task)
|
|
36
|
+
*/
|
|
37
|
+
export function isBackgroundTask(task: TaskState): task is BackgroundTaskState {
|
|
38
|
+
if (task.status !== 'running' && task.status !== 'pending') {
|
|
39
|
+
return false
|
|
40
|
+
}
|
|
41
|
+
// Foreground tasks (isBackgrounded === false) are not yet "background tasks"
|
|
42
|
+
if ('isBackgrounded' in task && task.isBackgrounded === false) {
|
|
43
|
+
return false
|
|
44
|
+
}
|
|
45
|
+
return true
|
|
46
|
+
}
|
|
@@ -0,0 +1,455 @@
|
|
|
1
|
+
/* eslint-disable eslint-plugin-n/no-unsupported-features/node-builtins */
|
|
2
|
+
/**
|
|
3
|
+
* CONNECT-over-WebSocket relay for CCR upstreamproxy.
|
|
4
|
+
*
|
|
5
|
+
* Listens on localhost TCP, accepts HTTP CONNECT from curl/gh/kubectl/etc,
|
|
6
|
+
* and tunnels bytes over WebSocket to the CCR upstreamproxy endpoint.
|
|
7
|
+
* The CCR server-side terminates the tunnel, MITMs TLS, injects org-configured
|
|
8
|
+
* credentials (e.g. DD-API-KEY), and forwards to the real upstream.
|
|
9
|
+
*
|
|
10
|
+
* WHY WebSocket and not raw CONNECT: CCR ingress is GKE L7 with path-prefix
|
|
11
|
+
* routing; there's no connect_matcher in cdk-constructs. The session-ingress
|
|
12
|
+
* tunnel (sessions/tunnel/v1alpha/tunnel.proto) already uses this pattern.
|
|
13
|
+
*
|
|
14
|
+
* Protocol: bytes are wrapped in UpstreamProxyChunk protobuf messages
|
|
15
|
+
* (`message UpstreamProxyChunk { bytes data = 1; }`) for compatibility with
|
|
16
|
+
* gateway.NewWebSocketStreamAdapter on the server side.
|
|
17
|
+
*/
|
|
18
|
+
|
|
19
|
+
import { createServer, type Socket as NodeSocket } from 'node:net'
|
|
20
|
+
import { logForDebugging } from '../utils/debug.js'
|
|
21
|
+
import { getWebSocketTLSOptions } from '../utils/mtls.js'
|
|
22
|
+
import { getWebSocketProxyAgent, getWebSocketProxyUrl } from '../utils/proxy.js'
|
|
23
|
+
|
|
24
|
+
// The CCR container runs behind an egress gateway — direct outbound is
|
|
25
|
+
// blocked, so the WS upgrade must go through the same HTTP CONNECT proxy
|
|
26
|
+
// everything else uses. undici's globalThis.WebSocket does not consult
|
|
27
|
+
// the global dispatcher for the upgrade, so under Node we use the ws package
|
|
28
|
+
// with an explicit agent (same pattern as SessionsWebSocket). Bun's native
|
|
29
|
+
// WebSocket takes a proxy URL directly. Preloaded in startNodeRelay so
|
|
30
|
+
// openTunnel stays synchronous and the CONNECT state machine doesn't race.
|
|
31
|
+
type WSCtor = typeof import('ws').default
|
|
32
|
+
let nodeWSCtor: WSCtor | undefined
|
|
33
|
+
|
|
34
|
+
// Intersection of the surface openTunnel touches. Both undici's
|
|
35
|
+
// globalThis.WebSocket and the ws package satisfy this via property-style
|
|
36
|
+
// onX handlers.
|
|
37
|
+
type WebSocketLike = Pick<
|
|
38
|
+
WebSocket,
|
|
39
|
+
| 'onopen'
|
|
40
|
+
| 'onmessage'
|
|
41
|
+
| 'onerror'
|
|
42
|
+
| 'onclose'
|
|
43
|
+
| 'send'
|
|
44
|
+
| 'close'
|
|
45
|
+
| 'readyState'
|
|
46
|
+
| 'binaryType'
|
|
47
|
+
>
|
|
48
|
+
|
|
49
|
+
// Envoy per-request buffer cap. Week-1 Datadog payloads won't hit this, but
|
|
50
|
+
// design for it so git-push doesn't need a relay rewrite.
|
|
51
|
+
const MAX_CHUNK_BYTES = 512 * 1024
|
|
52
|
+
|
|
53
|
+
// Sidecar idle timeout is 50s; ping well inside that.
|
|
54
|
+
const PING_INTERVAL_MS = 30_000
|
|
55
|
+
|
|
56
|
+
/**
|
|
57
|
+
* Encode an UpstreamProxyChunk protobuf message by hand.
|
|
58
|
+
*
|
|
59
|
+
* For `message UpstreamProxyChunk { bytes data = 1; }` the wire format is:
|
|
60
|
+
* tag = (field_number << 3) | wire_type = (1 << 3) | 2 = 0x0a
|
|
61
|
+
* followed by varint length, followed by the bytes.
|
|
62
|
+
*
|
|
63
|
+
* protobufjs would be the general answer; for a single-field bytes message
|
|
64
|
+
* the hand encoding is 10 lines and avoids a runtime dep in the hot path.
|
|
65
|
+
*/
|
|
66
|
+
export function encodeChunk(data: Uint8Array): Uint8Array {
|
|
67
|
+
const len = data.length
|
|
68
|
+
// varint encoding of length — most chunks fit in 1–3 length bytes
|
|
69
|
+
const varint: number[] = []
|
|
70
|
+
let n = len
|
|
71
|
+
while (n > 0x7f) {
|
|
72
|
+
varint.push((n & 0x7f) | 0x80)
|
|
73
|
+
n >>>= 7
|
|
74
|
+
}
|
|
75
|
+
varint.push(n)
|
|
76
|
+
const out = new Uint8Array(1 + varint.length + len)
|
|
77
|
+
out[0] = 0x0a
|
|
78
|
+
out.set(varint, 1)
|
|
79
|
+
out.set(data, 1 + varint.length)
|
|
80
|
+
return out
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
/**
|
|
84
|
+
* Decode an UpstreamProxyChunk. Returns the data field, or null if malformed.
|
|
85
|
+
* Tolerates the server sending a zero-length chunk (keepalive semantics).
|
|
86
|
+
*/
|
|
87
|
+
export function decodeChunk(buf: Uint8Array): Uint8Array | null {
|
|
88
|
+
if (buf.length === 0) return new Uint8Array(0)
|
|
89
|
+
if (buf[0] !== 0x0a) return null
|
|
90
|
+
let len = 0
|
|
91
|
+
let shift = 0
|
|
92
|
+
let i = 1
|
|
93
|
+
while (i < buf.length) {
|
|
94
|
+
const b = buf[i]!
|
|
95
|
+
len |= (b & 0x7f) << shift
|
|
96
|
+
i++
|
|
97
|
+
if ((b & 0x80) === 0) break
|
|
98
|
+
shift += 7
|
|
99
|
+
if (shift > 28) return null
|
|
100
|
+
}
|
|
101
|
+
if (i + len > buf.length) return null
|
|
102
|
+
return buf.subarray(i, i + len)
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
export type UpstreamProxyRelay = {
|
|
106
|
+
port: number
|
|
107
|
+
stop: () => void
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
type ConnState = {
|
|
111
|
+
ws?: WebSocketLike
|
|
112
|
+
connectBuf: Buffer
|
|
113
|
+
pinger?: ReturnType<typeof setInterval>
|
|
114
|
+
// Bytes that arrived after the CONNECT header but before ws.onopen fired.
|
|
115
|
+
// TCP can coalesce CONNECT + ClientHello into one packet, and the socket's
|
|
116
|
+
// data callback can fire again while the WS handshake is still in flight.
|
|
117
|
+
// Both cases would silently drop bytes without this buffer.
|
|
118
|
+
pending: Buffer[]
|
|
119
|
+
wsOpen: boolean
|
|
120
|
+
// Set once the server's 200 Connection Established has been forwarded and
|
|
121
|
+
// the tunnel is carrying TLS. After that, writing a plaintext 502 would
|
|
122
|
+
// corrupt the client's TLS stream — just close instead.
|
|
123
|
+
established: boolean
|
|
124
|
+
// WS onerror is always followed by onclose; without a guard the second
|
|
125
|
+
// handler would sock.end() an already-ended socket. First caller wins.
|
|
126
|
+
closed: boolean
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
/**
|
|
130
|
+
* Minimal socket abstraction so the CONNECT parser and WS tunnel plumbing
|
|
131
|
+
* are runtime-agnostic. Implementations handle write backpressure internally:
|
|
132
|
+
* Bun's sock.write() does partial writes and needs explicit tail-queueing;
|
|
133
|
+
* Node's net.Socket buffers unconditionally and never drops bytes.
|
|
134
|
+
*/
|
|
135
|
+
type ClientSocket = {
|
|
136
|
+
write: (data: Uint8Array | string) => void
|
|
137
|
+
end: () => void
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
function newConnState(): ConnState {
|
|
141
|
+
return {
|
|
142
|
+
connectBuf: Buffer.alloc(0),
|
|
143
|
+
pending: [],
|
|
144
|
+
wsOpen: false,
|
|
145
|
+
established: false,
|
|
146
|
+
closed: false,
|
|
147
|
+
}
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
/**
 * Start the relay. Returns the ephemeral port it bound and a stop function.
 * Uses Bun.listen when available, otherwise Node's net.createServer — the CCR
 * container runs the CLI under Node, not Bun.
 *
 * @param opts.wsUrl - CCR upstreamproxy WebSocket endpoint.
 * @param opts.sessionId - Basic-auth username on the tunneled CONNECT's
 *   Proxy-Authorization header.
 * @param opts.token - Used both as the Basic-auth password and as the Bearer
 *   token on the WS upgrade request.
 */
export async function startUpstreamProxyRelay(opts: {
  wsUrl: string
  sessionId: string
  token: string
}): Promise<UpstreamProxyRelay> {
  const authHeader =
    'Basic ' + Buffer.from(`${opts.sessionId}:${opts.token}`).toString('base64')
  // WS upgrade itself is auth-gated (proto authn: PRIVATE_API) — the gateway
  // wants the session-ingress JWT on the upgrade request, separate from the
  // Proxy-Authorization that rides inside the tunneled CONNECT.
  const wsAuthHeader = `Bearer ${opts.token}`

  const relay =
    typeof Bun !== 'undefined'
      ? startBunRelay(opts.wsUrl, authHeader, wsAuthHeader)
      : await startNodeRelay(opts.wsUrl, authHeader, wsAuthHeader)

  logForDebugging(`[upstreamproxy] relay listening on 127.0.0.1:${relay.port}`)
  return relay
}
|
|
175
|
+
|
|
176
|
+
/**
 * Bun implementation of the relay listener.
 *
 * @param wsUrl - CCR upstreamproxy WebSocket endpoint.
 * @param authHeader - Basic Proxy-Authorization value for the tunneled CONNECT.
 * @param wsAuthHeader - Bearer Authorization value for the WS upgrade request.
 */
function startBunRelay(
  wsUrl: string,
  authHeader: string,
  wsAuthHeader: string,
): UpstreamProxyRelay {
  // Bun TCP sockets don't auto-buffer partial writes: sock.write() returns
  // the byte count actually handed to the kernel, and the remainder is
  // silently dropped. When the kernel buffer fills, we queue the tail and
  // let the drain handler flush it. Per-socket because the adapter closure
  // outlives individual handler calls.
  type BunState = ConnState & { writeBuf: Uint8Array[] }

  // eslint-disable-next-line custom-rules/require-bun-typeof-guard -- caller dispatches on typeof Bun
  const server = Bun.listen<BunState>({
    hostname: '127.0.0.1',
    // Port 0 = let the OS pick an ephemeral port; reported via server.port.
    port: 0,
    socket: {
      open(sock) {
        sock.data = { ...newConnState(), writeBuf: [] }
      },
      data(sock, data) {
        const st = sock.data
        const adapter: ClientSocket = {
          write: payload => {
            const bytes =
              typeof payload === 'string'
                ? Buffer.from(payload, 'utf8')
                : payload
            // Preserve byte order: if a tail is already queued, new data
            // must queue behind it rather than jump the line.
            if (st.writeBuf.length > 0) {
              st.writeBuf.push(bytes)
              return
            }
            const n = sock.write(bytes)
            // Partial write — keep the unsent tail for the drain handler.
            if (n < bytes.length) st.writeBuf.push(bytes.subarray(n))
          },
          end: () => sock.end(),
        }
        handleData(adapter, st, data, wsUrl, authHeader, wsAuthHeader)
      },
      drain(sock) {
        // Kernel buffer has room again — flush queued tails FIFO, stopping
        // (and re-queueing the remainder) on the next partial write.
        const st = sock.data
        while (st.writeBuf.length > 0) {
          const chunk = st.writeBuf[0]!
          const n = sock.write(chunk)
          if (n < chunk.length) {
            st.writeBuf[0] = chunk.subarray(n)
            return
          }
          st.writeBuf.shift()
        }
      },
      close(sock) {
        cleanupConn(sock.data)
      },
      error(sock, err) {
        logForDebugging(`[upstreamproxy] client socket error: ${err.message}`)
        cleanupConn(sock.data)
      },
    },
  })

  return {
    port: server.port,
    stop: () => server.stop(true),
  }
}
|
|
242
|
+
|
|
243
|
+
// Exported so tests can exercise the Node path directly — the test runner is
// Bun, so the runtime dispatch in startUpstreamProxyRelay always picks Bun.
export async function startNodeRelay(
  wsUrl: string,
  authHeader: string,
  wsAuthHeader: string,
): Promise<UpstreamProxyRelay> {
  // Preload the ws constructor here so openTunnel can stay synchronous
  // (see the comment on nodeWSCtor above).
  nodeWSCtor = (await import('ws')).default
  // WeakMap so per-connection state is GC'd with its socket.
  const states = new WeakMap<NodeSocket, ConnState>()

  const server = createServer(sock => {
    const st = newConnState()
    states.set(sock, st)
    // Node's sock.write() buffers internally — a false return signals
    // backpressure but the bytes are already queued, so no tail-tracking
    // needed for correctness. Week-1 payloads won't stress the buffer.
    const adapter: ClientSocket = {
      write: payload => {
        sock.write(typeof payload === 'string' ? payload : Buffer.from(payload))
      },
      end: () => sock.end(),
    }
    sock.on('data', data =>
      handleData(adapter, st, data, wsUrl, authHeader, wsAuthHeader),
    )
    sock.on('close', () => cleanupConn(states.get(sock)))
    sock.on('error', err => {
      logForDebugging(`[upstreamproxy] client socket error: ${err.message}`)
      cleanupConn(states.get(sock))
    })
  })

  return new Promise((resolve, reject) => {
    // Reject rather than hang if the bind fails (e.g. no free ports).
    server.once('error', reject)
    server.listen(0, '127.0.0.1', () => {
      const addr = server.address()
      // address() is a string only for pipe/unix-socket servers; a TCP
      // listener must report an object with a port.
      if (addr === null || typeof addr === 'string') {
        reject(new Error('upstreamproxy: server has no TCP address'))
        return
      }
      resolve({
        port: addr.port,
        stop: () => server.close(),
      })
    })
  })
}
|
|
290
|
+
|
|
291
|
+
/**
 * Shared per-connection data handler. Phase 1 accumulates the CONNECT request;
 * phase 2 forwards client bytes over the WS tunnel.
 *
 * @param sock - Runtime-agnostic client socket adapter.
 * @param st - Per-connection state (CONNECT buffer, WS handle, pending bytes).
 * @param data - The newly arrived client bytes.
 */
function handleData(
  sock: ClientSocket,
  st: ConnState,
  data: Buffer,
  wsUrl: string,
  authHeader: string,
  wsAuthHeader: string,
): void {
  // Phase 1: accumulate until we've seen the full CONNECT request
  // (terminated by CRLF CRLF). curl/gh send this in one packet, but
  // don't assume that.
  if (!st.ws) {
    st.connectBuf = Buffer.concat([st.connectBuf, data])
    const headerEnd = st.connectBuf.indexOf('\r\n\r\n')
    if (headerEnd === -1) {
      // Guard against a client that never sends CRLFCRLF.
      if (st.connectBuf.length > 8192) {
        sock.write('HTTP/1.1 400 Bad Request\r\n\r\n')
        sock.end()
      }
      return
    }
    const reqHead = st.connectBuf.subarray(0, headerEnd).toString('utf8')
    const firstLine = reqHead.split('\r\n')[0] ?? ''
    // Only CONNECT is supported; anything else (GET, random bytes) is refused.
    const m = firstLine.match(/^CONNECT\s+(\S+)\s+HTTP\/1\.[01]$/i)
    if (!m) {
      sock.write('HTTP/1.1 405 Method Not Allowed\r\n\r\n')
      sock.end()
      return
    }
    // Stash any bytes that arrived after the CONNECT header so
    // openTunnel can flush them once the WS is open.
    const trailing = st.connectBuf.subarray(headerEnd + 4)
    if (trailing.length > 0) {
      st.pending.push(Buffer.from(trailing))
    }
    st.connectBuf = Buffer.alloc(0)
    openTunnel(sock, st, firstLine, wsUrl, authHeader, wsAuthHeader)
    return
  }
  // Phase 2: WS exists. If it isn't OPEN yet, buffer; ws.onopen will
  // flush. Once open, pump client bytes to WS in chunks.
  if (!st.wsOpen) {
    st.pending.push(Buffer.from(data))
    return
  }
  forwardToWs(st.ws, data)
}
|
|
343
|
+
|
|
344
|
+
/**
 * Open the WebSocket tunnel for one CONNECT request and wire its lifecycle
 * to the client socket: forward the CONNECT line + Proxy-Authorization as
 * the first chunk, flush buffered client bytes on open, pipe server chunks
 * back, and tear down both ends on error/close.
 *
 * @param connectLine - The validated "CONNECT host:port HTTP/1.x" request line.
 */
function openTunnel(
  sock: ClientSocket,
  st: ConnState,
  connectLine: string,
  wsUrl: string,
  authHeader: string,
  wsAuthHeader: string,
): void {
  // core/websocket/stream.go picks JSON vs binary-proto from the upgrade
  // request's Content-Type header (defaults to JSON). Without application/proto
  // the server protojson.Unmarshals our hand-encoded binary chunks and fails
  // silently with EOF.
  const headers = {
    'Content-Type': 'application/proto',
    Authorization: wsAuthHeader,
  }
  let ws: WebSocketLike
  if (nodeWSCtor) {
    // Node path: ws package with an explicit proxy agent (see nodeWSCtor note).
    ws = new nodeWSCtor(wsUrl, {
      headers,
      agent: getWebSocketProxyAgent(wsUrl),
      ...getWebSocketTLSOptions(),
    }) as unknown as WebSocketLike
  } else {
    // Bun path: native WebSocket takes the proxy URL directly.
    ws = new globalThis.WebSocket(wsUrl, {
      // @ts-expect-error — Bun extension; not in lib.dom WebSocket types
      headers,
      proxy: getWebSocketProxyUrl(wsUrl),
      tls: getWebSocketTLSOptions() || undefined,
    })
  }
  ws.binaryType = 'arraybuffer'
  st.ws = ws

  ws.onopen = () => {
    // First chunk carries the CONNECT line plus Proxy-Authorization so the
    // server can auth the tunnel and know the target host:port. Server
    // responds with its own "HTTP/1.1 200" over the tunnel; we just pipe it.
    const head =
      `${connectLine}\r\n` + `Proxy-Authorization: ${authHeader}\r\n` + `\r\n`
    ws.send(encodeChunk(Buffer.from(head, 'utf8')))
    // Flush anything that arrived while the WS handshake was in flight —
    // trailing bytes from the CONNECT packet and any data() callbacks that
    // fired before onopen.
    st.wsOpen = true
    for (const buf of st.pending) {
      forwardToWs(ws, buf)
    }
    st.pending = []
    // Not all WS implementations expose ping(); empty chunk works as an
    // application-level keepalive the server can ignore.
    st.pinger = setInterval(sendKeepalive, PING_INTERVAL_MS, ws)
  }

  ws.onmessage = ev => {
    // Normalize to Uint8Array: ArrayBuffer (binaryType above) or whatever
    // Buffer.from can consume.
    const raw =
      ev.data instanceof ArrayBuffer
        ? new Uint8Array(ev.data)
        : new Uint8Array(Buffer.from(ev.data))
    const payload = decodeChunk(raw)
    if (payload && payload.length > 0) {
      // First real payload is the server's "HTTP/1.1 200" — from here on the
      // stream is opaque tunnel bytes; see ConnState.established.
      st.established = true
      sock.write(payload)
    }
  }

  ws.onerror = ev => {
    const msg = 'message' in ev ? String(ev.message) : 'websocket error'
    logForDebugging(`[upstreamproxy] ws error: ${msg}`)
    // onerror is always followed by onclose; first caller wins (see ConnState).
    if (st.closed) return
    st.closed = true
    // Only answer in plaintext while the tunnel isn't carrying TLS yet.
    if (!st.established) {
      sock.write('HTTP/1.1 502 Bad Gateway\r\n\r\n')
    }
    sock.end()
    cleanupConn(st)
  }

  ws.onclose = () => {
    if (st.closed) return
    st.closed = true
    sock.end()
    cleanupConn(st)
  }
}
|
|
429
|
+
|
|
430
|
+
function sendKeepalive(ws: WebSocketLike): void {
|
|
431
|
+
if (ws.readyState === WebSocket.OPEN) {
|
|
432
|
+
ws.send(encodeChunk(new Uint8Array(0)))
|
|
433
|
+
}
|
|
434
|
+
}
|
|
435
|
+
|
|
436
|
+
function forwardToWs(ws: WebSocketLike, data: Buffer): void {
|
|
437
|
+
if (ws.readyState !== WebSocket.OPEN) return
|
|
438
|
+
for (let off = 0; off < data.length; off += MAX_CHUNK_BYTES) {
|
|
439
|
+
const slice = data.subarray(off, off + MAX_CHUNK_BYTES)
|
|
440
|
+
ws.send(encodeChunk(slice))
|
|
441
|
+
}
|
|
442
|
+
}
|
|
443
|
+
|
|
444
|
+
function cleanupConn(st: ConnState | undefined): void {
|
|
445
|
+
if (!st) return
|
|
446
|
+
if (st.pinger) clearInterval(st.pinger)
|
|
447
|
+
if (st.ws && st.ws.readyState <= WebSocket.OPEN) {
|
|
448
|
+
try {
|
|
449
|
+
st.ws.close()
|
|
450
|
+
} catch {
|
|
451
|
+
// already closing
|
|
452
|
+
}
|
|
453
|
+
}
|
|
454
|
+
st.ws = undefined
|
|
455
|
+
}
|