@caretive/caret-cli 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.npmrc.tmp +2 -0
- package/README.md +72 -0
- package/cmd/cline/main.go +348 -0
- package/cmd/cline-host/main.go +71 -0
- package/e2e/default_update_test.go +154 -0
- package/e2e/helpers_test.go +378 -0
- package/e2e/main_test.go +47 -0
- package/e2e/mixed_stress_test.go +120 -0
- package/e2e/sqlite_helper.go +161 -0
- package/e2e/start_list_test.go +178 -0
- package/go.mod +64 -0
- package/go.sum +162 -0
- package/man/cline.1 +331 -0
- package/man/cline.1.md +332 -0
- package/package.json +54 -0
- package/pkg/cli/auth/auth_cline_provider.go +285 -0
- package/pkg/cli/auth/auth_menu.go +323 -0
- package/pkg/cli/auth/auth_subscription.go +130 -0
- package/pkg/cli/auth/byo_quick_setup.go +247 -0
- package/pkg/cli/auth/models_cline.go +141 -0
- package/pkg/cli/auth/models_list_fetch.go +156 -0
- package/pkg/cli/auth/models_list_static.go +69 -0
- package/pkg/cli/auth/providers_byo.go +184 -0
- package/pkg/cli/auth/providers_list.go +517 -0
- package/pkg/cli/auth/update_api_configurations.go +647 -0
- package/pkg/cli/auth/wizard_byo.go +764 -0
- package/pkg/cli/auth/wizard_byo_bedrock.go +193 -0
- package/pkg/cli/auth/wizard_byo_oca.go +366 -0
- package/pkg/cli/auth.go +43 -0
- package/pkg/cli/clerror/cline_error.go +187 -0
- package/pkg/cli/config/manager.go +208 -0
- package/pkg/cli/config/settings_renderer.go +198 -0
- package/pkg/cli/config.go +152 -0
- package/pkg/cli/display/ansi.go +27 -0
- package/pkg/cli/display/banner.go +211 -0
- package/pkg/cli/display/deduplicator.go +95 -0
- package/pkg/cli/display/markdown_renderer.go +139 -0
- package/pkg/cli/display/renderer.go +304 -0
- package/pkg/cli/display/segment_streamer.go +212 -0
- package/pkg/cli/display/streaming.go +134 -0
- package/pkg/cli/display/system_renderer.go +269 -0
- package/pkg/cli/display/tool_renderer.go +455 -0
- package/pkg/cli/display/tool_result_parser.go +371 -0
- package/pkg/cli/display/typewriter.go +210 -0
- package/pkg/cli/doctor.go +65 -0
- package/pkg/cli/global/cline-clients.go +501 -0
- package/pkg/cli/global/global.go +113 -0
- package/pkg/cli/global/registry.go +304 -0
- package/pkg/cli/handlers/ask_handlers.go +339 -0
- package/pkg/cli/handlers/handler.go +130 -0
- package/pkg/cli/handlers/say_handlers.go +521 -0
- package/pkg/cli/instances.go +506 -0
- package/pkg/cli/logs.go +382 -0
- package/pkg/cli/output/coordinator.go +167 -0
- package/pkg/cli/output/input_model.go +497 -0
- package/pkg/cli/sqlite/locks.go +366 -0
- package/pkg/cli/task/history_handler.go +72 -0
- package/pkg/cli/task/input_handler.go +577 -0
- package/pkg/cli/task/manager.go +1283 -0
- package/pkg/cli/task/settings_parser.go +754 -0
- package/pkg/cli/task/stream_coordinator.go +60 -0
- package/pkg/cli/task.go +675 -0
- package/pkg/cli/terminal/keyboard.go +695 -0
- package/pkg/cli/tui/HELP_WANTED.md +1 -0
- package/pkg/cli/types/history.go +17 -0
- package/pkg/cli/types/messages.go +329 -0
- package/pkg/cli/types/state.go +59 -0
- package/pkg/cli/updater/updater.go +409 -0
- package/pkg/cli/version.go +43 -0
- package/pkg/common/constants.go +6 -0
- package/pkg/common/schema.go +54 -0
- package/pkg/common/types.go +54 -0
- package/pkg/common/utils.go +185 -0
- package/pkg/generated/field_overrides.go +39 -0
- package/pkg/generated/providers.go +1584 -0
- package/pkg/hostbridge/diff.go +351 -0
- package/pkg/hostbridge/disabled/watch.go +39 -0
- package/pkg/hostbridge/disabled/window.go +63 -0
- package/pkg/hostbridge/disabled/workspace.go +66 -0
- package/pkg/hostbridge/env.go +166 -0
- package/pkg/hostbridge/grpc_server.go +113 -0
- package/pkg/hostbridge/simple.go +43 -0
- package/pkg/hostbridge/simple_workspace.go +85 -0
- package/pkg/hostbridge/window.go +129 -0
- package/scripts/publish-caret-cli.sh +39 -0
package/e2e/helpers_test.go
ADDED
@@ -0,0 +1,378 @@
package e2e

import (
	"context"
	"fmt"
	"net"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"
	"testing"
	"time"

	"github.com/cline/cli/pkg/cli/global"
	"github.com/cline/cli/pkg/common"
	"github.com/cline/grpc-go/cline"
)

const (
	defaultTimeout  = 30 * time.Second
	longTimeout     = 60 * time.Second
	pollInterval    = 250 * time.Millisecond
	instancesBinRel = "../bin/cline"
)

func repoAwareBinPath(t *testing.T) string {
	// Tests live in repoRoot/cli/e2e. Binary is at repoRoot/cli/bin/cline
	t.Helper()
	wd, err := os.Getwd()
	if err != nil {
		t.Fatalf("Getwd error: %v", err)
	}
	// cli/e2e -> cli/bin/cline
	p := filepath.Clean(filepath.Join(wd, instancesBinRel))
	if _, err := os.Stat(p); err != nil {
		t.Fatalf("CLI binary not found at %s; run `npm run compile-cli` first: %v", p, err)
	}
	return p
}

func setTempClineDir(t *testing.T) string {
	t.Helper()
	dir := t.TempDir()
	clineDir := filepath.Join(dir, ".cline")
	if err := os.MkdirAll(clineDir, 0o755); err != nil {
		t.Fatalf("mkdir clineDir: %v", err)
	}
	t.Setenv("CLINE_DIR", clineDir)
	return clineDir
}

func runCLI(ctx context.Context, t *testing.T, args ...string) (string, string, int) {
	t.Helper()
	bin := repoAwareBinPath(t)

	// Ensure CLI uses the same CLINE_DIR as the tests by passing --config=<CLINE_DIR>
	// (InitializeGlobalConfig uses ConfigPath as the base directory for registry.)
	if clineDir := os.Getenv("CLINE_DIR"); clineDir != "" && !contains(args, "--config") {
		// Prepend persistent flag so Cobra sees it regardless of subcommand position
		args = append([]string{"--config", clineDir}, args...)
	}

	cmd := exec.CommandContext(ctx, bin, args...)
	// Run CLI from repo root so relative paths inside CLI (./cli/bin/...) resolve
	if wd, err := os.Getwd(); err == nil {
		repoRoot := filepath.Clean(filepath.Join(wd, "..", ".."))
		cmd.Dir = repoRoot
	}
	// propagate env including CLINE_DIR
	cmd.Env = os.Environ()
	outB, errB := &strings.Builder{}, &strings.Builder{}
	cmd.Stdout = outB
	cmd.Stderr = errB
	err := cmd.Run()
	exit := 0
	if err != nil {
		// Extract exit code if possible
		if ee, ok := err.(*exec.ExitError); ok {
			exit = ee.ExitCode()
		} else {
			exit = -1
		}
	}
	return outB.String(), errB.String(), exit
}

func mustRunCLI(ctx context.Context, t *testing.T, args ...string) string {
	t.Helper()
	out, errOut, exit := runCLI(ctx, t, args...)
	if exit != 0 {
		t.Fatalf("cline %v failed (exit=%d)\nstdout:\n%s\nstderr:\n%s", args, exit, out, errOut)
	}
	return out
}

func listInstancesJSON(ctx context.Context, t *testing.T) common.InstancesOutput {
	t.Helper()
	// Trigger CLI to perform cleanup/health by invoking list (table output is ignored)
	_ = mustRunCLI(ctx, t, "instance", "list")

	// Read from SQLite locks database to build structured output
	clineDir := getClineDir(t)

	// Load default instance from settings file
	defaultInstance := readDefaultInstanceFromSettings(t, clineDir)

	// Load instances from SQLite
	instances := readInstancesFromSQLite(t, clineDir)

	return common.InstancesOutput{
		DefaultInstance: defaultInstance,
		CoreInstances:   instances,
	}
}

func hasAddress(in common.InstancesOutput, addr string) bool {
	for _, it := range in.CoreInstances {
		if it.Address == addr {
			return true
		}
	}
	return false
}

func getByAddress(in common.InstancesOutput, addr string) (common.CoreInstanceInfo, bool) {
	for _, it := range in.CoreInstances {
		if it.Address == addr {
			return it, true
		}
	}
	return common.CoreInstanceInfo{}, false
}

func waitFor(t *testing.T, timeout time.Duration, cond func() (bool, string)) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for {
		ok, msg := cond()
		if ok {
			return
		}
		if time.Now().After(deadline) {
			t.Fatalf("waitFor timeout: %s", msg)
		}
		time.Sleep(pollInterval)
	}
}

func waitForAddressHealthy(t *testing.T, addr string, timeout time.Duration) {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	t.Logf("Waiting for gRPC health check on %s...", addr)

	waitFor(t, timeout, func() (bool, string) {
		if common.IsInstanceHealthy(ctx, addr) {
			return true, ""
		}
		return false, fmt.Sprintf("gRPC health check failed for %s", addr)
	})

	t.Logf("gRPC health check passed for %s", addr)
}

func waitForAddressRemoved(t *testing.T, addr string, timeout time.Duration) {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	waitFor(t, timeout, func() (bool, string) {
		out := listInstancesJSON(ctx, t)
		if hasAddress(out, addr) {
			return false, fmt.Sprintf("address %s still present", addr)
		}
		return true, ""
	})
}

func findFreePort(t *testing.T) int {
	t.Helper()
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("listen 127.0.0.1:0: %v", err)
	}
	defer l.Close()
	_, portStr, _ := net.SplitHostPort(l.Addr().String())
	var port int
	fmt.Sscanf(portStr, "%d", &port)
	return port
}

func getClineDir(t *testing.T) string {
	t.Helper()
	clineDir := os.Getenv("CLINE_DIR")
	if clineDir == "" {
		t.Fatalf("CLINE_DIR not set")
	}
	return clineDir
}

// isPortInUse checks if a port is currently in use by any process
func isPortInUse(port int) bool {
	conn, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		return true // Port is in use
	}
	conn.Close()
	return false // Port is free
}

// waitForPortClosed waits for a port to become free (no process listening)
func waitForPortClosed(t *testing.T, port int, timeout time.Duration) {
	t.Helper()
	waitFor(t, timeout, func() (bool, string) {
		if isPortInUse(port) {
			return false, fmt.Sprintf("port %d still in use", port)
		}
		return true, ""
	})
}

// waitForPortsClosed waits for both core and host ports to become free
func waitForPortsClosed(t *testing.T, corePort, hostPort int, timeout time.Duration) {
	t.Helper()
	waitFor(t, timeout, func() (bool, string) {
		if isPortInUse(corePort) {
			return false, fmt.Sprintf("core port %d still in use", corePort)
		}
		if isPortInUse(hostPort) {
			return false, fmt.Sprintf("host port %d still in use", hostPort)
		}
		return true, ""
	})
}

// findAndKillHostProcess finds and kills any process listening on the host port
// This is used to clean up dangling host processes after SIGKILL tests
func findAndKillHostProcess(t *testing.T, hostPort int) {
	t.Helper()
	// Use lsof to find process listening on the host port
	cmd := exec.Command("lsof", "-ti", fmt.Sprintf(":%d", hostPort))
	output, err := cmd.Output()
	if err != nil {
		// No process found on port - that's fine
		return
	}

	pidStr := strings.TrimSpace(string(output))
	if pidStr == "" {
		return
	}

	var pid int
	if _, err := fmt.Sscanf(pidStr, "%d", &pid); err != nil {
		t.Logf("Warning: could not parse PID from lsof output: %s", pidStr)
		return
	}

	if pid > 0 {
		t.Logf("Cleaning up dangling host process PID %d on port %d", pid, hostPort)
		if err := syscall.Kill(pid, syscall.SIGKILL); err != nil {
			t.Logf("Warning: failed to kill dangling host process %d: %v", pid, err)
		}
	}
}

// getPIDByPort returns the PID of the process listening on the specified port (fallback method)
func getPIDByPort(t *testing.T, port int) int {
	t.Helper()
	cmd := exec.Command("lsof", "-ti", fmt.Sprintf(":%d", port))
	output, err := cmd.Output()
	if err != nil {
		return 0 // Process not found
	}

	pidStr := strings.TrimSpace(string(output))
	if pidStr == "" {
		return 0
	}

	pid, err := strconv.Atoi(pidStr)
	if err != nil {
		t.Logf("Warning: could not parse PID from lsof output: %s", pidStr)
		return 0
	}

	return pid
}

// getCorePIDViaRPC returns the PID of the cline-core process using RPC (preferred method)
func getCorePIDViaRPC(t *testing.T, address string) int {
	t.Helper()

	// Initialize global config to access registry
	clineDir := os.Getenv("CLINE_DIR")
	if clineDir == "" {
		t.Logf("Warning: CLINE_DIR not set, falling back to lsof")
		return getCorePIDViaLsof(t, address)
	}

	cfg := &global.GlobalConfig{
		ConfigPath: clineDir,
	}

	if err := global.InitializeGlobalConfig(cfg); err != nil {
		t.Logf("Warning: failed to initialize global config, falling back to lsof: %v", err)
		return getCorePIDViaLsof(t, address)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Get client for the address
	client, err := global.Clients.GetRegistry().GetClient(ctx, address)
	if err != nil {
		t.Logf("Warning: failed to get client for %s, falling back to lsof: %v", address, err)
		return getCorePIDViaLsof(t, address)
	}

	// Call GetProcessInfo RPC
	processInfo, err := client.State.GetProcessInfo(ctx, &cline.EmptyRequest{})
	if err != nil {
		t.Logf("Warning: GetProcessInfo RPC failed for %s, falling back to lsof: %v", address, err)
		return getCorePIDViaLsof(t, address)
	}

	return int(processInfo.ProcessId)
}

// getCorePIDViaLsof returns the PID using lsof (fallback method)
func getCorePIDViaLsof(t *testing.T, address string) int {
	t.Helper()
	_, portStr, err := net.SplitHostPort(address)
	if err != nil {
		t.Logf("Warning: invalid address format %s", address)
		return 0
	}

	port, err := strconv.Atoi(portStr)
	if err != nil {
		t.Logf("Warning: invalid port in address %s", address)
		return 0
	}

	return getPIDByPort(t, port)
}

// getCorePID returns the PID of the cline-core process for the given address
// Uses RPC first, falls back to lsof if RPC fails
func getCorePID(t *testing.T, address string) int {
	t.Helper()

	// Try RPC first (preferred method)
	if pid := getCorePIDViaRPC(t, address); pid > 0 {
		return pid
	}

	// Fall back to lsof if RPC fails
	return getCorePIDViaLsof(t, address)
}

// getHostPID returns the PID of the cline-host process for the given host port
func getHostPID(t *testing.T, hostPort int) int {
	t.Helper()
	return getPIDByPort(t, hostPort)
}

// contains reports whether slice has the target string.
func contains(slice []string, target string) bool {
	for _, s := range slice {
		if s == target {
			return true
		}
	}
	return false
}
package/e2e/main_test.go
ADDED
@@ -0,0 +1,47 @@
package e2e

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"testing"
)

// TestMain validates required artifacts exist before running E2E tests.
// It does NOT build artifacts. Build manually via:
//
//	npm run compile-standalone
//	npm run compile-cli
func TestMain(m *testing.M) {
	// Determine repo root from cli/e2e
	wd, err := os.Getwd()
	if err != nil {
		fmt.Fprintf(os.Stderr, "getwd: %v\n", err)
		os.Exit(2)
	}
	repoRoot := filepath.Clean(filepath.Join(wd, "..", ".."))

	cliBin := filepath.Join(repoRoot, "cli", "bin", "cline")
	coreJS := filepath.Join(repoRoot, "dist-standalone", "cline-core.js")

	missing := []string{}
	if _, err := os.Stat(cliBin); err != nil {
		missing = append(missing, cliBin)
	}
	if _, err := os.Stat(coreJS); err != nil {
		missing = append(missing, coreJS)
	}

	if len(missing) > 0 {
		if testing.Short() {
			// Optional quality-of-life: allow skipping with -short when artifacts are absent
			fmt.Fprintf(os.Stderr, "[e2e] skipping (-short) due to missing artifacts:\n %s\n", strings.Join(missing, "\n "))
			os.Exit(0)
		}
		fmt.Fprintf(os.Stderr, "Missing required build artifacts for E2E tests:\n %s\n\nPlease build them first:\n npm run compile-standalone\n npm run compile-cli\n", strings.Join(missing, "\n "))
		os.Exit(2)
	}

	os.Exit(m.Run())
}
package/e2e/mixed_stress_test.go
ADDED
@@ -0,0 +1,120 @@
package e2e

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"syscall"
	"testing"

	"github.com/cline/cli/pkg/common"
)

// 9. Mixed localhost vs 127.0.0.1 addresses coexist and are both healthy
func TestMixedLocalhostVs127Coexist(t *testing.T) {
	clineDir := setTempClineDir(t)
	ctx, cancel := context.WithTimeout(context.Background(), longTimeout)
	defer cancel()

	// Start one instance
	_ = mustRunCLI(ctx, t, "instance", "new")

	// Get the running instance and its port/PID
	out := listInstancesJSON(ctx, t)
	if len(out.CoreInstances) == 0 {
		t.Fatalf("expected at least 1 instance")
	}
	inst := out.CoreInstances[0]
	waitForAddressHealthy(t, inst.Address, defaultTimeout)

	// Manually add a SQLite entry for the same port but 127.0.0.1 host
	addr127 := fmt.Sprintf("127.0.0.1:%d", inst.CorePort())
	dbPath := filepath.Join(clineDir, common.SETTINGS_SUBFOLDER, "locks.db")

	if err := insertRemoteInstanceIntoSQLite(t, dbPath, addr127, inst.CorePort(), inst.HostPort()); err != nil {
		t.Fatalf("insert 127 alias entry: %v", err)
	}

	// Verify both addresses appear and are healthy
	waitForAddressHealthy(t, inst.Address, defaultTimeout)
	waitForAddressHealthy(t, addr127, defaultTimeout)

	out = listInstancesJSON(ctx, t)
	if !hasAddress(out, inst.Address) || !hasAddress(out, addr127) {
		t.Fatalf("expected both %s and %s present", inst.Address, addr127)
	}
}

// 10. Start-stop stress: loop starting then killing instances; ensure no leftovers
func TestStartStopStress(t *testing.T) {
	_ = setTempClineDir(t)

	for i := 0; i < 3; i++ { // keep small for CI time
		ctx, cancel := context.WithTimeout(context.Background(), longTimeout)
		defer cancel()

		// Snapshot current addresses
		before := listInstancesJSON(ctx, t)
		beforeSet := map[string]struct{}{}
		for _, it := range before.CoreInstances {
			beforeSet[it.Address] = struct{}{}
		}

		// Start a new instance
		_ = mustRunCLI(ctx, t, "instance", "new")

		// Find the new instance address
		var newAddr string
		waitFor(t, defaultTimeout, func() (bool, string) {
			after := listInstancesJSON(ctx, t)
			for _, it := range after.CoreInstances {
				if _, ok := beforeSet[it.Address]; !ok {
					newAddr = it.Address
					return true, ""
				}
			}
			return false, "new instance address not detected yet"
		})

		// Wait healthy
		waitForAddressHealthy(t, newAddr, defaultTimeout)

		// Get PID using runtime discovery and kill it
		after := listInstancesJSON(ctx, t)
		info, ok := getByAddress(after, newAddr)
		if !ok {
			t.Fatalf("new instance %s missing", newAddr)
		}

		// Get PID using runtime discovery
		corePID := getCorePID(t, info.Address)
		if corePID <= 0 {
			t.Fatalf("could not find PID for new instance at %s", info.Address)
		}

		t.Logf("Killing new instance %s (PID %d) for iteration %d", info.Address, corePID, i)
		if err := syscall.Kill(corePID, syscall.SIGKILL); err != nil {
			t.Fatalf("kill pid %d: %v", corePID, err)
		}

		// Wait removed from SQLite database
		waitForAddressRemoved(t, newAddr, longTimeout)

		// Verify instance is removed from SQLite database
		clineDir := os.Getenv("CLINE_DIR")
		if clineDir != "" {
			dbPath := filepath.Join(clineDir, common.SETTINGS_SUBFOLDER, "locks.db")
			if verifyInstanceExistsInSQLite(t, dbPath, newAddr) {
				t.Fatalf("expected instance removed from SQLite database: %s", newAddr)
			}
		}

		// Clean up dangling host process (SIGKILL leaves these behind by design)
		t.Logf("Cleaning up dangling host process on port %d for iteration %d", info.HostPort(), i)
		findAndKillHostProcess(t, info.HostPort())

		// Verify both ports are now free
		waitForPortsClosed(t, info.CorePort(), info.HostPort(), defaultTimeout)
	}
}
package/e2e/sqlite_helper.go
ADDED
@@ -0,0 +1,161 @@
package e2e

import (
	"database/sql"
	"encoding/json"
	"os"
	"path/filepath"
	"strconv"
	"testing"
	"time"

	"github.com/cline/cli/pkg/common"
	_ "github.com/glebarez/go-sqlite"
	"google.golang.org/grpc/health/grpc_health_v1"
)

// readInstancesFromSQLite reads instances directly from the SQLite database for testing
func readInstancesFromSQLite(t *testing.T, clineDir string) []common.CoreInstanceInfo {
	t.Helper()

	dbPath := filepath.Join(clineDir, common.SETTINGS_SUBFOLDER, "locks.db")

	// Check if database exists
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		return []common.CoreInstanceInfo{}
	}

	db, err := sql.Open("sqlite", dbPath)
	if err != nil {
		t.Logf("Warning: Failed to open SQLite database: %v", err)
		return []common.CoreInstanceInfo{}
	}
	defer db.Close()

	// Query instance locks
	query := common.SelectInstanceLockHoldersAscSQL

	rows, err := db.Query(query)
	if err != nil {
		t.Logf("Warning: Failed to query instance locks: %v", err)
		return []common.CoreInstanceInfo{}
	}
	defer rows.Close()

	var instances []common.CoreInstanceInfo
	for rows.Next() {
		var heldBy, lockTarget string
		var lockedAt int64

		err := rows.Scan(&heldBy, &lockTarget, &lockedAt)
		if err != nil {
			t.Logf("Warning: Failed to scan lock row: %v", err)
			continue
		}

		// Create InstanceInfo
		info := common.CoreInstanceInfo{
			Address:            heldBy,
			HostServiceAddress: lockTarget,
			Status:             grpc_health_v1.HealthCheckResponse_UNKNOWN, // Will be updated by health check
			LastSeen:           time.Unix(lockedAt/1000, 0),                // Convert from milliseconds
		}

		instances = append(instances, info)
	}

	return instances
}

// readDefaultInstanceFromSettings reads the default instance from the settings file
func readDefaultInstanceFromSettings(t *testing.T, clineDir string) string {
	t.Helper()

	settingsPath := filepath.Join(clineDir, common.SETTINGS_SUBFOLDER, "settings", "cli-default-instance.json")

	data, err := os.ReadFile(settingsPath)
	if err != nil {
		if os.IsNotExist(err) {
			return ""
		}
		t.Logf("Warning: Failed to read default instance file: %v", err)
		return ""
	}

	var tmp struct {
		DefaultInstance string `json:"default_instance"`
	}
	if err := json.Unmarshal(data, &tmp); err != nil {
		t.Logf("Warning: Failed to parse default instance file: %v", err)
		return ""
	}

	return tmp.DefaultInstance
}

// insertRemoteInstanceIntoSQLite inserts a remote instance entry directly into SQLite for testing
func insertRemoteInstanceIntoSQLite(t *testing.T, dbPath, address string, corePort, hostPort int) error {
	t.Helper()

	db, err := sql.Open("sqlite", dbPath)
	if err != nil {
		return err
	}
	defer db.Close()

	// Initialize database schema for testing
	createTableSQL := `
	CREATE TABLE IF NOT EXISTS locks (
		id INTEGER PRIMARY KEY,
		held_by TEXT NOT NULL,
		lock_type TEXT NOT NULL CHECK (lock_type IN ('file', 'instance', 'folder')),
		lock_target TEXT NOT NULL,
		locked_at INTEGER NOT NULL,
		UNIQUE(lock_type, lock_target)
	);
	`
	createIndexesSQL := `
	CREATE INDEX IF NOT EXISTS idx_locks_held_by ON locks(held_by);
	CREATE INDEX IF NOT EXISTS idx_locks_type ON locks(lock_type);
	CREATE INDEX IF NOT EXISTS idx_locks_target ON locks(lock_target);
	`

	if _, err := db.Exec(createTableSQL); err != nil {
		return err
	}
	if _, err := db.Exec(createIndexesSQL); err != nil {
		return err
	}

	// Insert the remote instance
	hostAddress := "remote.example.com:0"
	if hostPort != 0 {
		hostAddress = "remote.example.com:" + strconv.Itoa(hostPort)
	}

	insertSQL := `INSERT INTO locks (held_by, lock_type, lock_target, locked_at) VALUES (?, 'instance', ?, ?)`
	_, err = db.Exec(insertSQL, address, hostAddress, time.Now().Unix()*1000)
	return err
}

// verifyInstanceExistsInSQLite checks if an instance exists in the SQLite database
func verifyInstanceExistsInSQLite(t *testing.T, dbPath, address string) bool {
	t.Helper()

	db, err := sql.Open("sqlite", dbPath)
	if err != nil {
		t.Logf("Failed to open database: %v", err)
		return false
	}
	defer db.Close()

	query := `SELECT COUNT(*) FROM locks WHERE held_by = ? AND lock_type = 'instance'`
	var count int
	err = db.QueryRow(query, address).Scan(&count)
	if err != nil {
		t.Logf("Failed to query database: %v", err)
		return false
	}

	return count > 0
}
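
One convention worth noting across these helpers: the locked_at column stores milliseconds since the Unix epoch, so insertRemoteInstanceIntoSQLite writes time.Now().Unix()*1000 and readInstancesFromSQLite converts back with time.Unix(lockedAt/1000, 0). A standalone sketch of that round trip (illustrative only, not part of the package):

// locked_at round trip — illustrative sketch of the milliseconds convention above.
package main

import (
	"fmt"
	"time"
)

func main() {
	lockedAt := time.Now().Unix() * 1000    // stored value: ms since epoch (second precision)
	lastSeen := time.Unix(lockedAt/1000, 0) // value reconstructed when reading a lock row
	fmt.Println(lockedAt, lastSeen)
}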