tinybird 0.0.1.dev306__py3-none-any.whl → 1.0.5__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- tinybird/datafile/common.py +4 -1
- tinybird/feedback_manager.py +3 -0
- tinybird/service_datasources.py +57 -8
- tinybird/sql_template.py +1 -1
- tinybird/sql_template_fmt.py +14 -4
- tinybird/tb/__cli__.py +2 -2
- tinybird/tb/cli.py +1 -0
- tinybird/tb/client.py +104 -22
- tinybird/tb/modules/agent/tools/execute_query.py +1 -1
- tinybird/tb/modules/agent/tools/request_endpoint.py +1 -1
- tinybird/tb/modules/branch.py +150 -0
- tinybird/tb/modules/build.py +51 -10
- tinybird/tb/modules/build_common.py +4 -2
- tinybird/tb/modules/cli.py +32 -10
- tinybird/tb/modules/common.py +161 -134
- tinybird/tb/modules/connection.py +125 -194
- tinybird/tb/modules/connection_kafka.py +382 -0
- tinybird/tb/modules/copy.py +3 -1
- tinybird/tb/modules/create.py +11 -0
- tinybird/tb/modules/datafile/build.py +1 -1
- tinybird/tb/modules/datafile/format_pipe.py +44 -5
- tinybird/tb/modules/datafile/playground.py +1 -1
- tinybird/tb/modules/datasource.py +475 -324
- tinybird/tb/modules/deployment.py +2 -0
- tinybird/tb/modules/deployment_common.py +81 -43
- tinybird/tb/modules/deprecations.py +4 -4
- tinybird/tb/modules/dev_server.py +33 -12
- tinybird/tb/modules/info.py +50 -7
- tinybird/tb/modules/job_common.py +15 -0
- tinybird/tb/modules/local.py +91 -21
- tinybird/tb/modules/local_common.py +320 -13
- tinybird/tb/modules/local_logs.py +209 -0
- tinybird/tb/modules/login.py +3 -2
- tinybird/tb/modules/login_common.py +252 -9
- tinybird/tb/modules/open.py +10 -5
- tinybird/tb/modules/project.py +14 -5
- tinybird/tb/modules/shell.py +14 -6
- tinybird/tb/modules/sink.py +3 -1
- tinybird/tb/modules/telemetry.py +7 -3
- tinybird/tb_cli_modules/telemetry.py +1 -1
- {tinybird-0.0.1.dev306.dist-info → tinybird-1.0.5.dist-info}/METADATA +29 -4
- {tinybird-0.0.1.dev306.dist-info → tinybird-1.0.5.dist-info}/RECORD +45 -41
- {tinybird-0.0.1.dev306.dist-info → tinybird-1.0.5.dist-info}/WHEEL +1 -1
- {tinybird-0.0.1.dev306.dist-info → tinybird-1.0.5.dist-info}/entry_points.txt +0 -0
- {tinybird-0.0.1.dev306.dist-info → tinybird-1.0.5.dist-info}/top_level.txt +0 -0
tinybird/tb/modules/local_common.py
CHANGED

@@ -4,6 +4,7 @@ import logging
 import os
 import re
 import subprocess
+import threading
 import time
 import uuid
 from typing import Any, Dict, Optional
@@ -19,6 +20,15 @@ from tinybird.tb.client import AuthNoTokenException, TinyB
 from tinybird.tb.modules.config import CLIConfig
 from tinybird.tb.modules.exceptions import CLILocalException
 from tinybird.tb.modules.feedback_manager import FeedbackManager
+from tinybird.tb.modules.local_logs import (
+    check_memory_sufficient,
+    clickhouse_is_ready,
+    container_stats,
+    events_is_ready,
+    local_authentication_is_ready,
+    redis_is_ready,
+    server_is_ready,
+)
 from tinybird.tb.modules.secret_common import load_secrets
 from tinybird.tb.modules.telemetry import add_telemetry_event

@@ -35,11 +45,18 @@ def get_tinybird_local_client(
     config_obj: Dict[str, Any], test: bool = False, staging: bool = False, silent: bool = False
 ) -> TinyB:
     """Get a Tinybird client connected to the local environment."""
-
-
-
-
-
+    try:
+        config = get_tinybird_local_config(config_obj, test=test, silent=silent)
+        client = config.get_client(host=TB_LOCAL_ADDRESS, staging=staging)
+        load_secrets(config_obj.get("path", ""), client)
+        return client
+    # if some of the API calls to tinybird local fail due to a JSONDecodeError, it means that container is running but it's unhealthy
+    except json.JSONDecodeError:
+        raise CLILocalException(
+            message=FeedbackManager.error(
+                message="Tinybird Local is running but it's unhealthy. Please check if it's running and try again. If the problem persists, please run `tb local restart` and try again."
+            )
+        )


 def get_tinybird_local_config(config_obj: Dict[str, Any], test: bool = False, silent: bool = False) -> CLIConfig:
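For context on the new error path above: a response body that is not valid JSON is what raises json.JSONDecodeError inside the client calls, and the CLI maps that to "running but unhealthy". A minimal sketch of that failure mode, assuming the default Tinybird Local address http://localhost:7181 and the requests library; the helper below is illustrative and not part of the package:

import json

import requests


def local_is_healthy(base_url: str = "http://localhost:7181") -> bool:
    """Hypothetical probe: an unhealthy container often answers with a non-JSON body."""
    try:
        requests.get(f"{base_url}/tokens", timeout=5).json()
        return True
    except (requests.RequestException, json.JSONDecodeError):
        # requests raises a JSONDecodeError subclass when the body is not JSON,
        # which is the condition the CLI treats as "container up but unhealthy".
        return False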
@@ -140,6 +157,7 @@ def get_local_tokens() -> Dict[str, str]:
         },
     )

+    # TODO: If docker errors persist, explain that you can use custom environments too once they are open for everyone
     if container and container.status == "running":
         if container.health == "healthy":
             raise CLILocalException(
@@ -219,7 +237,7 @@ def get_local_tokens() -> Dict[str, str]:
         default=True,
     )
     if yes:
-        click.echo(FeedbackManager.highlight(message="»
+        click.echo(FeedbackManager.highlight(message="» Watching Tinybird Local... (Press Ctrl+C to stop)"))
         docker_client = get_docker_client()
         start_tinybird_local(docker_client, False)
         click.echo(FeedbackManager.success(message="✓ Tinybird Local is ready!"))
@@ -237,6 +255,7 @@ def start_tinybird_local(
     skip_new_version: bool = True,
     user_token: Optional[str] = None,
     workspace_token: Optional[str] = None,
+    watch: bool = False,
 ) -> None:
     """Start the Tinybird container."""
     pull_show_prompt = False
@@ -299,18 +318,151 @@ def start_tinybird_local(
     )

     click.echo(FeedbackManager.info(message="* Waiting for Tinybird Local to be ready..."))
+
+    if watch:
+        # Stream logs in a separate thread while monitoring container health
+        container_ready = threading.Event()
+        stop_requested = threading.Event()
+        health_check: dict[str, str] = {}
+
+        log_thread = threading.Thread(
+            target=stream_logs_with_health_check,
+            args=(container, container_ready, stop_requested),
+            daemon=True,
+        )
+        log_thread.start()
+
+        health_check_thread = threading.Thread(
+            target=check_endpoints_health,
+            args=(container, docker_client, container_ready, stop_requested, health_check),
+            daemon=True,
+        )
+        health_check_thread.start()
+
+        # Monitor container health in main thread
+        memory_warning_shown = False
+        try:
+            while True:
+                container.reload()  # Refresh container attributes
+                health = container.attrs.get("State", {}).get("Health", {}).get("Status")
+                if not container_ready.is_set():
+                    click.echo(FeedbackManager.info(message=f"* Tinybird Local container status: {health}"))
+                    stats = container_stats(container, docker_client)
+                    click.echo(f"* {stats}")
+
+                    # Check memory sufficiency
+                    if not memory_warning_shown:
+                        is_sufficient, warning_msg = check_memory_sufficient(container, docker_client)
+                        if not is_sufficient and warning_msg:
+                            click.echo(FeedbackManager.warning(message=f"△ {warning_msg}"))
+                            memory_warning_shown = True
+
+                if health == "healthy":
+                    click.echo(FeedbackManager.highlight(message="» Checking services..."))
+                    stats = container_stats(container, docker_client)
+                    click.echo(FeedbackManager.info(message=f"✓ Tinybird Local container ({stats})"))
+
+                    # Check memory sufficiency before checking services
+                    if not memory_warning_shown:
+                        is_sufficient, warning_msg = check_memory_sufficient(container, docker_client)
+                        if not is_sufficient and warning_msg:
+                            click.echo(FeedbackManager.warning(message=f"△ {warning_msg}"))
+                            memory_warning_shown = True
+
+                    if not clickhouse_is_ready(container):
+                        raise Exception("Clickhouse is not ready.")
+                    click.echo(FeedbackManager.info(message="✓ Clickhouse"))
+
+                    if not redis_is_ready(container):
+                        raise Exception("Redis is not ready.")
+                    click.echo(FeedbackManager.info(message="✓ Redis"))
+
+                    if not server_is_ready(container):
+                        raise Exception("Server is not ready.")
+                    click.echo(FeedbackManager.info(message="✓ Server"))
+
+                    if not events_is_ready(container):
+                        raise Exception("Events is not ready.")
+                    click.echo(FeedbackManager.info(message="✓ Events"))
+
+                    if not local_authentication_is_ready(container):
+                        raise Exception("Tinybird Local authentication is not ready.")
+                    click.echo(FeedbackManager.info(message="✓ Tinybird Local authentication"))
+                    container_ready.set()
+                    # Keep monitoring and streaming logs until Ctrl+C or health check failure
+                    while True:
+                        # Check if health check detected an error
+                        if stop_requested.is_set() and health_check.get("error"):
+                            time.sleep(0.5)  # Give log thread time to finish printing
+                            raise CLILocalException(
+                                FeedbackManager.error(
+                                    message=f"{health_check.get('error')}\n"
+                                    "Please run `tb local restart` to restart the container."
+                                )
+                            )
+                            return
+                        time.sleep(1)
+                if health == "unhealthy":
+                    stop_requested.set()
+                    # Check if memory might be the cause of unhealthy status
+                    is_sufficient, warning_msg = check_memory_sufficient(container, docker_client)
+                    error_msg = "Tinybird Local is unhealthy. Try running `tb local restart` in a few seconds."
+                    if not is_sufficient and warning_msg:
+                        error_msg = (
+                            "Tinybird Local is unhealthy.\nnAfter adjusting memory, try running `tb local restart`."
+                        )
+                    raise CLILocalException(FeedbackManager.error(message=error_msg))
+                time.sleep(5)
+        except KeyboardInterrupt:
+            stop_requested.set()
+            click.echo(FeedbackManager.highlight(message="» Stopping Tinybird Local..."))
+            try:
+                container.stop()
+                click.echo(FeedbackManager.success(message="✓ Tinybird Local stopped."))
+            except KeyboardInterrupt:
+                click.echo(FeedbackManager.warning(message="⚠ Forced exit. Container may still be running."))
+                click.echo(FeedbackManager.info(message="  Run `tb local stop` to stop the container manually."))
+            return
+
+    # Non-watch mode: just wait for container to be healthy
+    memory_warning_shown = False
     while True:
         container.reload()  # Refresh container attributes
         health = container.attrs.get("State", {}).get("Health", {}).get("Status")
+        click.echo(FeedbackManager.info(message=f"* Tinybird Local container status: {health}"))
+        stats = container_stats(container, docker_client)
+        click.echo(f"* {stats}")
+
+        # Check memory sufficiency
+        if not memory_warning_shown:
+            is_sufficient, warning_msg = check_memory_sufficient(container, docker_client)
+            if not is_sufficient and warning_msg:
+                click.echo(FeedbackManager.warning(message=f"△ {warning_msg}"))
+                memory_warning_shown = True
+
         if health == "healthy":
+            click.echo(FeedbackManager.highlight(message="» Checking services..."))
+            stats = container_stats(container, docker_client)
+            click.echo(FeedbackManager.info(message=f"✓ Tinybird Local container ({stats})"))
+            if not clickhouse_is_ready(container):
+                raise Exception("Clickhouse is not ready.")
+            click.echo(FeedbackManager.info(message="✓ Clickhouse"))
+            if not redis_is_ready(container):
+                raise Exception("Redis is not ready.")
+            click.echo(FeedbackManager.info(message="✓ Redis"))
+            if not server_is_ready(container):
+                raise Exception("Server is not ready.")
+            click.echo(FeedbackManager.info(message="✓ Server"))
+            if not events_is_ready(container):
+                raise Exception("Events is not ready.")
+            click.echo(FeedbackManager.info(message="✓ Events"))
+            if not local_authentication_is_ready(container):
+                raise Exception("Tinybird Local authentication is not ready.")
+            click.echo(FeedbackManager.info(message="✓ Tinybird Local authentication"))
             break
         if health == "unhealthy":
-
-
-            message="Tinybird Local is unhealthy. Try running `tb local restart` in a few seconds."
-                )
-            )
-
+            error_msg = "Tinybird Local is unhealthy. Try running `tb local restart` in a few seconds."
+            raise CLILocalException(FeedbackManager.error(message=error_msg))
         time.sleep(5)

     # Remove tinybird-local dangling images to avoid running out of disk space
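The watch-mode block above coordinates three parties (the main monitoring loop, the log streamer, and the endpoint health checker) through two threading.Event objects. A stripped-down sketch of that hand-off pattern, independent of Docker and Tinybird:

import threading
import time

ready = threading.Event()   # set by the monitor once the service is healthy
stop = threading.Event()    # set by whoever wants the other threads to wind down


def worker() -> None:
    ready.wait()            # block until the monitor declares readiness
    while not stop.is_set():
        time.sleep(1)       # stand-in for streaming logs / polling endpoints


t = threading.Thread(target=worker, daemon=True)
t.start()
ready.set()                 # readiness reached: workers proceed
time.sleep(2)
stop.set()                  # shutdown requested (e.g. Ctrl+C): workers exit their loops
t.join(timeout=5)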
@@ -429,7 +581,8 @@ def get_docker_client() -> DockerClient:
             message=(
                 f"No container runtime is running. Make sure a Docker-compatible runtime is installed and running. "
                 f"{docker_location_message}\n\n"
-                "If you're using a custom location, please provide it using the DOCKER_HOST environment variable
+                "If you're using a custom location, please provide it using the DOCKER_HOST environment variable.\n\n"
+                "Alternatively, you can use Tinybird branches to develop your project without Docker. Run `tb branch create my_feature_branch` to create one. Learn more at: https://www.tinybird.co/docs/forward/test-and-deploy/branches"
             )
         )
     )
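On the DOCKER_HOST hint added above: the Docker SDK for Python resolves the daemon endpoint from that environment variable, so pointing the CLI at a non-default runtime is a matter of exporting it before running. A minimal sketch; the socket path is an example value, not something the package prescribes:

import os

import docker

# Example only: a Colima-style socket path; substitute your runtime's endpoint.
os.environ["DOCKER_HOST"] = "unix:///Users/me/.colima/default/docker.sock"

client = docker.from_env()  # honors DOCKER_HOST
print(client.ping())        # True if the daemon at that endpoint is reachable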
@@ -474,3 +627,157 @@ def get_use_aws_creds() -> dict[str, str]:
     )

     return credentials
+
+
+SERVICE_COLORS = {
+    "[EVENTS]": "\033[95m",  # Magenta
+    "[SERVER]": "\033[94m",  # Blue
+    "[HEALTH]": "\033[96m",  # Cyan
+    "[KAFKA]": "\033[93m",  # Yellow
+    "[AUTH]": "\033[90m",  # Gray
+}
+
+RESET = "\033[0m"
+
+
+def check_endpoints_health(
+    container: Container,
+    docker_client: DockerClient,
+    container_ready: threading.Event,
+    stop_requested: threading.Event,
+    health_check: dict[str, str],
+) -> None:
+    """Continuously check /tokens and /v0/health endpoints"""
+    # Wait for container to be ready before starting health checks
+    container_ready.wait()
+
+    # Give container a moment to fully start up
+    time.sleep(2)
+
+    check_interval = 10  # Check every 10 seconds
+
+    while not stop_requested.is_set():
+        try:
+            # Check /tokens endpoint
+            tokens_response = requests.get(f"{TB_LOCAL_ADDRESS}/tokens", timeout=5)
+            if tokens_response.status_code != 200:
+                health_check["error"] = (
+                    f"/tokens endpoint returned status {tokens_response.status_code}. Tinybird Local may be unhealthy."
+                )
+                stop_requested.set()
+                break
+
+            # Check /v0/health endpoint
+            health_response = requests.get(f"{TB_LOCAL_ADDRESS}/v0/health", timeout=5)
+            if health_response.status_code != 200:
+                health_check["error"] = (
+                    f"/v0/health endpoint returned status {health_response.status_code}. "
+                    "Tinybird Local may be unhealthy."
+                )
+                stop_requested.set()
+                break
+
+            # Verify tokens response has expected structure
+            try:
+                tokens_data = tokens_response.json()
+                if not all(key in tokens_data for key in ["user_token", "admin_token", "workspace_admin_token"]):
+                    health_check["error"] = (
+                        "/tokens endpoint returned unexpected data. Tinybird Local may be unhealthy."
+                    )
+                    stop_requested.set()
+                    break
+            except json.JSONDecodeError:
+                health_check["error"] = "/tokens endpoint returned invalid JSON. Tinybird Local may be unhealthy."
+                stop_requested.set()
+                break
+
+        except Exception as e:
+            # Check if it's a connection error
+            error_str = str(e)
+            if "connect" in error_str.lower() or "timeout" in error_str.lower():
+                health_check["error"] = f"Failed to connect to Tinybird Local: {error_str}"
+            else:
+                health_check["error"] = f"Health check failed: {error_str}"
+            stop_requested.set()
+            break
+
+        if container_ready.is_set():
+            stats = container_stats(container, docker_client)
+            click.echo(f"{SERVICE_COLORS['[HEALTH]']}[HEALTH]{RESET} {stats}")
+
+        # Wait before next check
+        for _ in range(check_interval):
+            if stop_requested.is_set():
+                break
+            time.sleep(1)
+
+
+def stream_logs_with_health_check(
+    container: Container, container_ready: threading.Event, stop_requested: threading.Event
+) -> None:
+    """Stream logs and monitor container health in parallel"""
+    # Wait for container to be ready before starting health checks
+    container_ready.wait()
+
+    # Give container a moment to fully start up
+    time.sleep(2)
+
+    retry_count = 0
+    max_retries = 10
+    exec_result = None
+
+    while retry_count < max_retries and not stop_requested.is_set():
+        try:
+            # Try to tail the log files (only new logs, not historical)
+            # Use -F to follow by name and retry if files don't exist yet
+            log_files = {
+                "/var/log/tinybird-local-server.log": "SERVER",
+                "/var/log/tinybird-local-hfi.log": "EVENTS",
+                "/var/log/tinybird-local-setup.log": "AUTH",
+                "/var/log/tinybird-local-kafka.log": "KAFKA",
+            }
+            # Build commands to tail each file and prefix with its label (using stdbuf for unbuffered output)
+            tail_commands = [
+                f'tail -n 0 -f {path} | stdbuf -oL sed "s/^/[{source}] /"' for path, source in log_files.items()
+            ]
+            # Join with & to run in parallel, then wait for all
+            cmd = f"sh -c '({' & '.join(tail_commands)}) & wait'"
+            exec_result = container.exec_run(cmd, stream=True, tty=False, stdout=True, stderr=True)
+            break  # Success, exit retry loop
+        except Exception:
+            # Log file might not exist yet, wait and retry
+            retry_count += 1
+            if retry_count < max_retries:
+                time.sleep(2)
+
+    # Stream logs continuously
+    if exec_result:
+        try:
+            for line in exec_result.output:
+                if stop_requested.is_set():
+                    break
+
+                raw_line = line.decode("utf-8").rstrip()
+                lines = raw_line.split("\n")
+
+                # Print "ready" message when container becomes healthy
+                if container_ready.is_set() and not hasattr(stream_logs_with_health_check, "ready_printed"):
+                    click.echo(FeedbackManager.success(message="✓ Tinybird Local is ready!"))
+                    stream_logs_with_health_check.ready_printed = True  # type: ignore
+
+                for line in lines:
+                    # Apply color to service label
+                    for service, color in SERVICE_COLORS.items():
+                        if line.startswith(service):
+                            message = line[len(service) :]
+                            # extract content of message="..."
+                            match = re.search(r'message="([^"]*)"', message)
+                            if match:
+                                message = match.group(1)
+                            line = f"{color}{service}{RESET} {message}"
+                            break
+
+                    click.echo(line)
+
+        except Exception:
+            pass  # Silently ignore errors when stream is interrupted
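To make the log-streaming command above concrete, this is roughly the shell string stream_logs_with_health_check assembles for the four log files before handing it to container.exec_run (reproduced from the code above only to show the shape of the command):

log_files = {
    "/var/log/tinybird-local-server.log": "SERVER",
    "/var/log/tinybird-local-hfi.log": "EVENTS",
    "/var/log/tinybird-local-setup.log": "AUTH",
    "/var/log/tinybird-local-kafka.log": "KAFKA",
}
tail_commands = [
    f'tail -n 0 -f {path} | stdbuf -oL sed "s/^/[{source}] /"' for path, source in log_files.items()
]
cmd = f"sh -c '({' & '.join(tail_commands)}) & wait'"
print(cmd)
# -> sh -c '(tail -n 0 -f /var/log/tinybird-local-server.log | stdbuf -oL sed "s/^/[SERVER] /" & ...) & wait'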
tinybird/tb/modules/local_logs.py
ADDED

@@ -0,0 +1,209 @@
+import json
+import platform
+from typing import Optional
+
+from docker.client import DockerClient
+from docker.models.containers import Container
+
+
+def clickhouse_is_ready(container: Container) -> bool:
+    try:
+        result = container.exec_run("clickhouse 'SELECT 1 AS healthcheck'")
+        return result.output.decode("utf-8").strip() == "1"
+    except Exception:
+        return False
+
+
+def redis_is_ready(container: Container) -> bool:
+    try:
+        result = container.exec_run("redis-cli PING")
+        return result.output.decode("utf-8").strip() == "PONG"
+    except Exception:
+        return False
+
+
+def local_authentication_is_ready(container: Container) -> bool:
+    try:
+        result = container.exec_run("curl -s http://localhost:8000/tokens")
+        data = json.loads(result.output.decode("utf-8").strip())
+        token_keys = ["admin_token", "user_token", "workspace_admin_token"]
+        return all(key in data for key in token_keys)
+    except Exception:
+        return False
+
+
+def server_is_ready(container: Container) -> bool:
+    try:
+        result = container.exec_run("curl -s http://localhost:8001/health/liveness")
+        is_live = result.output.decode("utf-8").strip() == "alive"
+        if not is_live:
+            return False
+        result = container.exec_run("curl -s http://localhost:8001/health/readiness")
+        return result.output.decode("utf-8").strip() == "ready"
+    except Exception:
+        return False
+
+
+def events_is_ready(container: Container) -> bool:
+    try:
+        result = container.exec_run("curl -s http://localhost:8042/health/liveness")
+        is_live = result.output.decode("utf-8").strip() == "alive"
+        if not is_live:
+            return False
+        result = container.exec_run("curl -s http://localhost:8042/health/readiness")
+        return result.output.decode("utf-8").strip() == "ready"
+    except Exception:
+        return False
+
+
+def container_is_ready(container: Container) -> bool:
+    health = container.attrs.get("State", {}).get("Health", {}).get("Status")
+    status = container.status
+    return health == "healthy" and status == "running"
+
+
+def container_is_starting(container: Container) -> bool:
+    status = container.status
+    health = container.attrs.get("State", {}).get("Health", {}).get("Status")
+    return status == "restarting" or (status == "running" and health == "starting")
+
+
+def container_is_stopping(container: Container) -> bool:
+    status = container.status
+    return status == "stopping"
+
+
+def container_is_unhealthy(container: Container) -> bool:
+    health = container.attrs.get("State", {}).get("Health", {}).get("Status")
+    return health == "unhealthy"
+
+
+def bytes_to_gb(b):
+    return round(b / (1024**3), 2)  # two decimal places (e.g., 1.75 GB)
+
+
+def get_container(client, name_or_id):
+    return client.containers.get(name_or_id)
+
+
+def get_image_arch(client, image_ref):
+    try:
+        image = client.images.get(image_ref)
+        return (image.attrs.get("Architecture") or "").lower()
+    except Exception:
+        return ""
+
+
+def is_emulated(host_arch, image_arch):
+    # Architecture equivalents - same arch with different names
+    arch_equivalents = [
+        {"x86_64", "amd64"},
+        {"aarch64", "arm64"},
+    ]
+
+    if not host_arch or not image_arch:
+        return False
+
+    if host_arch == image_arch:
+        return False
+
+    # Check if architectures are equivalent
+    return all(not (host_arch in equiv_set and image_arch in equiv_set) for equiv_set in arch_equivalents)
+
+
+def mem_usage_percent(container):
+    st = container.stats(stream=False)
+    mem = st.get("memory_stats", {}) or {}
+    limit = float(mem.get("limit") or 0.0)
+    usage = float(mem.get("usage") or 0.0)
+    stats = mem.get("stats", {}) or {}
+    inactive = float(stats.get("total_inactive_file") or stats.get("inactive_file") or 0.0)
+    used = max(usage - inactive, 0.0)
+    pct = (used / limit * 100.0) if limit > 0 else None
+    return used, limit, pct
+
+
+def container_stats(container: Container, client: DockerClient):
+    host_arch = platform.machine().lower()
+    image_arch = get_image_arch(client, container.attrs.get("Config", {}).get("Image", ""))
+    emu = is_emulated(host_arch, image_arch)
+    used_b, limit_b, pct = mem_usage_percent(container)
+    pct = round(pct, 1) if pct is not None else None
+    used_gb = bytes_to_gb(used_b)
+    limit_gb = bytes_to_gb(limit_b) if limit_b > 0 else None
+    lim_str = f"{limit_gb} GB" if limit_gb else "no-limit"
+    arch_str = f"arch={host_arch} img={image_arch or 'unknown'} emulated={str(emu).lower()}"
+    cpu_usage_pct = cpu_usage_stats(container)
+    return f"memory {used_gb}/{lim_str} cpu {cpu_usage_pct} {arch_str}"
+
+
+def cpu_usage_stats(container: Container) -> str:
+    st = container.stats(stream=False)
+    cpu = st.get("cpu_stats", {}) or {}
+    cpu_usage = cpu.get("cpu_usage", {}) or {}
+    total_usage = cpu_usage.get("total_usage", 0)
+    system_cpu_usage = cpu.get("system_cpu_usage", 0)
+    pct = (total_usage / system_cpu_usage * 100.0) if system_cpu_usage > 0 else None
+    return f"{round(pct, 1) if pct is not None else 'N/A'}%"
+
+
+def check_memory_sufficient(container: Container, client: DockerClient) -> tuple[bool, Optional[str]]:
+    """
+    Check if container has sufficient memory.
+
+    Returns:
+        tuple[bool, str | None]: (is_sufficient, warning_message)
+        - is_sufficient: True if memory is sufficient, False otherwise
+        - warning_message: None if sufficient, otherwise a warning message
+    """
+    host_arch = platform.machine().lower()
+    image_arch = get_image_arch(client, container.attrs.get("Config", {}).get("Image", ""))
+    is_emu = is_emulated(host_arch, image_arch)
+    used_b, limit_b, pct = mem_usage_percent(container)
+
+    if limit_b <= 0:
+        # No memory limit set
+        return True, None
+
+    limit_gb = bytes_to_gb(limit_b)
+    used_gb = bytes_to_gb(used_b)
+
+    # Memory thresholds
+    # For emulated containers, we need more memory and lower threshold
+    HIGH_MEMORY_THRESHOLD_EMULATED = 70.0  # 70% for emulated
+    HIGH_MEMORY_THRESHOLD_NATIVE = 85.0  # 85% for native
+    MINIMUM_MEMORY_GB_EMULATED = 6.0  # Minimum 6GB for emulated
+    MINIMUM_MEMORY_GB_NATIVE = 4.0  # Minimum 4GB for native
+
+    warnings = []
+
+    # Check memory percentage
+    if pct is not None:
+        threshold = HIGH_MEMORY_THRESHOLD_EMULATED if is_emu else HIGH_MEMORY_THRESHOLD_NATIVE
+        if pct >= threshold:
+            warnings.append(
+                f"Memory usage is at {pct:.1f}% ({used_gb}/{limit_gb:.2f} GB), "
+                f"which exceeds the recommended threshold of {threshold:.0f}%."
+            )
+
+    # Check absolute memory limit
+    min_memory = MINIMUM_MEMORY_GB_EMULATED if is_emu else MINIMUM_MEMORY_GB_NATIVE
+    if limit_gb < min_memory:
+        arch_msg = f" (running emulated {image_arch} on {host_arch})" if is_emu else ""
+        warnings.append(
+            f"Memory limit is {limit_gb:.2f} GB{arch_msg}, but at least {min_memory:.1f} GB is recommended."
+        )
+
+    if warnings:
+        warning_msg = " ".join(warnings)
+        if is_emu:
+            warning_msg += (
+                "\n"
+                f"You're running an emulated container ({image_arch} on {host_arch}), which requires more resources.\n"
+                "Consider increasing Docker's memory allocation."
+            )
+        else:
+            warning_msg += "Consider increasing Docker's memory allocation."
+        return False, warning_msg
+
+    return True, None
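A sketch of how the new local_logs helpers can be exercised directly against a running container, assuming the docker SDK and the conventional container name "tinybird-local" (the name is an assumption here; local_common.py resolves the actual container itself):

import docker

from tinybird.tb.modules.local_logs import (
    clickhouse_is_ready,
    container_stats,
    redis_is_ready,
    server_is_ready,
)

client = docker.from_env()
container = client.containers.get("tinybird-local")  # assumed container name

print(container_stats(container, client))  # e.g. "memory 1.2/4.0 GB cpu 3.1% arch=arm64 img=arm64 emulated=false"
for name, check in [("clickhouse", clickhouse_is_ready), ("redis", redis_is_ready), ("server", server_is_ready)]:
    print(name, "ready" if check(container) else "not ready")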
tinybird/tb/modules/login.py
CHANGED

@@ -15,12 +15,13 @@ from tinybird.tb.modules.telemetry import add_telemetry_event
     "--host",
     type=str,
     default=None,
-    help="Set
+    help="Set the API host to authenticate to. See https://www.tinybird.co/docs/api-reference#regions-and-endpoints for the available list of regions.",
 )
 @click.option(
     "--auth-host",
     default="https://cloud.tinybird.co",
-    help="Set the host to authenticate to. If unset, the default host will be used.",
+    help="Set the auth host to authenticate to. If unset, the default host will be used.",
+    hidden=True,
 )
 @click.option(
     "--workspace",