service-forge 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- service_forge/api/http_api.py +138 -0
- service_forge/api/kafka_api.py +126 -0
- service_forge/api/task_manager.py +141 -0
- service_forge/api/websocket_api.py +86 -0
- service_forge/api/websocket_manager.py +425 -0
- service_forge/db/__init__.py +1 -0
- service_forge/db/database.py +119 -0
- service_forge/llm/__init__.py +62 -0
- service_forge/llm/llm.py +56 -0
- service_forge/main.py +121 -0
- service_forge/model/__init__.py +0 -0
- service_forge/model/websocket.py +13 -0
- service_forge/proto/foo_input.py +5 -0
- service_forge/service.py +111 -0
- service_forge/service_config.py +115 -0
- service_forge/sft/cli.py +91 -0
- service_forge/sft/cmd/config_command.py +67 -0
- service_forge/sft/cmd/deploy_service.py +124 -0
- service_forge/sft/cmd/list_tars.py +41 -0
- service_forge/sft/cmd/service_command.py +149 -0
- service_forge/sft/cmd/upload_service.py +36 -0
- service_forge/sft/config/injector.py +87 -0
- service_forge/sft/config/injector_default_files.py +97 -0
- service_forge/sft/config/sf_metadata.py +30 -0
- service_forge/sft/config/sft_config.py +125 -0
- service_forge/sft/file/__init__.py +0 -0
- service_forge/sft/file/ignore_pattern.py +80 -0
- service_forge/sft/file/sft_file_manager.py +107 -0
- service_forge/sft/kubernetes/kubernetes_manager.py +257 -0
- service_forge/sft/util/assert_util.py +25 -0
- service_forge/sft/util/logger.py +16 -0
- service_forge/sft/util/name_util.py +2 -0
- service_forge/utils/__init__.py +0 -0
- service_forge/utils/default_type_converter.py +12 -0
- service_forge/utils/register.py +39 -0
- service_forge/utils/type_converter.py +74 -0
- service_forge/workflow/__init__.py +1 -0
- service_forge/workflow/context.py +13 -0
- service_forge/workflow/edge.py +31 -0
- service_forge/workflow/node.py +179 -0
- service_forge/workflow/nodes/__init__.py +7 -0
- service_forge/workflow/nodes/control/if_node.py +29 -0
- service_forge/workflow/nodes/input/console_input_node.py +26 -0
- service_forge/workflow/nodes/llm/query_llm_node.py +41 -0
- service_forge/workflow/nodes/nested/workflow_node.py +28 -0
- service_forge/workflow/nodes/output/kafka_output_node.py +27 -0
- service_forge/workflow/nodes/output/print_node.py +29 -0
- service_forge/workflow/nodes/test/if_console_input_node.py +33 -0
- service_forge/workflow/nodes/test/time_consuming_node.py +61 -0
- service_forge/workflow/port.py +86 -0
- service_forge/workflow/trigger.py +20 -0
- service_forge/workflow/triggers/__init__.py +4 -0
- service_forge/workflow/triggers/fast_api_trigger.py +125 -0
- service_forge/workflow/triggers/kafka_api_trigger.py +44 -0
- service_forge/workflow/triggers/once_trigger.py +20 -0
- service_forge/workflow/triggers/period_trigger.py +26 -0
- service_forge/workflow/workflow.py +251 -0
- service_forge/workflow/workflow_factory.py +227 -0
- service_forge/workflow/workflow_group.py +23 -0
- service_forge/workflow/workflow_type.py +52 -0
- service_forge-0.1.0.dist-info/METADATA +93 -0
- service_forge-0.1.0.dist-info/RECORD +64 -0
- service_forge-0.1.0.dist-info/WHEEL +4 -0
- service_forge-0.1.0.dist-info/entry_points.txt +2 -0
service_forge/main.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
#!/usr/bin/env python3
"""
Service Forge main entry file.

Starts a Service Forge service from a YAML service configuration.
"""

import os
import sys
import argparse
from pathlib import Path

from loguru import logger

# Make the project root importable when this file is executed directly.
project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root))

from service_forge.service import create_service

# Fallback service config used when the path given via --config does not exist.
# NOTE: this used to be the very first statement of the module, which made the
# shebang ineffective and demoted the module docstring to a plain expression;
# it is only read inside main(), so defining it after the imports is safe.
default_service_config = "configs/service/test_service_websocket.yaml"
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def parse_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: Optional explicit argument list. When ``None`` (the default),
            argparse falls back to ``sys.argv[1:]`` as before, so existing
            callers are unaffected; passing a list makes the parser testable.

    Returns:
        argparse.Namespace with ``config``, ``workflows``, ``debug``,
        ``port`` and ``host`` attributes.
    """
    parser = argparse.ArgumentParser(description="Service Forge - 工作流服务框架")

    # Service configuration file.
    parser.add_argument(
        "-c", "--config",
        type=str,
        default="config.yaml",
        help="服务配置文件路径 (默认: config.yaml)"
    )

    # Workflow configuration files (zero or more).
    parser.add_argument(
        "-w", "--workflows",
        type=str,
        nargs="+",
        help="工作流配置文件路径列表"
    )

    # Debug mode switch.
    parser.add_argument(
        "--debug",
        action="store_true",
        help="启用调试模式"
    )

    # HTTP server port.
    parser.add_argument(
        "--port",
        type=int,
        default=8000,
        help="HTTP服务端口 (默认: 8000)"
    )

    # HTTP server bind host.
    parser.add_argument(
        "--host",
        type=str,
        default="0.0.0.0",
        help="HTTP服务主机 (默认: 0.0.0.0)"
    )

    return parser.parse_args(argv)
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def main():
    """CLI entry point: configure logging, build the service, and run it."""
    args = parse_args()

    # Switch loguru to DEBUG-level output when --debug is given.
    if args.debug:
        logger.remove()
        logger.add(
            sys.stderr,
            level="DEBUG",
            format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>"
        )

    # Fall back to the bundled default config when the requested file is
    # missing. Wrap the fallback in Path() so config_path keeps a single type
    # on both branches (it was previously left as a bare str here).
    config_path = Path(args.config)
    if not config_path.exists():
        config_path = Path(default_service_config)

    try:
        logger.info(f"正在加载配置文件: {config_path}")

        # Import the WebSocket message model lazily and expose it to
        # workflows through the service environment.
        from service_forge.model.websocket import WebSocketMessage

        service = create_service(str(config_path), service_env={"WEBSOCKET_MESSAGE": WebSocketMessage})

        # Extra workflow configs passed on the command line.
        # NOTE(review): matching files are only logged — nothing is actually
        # registered with the service here (the original marked this spot as
        # "adjust according to the actual situation").
        if args.workflows:
            for workflow_path in args.workflows:
                if not Path(workflow_path).exists():
                    logger.error(f"工作流配置文件不存在: {workflow_path}")
                    continue
                logger.info(f"添加工作流配置: {workflow_path}")

        # Announce the endpoints, then drive the async service to completion.
        logger.info("正在启动 Service Forge 服务...")
        logger.info(f"HTTP服务器: http://{args.host}:{args.port}")
        logger.info("WebSocket端点: ws://{}:{}/ws/connect/{{client_id}}".format(args.host, args.port))

        import asyncio
        asyncio.run(service.start())

    except KeyboardInterrupt:
        logger.info("服务已停止")
    except Exception as e:
        logger.error(f"服务启动失败: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
|
|
File without changes
|
service_forge/service.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import asyncio
|
|
5
|
+
from omegaconf import OmegaConf
|
|
6
|
+
from service_forge.workflow.workflow_factory import create_workflows
|
|
7
|
+
from service_forge.api.http_api import start_fastapi_server
|
|
8
|
+
from service_forge.api.kafka_api import start_kafka_server
|
|
9
|
+
from service_forge.db.database import DatabaseManager
|
|
10
|
+
from loguru import logger
|
|
11
|
+
from typing import Callable, AsyncIterator, Awaitable, Any
|
|
12
|
+
|
|
13
|
+
class Service:
    """A deployable unit that runs a set of workflows plus optional HTTP
    (FastAPI) and Kafka servers concurrently inside one asyncio loop.
    """

    def __init__(
        self,
        name: str,
        config_path: str,
        workflow_config_paths: list[str],
        _handle_stream_output: Callable[[str, AsyncIterator[str]], Awaitable[None]] | None = None,
        _handle_query_user: Callable[[str, str], Awaitable[str]] | None = None,
        enable_http: bool = True,
        http_host: str = "0.0.0.0",
        http_port: int = 8000,
        enable_kafka: bool = True,
        kafka_host: str = "localhost",
        kafka_port: int = 9092,
        service_env: dict[str, Any] | None = None,
        database_manager: DatabaseManager | None = None,
    ) -> None:
        """Store the service configuration.

        Args:
            name: Service name, used in error logs.
            config_path: Path of the service YAML config; relative workflow
                paths are resolved against its directory.
            workflow_config_paths: Workflow YAML files to instantiate and run.
            _handle_stream_output: Optional async callback forwarded to workflows
                for streaming output.
            _handle_query_user: Optional async callback forwarded to workflows
                for querying the user.
            enable_http: Start the FastAPI server when True.
            http_host: Bind host for the HTTP server.
            http_port: Bind port for the HTTP server.
            enable_kafka: Start the Kafka server when True.
            kafka_host: Kafka broker host.
            kafka_port: Kafka broker port.
            service_env: Extra objects exposed to workflows (defaults to {}).
            database_manager: Shared database access object, if any.
        """
        self.name = name
        self.config_path = config_path
        self.workflow_config_paths = workflow_config_paths
        self._handle_stream_output = _handle_stream_output
        self._handle_query_user = _handle_query_user
        self.enable_http = enable_http
        self.http_host = http_host
        self.http_port = http_port
        self.enable_kafka = enable_kafka
        self.kafka_host = kafka_host
        self.kafka_port = kafka_port
        # Avoid a shared mutable default: each service gets its own dict.
        self.service_env = {} if service_env is None else service_env
        self.database_manager = database_manager

    async def start(self) -> None:
        """Run the enabled servers and all workflows concurrently.

        Blocks until every task completes. On any exception, all started
        tasks are cancelled and the exception is re-raised to the caller.
        """
        if self.enable_http:
            fastapi_task = asyncio.create_task(start_fastapi_server(self.http_host, self.http_port))
        else:
            fastapi_task = None
        if self.enable_kafka:
            kafka_task = asyncio.create_task(start_kafka_server(f"{self.kafka_host}:{self.kafka_port}"))
        else:
            kafka_task = None

        workflow_tasks = []

        # Build one workflow per config file and schedule it on the loop.
        for workflow_config_path in self.workflow_config_paths:
            workflow = create_workflows(
                self.parse_workflow_path(workflow_config_path),
                service_env=self.service_env,
                _handle_stream_output=self._handle_stream_output,
                _handle_query_user=self._handle_query_user,
                database_manager=self.database_manager,
            )
            workflow_tasks.append(asyncio.create_task(workflow.run()))

        try:
            tasks = []
            if fastapi_task:
                tasks.append(fastapi_task)
            if kafka_task:
                tasks.append(kafka_task)
            tasks.extend(workflow_tasks)
            await asyncio.gather(*tasks)
        except Exception as e:
            # One task failed: tear everything else down before propagating.
            logger.error(f"Error in service {self.name}: {e}")
            if fastapi_task:
                fastapi_task.cancel()
            if kafka_task:
                kafka_task.cancel()
            for workflow_task in workflow_tasks:
                workflow_task.cancel()
            raise

    def parse_workflow_path(self, workflow_config_path: str) -> str:
        """Resolve a workflow path; relative paths are taken to be relative
        to the directory containing the service config file."""
        if os.path.isabs(workflow_config_path):
            return workflow_config_path
        else:
            return os.path.join(os.path.dirname(self.config_path), workflow_config_path)

    @staticmethod
    def from_config(config_path: str, service_env: dict[str, Any] | None = None) -> Service:
        """Build a Service from a YAML config file loaded via OmegaConf.

        Missing optional keys fall back to the constructor defaults; a
        DatabaseManager is always constructed from the same config.
        """
        config = OmegaConf.to_object(OmegaConf.load(config_path))
        database_manager = DatabaseManager.from_config(config=config)
        return Service(
            name=config['name'],
            config_path=config_path,
            workflow_config_paths=config.get('workflows', []),
            _handle_stream_output=None,
            _handle_query_user=None,
            enable_http=config.get('enable_http', True),
            http_host=config.get('http_host', '0.0.0.0'),
            http_port=config.get('http_port', 8000),
            enable_kafka=config.get('enable_kafka', True),
            kafka_host=config.get('kafka_host', 'localhost'),
            kafka_port=config.get('kafka_port', 9092),
            service_env=service_env,
            database_manager=database_manager,
        )
|
|
109
|
+
|
|
110
|
+
def create_service(config_path: str, service_env: dict[str, Any] | None = None) -> Service:
    """Convenience wrapper around :meth:`Service.from_config`."""
    return Service.from_config(config_path, service_env)
|
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
class ServiceDatabaseConfig:
    """Connection settings for one PostgreSQL database used by a service."""

    def __init__(
        self,
        name: str,
        postgres_user: str,
        postgres_password: str,
        postgres_host: str,
        postgres_port: int,
        postgres_db: str,
    ) -> None:
        self.name = name
        self.postgres_user = postgres_user
        self.postgres_password = postgres_password
        self.postgres_host = postgres_host
        self.postgres_port = postgres_port
        self.postgres_db = postgres_db

    @staticmethod
    def from_dict(config: dict) -> ServiceDatabaseConfig:
        """Build a config object from a plain mapping (e.g. parsed YAML)."""
        return ServiceDatabaseConfig(
            name=config['name'],
            postgres_user=config['postgres_user'],
            postgres_password=config['postgres_password'],
            postgres_host=config['postgres_host'],
            postgres_port=config['postgres_port'],
            postgres_db=config['postgres_db'],
        )

    def to_dict(self) -> dict:
        """Serialize back to a plain dict; inverse of :meth:`from_dict`."""
        keys = (
            'name',
            'postgres_user',
            'postgres_password',
            'postgres_host',
            'postgres_port',
            'postgres_db',
        )
        return {key: getattr(self, key) for key in keys}
|
|
40
|
+
|
|
41
|
+
class ServiceConfig:
    """Top-level service configuration: workflows, transports and databases."""

    def __init__(
        self,
        name: str,
        workflows: list[str],
        enable_http: bool,
        http_host: str,
        http_port: int,
        enable_kafka: bool,
        kafka_host: str,
        kafka_port: int,
        databases: list[ServiceDatabaseConfig],
    ) -> None:
        self.name = name
        self.workflows = workflows
        self.enable_http = enable_http
        self.http_host = http_host
        self.http_port = http_port
        self.enable_kafka = enable_kafka
        self.kafka_host = kafka_host
        self.kafka_port = kafka_port
        self.databases = databases

    # Keys that map straight through without conversion.
    _SCALAR_KEYS = (
        'name', 'workflows', 'enable_http', 'http_host', 'http_port',
        'enable_kafka', 'kafka_host', 'kafka_port',
    )

    @staticmethod
    def from_dict(config: dict) -> ServiceConfig:
        """Build a ServiceConfig from a plain mapping; each entry under
        ``databases`` is parsed into a ServiceDatabaseConfig."""
        kwargs = {key: config[key] for key in ServiceConfig._SCALAR_KEYS}
        kwargs['databases'] = [
            ServiceDatabaseConfig.from_dict(entry) for entry in config['databases']
        ]
        return ServiceConfig(**kwargs)

    def to_dict(self) -> dict:
        """Serialize to a plain dict; inverse of :meth:`from_dict`."""
        result = {key: getattr(self, key) for key in ServiceConfig._SCALAR_KEYS}
        result['databases'] = [database.to_dict() for database in self.databases]
        return result
|
|
90
|
+
|
|
91
|
+
# name: tag_service
|
|
92
|
+
# workflows:
|
|
93
|
+
# # - ./workflow/kafka_workflow.yaml
|
|
94
|
+
# - ./workflow/query_tags_workflow.yaml
|
|
95
|
+
# - ./workflow/create_tag_workflow.yaml
|
|
96
|
+
# - ./workflow/update_tag_workflow.yaml
|
|
97
|
+
# - ./workflow/delete_tag_workflow.yaml
|
|
98
|
+
# - ./workflow/get_tags_from_record.yaml
|
|
99
|
+
|
|
100
|
+
# enable_http: true
|
|
101
|
+
# enable_kafka: false
|
|
102
|
+
|
|
103
|
+
# # Following configs will be auto-injected by sft.
|
|
104
|
+
# http_host: 0.0.0.0
|
|
105
|
+
# http_port: 37200
|
|
106
|
+
# kafka_host: localhost
|
|
107
|
+
# kafka_port: 9092
|
|
108
|
+
|
|
109
|
+
# databases:
|
|
110
|
+
# - name: tag
|
|
111
|
+
# postgres_user: postgres
|
|
112
|
+
#     postgres_password: "<REDACTED>"  # NOTE(review): a real credential was committed here in the published package — rotate it and supply the value via a secret, never in the example.
|
|
113
|
+
# postgres_host: second-brain-postgres-postgresql
|
|
114
|
+
# postgres_port: 5432
|
|
115
|
+
# postgres_db: tag-service-tag
|
service_forge/sft/cli.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
import sys
|
|
3
|
+
from typing import Optional
|
|
4
|
+
|
|
5
|
+
import typer
|
|
6
|
+
from service_forge.sft.util.logger import log_error, log_info
|
|
7
|
+
from service_forge.sft.cmd.list_tars import list_tars
|
|
8
|
+
from service_forge.sft.cmd.upload_service import upload_service
|
|
9
|
+
from service_forge.sft.cmd.deploy_service import deploy_service
|
|
10
|
+
from service_forge.sft.cmd.config_command import list_config, get_config, set_config
|
|
11
|
+
from service_forge.sft.cmd.service_command import list_services, delete_service, show_service_logs
|
|
12
|
+
|
|
13
|
+
# Root Typer application. Commands registered directly on `app` are top-level
# (`sft upload`, `sft list`, `sft deploy`); the `config` and `service`
# sub-apps are attached below with add_typer.
# NOTE: no docstrings are added to command functions on purpose — Typer would
# surface them as --help text and change the CLI output.
app = typer.Typer(
    name="sft",
    help="Service Forge CLI - Service management tool",
    add_completion=False,
)

# `sft upload [PROJECT_PATH]` — package and upload a service project.
@app.command(name="upload")
def upload_service_command(
    project_path: Optional[str] = typer.Argument(
        default=".",
        help="Project path, default is the current directory"
    )
) -> None:
    upload_service(project_path)

# `sft list` — list uploaded service tar files.
@app.command(name="list")
def list_tars_command() -> None:
    list_tars()

# `sft deploy NAME VERSION` — deploy a previously uploaded service.
@app.command(name="deploy")
def deploy_service_command(name: str, version: str) -> None:
    deploy_service(name, version)

# Sub-app: `sft config ...` — read/write SFT tool configuration.
config_app = typer.Typer(
    name="config",
    help="Configuration management commands",
    add_completion=False,
)

@config_app.command(name="list")
def config_list_command() -> None:
    list_config()

@config_app.command(name="get")
def config_get_command(
    key: str = typer.Argument(help="Configuration item key")
) -> None:
    get_config(key)

@config_app.command(name="set")
def config_set_command(
    key: str = typer.Argument(help="Configuration item key"),
    value: str = typer.Argument(help="Configuration item value")
) -> None:
    set_config(key, value)

app.add_typer(config_app)

# Sub-app: `sft service ...` — manage deployed Kubernetes services.
service_app = typer.Typer(
    name="service",
    help="Kubernetes service management commands",
    add_completion=False,
)

@service_app.command(name="list")
def service_list_command() -> None:
    list_services()

@service_app.command(name="delete")
def service_delete_command(
    service_name: str = typer.Argument(help="Service name to delete (must start with sf-)"),
    force: bool = typer.Option(False, "--force", "-f", help="Force delete")
) -> None:
    delete_service(service_name, force)

@service_app.command(name="logs")
def service_logs_command(
    service_name: str = typer.Argument(help="Service name to view logs for (must start with sf-)"),
    container: Optional[str] = typer.Option(None, "--container", "-c", help="Container name (if pod has multiple containers)"),
    tail: int = typer.Option(100, "--tail", "-n", help="Number of lines to show from the end of logs"),
    follow: bool = typer.Option(False, "--follow", "-f", help="Follow log output"),
    previous: bool = typer.Option(False, "--previous", "-p", help="Get logs from previous instance of container")
) -> None:
    show_service_logs(service_name, container, tail, follow, previous)

app.add_typer(service_app)

# Console-script entry point (see dist-info entry_points.txt).
def main() -> None:
    app()
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
import typer
|
|
3
|
+
from rich.console import Console
|
|
4
|
+
from rich.table import Table
|
|
5
|
+
|
|
6
|
+
from service_forge.sft.config.sft_config import SftConfig
|
|
7
|
+
from service_forge.sft.util.logger import log_error, log_info, log_success, log_warning
|
|
8
|
+
from service_forge.sft.config.sft_config import sft_config
|
|
9
|
+
|
|
10
|
+
def list_config() -> None:
    """Print every SFT configuration key, value and description as a table.

    Also prints the on-disk location of the config file; exits with code 1
    when the configuration cannot be loaded.
    """
    try:
        console = Console()

        table = Table(title="SFT Configuration", show_header=True, header_style="bold magenta")
        table.add_column("Key", style="cyan", no_wrap=True)
        table.add_column("Value", style="green")
        table.add_column("Description", style="yellow")

        # One row per config item, sorted by key for stable output.
        for key, value in sorted(sft_config.to_dict().items()):
            table.add_row(
                key,
                str(value),
                SftConfig.CONFIG_DESCRIPTIONS.get(key, "No description available"),
            )

        console.print(table)
        console.print(f"\n[dim]Config file location: {sft_config.config_file_path}[/dim]")
    except Exception as e:
        log_error(f"Failed to load config: {e}")
        raise typer.Exit(1)
|
|
30
|
+
|
|
31
|
+
def get_config(key: str) -> None:
    """Print the value of a single SFT configuration key.

    Exits with code 1 when the key is unknown or the lookup fails.
    """
    try:
        value = sft_config.get(key)

        if value is None:
            log_error(f"Config key '{key}' not found")
            log_info("Available keys: config_root, sft_file_root, k8s_namespace")
            raise typer.Exit(1)

        log_info(f"{key} = {value}")
    # typer.Exit inherits from Exception (via click's Exit -> RuntimeError),
    # so without this clause the deliberate exit above would be caught by the
    # generic handler and logged as a spurious "Failed to get config" error.
    except typer.Exit:
        raise
    except ValueError as e:
        log_error(str(e))
        raise typer.Exit(1)
    except Exception as e:
        log_error(f"Failed to get config: {e}")
        raise typer.Exit(1)
|
|
47
|
+
|
|
48
|
+
def set_config(key: str, value: str) -> None:
    """Update one SFT configuration key and persist the config file.

    Exits with code 1 when the key is unknown or the update/save fails.
    """
    try:
        current_value = sft_config.get(key)
        if current_value is None:
            log_error(f"Unknown config key: {key}")
            log_info("Available keys: config_root, sft_file_root, k8s_namespace")
            raise typer.Exit(1)

        sft_config.set(key, value)
        sft_config.save()

        log_success(f"Updated {key} = {value}")
        log_info(f"Config saved to {sft_config.config_file_path}")
    # typer.Exit inherits from Exception (via click's Exit -> RuntimeError),
    # so without this clause the deliberate exit above would be caught by the
    # generic handler and logged as a spurious "Failed to set config" error.
    except typer.Exit:
        raise
    except ValueError as e:
        log_error(str(e))
        raise typer.Exit(1)
    except Exception as e:
        log_error(f"Failed to set config: {e}")
        raise typer.Exit(1)
|
|
67
|
+
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import shutil
|
|
3
|
+
import subprocess
|
|
4
|
+
import tarfile
|
|
5
|
+
import tempfile
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
import typer
|
|
9
|
+
from omegaconf import OmegaConf
|
|
10
|
+
from service_forge.sft.util.logger import log_error, log_info, log_success, log_warning
|
|
11
|
+
from service_forge.sft.file.sft_file_manager import sft_file_manager
|
|
12
|
+
from service_forge.sft.config.sft_config import sft_config
|
|
13
|
+
from service_forge.sft.util.assert_util import assert_file_exists, assert_dir_exists
|
|
14
|
+
from service_forge.sft.config.sf_metadata import load_metadata
|
|
15
|
+
from service_forge.sft.kubernetes.kubernetes_manager import KubernetesManager
|
|
16
|
+
from service_forge.sft.config.injector import Injector
|
|
17
|
+
from service_forge.sft.util.name_util import get_service_name
|
|
18
|
+
|
|
19
|
+
def _extract_tar_file(tar_file: Path, temp_path: Path) -> None:
    """Extract *tar_file* into *temp_path*, exiting the CLI on failure.

    Uses the ``filter="data"`` extraction filter (PEP 706, Python 3.12+ and
    backported patch releases) when available, so archive members cannot
    escape *temp_path* via absolute paths, ``..`` components, links or
    special files; older interpreters fall back to the legacy unfiltered
    extraction.
    """
    log_info(f"Extracting tar file to: {temp_path}")

    try:
        with tarfile.open(tar_file, 'r') as tar:
            try:
                # Safe extraction: rejects path traversal and special members.
                tar.extractall(temp_path, filter="data")
            except TypeError:
                # Interpreter predates the `filter` argument.
                tar.extractall(temp_path)
    except Exception as e:
        log_error(f"Failed to extract tar file: {e}")
        raise typer.Exit(1)

    log_success("Tar file extracted successfully")
|
|
30
|
+
|
|
31
|
+
def _build_docker_image(project_dir: Path, name: str, version: str) -> None:
    """Build the service's Docker image and push it to the configured registry.

    The image is tagged ``sf-<name>:<version>`` and prefixed with
    ``sft_config.registry_address``. Exits the CLI with code 1 when the
    docker binary is missing or either the build or the push fails.
    """
    image_name = f"sf-{name}:{version}"
    full_image_name = sft_config.registry_address + "/" + image_name
    log_info(f"Building Docker image: {image_name}")
    try:
        # build docker image (check=True raises CalledProcessError on failure)
        build_result = subprocess.run(
            ["docker", "build", "-t", full_image_name, str(project_dir)],
            capture_output=True,
            text=True,
            check=True
        )
        log_success(f"Docker image built successfully: {image_name}")
        if build_result.stdout:
            log_info(build_result.stdout)

        # push docker image to registry
        log_info(f"Pushing Docker image to registry: {full_image_name}")
        push_result = subprocess.run(
            ["docker", "push", full_image_name],
            capture_output=True,
            text=True,
            check=True
        )
        log_success(f"Docker image pushed successfully: {full_image_name}")
        if push_result.stdout:
            log_info(push_result.stdout)

    except subprocess.CalledProcessError as e:
        # Surface docker's own stderr before aborting the CLI.
        log_error(f"Docker operation failed: {e}")
        if e.stderr:
            log_error(e.stderr)
        raise typer.Exit(1)
    except FileNotFoundError:
        # The docker binary itself is not on PATH.
        log_error("Docker command not found. Please install Docker.")
        raise typer.Exit(1)
|
|
67
|
+
|
|
68
|
+
def _apply_k8s_deployment(deployment_yaml: Path, ingress_yaml: Path, name: str, version: str) -> None:
    """Apply the deployment and ingress manifests to the configured namespace.

    Any existing service with the same name/version is force-deleted first,
    so the apply acts as a clean redeploy. Exits with code 1 on failure.
    """
    log_info("Applying k8s deployment...")

    try:
        k8s_manager = KubernetesManager()
        # Remove any previous instance before re-applying the manifests.
        k8s_manager.delete_service(sft_config.k8s_namespace, get_service_name(name, version), force=True)
        k8s_manager.apply_deployment_yaml(deployment_yaml, sft_config.k8s_namespace)
        k8s_manager.apply_deployment_yaml(ingress_yaml, sft_config.k8s_namespace)
        log_success("K8s deployment applied successfully")
    except Exception as e:
        log_error(f"K8s deployment failed: {e}")
        raise typer.Exit(1)

    log_success(f"Deployment process completed for {name}:{version}")
|
|
82
|
+
|
|
83
|
+
def _inject_config(project_dir: Path) -> None:
    """Run the sft config injector over the extracted project directory."""
    Injector(project_dir).inject()
|
|
86
|
+
|
|
87
|
+
def deploy_service(name: str, version: str) -> None:
    """Deploy a previously uploaded service tar.

    Pipeline: extract the tar into a scratch directory, inject sft config,
    validate the expected project layout, build & push the Docker image,
    then apply the Kubernetes manifests.

    Args:
        name: Service name used when the tar was uploaded.
        version: Service version string.
    """
    tar_file = sft_file_manager.tar_path / f"sf_{name}_{version}.tar"

    assert_file_exists(tar_file)

    # Ensure the shared temporary parent directory exists. (os is imported at
    # module level; the redundant function-local `import os` was removed.)
    temp_parent = os.path.join(tempfile.gettempdir(), "sft")
    os.makedirs(temp_parent, exist_ok=True)

    with tempfile.TemporaryDirectory(prefix=f"deploy_{name}_{version}", dir=temp_parent) as temp_dir:
        temp_path = Path(temp_dir)

        _extract_tar_file(tar_file, temp_path)

        # The tar is expected to contain a single <name>_<version> directory.
        project_dir = temp_path / f"{name}_{version}"

        # Validate the extracted directory BEFORE mutating it — previously the
        # existence check only ran after config injection, producing confusing
        # injector errors for a malformed tar.
        assert_dir_exists(project_dir)

        _inject_config(project_dir)

        dockerfile_path = project_dir / "Dockerfile"
        metadata_path = project_dir / "sf-meta.yaml"
        deployment_yaml = project_dir / "deployment.yaml"
        ingress_yaml = project_dir / "ingress.yaml"

        assert_file_exists(dockerfile_path)
        assert_file_exists(metadata_path)
        assert_file_exists(deployment_yaml)
        assert_file_exists(ingress_yaml)

        try:
            meta_data = load_metadata(metadata_path)
        except Exception as e:
            log_error(f"Failed to read sf-meta.yaml: {e}")
            raise typer.Exit(1)

        # The metadata's own name/version is authoritative over the CLI args.
        _build_docker_image(project_dir, meta_data.name, meta_data.version)
        _apply_k8s_deployment(deployment_yaml, ingress_yaml, meta_data.name, meta_data.version)
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
|
|
3
|
+
from rich.console import Console
|
|
4
|
+
from rich.table import Table
|
|
5
|
+
|
|
6
|
+
from service_forge.sft.util.logger import log_error, log_info
|
|
7
|
+
from service_forge.sft.file.sft_file_manager import sft_file_manager
|
|
8
|
+
|
|
9
|
+
def list_tars() -> None:
    """Print every packaged service tar file as a rich table.

    Columns: project, version, file name, human-readable size and
    modification time. Prints a short notice when nothing is found.
    """
    tar_files = sft_file_manager.load_tars()

    if not tar_files:
        log_info("No tar files found.")
        return

    console = Console()

    table = Table(title="Service Tar Files", show_header=True, header_style="bold magenta")
    table.add_column("Project", style="cyan", no_wrap=True)
    table.add_column("Version", style="cyan", no_wrap=True)
    table.add_column("File Name", style="cyan", no_wrap=True)
    table.add_column("Size", justify="right", style="green")
    table.add_column("Modified Time", style="yellow")

    # Each entry renders its own size/mtime via its private formatter methods.
    for entry in tar_files:
        table.add_row(
            entry.project_name,
            entry.version,
            entry.path.name,
            entry._format_size(),
            entry._format_modified_time(),
        )

    console.print(table)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _format_size(size_bytes: int) -> str:
|
|
31
|
+
for unit in ['B', 'KB', 'MB', 'GB']:
|
|
32
|
+
if size_bytes < 1024.0:
|
|
33
|
+
return f"{size_bytes:.2f} {unit}"
|
|
34
|
+
size_bytes /= 1024.0
|
|
35
|
+
return f"{size_bytes:.2f} TB"
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def _format_time(timestamp: float) -> str:
|
|
39
|
+
from datetime import datetime
|
|
40
|
+
return datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
|
|
41
|
+
|