jaseci 1.4.0.9__py3-none-any.whl → 1.4.0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of jaseci might be problematic.
- jaseci/VERSION +1 -1
- jaseci/__init__.py +3 -0
- jaseci/actions/standard/elastic.py +3 -2
- jaseci/actions/standard/mail.py +3 -2
- jaseci/actions/standard/std.py +3 -2
- jaseci/actions/standard/stripe.py +3 -2
- jaseci/actions/standard/task.py +3 -5
- jaseci/actions/standard/tests/test_mail_lib.py +8 -7
- jaseci/actions/tests/test_std.py +4 -5
- jaseci/actor/walker.py +6 -3
- jaseci/api/config_api.py +3 -2
- jaseci/api/jac_api.py +2 -2
- jaseci/api/jsorc_api.py +60 -121
- jaseci/api/prometheus_api.py +14 -20
- jaseci/api/queue_api.py +9 -5
- jaseci/api/tests/test_global_api.py +3 -3
- jaseci/api/tests/test_logger_api.py +3 -3
- jaseci/api/user_api.py +3 -3
- jaseci/api/webhook_api.py +6 -4
- jaseci/attr/action.py +10 -4
- jaseci/element/master.py +2 -0
- jaseci/element/super_master.py +2 -0
- jaseci/hook/memory.py +3 -1
- jaseci/hook/redis.py +5 -4
- jaseci/jac/interpreter/interp.py +16 -4
- jaseci/jac/tests/test_book.py +2 -2
- jaseci/jsctl/jsctl.py +48 -15
- jaseci/jsctl/tests/test_jsctl.py +5 -0
- jaseci/jsorc.py +733 -0
- jaseci/jsorc_settings.py +184 -0
- jaseci/manifests/database.yaml +107 -0
- jaseci/manifests/elastic.yaml +5923 -0
- jaseci/manifests/prometheus.yaml +1273 -0
- jaseci/{svc/jsorc-backup/jaseci-redis.yaml → manifests/redis.yaml} +20 -0
- jaseci/svc/__init__.py +0 -25
- jaseci/svc/{elastic/elastic.py → elastic_svc.py} +5 -16
- jaseci/svc/kube_svc.py +240 -0
- jaseci/svc/{mail/mail.py → mail_svc.py} +14 -17
- jaseci/svc/{prometheus/prometheus.py → prome_svc.py} +5 -16
- jaseci/svc/{redis/redis.py → redis_svc.py} +14 -26
- jaseci/svc/{stripe/stripe.py → stripe_svc.py} +4 -7
- jaseci/svc/{task/task.py → task_svc.py} +27 -24
- jaseci/svc/{task/common.py → tasks.py} +287 -293
- jaseci/tests/jac_test_progs.py +21 -0
- jaseci/tests/test_core.py +14 -15
- jaseci/tests/test_jac.py +59 -60
- jaseci/tests/test_node.py +6 -13
- jaseci/tests/test_progs.py +74 -52
- jaseci/tests/test_stripe.py +6 -10
- jaseci/utils/actions/actions_manager.py +254 -0
- jaseci/{svc/actions_optimizer → utils/actions}/actions_optimizer.py +9 -19
- jaseci/utils/json_handler.py +2 -3
- jaseci/utils/test_core.py +4 -5
- jaseci/utils/utils.py +12 -0
- {jaseci-1.4.0.9.dist-info → jaseci-1.4.0.11.dist-info}/METADATA +2 -1
- {jaseci-1.4.0.9.dist-info → jaseci-1.4.0.11.dist-info}/RECORD +63 -80
- jaseci/svc/common.py +0 -763
- jaseci/svc/config.py +0 -9
- jaseci/svc/elastic/__init__.py +0 -3
- jaseci/svc/elastic/config.py +0 -8
- jaseci/svc/elastic/manifest.py +0 -1
- jaseci/svc/jsorc-backup/jsorc.py +0 -182
- jaseci/svc/jsorc-backup/promon/__init__.py +0 -0
- jaseci/svc/jsorc-backup/promon/promon.py +0 -202
- jaseci/svc/mail/__init__.py +0 -4
- jaseci/svc/mail/config.py +0 -25
- jaseci/svc/meta.py +0 -164
- jaseci/svc/postgres/__init__.py +0 -0
- jaseci/svc/postgres/manifest.py +0 -106
- jaseci/svc/prometheus/__init__.py +0 -5
- jaseci/svc/prometheus/config.py +0 -11
- jaseci/svc/prometheus/manifest.py +0 -1102
- jaseci/svc/redis/__init__.py +0 -5
- jaseci/svc/redis/config.py +0 -10
- jaseci/svc/redis/manifest.py +0 -65
- jaseci/svc/state.py +0 -17
- jaseci/svc/stripe/__init__.py +0 -3
- jaseci/svc/stripe/config.py +0 -7
- jaseci/svc/task/__init__.py +0 -5
- jaseci/svc/task/config.py +0 -17
- /jaseci/{svc/actions_optimizer → manifests}/__init__.py +0 -0
- /jaseci/{svc/jsorc-backup → utils/actions}/__init__.py +0 -0
- /jaseci/{svc/actions_optimizer → utils/actions}/actions_state.py +0 -0
- {jaseci-1.4.0.9.dist-info → jaseci-1.4.0.11.dist-info}/LICENSE +0 -0
- {jaseci-1.4.0.9.dist-info → jaseci-1.4.0.11.dist-info}/WHEEL +0 -0
- {jaseci-1.4.0.9.dist-info → jaseci-1.4.0.11.dist-info}/entry_points.txt +0 -0
- {jaseci-1.4.0.9.dist-info → jaseci-1.4.0.11.dist-info}/top_level.txt +0 -0
jaseci/svc/config.py
DELETED
jaseci/svc/elastic/__init__.py
DELETED
jaseci/svc/elastic/config.py
DELETED
jaseci/svc/elastic/manifest.py
DELETED
@@ -1 +0,0 @@
-ELASTIC_MANIFEST = {}
jaseci/svc/jsorc-backup/jsorc.py
DELETED
@@ -1,182 +0,0 @@
-from .promon.promon import Promon
-import time
-from kubernetes import client, config
-from kubernetes.client.rest import ApiException
-import os
-import yaml
-import multiprocessing
-from jaseci.utils.utils import logger
-
-
-class KubeController:
-    # A set of all functions that are helpful for kubernetes operations
-    # Configs can be set in Configuration class directly or using helper utility
-
-    def __init__(self, configuration):
-        self.config = configuration
-        self.api_instance = client.CoreV1Api(self.config)
-        self.app_client = client.ApiClient(self.config)
-        self.app_api = client.AppsV1Api(self.app_client)
-
-    def get_pod_list(self):
-        ret = self.api_instance.list_pod_for_all_namespaces(watch=False)
-        res = []
-        for i in ret.items:
-            res.append({"namespace": i.metadata.namespace, "name": i.metadata.name})
-        return res
-
-    def get_deployment_list(self):
-        ret = self.app_api.list_deployment_for_all_namespaces(watch=False)
-        res = []
-        for i in ret.items:
-            res.append({"namespace": i.metadata.namespace, "name": i.metadata.name})
-        return res
-
-    def create_deployment(self, config: dict, namespace: str = "default"):
-        return self.app_api.create_namespaced_deployment(
-            namespace=namespace, body=config
-        )
-
-    def get_deployment_conf(self, name: str, namespace: str = "default"):
-        api_response = self.app_api.read_namespaced_deployment(
-            name=name, namespace=namespace
-        )
-        return api_response
-
-    def patch_deployment_conf(self, config, name: str, namespace: str = "default"):
-        api_response = self.app_api.patch_namespaced_deployment(
-            name=name, namespace=namespace, body=config
-        )
-        return api_response
-
-    def deployment_set_scale(
-        self, name: str, namespace: str = "default", scale: int = 1
-    ):
-        # This is just a small shortcut to set the scale of a deployment
-        conf = self.get_deployment_conf(name, namespace)
-        conf.spec.replicas = scale
-        self.patch_deployment_conf(conf, name, namespace)
-
-    def kill_deployment(self, name: str, namespace: str = "default"):
-        try:
-            api_response = self.app_api.delete_namespaced_deployment(
-                name=name, namespace=namespace
-            )
-            return api_response
-        except ApiException as e:
-            print("Exception when calling CoreV1Api->delete_namespaced_pod: %s\n" % e)
-
-
-class Monitor:
-    def __init__(self, promon_url: str, k8sconfig):
-        self.promon = Promon(promon_url)
-        self.controller = KubeController(k8sconfig)
-
-    def strategy_start_redis(self):
-        deployments = self.controller.get_deployment_list()
-        exsits = False
-        for deployment in deployments:
-            if deployment["name"] == "jaseci-redis":
-                exsits = True
-        if not exsits:
-            logger.info("Creating new jaseci redis")
-            dirpath = os.path.dirname(os.path.realpath(__file__))
-            filepath = os.path.join(dirpath, "jaseci-redis.yaml")
-            self.controller.create_deployment(
-                config=yaml.safe_load(open(filepath, "r"))
-            )
-
-    def strategy_redis_cpu(
-        self, node_name: str, deployment_namespace: str, deployment_name: str
-    ):
-        cpu = self.promon.cpu_utilization_percentage()
-        cpu_usage = cpu[node_name]
-        print(f"Detect CPU Usage: {cpu_usage}")
-        if cpu_usage > 10:
-            pods = self.controller.get_deployment_list()
-            for pod in pods:
-                namespace = pod["namespace"]
-                name = pod["name"]
-                if name == deployment_name:
-                    print("Kill deployment")
-                    self.controller.kill_deployment(name=name, namespace=namespace)
-        if cpu_usage < 5:
-            pods = self.controller.get_deployment_list()
-            redis_running = False
-            for pod in pods:
-                if pod["name"] == deployment_name:
-                    redis_running = True
-            if not redis_running:
-                print("Creating new deployment")
-                self.controller.create_deployment(
-                    config=yaml.safe_load(open("jaseci.yaml", "r"))
-                )
-
-    def strategy_service_cpu(self):
-        cpu = self.promon.cpu_utilization_per_pod_cores()
-        count = 0
-        total = 0
-        for pod_name in cpu.keys():
-            if pod_name.startswith("jaseci-redis"):
-                count = count + 1
-                total = total + cpu[pod_name]
-
-        avg = total / count
-        print(avg)
-        if avg > 0.001:
-            conf = self.controller.get_deployment_conf("jaseci-redis", "default")
-            replicas = conf.spec.replicas + 1
-            print(f"Num of Replicas: {replicas}")
-            self.controller.deployment_set_scale("jaseci-redis", "default", replicas)
-        if avg < 0.001:
-            conf = self.controller.get_deployment_conf("jaseci-redis", "default")
-            replicas = conf.spec.replicas - 1
-            print(f"Num of Replicas: {replicas}")
-            self.controller.deployment_set_scale("jaseci-redis", "default", replicas)
-
-
-def daemon(k8s_conf, prometheus_url: str):
-    m = Monitor(prometheus_url, k8s_conf)
-    while True:
-        # m.strategy_redis_cpu("minikube", "default", "jaseci-redis")
-        m.strategy_start_redis()
-        time.sleep(10)
-
-
-def start_monitoring(k8s_conf, prometheus_url: str):
-    monitor = multiprocessing.Process(target=daemon, args=(k8s_conf, prometheus_url))
-    monitor.start()
-    return monitor
-
-
-def wait_monitoring(monitor_thread):
-    monitor_thread.join()
-
-
-def stop_monitoring(monitor_thread):
-    monitor_thread.terminate()
-
-
-def remote_k8s_conf(k8s_url: str):
-    k8sconf = client.Configuration()
-    k8sconf.host = k8s_url
-    k8sconf.verify_ssl = False
-    return k8sconf
-
-
-def incluster():
-    try:
-        config.load_incluster_config()
-    except config.config_exception.ConfigException:
-        return False
-    return True
-
-
-def jsorc_start():
-    logger.info("JSORC Running")
-    if incluster():
-        k8s_config = None
-        start_monitoring(
-            k8s_conf=k8s_config, prometheus_url="http://js-prometheus:9090"
-        )
-        logger.info("Monitoring started")
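Note: the removed jsorc-backup prototype wrapped the Kubernetes client and a Prometheus-driven monitor behind a few start/stop helpers. A minimal sketch of driving it from outside a cluster might have looked like the following; loading a local kubeconfig and using a port-forwarded Prometheus URL are assumptions for illustration, not behavior of the deleted module (its jsorc_start() only handled the in-cluster case).

    # Hypothetical driver for the removed jsorc-backup prototype.
    from kubernetes import config

    # Assumption: a local kubeconfig is available; the deleted jsorc_start()
    # passed k8s_conf=None and relied on in-cluster configuration instead.
    config.load_kube_config()

    monitor = start_monitoring(
        k8s_conf=None,  # None lets the kubernetes client use the loaded default config
        prometheus_url="http://localhost:9090",  # assumed port-forwarded Prometheus
    )
    try:
        wait_monitoring(monitor)   # block until the monitoring process exits
    except KeyboardInterrupt:
        stop_monitoring(monitor)   # terminate the daemon process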
jaseci/svc/jsorc-backup/promon/__init__.py
DELETED
File without changes
jaseci/svc/jsorc-backup/promon/promon.py
DELETED
@@ -1,202 +0,0 @@
-from prometheus_api_client import PrometheusConnect
-
-
-class Promon:
-    def __init__(self, url: str):
-        self.prom = PrometheusConnect(url=url, disable_ssl=True)
-
-    def all_metrics(self) -> list:
-        return self.prom.all_metrics()
-
-    def cpu_utilization_core(self) -> dict:
-        util = self.prom.get_current_metric_value(
-            'sum(irate(node_cpu_seconds_total{mode!="idle"}[10m])) by (node)'
-        )
-        res = {}
-        for node in util:
-            node_name = node["metric"]["node"]
-            node_util = float(node["value"][1])
-            res[node_name] = node_util
-        return res
-
-    def cpu_utilization_percentage(self) -> dict:
-        util = self.prom.get_current_metric_value(
-            '(sum(irate(node_cpu_seconds_total{mode!="idle"}[10m])) by '
-            '(node)) / (sum(irate(node_cpu_seconds_total{mode!=""}[10m])) by '
-            "(node)) * 100"
-        )
-        res = {}
-        for node in util:
-            node_name = node["metric"]["node"]
-            node_util = float(node["value"][1])
-            res[node_name] = node_util
-        return res
-
-    def cpu_utilization_per_pod_cores(self) -> dict:
-        util = self.prom.get_current_metric_value(
-            'sum(irate(container_cpu_usage_seconds_total{pod!=""}[10m])) by (pod)'
-        )
-        res = {}
-        for pod in util:
-            pod_name = pod["metric"]["pod"]
-            value = float(pod["value"][1])
-            res[pod_name] = float(value)
-        return res
-
-    def mem_total_bytes(self) -> dict:
-        util = self.prom.get_current_metric_value(
-            "sum(node_memory_MemTotal_bytes) by (node)"
-        )
-        res = {}
-        for node in util:
-            node_name = node["metric"]["node"]
-            node_util = float(node["value"][1])
-            res[node_name] = node_util
-        return res
-
-    def mem_utilization_bytes(self) -> dict:
-        util = self.prom.get_current_metric_value(
-            "sum(node_memory_Active_bytes) by (node)"
-        )
-        res = {}
-        for node in util:
-            node_name = node["metric"]["node"]
-            node_util = float(node["value"][1])
-            res[node_name] = node_util
-        return res
-
-    def mem_utilization_percentage(self) -> dict:
-        util = self.prom.get_current_metric_value(
-            "sum(node_memory_Active_bytes / node_memory_MemTotal_bytes * 100 ) by "
-            "(node)"
-        )
-        res = {}
-        for node in util:
-            node_name = node["metric"]["node"]
-            node_util = float(node["value"][1])
-            res[node_name] = node_util
-        return res
-
-    def mem_utilization_per_pod_bytes(self) -> dict:
-        util = self.prom.get_current_metric_value(
-            'sum(container_memory_working_set_bytes{pod!=""}) by (pod)'
-        )
-        res = {}
-        for pod in util:
-            pod_name = pod["metric"]["pod"]
-            value = float(pod["value"][1])
-            res[pod_name] = float(value)
-        return res
-
-    def node_pods(self) -> dict:
-        util = self.prom.get_current_metric_value("kube_pod_info")
-        res = {}
-        for pod in util:
-            info = pod["metric"]
-            node = info["node"]
-            pod = info["pod"]
-            if res.get(node) is None:
-                res[node] = set()
-            res[node].add(pod)
-        return res
-
-    def network_receive_bytes(self) -> dict:
-        util = self.prom.get_current_metric_value(
-            "sum (rate (node_network_receive_bytes_total{}[10m])) by (node)"
-        )
-        res = {}
-        for node in util:
-            node_name = node["metric"]["node"]
-            node_util = float(node["value"][1])
-            res[node_name] = node_util
-        return res
-
-    def network_receive_per_pod_bytes(self):
-        util = self.prom.get_current_metric_value(
-            'sum (rate (container_network_receive_bytes_total{pod!=""}[10m])) by (pod)'
-        )
-        res = {}
-        for pod in util:
-            pod_name = pod["metric"]["pod"]
-            value = pod["value"][1]
-            res[pod_name] = float(value)
-        return res
-
-    def network_transmit_bytes(self) -> dict:
-        util = self.prom.get_current_metric_value(
-            "sum (rate (node_network_transmit_bytes_total{}[10m])) by (node)"
-        )
-        res = {}
-        for node in util:
-            node_name = node["metric"]["node"]
-            node_util = float(node["value"][1])
-            res[node_name] = node_util
-        return res
-
-    def network_transmit_per_pod_bytes(self):
-        util = self.prom.get_current_metric_value(
-            'sum (rate (container_network_transmit_bytes_total{pod!=""}[10m])) by (pod)'
-        )
-        res = {}
-        for pod in util:
-            pod_name = pod["metric"]["pod"]
-            value = pod["value"][1]
-            res[pod_name] = float(value)
-        return res
-
-    def disk_total_bytes(self) -> dict:
-        util = self.prom.get_current_metric_value(
-            'sum(avg (node_filesystem_size_bytes{mountpoint!="/boot", '
-            'fstype!="tmpfs"}) without (mountpoint)) by (node)'
-        )
-        res = {}
-        for node in util:
-            node_name = node["metric"]["node"]
-            node_util = float(node["value"][1])
-            res[node_name] = node_util
-        return res
-
-    def disk_free_bytes(self) -> dict:
-        util = self.prom.get_current_metric_value(
-            'sum(avg (node_filesystem_free_bytes{mountpoint!="/boot", '
-            'fstype!="tmpfs"}) without (mountpoint)) by (node)'
-        )
-        res = {}
-        for node in util:
-            node_name = node["metric"]["node"]
-            node_util = float(node["value"][1])
-            res[node_name] = node_util
-        return res
-
-    def pod_info(self) -> dict:
-        util = self.prom.get_current_metric_value("kube_pod_info")
-        res = {}
-        for pod in util:
-            pod_name = pod["metric"]["pod"]
-            res[pod_name] = pod["metric"]
-
-        cpu = self.cpu_utilization_per_pod_cores()
-        for pod in util:
-            pod_name = pod["metric"]["pod"]
-            pod_cpu = cpu.get(pod_name, 0)
-            res[pod_name]["cpu_utilization_cores"] = pod_cpu
-
-        mem = self.mem_utilization_per_pod_bytes()
-        for pod in util:
-            pod_name = pod["metric"]["pod"]
-            pod_mem = mem.get(pod_name, 0)
-            res[pod_name]["mem_utilization_bytes"] = pod_mem
-
-        recv = self.network_receive_per_pod_bytes()
-        for pod in util:
-            pod_name = pod["metric"]["pod"]
-            pod_recv = recv.get(pod_name, 0)
-            res[pod_name]["network_recv_bytes"] = pod_recv
-
-        tran = self.network_transmit_per_pod_bytes()
-        for pod in util:
-            pod_name = pod["metric"]["pod"]
-            pod_tran = tran.get(pod_name, 0)
-            res[pod_name]["network_tran_bytes"] = pod_tran
-
-        return res
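Note: the removed Promon class was a thin wrapper over prometheus_api_client's PrometheusConnect, with each method returning a per-node or per-pod dict. A minimal usage sketch follows; the Prometheus URL is an assumption for illustration.

    # Hypothetical usage of the removed Promon wrapper.
    promon = Promon("http://localhost:9090")  # assumed port-forwarded Prometheus

    # Per-pod CPU usage in cores, as returned by cpu_utilization_per_pod_cores()
    for pod, cores in promon.cpu_utilization_per_pod_cores().items():
        print(f"{pod}: {cores:.3f} cores")

    # Per-node memory utilization in percent
    print(promon.mem_utilization_percentage())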
jaseci/svc/mail/__init__.py
DELETED
jaseci/svc/mail/config.py
DELETED
@@ -1,25 +0,0 @@
-MAIL_CONFIG = {
-    "enabled": False,
-    "quiet": True,
-    "version": 2,
-    "tls": True,
-    "host": "",
-    "port": 587,
-    "sender": "",
-    "user": "",
-    "pass": "",
-    "backend": "smtp",
-    "templates": {
-        "activation_subj": "Please activate your account!",
-        "activation_body": "Thank you for creating an account!\n\n"
-        "Activation Code: {{code}}\n"
-        "Please click below to activate:\n{{link}}",
-        "activation_html_body": "Thank you for creating an account!<br><br>"
-        "Activation Code: {{code}}<br>"
-        "Please click below to activate:<br>"
-        "{{link}}",
-        "resetpass_subj": "Password Reset for Jaseci Account",
-        "resetpass_body": "Your Jaseci password reset token is: {{token}}",
-        "resetpass_html_body": "Your Jaseci password reset" "token is: {{token}}",
-    },
-}
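Note: the removed MAIL_CONFIG templates carry {{code}}, {{link}}, and {{token}} placeholders. Purely as a hypothetical illustration of filling such a template (the mail service's actual rendering code is not shown in this diff):

    # Hypothetical placeholder substitution for the activation template.
    body = MAIL_CONFIG["templates"]["activation_body"]
    rendered = body.replace("{{code}}", "123456").replace(
        "{{link}}", "https://example.com/activate"  # illustrative values only
    )
    print(rendered)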
jaseci/svc/meta.py
DELETED
@@ -1,164 +0,0 @@
-import signal
-from jaseci.utils.utils import logger
-from jaseci.svc import (
-    CommonService,
-    JsOrc,
-    MetaProperties,
-    MailService,
-    RedisService,
-    TaskService,
-    StripeService,
-    PrometheusService,
-    ElasticService,
-    ServiceState as Ss,
-)
-
-
-class MetaService(CommonService, MetaProperties):
-    def __init__(self, run_svcs=True):
-        self.run_svcs = run_svcs
-        MetaProperties.__init__(self, __class__)
-
-        self.start()
-
-    ###################################################
-    #                     BUILDER                     #
-    ###################################################
-
-    def run(self, hook=None):
-        self.app = JsOrc(self)
-        self.populate_context()
-        self.populate_services()
-
-    def post_run(self, hook=None):
-        if self.run_svcs:
-            self.app.build()
-            if self.is_automated():
-                logger.info("JsOrc is automated. Pushing interval check alarm...")
-                self.push_interval(1)
-            else:
-                logger.info("JsOrc is not automated.")
-
-    def push_interval(self, interval):
-        if self.running_interval == 0:
-            self.running_interval += 1
-            signal.alarm(interval)
-        else:
-            logger.info("Reusing current running interval...")
-
-    ###################################################
-    #                    SERVICES                     #
-    ###################################################
-
-    def add_service_builder(self, name, svc):
-        self.app.add_service_builder(name, svc)
-
-    def build_service(self, name, background, *args, **kwargs):
-        return self.app.build_service(name, background, *args, **kwargs)
-
-    def get_service(self, name, *args, **kwargs):
-        return self.app.get_service(name, *args, **kwargs)
-
-    ###################################################
-    #                     CONTEXT                     #
-    ###################################################
-
-    def add_context(self, ctx, cls, *args, **kwargs):
-        self.app.add_context(ctx, cls, *args, **kwargs)
-
-    def build_context(self, ctx, *args, **kwargs):
-        return self.app.build_context(ctx, *args, **kwargs)
-
-    def get_context(self, ctx):
-        return self.app.context.get(ctx, {}).get("class")
-
-    ###################################################
-    #                      COMMON                     #
-    ###################################################
-
-    def is_automated(self):
-        return self.is_running() and self.app and self.app.automated
-
-    def in_cluster(self):
-        return self.app.in_cluster()
-
-    ###################################################
-    #                     BUILDER                     #
-    ###################################################
-
-    def build_hook(self):
-        h = self.build_context("hook")
-        h.meta = self
-        if self.run_svcs:
-            h.promon = self.get_service("promon", h)
-            h.redis = self.get_service("redis", h)
-            h.task = self.get_service("task", h)
-            h.mail = self.get_service("mail", h)
-            h.elastic = self.get_service("elastic", h)
-            h.stripe = self.get_service("stripe", h)
-
-            if not self.is_automated():
-                h.mail.start(h)
-                h.redis.start(h)
-                h.task.start(h)
-                h.elastic.start(h)
-                h.stripe.start(h)
-
-        return h
-
-    def build_master(self, *args, **kwargs):
-        return self.__common("master", *args, **kwargs)
-
-    def build_super_master(self, *args, **kwargs):
-        return self.__common("super_master", *args, **kwargs)
-
-    def __common(self, ctx, *args, **kwargs):
-        if not kwargs.get("h", None):
-            kwargs["h"] = self.build_hook()
-
-        return self.build_context(ctx, *args, **kwargs)
-
-    ###################################################
-    #                    OVERRIDEN                    #
-    ###################################################
-
-    def reset(self, hook=None, start=True):
-        self.terminate_daemon("jsorc")
-        self.app = None
-        self.state = Ss.NOT_STARTED
-        self.__init__()
-
-    def populate_context(self):
-        from jaseci.hook import RedisHook
-        from jaseci.element.master import Master
-        from jaseci.element.super_master import SuperMaster
-
-        self.add_context("hook", RedisHook)
-        self.add_context("master", Master)
-        self.add_context("super_master", SuperMaster)
-
-    def populate_services(self):
-        self.add_service_builder("redis", RedisService)
-        self.add_service_builder("task", TaskService)
-        self.add_service_builder("mail", MailService)
-        self.add_service_builder("promon", PrometheusService)
-        self.add_service_builder("elastic", ElasticService)
-        self.add_service_builder("stripe", StripeService)
-
-
-def interval_check(signum, frame):
-    meta = MetaService()
-    if meta.is_automated():
-        meta.app.interval_check()
-        logger.info(
-            f"Backing off for {meta.app.backoff_interval} seconds before the next interval check..."
-        )
-
-        # wait interval_check to be finished before decrement
-        meta.running_interval -= 1
-        meta.push_interval(meta.app.backoff_interval)
-    else:
-        meta.running_interval -= 1
-
-
-signal.signal(signal.SIGALRM, interval_check)
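Note: the removed MetaService drove JsOrc's periodic checks with SIGALRM: push_interval() armed signal.alarm(), and the interval_check handler re-armed it after each run. A standalone sketch of that pattern using only the standard library follows; the names and intervals here are illustrative, not the service's.

    # Simplified illustration of the SIGALRM-based interval pattern (Unix only).
    import signal
    import time

    def on_alarm(signum, frame):
        print("interval check fired")
        signal.alarm(5)  # re-arm for the next check, as push_interval() did

    signal.signal(signal.SIGALRM, on_alarm)
    signal.alarm(1)   # first check after one second
    time.sleep(12)    # main work would happen here; the alarms interrupt it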
jaseci/svc/postgres/__init__.py
DELETED
File without changes