ct-0.10.8.114-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cantools/__init__.py +24 -0
- cantools/_db.py +142 -0
- cantools/_memcache.py +76 -0
- cantools/_pay.py +46 -0
- cantools/admin.py +31 -0
- cantools/cfg.py +347 -0
- cantools/config.py +131 -0
- cantools/db/__init__.py +18 -0
- cantools/db/admin.py +27 -0
- cantools/db/gae/__init__.py +0 -0
- cantools/db/gae/model.py +127 -0
- cantools/db/gae/properties.py +35 -0
- cantools/db/wp.py +99 -0
- cantools/geo.py +188 -0
- cantools/hooks.py +13 -0
- cantools/scripts/__init__.py +0 -0
- cantools/scripts/bench.py +167 -0
- cantools/scripts/builder.py +272 -0
- cantools/scripts/deploy.py +154 -0
- cantools/scripts/doc.py +239 -0
- cantools/scripts/index.py +226 -0
- cantools/scripts/init.py +345 -0
- cantools/scripts/migrate.py +593 -0
- cantools/scripts/pubsub/__init__.py +28 -0
- cantools/scripts/pubsub/actor.py +13 -0
- cantools/scripts/pubsub/bots.py +143 -0
- cantools/scripts/pubsub/channel.py +85 -0
- cantools/scripts/pubsub/ps.py +145 -0
- cantools/scripts/pubsub/user.py +51 -0
- cantools/scripts/start.py +53 -0
- cantools/scripts/util.py +24 -0
- cantools/util/__init__.py +78 -0
- cantools/util/admin.py +620 -0
- cantools/util/data.py +109 -0
- cantools/util/media.py +303 -0
- cantools/util/package.py +125 -0
- cantools/util/system.py +73 -0
- cantools/web/__init__.py +9 -0
- cantools/web/dez_server/__init__.py +1 -0
- cantools/web/dez_server/controller.py +129 -0
- cantools/web/dez_server/cron.py +115 -0
- cantools/web/dez_server/daemons.py +64 -0
- cantools/web/dez_server/mail.py +24 -0
- cantools/web/dez_server/response.py +63 -0
- cantools/web/dez_server/routes.py +21 -0
- cantools/web/dez_server/server.py +229 -0
- cantools/web/dez_server/sms.py +12 -0
- cantools/web/gae_server.py +68 -0
- cantools/web/util.py +552 -0
- ct-0.10.8.114.dist-info/LICENSE +9 -0
- ct-0.10.8.114.dist-info/METADATA +25 -0
- ct-0.10.8.114.dist-info/RECORD +55 -0
- ct-0.10.8.114.dist-info/WHEEL +5 -0
- ct-0.10.8.114.dist-info/entry_points.txt +10 -0
- ct-0.10.8.114.dist-info/top_level.txt +1 -0
cantools/scripts/migrate.py
@@ -0,0 +1,593 @@
+"""
+### Usage: ctmigrate [load|dump|blobdiff|snap|accounts|deps|pack|unpack|owners|finish|install|profile] [--domain=DOMAIN] [--port=PORT] [--filename=FILENAME] [--skip=SKIP] [--tables=TABLES] [--cutoff=CUTOFF] [-nr]
+
+### Options:
+    -h, --help            show this help message and exit
+    -d DOMAIN, --domain=DOMAIN
+                          domain of target server (default: localhost)
+    -p PORT, --port=PORT  port of target server (default: 8080)
+    -c CUTOFF, --cutoff=CUTOFF
+                          blobdiff cutoff - number to start after (default: 0)
+    -f FILENAME, --filename=FILENAME
+                          name of sqlite data file for dumping/loading to/from
+                          (default: dump.db)
+    -s SKIP, --skip=SKIP  don't dump these tables - use '|' as separator, such
+                          as 'table1|table2|table3' (default: none)
+    -t TABLES, --tables=TABLES
+                          dump these tables - use '|' as separator, such as
+                          'table1|table2|table3' (default: all)
+    -n, --no_binary       disable binary download
+    -r, --dry_run         accounts/deps/pack/unpack/owners/finish/install/profile
+                          dry run
+"""
+
+import os, getpass, datetime
+from optparse import OptionParser
+from databae.util import blobify
+from fyg.util import log, error, write, confirm
+from cantools import db
+from cantools.util import mkdir, cmd, output, sym
+from cantools.util.admin import install, snapinstall, simplecfg, enc, dec, qdec, phpver, running, servicer, mysqlcheck, zipit
+
+LIMIT = 500
+
+def load(host, port, session, filters={}, protocol="http", tables=None):
+    pw = getpass.getpass("admin password? ")
+    if port == 443:
+        protocol = "https"
+    log("loading database into %s://%s:%s"%(protocol, host, port), important=True)
+    for model in (tables or db.get_schema()):
+        load_model(model, host, port, session, filters=filters, protocol=protocol, pw=pw)
+    log("finished loading data from sqlite dump file")
+
+def load_model(model, host, port, session=None, filters={}, protocol="http", pw=None, action="put", blobifier=None):
+    from cantools.web import post
+    # log("retrieving %s entities"%(model,), important=True) # too spammy
+    session = session or db.seshman.get()
+    mod = db.get_model(model)
+    def push(data):
+        log(post(host, "/_db", port, {
+            "pw": pw,
+            "action": action,
+            "data": data,
+            "blobifier": blobifier
+        }, protocol=protocol, ctjson=True))
+    offset = 0
+    while 1:
+        chunk = db.get_page(model, LIMIT, offset, filters=filters, session=session)
+        clen = len(chunk)
+        if clen:
+            if action == "edit":
+                for item in chunk:
+                    push(item)
+            else:
+                push(chunk)
+        offset += clen
+        log("processed %s %s records"%(offset, model), 1)
+        if clen < LIMIT:
+            return offset
+
+keys = {}
+missing = {}
+blobs = []
+realz = set()
+indices = set()
+
+def fixmissing(orig):
+    if orig in missing and orig in keys:
+        for d in missing[orig]:
+            for k, v in list(d.items()):
+                if type(v) is list:
+                    for i in range(len(v)):
+                        if v[i] == orig:
+                            v[i] = keys[orig]
+                elif v == orig:
+                    d[k] = keys[orig]
+        del missing[orig]
+
+def _missing(k, d):
+    if k not in missing:
+        missing[k] = []
+    missing[k].append(d)
+
+def _fix_or_miss(d, prop):
+    dk = d[prop]
+    if type(dk) is list:
+        for i in range(len(dk)):
+            k = dk[i]
+            if k in keys:
+                dk[i] = keys[k]
+            else:
+                _missing(k, d)
+    elif dk:
+        if dk in keys:
+            d[prop] = keys[dk]
+        else:
+            _missing(dk, d)
+
+def fixkeys(d, schema):
+    if "ctkey" in d:
+        orig = d["gaekey"] = d["key"]
+        old = d["oldkey"]
+        d["key"] = keys[orig] = keys[old] = d["ctkey"]
+        realz.add(d["key"])
+        fixmissing(orig)
+        fixmissing(old)
+    if d["index"] in indices:
+        error("duplicate index! (%s)"%(d["index"],),
+            "try running 'ctindex -m index' ;)")
+    indices.add(d["index"])
+    for prop in schema["_kinds"]:
+        _fix_or_miss(d, prop)
+
+def delmissing(badkey):
+    log("deleting references to %s"%(badkey,), important=True)
+    for d in missing[badkey]:
+        log("purging %s"%(d,), important=True)
+        for k, v in list(d.items()):
+            if type(v) is list:
+                d[k] = [val for val in v if val != badkey]
+            elif v == badkey:
+                d[k] = None
+    del missing[badkey]
+
+def prune():
+    if missing:
+        mlen = len(list(missing.keys()))
+        log("pruning %s missing keys"%(mlen,), important=True)
+        log("searching for matches")
+        for oldkey in list(missing.keys()):
+            fixmissing(oldkey)
+        newlen = len(list(missing.keys()))
+        log("matched %s - %s left over"%(mlen - newlen, newlen))
+        log("deleting stragglers")
+        for oldkey in list(missing.keys()):
+            delmissing(oldkey)
+
+def checkblobs(d, schema):
+    for key, prop in list(schema.items()):
+        if prop == "blob" and d[key]:
+            blobs.append(d)
+            break
+
+def blobificator(host=None, port=None, dpref=None):
+    dpref = dpref or "%s://%s:%s"%((port == 443) and "https" or "http",
+        host, port)
+    return dpref + "/_db?action=blob&key=%s&property=%s"
+
+def getblobs(host, port):
+    log("retrieving binaries stored on %s records"%(len(blobs),), important=True)
+    blobifier = blobificator(host, port)
+    for d in blobs:
+        blobify(d, blobifier)
+
+def dump(host, port, session, binary, skip=[], tables=None):
+    from cantools.web import fetch
+    log("dumping database at %s:%s"%(host, port), important=True)
+    mods = {}
+    schemas = db.get_schema()
+    for model in (tables or schemas):
+        if model in skip:
+            log("skipping %s entities"%(model,), important=True)
+            continue
+        log("retrieving %s entities"%(model,), important=True)
+        schema = schemas[model]
+        mods[model] = []
+        offset = 0
+        while 1:
+            chunk = fetch(host, port=port, ctjson=True,
+                path="/_db?action=get&modelName=%s&offset=%s&limit=%s"%(model, offset, LIMIT),
+                protocol = (port == 443) and "https" or "http")
+            for c in chunk:
+                fixkeys(c, schema)
+                checkblobs(c, schema)
+            mods[model] += [c for c in chunk if c["modelName"] == model]
+            offset += LIMIT
+            if len(chunk) < LIMIT:
+                break
+            log("got %s %s records"%(offset, model), 1)
+        log("found %s %s records"%(len(mods[model]), model))
+    log("%s unmatched keys!"%(len(list(missing.keys())),), important=True)
+    prune()
+    if binary and blobs:
+        getblobs(host, port)
+    puts = []
+    log("building models", important=True)
+    for model in mods:
+        mod = db.get_model(model)
+        puts += [mod(**db.dprep(c)) for c in mods[model]]
+    log("saving %s records to sqlite dump file"%(len(puts),))
+    db.put_multi(puts, session=session, preserve_timestamps=True)
+
+def dumpit(user, dname=None):
+    dname = dname or "dbz.sql"
+    log("dumping databases to %s"%(dname,), important=True)
+    cmd("mysqldump -u %s -p --all-databases > %s"%(user, dname))
+
+def undumpit(user, dname=None, noautocommit=True):
+    dname = dname or "dbz.sql"
+    log("undumping databases from %s"%(dname,), important=True)
+    if noautocommit:
+        cmd('mysql -u %s -p -e "SET autocommit=0; SOURCE %s; COMMIT;"'%(user, dname))
+    else:
+        cmd("mysql -u %s -p < %s"%(user, dname))
+
+def blobdiff(cutoff):
+    bz = os.listdir("blob")
+    mkdir("blobdiff")
+    for b in bz:
+        if int(b) > cutoff:
+            cmd("cp blob/%s blobdiff/%s"%(b, b))
+    zipit("blobdiff", remove=True)
+
+def projpath():
+    path = os.path.join(*os.path.abspath(".").rsplit("/", 2)[1:])
+    return input("what's the project's path? [default: %s] "%(path,)) or path
+
+def scp(user, domain, path, keyfile=None, isdir=False):
+    line = ["scp"]
+    isdir and line.append("-r")
+    keyfile and line.append("-i %s"%(keyfile,))
+    line.append("%s@%s:%s ."%(user, domain, path))
+    cmd(" ".join(line))
+
+def askArch(path):
+    if not os.path.exists(path):
+        return
+    if not confirm("%s exists - should i archive it"%(path,), True):
+        return log("ok, i'm overwriting it!", important=True)
+    newpath = "%s%s"%(path, str(datetime.datetime.now()).split(" ").pop(0))
+    log("ok, i'm moving it to %s"%(newpath,))
+    os.rename(path, newpath)
+
+def askGet(path, user, domain, basepath, projpath, keyfile=None, isdir=False):
+    if not confirm("should i get %s"%(path,), True):
+        return
+    askArch(path)
+    scp(user, domain, "/%s/%s/%s"%(basepath, projpath, path), keyfile, isdir)
+
+def askBasePath(user):
+    if user == "root":
+        basepath = "root"
+    else:
+        basepath = "home/%s"%(user,)
+    return input("what's the base path? [default: %s] "%(basepath,)) or basepath
+
+def doGets(user, domain, projpath, keyfile):
+    basepath = askBasePath(user)
+    askGet("data.db", user, domain, basepath, projpath, keyfile)
+    askGet("blob", user, domain, basepath, projpath, keyfile, True)
+    askGet("pack.zip", user, domain, basepath, projpath, keyfile)
+    otherPath = input("anything else? [default: nah] ")
+    while otherPath:
+        askGet(otherPath, user, domain, basepath, projpath, keyfile,
+            input("is that a directory? [default: nope] "))
+        otherPath = input("anything else? [default: nah] ")
+
+def snap(domain):
+    if domain == "ask":
+        domain = input("what's the snap domain? ")
+    doGets(input("what's the user? [default: root]: ") or "root",
+        domain, projpath(), input("what's the key file? [default: none] "))
+
+def drylog(cfg):
+    for variety in cfg:
+        for line in cfg[variety]:
+            log("%s: %s"%(variety, line), 1)
+
+def prodeps(pman):
+    cfg = simplecfg("%s.profile"%(pman,))
+    bcfg = cfg and cfg.get("basic")
+    if not bcfg:
+        return
+    if pman == "apt": # else snap or snap classic
+        return install(*bcfg)
+    isclas = pman == "clasnap"
+    for pkg in bcfg:
+        snapinstall(pkg, isclas)
+
+def deps(dryrun=False):
+    cfg = simplecfg("deps.cfg")
+    if not cfg: return
+    log("installing dependencies", important=True)
+    if dryrun:
+        return drylog(cfg)
+    if "basic" in cfg:
+        install(*cfg["basic"])
+    if "snap" in cfg:
+        for pkg in cfg["snap"]:
+            snapinstall(pkg)
+    if "clasnap" in cfg:
+        for pkg in cfg["clasnap"]:
+            snapinstall(pkg, True)
+    if "pro" in cfg:
+        for pman in cfg["pro"]:
+            prodeps(pman)
+
+def usergroup(cfg, recursive=True, nobasic=False):
+    chowner = "chown"
+    if recursive:
+        chowner = "%s -R"%(chowner,)
+    if not nobasic and "basic" in cfg:
+        for oline in cfg["basic"]:
+            o, p = oline.split("@")
+            cmd("%s %s:%s %s"%(chowner, o, o, p), sudo=True)
+    if "user" in cfg:
+        for oline in cfg["user"]:
+            o, p = oline.split("@")
+            cmd("%s %s: %s"%(chowner, o, p), sudo=True)
+    if "group" in cfg:
+        for oline in cfg["group"]:
+            o, p = oline.split("@")
+            cmd("%s :%s %s"%(chowner, o, p), sudo=True)
+
+def accounts(dryrun=False):
+    cfg = simplecfg("accounts.cfg")
+    if not cfg: return
+    log("setting up accounts", important=True)
+    if dryrun:
+        return drylog(cfg)
+    if "basic" in cfg: # system-level
+        for uline in cfg["basic"]:
+            u, p = uline.split("@")
+            p = qdec(p, asdata=True)
+            cmd('useradd -m -p "%s" %s'%(p, u), sudo=True)
+    if "mysql" in cfg:
+        for uline in cfg["mysql"]:
+            log("mysql account creation unimplemented: %s"%(uline,))
+    usergroup(cfg, nobasic=True) # for pre-unpack permissions
+
+def owners(dryrun=False, recursive=True):
+    cfg = simplecfg("owners.cfg")
+    if not cfg: return
+    log("assigning owners", important=True)
+    if dryrun:
+        return drylog(cfg)
+    usergroup(cfg, recursive=recursive)
+
+packs = ["mysql", "basic", "multi", "zip", "rephp", "sym", "crontab"]
+
+class Packer(object):
+    def __init__(self, dryrun=False):
+        self.index = 0
+        self.dryrun = dryrun
+        self.cfg = simplecfg("pack.cfg")
+
+    def log(self, msg, level=0, important=False):
+        log("Packer: %s"%(msg,), level, important)
+
+    def confset(self, name):
+        if name in self.cfg:
+            return self.cfg[name]
+        self.log("no %s items"%(name,))
+        return []
+
+    def proc(self, name, reverse=False):
+        funame = reverse and "un%s"%(name,) or name
+        preposition = reverse and "to" or "from"
+        fun = getattr(self, funame)
+        for fname in self.confset(name):
+            self.log("%s %s %s %s"%(funame, self.index, preposition, fname))
+            self.dryrun or fun(fname, str(self.index))
+            self.index += 1
+
+    def basic(self, fname, oname):
+        enc(fname, oname)
+
+    def unbasic(self, fname, oname):
+        dec(oname, fname)
+
+    def multi(self, fline, oname):
+        enc(fline.split("|").pop(0), oname)
+
+    def unmulti(self, fline, oname):
+        for fname in fline.split("|"):
+            dec(oname, fname)
+
+    def zip(self, fname, oname):
+        if "/" in fname:
+            jumpzip(fname, oname, keepsyms="ask")
+        else:
+            zipit(fname, oname, keepsyms="ask")
+
+    def unzip(self, fname, oname):
+        cmd("unzip %s -d %s"%(oname, fname.rsplit("/", 1).pop(0)))
+
+    def crontab(self, nothing, oname):
+        enc(output("crontab -l"), oname, asdata=True)
+
+    def uncrontab(self, nothing, oname):
+        cmd("ctutil admin qdec %s | crontab -"%(oname,))
+
+    def mysql(self, uname, oname):
+        dumpit(uname or "root", oname)
+
+    def unmysql(self, uname, oname):
+        uname = uname or "root"
+        hostname = "localhost"
+        if "@" in uname:
+            uname, hostname = uname.split("@")
+        mysqlcheck(uname, hostname, "ask")
+        undumpit(uname, oname)
+
+    def sym(self, fline, oname):
+        self.log("sym(%s->%s) - noop"%(fline, oname))
+
+    def unsym(self, fline, oname):
+        src, dest = fline.split("@")
+        sym(src, dest)
+
+    def rephp(self, fname, oname):
+        enc(fname, oname, replace=("php%s"%(phpver(),), "php_PVER_"))
+
+    def unrephp(self, fname, oname):
+        dec(oname, fname, replace=("php_PVER_", "php%s"%(phpver(),)))
+
+    def pack(self):
+        if not self.cfg: return
+        self.log("packing", important=True)
+        mkdir("pack")
+        os.chdir("pack")
+        for psub in packs:
+            self.proc(psub)
+        os.chdir("..")
+        zipit("pack", remove=True)
+
+    def unpack(self):
+        if not self.cfg: return
+        self.log("unpacking", important=True)
+        cmd("unzip pack.zip")
+        os.chdir("pack")
+        for psub in packs:
+            self.proc(psub, True)
+        os.chdir("..")
+
+def pack(dryrun=False):
+    Packer(dryrun).pack()
+
+def unpack(dryrun=False):
+    Packer(dryrun).unpack()
+
+def dofrom(path, fun):
+    opath = os.path.abspath(".")
+    os.chdir(path)
+    fun()
+    os.chdir(opath)
+
+def jumpsnap(domain, path, grabPack=True):
+    dofrom(path, lambda : snap(domain))
+    grabPack and cmd("mv %s ."%(os.path.join(path, "pack.zip"),))
+
+def jumpzip(fline, oname, keepsyms=False):
+    fpath, fname = fline.rsplit("/", 1)
+    dofrom(fpath, lambda : zipit(fname, keepsyms=keepsyms))
+    cmd("mv %s.zip %s"%(fline, oname))
+
+def runcfg(name, cbs, dryrun=False):
+    cfg = simplecfg("%s.cfg"%(name,), True) or []
+    for step in cfg:
+        v = step["variety"]
+        line = step["line"]
+        if dryrun:
+            log("%s %s %s"%(name, v, line), 1)
+        elif v in cbs:
+            cbs[v](line)
+        elif v == "basic":
+            cmd(line)
+        else:
+            error("unrecognized mode ('%s') for line: %s"%(v, line))
+
+def certer(line):
+    cmd("certbot certonly --standalone -d %s"%(" -d ".join(line.split("|")),))
+
+def servset(line):
+    for service in line.split("|"):
+        servicer(service)
+
+def finish(dryrun=False):
+    log("finishing installation", important=True)
+    runcfg("finish", {
+        "cert": certer,
+        "services": servset
+    }, dryrun)
+    running("cron") or servicer("cron", "start", ask=True)
+
+def snapper(line):
+    if "@" in line:
+        jumpsnap(*line.split("@"))
+    else:
+        snap(line)
+
+def doinstall(dryrun=False):
+    log("running installation", important=True)
+    confirm("install dependencies", True) and deps(dryrun)
+    runcfg("install", { "snap": snapper }, dryrun)
+    confirm("setup accounts", True) and accounts(dryrun)
+    confirm("unpack pack", True) and unpack(dryrun)
+    confirm("update owners", True) and owners(dryrun)
+    if confirm("finish off installation"):
+        finish(dryrun)
+    else:
+        log("ok, deferring final steps - type 'ctmigrate finish' to complete the installation", important=True)
+        running("cron") and servicer("cron", "stop", ask=True)
+
+def prolog(pman, data, dryrun=False):
+    if not data:
+        log("no %s packages installed!"%(pman,), important=True)
+    elif dryrun:
+        log("%s profile:\n\n%s"%(pman, data))
+    else:
+        write(data, "%s.profile"%(pman,))
+
+def profile(dryrun=False):
+    log("system package profiler", important=True)
+    if confirm("profile aptitude", True):
+        ilist = output("apt list --installed").split("Listing...\n").pop().split("\n")
+        if not confirm("allow local packages"):
+            ilist = list(filter(lambda i : not i.endswith("local]"), ilist))
+        apro = "\n".join([line.split("/").pop(0) for line in ilist])
+        prolog("apt", apro, dryrun)
+    if confirm("profile snap", True):
+        snas = []
+        clas = []
+        for line in output("snap list").split("\n")[1:]:
+            name = line.split(" ").pop(0)
+            if "classic" in line:
+                clas.append(name)
+            else:
+                snas.append(name)
+        prolog("snap", "\n".join(snas), dryrun)
+        prolog("clasnap", "\n".join(clas), dryrun)
+
+MODES = { "load": load, "dump": dump, "blobdiff": blobdiff, "snap": snap, "accounts": accounts, "deps": deps, "pack": pack, "unpack": unpack, "owners": owners, "finish": finish, "install": doinstall, "profile": profile }
+
+def go():
+    parser = OptionParser("ctmigrate [load|dump|blobdiff|snap|accounts|deps|pack|unpack|owners|finish|install|profile] [--domain=DOMAIN] [--port=PORT] [--filename=FILENAME] [--skip=SKIP] [--tables=TABLES] [--cutoff=CUTOFF] [-nr]")
+    parser.add_option("-d", "--domain", dest="domain", default="localhost",
+        help="domain of target server (default: localhost)")
+    parser.add_option("-p", "--port", dest="port", default=8080,
+        help="port of target server (default: 8080)")
+    parser.add_option("-c", "--cutoff", dest="cutoff", default=0,
+        help="blobdiff cutoff - number to start after (default: 0)")
+    parser.add_option("-f", "--filename", dest="filename", default="dump.db",
+        help="name of sqlite data file for dumping/loading to/from (default: dump.db)")
+    parser.add_option("-s", "--skip", dest="skip", default="",
+        help="don't dump these tables - use '|' as separator, such as 'table1|table2|table3' (default: none)")
+    parser.add_option("-t", "--tables", dest="tables", default="",
+        help="dump these tables - use '|' as separator, such as 'table1|table2|table3' (default: all)")
+    parser.add_option("-n", "--no_binary", dest="binary", action="store_false",
+        default=True, help="disable binary download")
+    parser.add_option("-r", "--dry_run", dest="dryrun", action="store_true",
+        default=False, help="accounts/deps/pack/unpack/owners/finish/install/profile dry run")
+    options, args = parser.parse_args()
+    if not args:
+        error("no mode specified -- must be 'ctmigrate load' or 'ctmigrate dump'")
+    try:
+        import model # model loads schema
+    except:
+        log("no model found - proceeding without schema")
+    mode = args[0]
+    if mode in MODES:
+        if mode == "blobdiff":
+            blobdiff(int(options.cutoff))
+        elif mode == "snap":
+            snap(options.domain)
+        elif mode in ["accounts", "deps", "pack", "unpack", "owners", "finish", "install", "profile"]:
+            MODES[mode](options.dryrun)
+        else:
+            port = int(options.port)
+            session = db.Session("sqlite:///%s"%(options.filename,))
+            tabes = options.tables and options.tables.split("|")
+            if mode == "load":
+                load(options.domain, port, session, tables=tabes)
+            elif mode == "dump":
+                dump(options.domain, port, session, options.binary,
+                    options.skip and options.skip.split("|"), tabes)
+    else:
+        error("invalid mode specified ('%s')"%(mode,),
+            "must be 'ctmigrate snap' or 'ctmigrate load' or ctmigrate dump' or ctmigrate blobdiff'")
+    log("everything seems to have worked!")
+    log("goodbye")
+
+if __name__ == "__main__":
+    go()
cantools/scripts/pubsub/__init__.py
@@ -0,0 +1,28 @@
+"""
+### Usage: ctpubsub [-d domain] [-p port]
+
+### Options:
+    -h, --help            show this help message and exit
+    -d DOMAIN, --domain=DOMAIN
+                          use a specific domain (default: localhost)
+    -p PORT, --port=PORT  use a specific port (default: 8888)
+"""
+
+from cantools.util import init_rel
+from cantools import config
+from .bots import Bot
+
+def start(host=config.pubsub.host, port=config.pubsub.port):
+    init_rel()
+    from .ps import PubSub
+    PubSub(host, port).start()
+
+def get_addr_and_start():
+    from optparse import OptionParser
+    parser = OptionParser("ctpubsub [-d domain] [-p port]")
+    parser.add_option("-d", "--domain", dest="domain", default=config.pubsub.host,
+        help="use a specific domain (default: %s)"%(config.pubsub.host,))
+    parser.add_option("-p", "--port", dest="port", default=config.pubsub.port,
+        help="use a specific port (default: %s)"%(config.pubsub.port,))
+    options, arguments = parser.parse_args()
+    start(options.domain, int(options.port))
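
Besides the ctpubsub command described in the docstring (get_addr_and_start parses the same -d/-p options), start() can be invoked directly. A minimal sketch; the address here is illustrative rather than read from config.pubsub:

from cantools.scripts.pubsub import start

start(host="localhost", port=8888)   # runs the PubSub server on the given address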
cantools/scripts/pubsub/actor.py
@@ -0,0 +1,13 @@
+class Actor(object):
+    def data(self):
+        return {
+            "name": self.name,
+            "channels": [c.name for c in self.channels]
+        }
+
+    def join(self, channel):
+        self.channels.add(channel)
+
+    def leave(self, channel):
+        if channel in self.channels:
+            self.channels.remove(channel)