remote-run-everything 2.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,16 @@
1
+ from remote_run_everything.deploy.by_http import ByHttp
2
+ from remote_run_everything.deploy.by_http_server import ByHttpServer, cherrypy_in_daemon
3
+
4
+ from remote_run_everything.db.crude_duck import CrudeDuck
5
+ from remote_run_everything.db.crud_sqlalchemy import Crud
6
+ from remote_run_everything.db.kv_store import KvStore
7
+ from remote_run_everything.db.backup import BackUp
8
+
9
+ from remote_run_everything.tools.common import Common
10
+ from remote_run_everything.tools.sqlacodegen_go_struct import Sql2go
11
+ from remote_run_everything.tools.decorators import cache_by_1starg, cache_by_name, cache_by_rkey,cache_by_nth_arg
12
+
13
+ from remote_run_everything.nosql.no_sql import Nosql
14
+ from remote_run_everything.nosql.no_sql_pg import NosqlPg
15
+ from remote_run_everything.nosql.no_sql_mysql import NosqlMysql
16
+ from remote_run_everything.vsconf.core import VsConf
File without changes
@@ -0,0 +1,38 @@
1
+ from bson import BSON, decode_all
2
+ import pymongo
3
+ import os, sys
4
+
5
+
6
class BackUp:
    """Dump and restore MongoDB databases as raw .bson files under a root directory."""

    def __init__(self, root):
        # root: directory that will hold one sub-directory per dumped database.
        self.root = root
        os.makedirs(self.root, exist_ok=True)

    def mongo_dump(self, cli, dbs):
        """Dump every collection of each database in *dbs* to <root>/<db>/<collection>.bson.

        cli: an open pymongo client; dbs: iterable of database names.
        """
        for dbname in dbs:
            db = cli[dbname]
            out_dir = os.path.join(self.root, dbname)
            os.makedirs(out_dir, exist_ok=True)
            for col_name in db.list_collection_names():
                source = db[col_name]
                # One concatenated BSON document stream per collection —
                # the same on-disk layout mongodump produces.
                with open(os.path.join(out_dir, f'{col_name}.bson'), 'wb+') as f:
                    for doc in source.find():
                        f.write(BSON.encode(doc))

    def mongo_restore(self, cli, dbs):
        """Load <root>/<db>/*.bson back into the corresponding collections.

        cli: an open pymongo client; dbs: iterable of database names.
        """
        for dbname in dbs:
            db = cli[dbname]
            in_dir = os.path.join(self.root, dbname)
            # Only consider .bson dumps; splitext keeps dots inside collection
            # names intact (the old split('.')[0] truncated such names and
            # picked up unrelated files in the directory).
            names = [os.path.splitext(fn)[0] for fn in os.listdir(in_dir)
                     if fn.endswith('.bson')]
            print(names)
            for name in names:
                with open(os.path.join(in_dir, f"{name}.bson"), 'rb') as f:
                    data = decode_all(f.read())
                if data:  # insert_many raises InvalidOperation on an empty list
                    db[name].insert_many(data)
@@ -0,0 +1,97 @@
1
+ import os, pymysql
2
+ from urllib.parse import quote_plus
3
+
4
+ from sqlalchemy import create_engine, select, update, and_, insert, delete
5
+
6
+
7
class Crud:
    """Small CRUD helpers built on SQLAlchemy 2.x Core (future-style engines).

    Every method takes the engine and the declarative model class explicitly,
    so one instance can serve many databases/tables.
    """

    def sqlite_engine(self, dbpath):
        """Create a sqlite engine for *dbpath*, creating parent dirs as needed."""
        parent = os.path.dirname(dbpath)
        if parent:  # makedirs('') raises FileNotFoundError for a bare filename
            os.makedirs(parent, exist_ok=True)
        url = f"sqlite:///{dbpath}"
        # check_same_thread=False: the engine may be used from several threads.
        return create_engine(url, future=True,
                             connect_args={'timeout': 30, 'check_same_thread': False})

    def pg_url(self, user, pwd, host, port, db):
        """PostgreSQL URL; user/password/host are percent-encoded."""
        return f"postgresql://{quote_plus(user)}:{quote_plus(pwd)}@{quote_plus(host)}:{port}/{db}"

    def pg_engine(self, user, pwd, host, port, db):
        """Pooled PostgreSQL engine (80 connections, unbounded overflow, 1h recycle)."""
        return create_engine(self.pg_url(user, pwd, host, port, db),
                             pool_recycle=3600, pool_size=80, max_overflow=-1,
                             echo=False, future=True)

    def mysql_url(self, user, pwd, host, port, db):
        """MySQL (pymysql driver) URL; user/password/host are percent-encoded."""
        return f"mysql+pymysql://{quote_plus(user)}:{quote_plus(pwd)}@{quote_plus(host)}:{port}/{db}"

    def mysql_engine(self, user, pwd, host, port, db):
        """Pooled MySQL engine (80 connections, unbounded overflow, 1h recycle)."""
        return create_engine(self.mysql_url(user, pwd, host, port, db),
                             pool_recycle=3600, pool_size=80, max_overflow=-1,
                             echo=False, future=True)

    def drop_table(self, engine, mod):
        """Drop the table mapped by *mod* (raises if it does not exist)."""
        mod.__table__.drop(engine)

    def create_table(self, engine, mod):
        """Create the table mapped by *mod* unless it already exists."""
        mod.__table__.create(engine, checkfirst=True)

    def exist_id(self, engine, mod, cond):
        """Return the first column (the id) of the first row matching *cond*, else None."""
        with engine.connect() as conn:
            return conn.scalar(select(mod).where(cond).limit(1))

    def table_columns(self, mod):
        """Column names of *mod*: annotation keys when present, else plain attrs."""
        if "__annotations__" in mod.__dict__:
            return mod.__dict__['__annotations__'].keys()
        return [name for name in mod.__dict__.keys() if not name.startswith("__")]

    def insert_many(self, engine, mod, l):
        """Insert a list of row dicts; keys that are not columns are dropped.

        Rows may have heterogeneous keys, hence one INSERT per row with a
        single commit at the end.
        """
        if not l:
            return
        cols = self.table_columns(mod)
        with engine.connect() as conn:
            for dic in l:
                row = {k: v for k, v in dic.items() if k in cols}
                conn.execute(insert(mod).values(row))
            conn.commit()

    def insert_one(self, engine, mod, dic):
        """Insert one row dict; keys that are not columns are dropped."""
        cols = self.table_columns(mod)
        row = {k: v for k, v in dic.items() if k in cols}
        with engine.connect() as conn:
            conn.execute(insert(mod).values(row))
            conn.commit()

    def update_by_id(self, engine, mod, id, dic):
        """Update the row whose primary key is *id* from the dict (unknown keys dropped)."""
        cols = self.table_columns(mod)
        row = {k: v for k, v in dic.items() if k in cols}
        with engine.connect() as conn:
            conn.execute(update(mod).where(mod.id == id).values(row))
            conn.commit()

    def upsert(self, engine, mod, cond, dic):
        """Update the first row matching *cond*, or insert *dic* when none matches."""
        id = self.exist_id(engine, mod, cond)
        if id is not None:
            self.update_by_id(engine, mod, id, dic)
            return
        self.insert_one(engine, mod, dic)

    def delete_by_id(self, engine, mod, id):
        """Delete the row whose primary key is *id* (no-op when absent)."""
        with engine.connect() as conn:
            conn.execute(delete(mod).where(mod.id == id))
            conn.commit()

    def delete(self, engine, mod, cond):
        """Delete the first row matching *cond*, when one exists."""
        id = self.exist_id(engine, mod, cond)
        if id is not None:
            self.delete_by_id(engine, mod, id)
@@ -0,0 +1,122 @@
1
+ import duckdb, os, arrow
2
+
3
+
4
class CrudeDuck:
    """Raw-SQL CRUD helpers on a DuckDB connection that is ATTACHed to a
    sqlite / postgres / mysql database via the matching DuckDB extension."""

    @staticmethod
    def _q(s):
        # Double single quotes so a value is safe inside a SQL string literal.
        return str(s).replace("'", "''")

    def install_sql_ext(self, dbpath):
        """Attach sqlite file *dbpath* (WAL mode) and return the duckdb connection."""
        parent = os.path.dirname(dbpath)
        if parent:  # makedirs('') raises FileNotFoundError for a bare filename
            os.makedirs(parent, exist_ok=True)
        con = duckdb.connect()
        con.install_extension("sqlite")
        con.load_extension("sqlite")
        con.sql(f"ATTACH '{dbpath}' AS db (TYPE sqlite,journal_mode wal);use db;")
        return con

    def install_pg_ext(self, user, pwd, host, port, dbname):
        """Attach a PostgreSQL database and return the duckdb connection."""
        sql = f'''ATTACH 'dbname={dbname} user={user}
        host={host} port={port} connect_timeout=10 password={pwd}'
        AS {dbname} (TYPE postgres);use {dbname};'''
        con = duckdb.connect()
        con.install_extension("postgres")
        con.load_extension("postgres")
        con.sql(sql)
        return con

    def install_mysql_ext(self, user, pwd, host, port, dbname):
        """Attach a MySQL database (as alias msqldb) and return the duckdb connection."""
        con = duckdb.connect()
        con.install_extension("mysql")
        con.load_extension("mysql")
        sql = f"ATTACH 'host={host} user={user} password={pwd} port={port} database={dbname}' AS msqldb (TYPE MYSQL);"
        con.sql(sql)
        con.sql(f"USE msqldb;")
        return con

    def scheme(self, con, db, table, dbtype):
        """Return {column_name: data_type} for *table* from information_schema.

        The schema name depends on the backend: mysql uses the db name,
        sqlite uses 'main', postgres uses 'public'.
        """
        if dbtype == "sqlite3":
            db = "main"
        elif dbtype == "pg":
            db = "public"
        # mysql (and unknown dbtypes) keep the caller-supplied db name.
        sql = f''' SELECT column_name, data_type FROM information_schema.columns
        WHERE table_schema = '{self._q(db)}' AND table_name = '{self._q(table)}';
        '''
        return {row[0]: row[1] for row in con.sql(sql).fetchall()}

    def max_id(self, con, table):
        """Return max(id) + 1 (i.e. the next free id); 0 for an empty table."""
        row = con.sql(f'select max(id) from {table}').fetchone()
        if row is None or row[0] is None:
            return 0
        return row[0] + 1

    def sql_from_ty(self, ty, v):
        """Render *v* as a SQL literal for column type *ty*; None when unsupported."""
        if v is None:
            return None
        ty = ty.upper()
        if ty in ['BIGINT', "TINYINT", "INTEGER", "BOOLEAN"]:
            return str(int(v))
        if ty in ["VARCHAR"]:
            # Escape embedded quotes: user text with apostrophes previously
            # produced broken/injectable SQL.
            return f"'{self._q(v)}'"
        if "TIMESTAMP" in ty:
            return f"'{arrow.get(v).format()[:19]}'"
        return None

    def dic2sql(self, data, scheme):
        """Turn one row dict into '(cols)', '(values)' fragments; ('','') if nothing matches."""
        rendered = {}
        for k, v in data.items():
            if k not in scheme:
                continue
            lit = self.sql_from_ty(scheme[k], v)
            if lit is not None:
                rendered[k] = lit
        if not rendered:
            return "", ""
        return f"({', '.join(rendered.keys())})", f"({', '.join(rendered.values())})"

    def list2sql(self, l, scheme):
        """Turn row dicts into '(cols)' and '(v…), (v…)' fragments for a multi-row INSERT.

        NOTE(review): all rows are assumed to render the same key set — the
        column list comes from the last non-empty row.
        """
        cols = ""
        rows = []
        for data in l:
            rendered = {}
            for k, v in data.items():
                if k not in scheme:
                    continue
                lit = self.sql_from_ty(scheme[k], v)
                if lit is not None:
                    rendered[k] = lit
            if not rendered:
                continue
            cols = ", ".join(rendered.keys())
            rows.append(f"({', '.join(rendered.values())})")
        # join instead of string concatenation: the old version left a trailing
        # comma, which made the INSERT built by insert_many invalid SQL.
        return f"({cols})", ", ".join(rows)

    def insert_many(self, con, db, table, data, dbtype=""):
        """Insert a list of row dicts into *table* using one multi-row INSERT."""
        sche = self.scheme(con, db, table, dbtype)
        cols, values = self.list2sql(data, sche)
        if values == "":
            return
        con.execute(f'insert into {table} {cols} values {values}')

    def insert_one(self, con, db, table, data, dbtype=""):
        """Insert a single row dict into *table*."""
        sche = self.scheme(con, db, table, dbtype)
        cols, values = self.dic2sql(data, sche)
        if values == "":
            return
        con.execute(f'insert into {table} {cols} values {values}')

    def drop_table(self, con, table):
        """Drop *table* when it exists."""
        con.execute(f"drop table if exists {table}")

    def delete_by_ids(self, con, table, ids):
        """Delete all rows whose id is in *ids* (assumed numeric)."""
        id_list = ','.join(str(i) for i in ids)
        con.execute(f"delete from {table} where id in ({id_list})")

    def delete_by_id(self, con, table, id):
        """Delete the row whose id equals *id* (assumed numeric)."""
        con.execute(f"delete from {table} where id = {id}")
@@ -0,0 +1,109 @@
1
+ import sqlite3, os, json, arrow
2
+
3
+ '''
4
+ kv = KvStore('test.db') # uses SQLite
5
+
6
+ print(len(kv)) # 0 item
7
+ kv['hello1'] = 'you1'
+ kv['hello2'] = 'you2'
+ kv['hello3'] = 'you3'
8
+
9
+ del kv['hello1']
10
+ print(len(kv)) # 2 items remaining
11
+ print('hello1' in kv) # False, it has just been deleted!
12
+
13
+ kv['hello3'] = 'newvalue' # redefine an already present key/value
14
+
15
+ print(kv.keys()) # ['hello2', 'hello3']
16
+ print(kv.values()) # ['you2', 'newvalue']
17
+ print(kv.items()) # [('hello2', 'you2'), ('hello3', 'newvalue')]
18
+
19
+ for k in kv:
20
+ print(k, kv[k])
21
+
22
+ kv.close()
23
+ '''
24
+
25
+
26
class KvStore(dict):
    """A persistent key/value store backed by a single-table sqlite database.

    Keys are stored as strings and values as JSON, so any JSON-serialisable
    value round-trips. Unlike a dict, a missing key reads back as None.
    """

    def __init__(self, filename=None):
        self.db_path = self.default_db_path(filename)
        # isolation_level=None -> autocommit: every statement is durable at once,
        # so the explicit commit() below is optional.
        self.conn = sqlite3.connect(self.db_path, isolation_level=None)
        self.conn.execute('pragma journal_mode=wal')
        self.conn.execute("CREATE TABLE IF NOT EXISTS kv (key text unique, value text)")

    def default_db_path(self, db_path):
        """Resolve *db_path*, falling back to a platform default; ensure its dir exists."""
        if db_path is None:
            # NOTE(review): hard-coded defaults; assumes these roots are writable.
            db_path = "D://wq/temp/decor.db" if os.name == 'nt' else "/data/temp/decor.db"
        parent = os.path.dirname(db_path)
        if parent:  # a bare filename like 'test.db' has no parent directory
            os.makedirs(parent, exist_ok=True)
        return db_path

    def close(self):
        """Close the underlying sqlite connection."""
        self.conn.close()

    def commit(self):
        """No-op in practice: the connection is in autocommit mode."""
        self.conn.commit()

    def __len__(self):
        rows = self.conn.execute('SELECT COUNT(*) FROM kv').fetchone()[0]
        return rows if rows is not None else 0

    def iterkeys(self):
        """Yield every key in the store."""
        c = self.conn.cursor()
        for row in c.execute('SELECT key FROM kv'):
            yield row[0]

    def itervalues(self):
        """Yield every value (raw JSON text) in the store."""
        c = self.conn.cursor()
        for row in c.execute('SELECT value FROM kv'):
            yield row[0]

    def iteritems(self):
        """Yield (key, raw JSON value) pairs."""
        c = self.conn.cursor()
        for row in c.execute('SELECT key, value FROM kv'):
            yield row[0], row[1]

    def keys(self):
        return list(self.iterkeys())

    def values(self):
        return list(self.itervalues())

    def items(self):
        return list(self.iteritems())

    def __contains__(self, key):
        key = str(key)
        return self.conn.execute('SELECT 1 FROM kv WHERE key = ?', (key,)).fetchone() is not None

    def __getitem__(self, key):
        # Returns None (not KeyError) for a missing key; read_with_ex relies on this.
        key = str(key)
        item = self.conn.execute('SELECT value FROM kv WHERE key = ?', (key,)).fetchone()
        if item is None:
            return None
        return json.loads(item[0])

    def __setitem__(self, key, value):
        # REPLACE = insert-or-overwrite on the unique key column.
        key = str(key)
        value = json.dumps(value)
        self.conn.execute('REPLACE INTO kv (key, value) VALUES (?,?)', (key, value))

    def __delitem__(self, key):
        key = str(key)
        if key in self:
            self.conn.execute('DELETE FROM kv WHERE key = ?', (key,))

    def __iter__(self):
        return self.iterkeys()

    def read_with_ex(self, key, ex):
        """Return a value stored by write_with_ex unless older than *ex* seconds.

        Expired entries are deleted and None is returned.
        """
        res = self.__getitem__(key)
        if res is None:
            return None
        if not (isinstance(res, dict) and "time" in res.keys()):
            return None
        dif = arrow.now() - arrow.get(res['time'])
        # total_seconds(): .seconds wraps at 24h, so day-old entries never expired.
        if dif.total_seconds() >= ex:
            self.__delitem__(key)
            return None
        return res['v']

    def write_with_ex(self, k, v):
        """Store *v* together with the current timestamp for later expiry checks."""
        self.__setitem__(k, {"v": v, "time": arrow.now().format()})
File without changes
@@ -0,0 +1,82 @@
1
+ import os.path
2
+
3
+ from remote_run_everything.db.crud_sqlalchemy import Crud
4
+ from remote_run_everything.deploy.record_mod import Up, Down
5
+ from remote_run_everything.db.crude_duck import CrudeDuck
6
+ from remote_run_everything.deploy.by_http_tool import ByHttpTool
7
+
8
+
9
class ByHttp:
    """Push/pull files between a local directory and a remote ByHttpServer,
    recording each transferred (path, mtime) pair in a local database so a
    given file version is moved only once."""

    def __init__(self, host, local, remote, dbpath):
        self.host = host        # base URL of the remote ByHttpServer
        self.local = local      # local root directory
        self.remote = remote    # remote root directory
        self.dbpath = dbpath    # sqlite file (.db); a directory on the Windows path
        self.t = ByHttpTool()

    @staticmethod
    def _q(s):
        # Double single quotes so values are safe inside the SQL string
        # literals built below — file paths routinely contain apostrophes
        # and previously produced broken SQL.
        return str(s).replace("'", "''")

    def down(self, disallow_keys=None):
        """Pull every remote file whose (path, time) is not yet recorded."""
        assert self.dbpath.endswith(".db"), "dbpath should be xxx.db"
        c = Crud()
        eg = c.sqlite_engine(self.dbpath)
        c.create_table(eg, Down)
        con = CrudeDuck().install_sql_ext(self.dbpath)
        remote_files = self.t.all_remote_path(self.host, self.remote, disallow_keys)
        add_l = []
        for dic in remote_files:
            path = dic['path']
            t = dic['time']
            sql = f"select * from down where path='{self._q(path)}' and time='{self._q(t)}' "
            if con.sql(sql).fetchone() is not None:
                continue
            print("down", dic)
            # A changed mtime supersedes any old record for this path.
            con.execute(f"delete from down where path='{self._q(path)}' ")
            self.t.pull(self.host, path, self.local, self.remote)
            add_l.append(dic)
        con.commit()
        c.insert_many(eg, Down, add_l)

    def up(self, disallow_keys=None):
        """Push every local file whose (path, time, host) is not yet recorded."""
        if os.name == "nt":
            return self.up_win(disallow_keys)
        assert self.dbpath.endswith(".db"), "dbpath should be xxx.db"
        c = Crud()
        eg = c.sqlite_engine(self.dbpath)
        c.create_table(eg, Up)
        con = CrudeDuck().install_sql_ext(self.dbpath)
        loc_files = self.t.all_local_path(self.local, disallow_keys)
        add_l = []
        host = self._q(self.host)
        for dic in loc_files:
            path = self._q(dic['path'])
            t = self._q(dic['time'])
            sql = f"select * from up where path='{path}' and time='{t}' and host='{host}' "
            if con.sql(sql).fetchone() is not None:
                continue
            print("up==", dic)
            # A changed mtime supersedes any old record for this path/host.
            con.execute(f"delete from up where path='{path}' and host='{host}'").commit()
            self.t.push(self.host, dic['path'], self.local, self.remote)
            dic['host'] = self.host
            add_l.append(dic)
        con.commit()
        c.insert_many(eg, Up, add_l)

    def up_win(self, disallow_keys=None):
        """Windows path: track uploads in an embedded mongo instead of sqlite."""
        from mongo_emb import PyMongoEmb
        if not os.path.exists(self.dbpath):
            os.makedirs(self.dbpath)
        assert os.path.isdir(self.dbpath), "dbpath should be dir"
        path = os.path.normpath(self.dbpath)
        db = PyMongoEmb(path)
        col = db['up']
        loc_files = self.t.all_local_path(self.local, disallow_keys)
        for dic in loc_files:
            dic['host'] = self.host
            # Already uploaded at this exact (time, path, host)?
            if col.find_one(dic) is not None:
                continue
            # Never upload the tracking database's own files.
            if os.path.normpath(os.path.dirname(dic['path'])) == path:
                continue
            self.t.push(self.host, dic['path'], self.local, self.remote)
            print("up==", dic)
            col.update_one({"host": self.host, "path": dic['path']},
                           {"$set": {"time": dic['time']}}, upsert=True)
@@ -0,0 +1,56 @@
1
+ import cherrypy
2
+ from remote_run_everything.deploy.by_http_tool import ByHttpTool
3
+ from remote_run_everything.tools.common import Common
4
+
5
+ from cherrypy.process.plugins import Daemonizer
6
+
7
+
8
def cherrypy_in_daemon(myapp, port, prefix):
    """Mount an instance of *myapp* at URL *prefix*, daemonize the process,
    and serve (blocking) on *port*."""
    conf = {
        "server.socket_port": port,
    }
    cherrypy.config.update(conf)
    Daemonizer(cherrypy.engine).subscribe()
    cherrypy.tree.mount(myapp(), prefix)
    cherrypy.engine.start()
    cherrypy.engine.block()
16
+
17
+
18
class ByHttpServer:
    """JSON endpoints consumed by ByHttpTool: read/write base64 file bodies
    and list a directory tree. Every response is {"status", "data"}."""

    def _respond(self, action):
        # Run *action* on the request's JSON body and wrap the outcome in the
        # common envelope; any failure is reported as status 'fail'.
        try:
            payload = action(cherrypy.request.json)
            return {"status": "ok", "data": payload}
        except Exception as e:
            return {"status": "fail", "data": str(e)}

    @cherrypy.expose
    @cherrypy.tools.json_out()
    @cherrypy.tools.json_in()
    def rb64(self):
        # Read the file at args['path'] and return it base64-encoded.
        return self._respond(lambda args: Common().readb64(args['path']))

    @cherrypy.expose
    @cherrypy.tools.json_out()
    @cherrypy.tools.json_in()
    def wb64(self):
        # Decode args['b64'] and write it to args['path']; echo the path back.
        def action(args):
            Common().writeb64(args['path'], args['b64'])
            return args['path']
        return self._respond(action)

    @cherrypy.expose
    @cherrypy.tools.json_out()
    @cherrypy.tools.json_in()
    def iterdir(self):
        # List files under args['root'], honouring optional disallow_keys.
        return self._respond(
            lambda args: ByHttpTool().all_local_path(args['root'],
                                                     args.get("disallow_keys", [])))
@@ -0,0 +1,64 @@
1
+ from remote_run_everything.tools.common import Common
2
+
3
+ import os, glob, arrow, requests
4
+
5
+
6
class ByHttpTool:
    """Client-side helpers for ByHttpServer: transfer files as base64 over
    HTTP and map paths between the local and remote roots."""

    def push(self, host, f, local, remote):
        """Upload local file *f* to the server, base64-encoded; return the JSON reply."""
        payload = {"b64": Common().readb64(f),
                   "path": self.loc2remote(f, local, remote)}
        return requests.post(f"{host}/wb64", json=payload).json()

    def pull(self, host, f, local, remote):
        """Download remote file *f*, write it under the local root, return the local path."""
        reply = requests.post(f"{host}/rb64", json={"path": f}).json()
        target = self.remote2loc(f, local, remote)
        Common().writeb64(target, reply['data'])
        return target

    def all_remote_path(self, host, root, disallow_keys=None):
        """List files under *root* on the server, filtered client-side."""
        reply = requests.post(f"{host}/iterdir", json={"root": root}).json()
        out = []
        for item in reply['data']:
            p = item['path']
            if p == "":
                continue
            if self.contain_disallow(p, disallow_keys):
                continue
            out.append({"path": p, "time": arrow.get(item['time']).format()})
        return out

    def contain_disallow(self, path, disallow_keys):
        """True when *path* contains any of the disallowed substrings."""
        if not disallow_keys:
            return False
        return any(key in path for key in disallow_keys)

    def all_local_path(self, root, disallow_keys=None):
        '''Server-side listing: every file under *root* with its formatted mtime.'''
        if not os.path.exists(root):
            return []
        out = []
        for p in glob.glob(f'{root}/**/*', recursive=True):
            if os.path.isdir(p):
                continue
            if self.contain_disallow(p, disallow_keys):
                continue
            mtime = os.path.getmtime(p)
            out.append({"path": p, "time": arrow.get(mtime).format()})
        return out

    def loc2remote(self, f, local, remote):
        """Map a path under the local root to its mirrored remote path."""
        return f"{remote}/{os.path.relpath(f, local)}"

    def remote2loc(self, f, local, remote):
        """Map a path under the remote root to its mirrored local path."""
        return f"{local}/{os.path.relpath(f, remote)}"
@@ -0,0 +1,26 @@
1
+ from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
2
+ import datetime
3
+
4
+ from sqlalchemy import Integer, DECIMAL, DateTime, Index, Integer, String, Column, Text
5
+ import json, arrow, sys, os
6
+
7
+
8
class Base(DeclarativeBase):
    """Declarative base shared by the transfer-record models (Up, Down)."""
    pass
10
+
11
+
12
class Up(Base):
    """Record of one uploaded file version: (host, path, time)."""
    __tablename__ = 'up'
    # NOTE(review): String(50) is short for hosts/paths; sqlite ignores the
    # length, but stricter backends would truncate or reject — confirm intent.
    id = Column(Integer, primary_key=True, autoincrement=True, nullable=False)
    host = Column(String(50), default="")  # target server base URL
    path = Column(String(50), default="")  # local file path
    time = Column(String(50), default="")  # file mtime as a formatted string
18
+
19
+
20
class Down(Base):
    """Record of one downloaded file version: (path, time)."""
    __tablename__ = 'down'
    # NOTE(review): String(50) is short for file paths; sqlite ignores the
    # length, but stricter backends would truncate or reject — confirm intent.
    id = Column(Integer, primary_key=True, autoincrement=True, nullable=False)
    path = Column(String(50), default="")  # remote file path
    time = Column(String(50), default="")  # file mtime as a formatted string
25
+
26
+
File without changes