unisi 0.1.12__py3-none-any.whl → 0.1.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
unisi/kdb.py ADDED
@@ -0,0 +1,336 @@
+ import kuzu, shutil, os, re, time
+ from datetime import date, datetime
+ from cymple import QueryBuilder as qb
+ from cymple.typedefs import Properties
+ from .common import get_default_args, equal_dicts
+ from .dbelements import Dblist
+
+ def is_modifying_query(cypher_query):
+     query = cypher_query.lower()
+     modifying_pattern = r'\b(create|delete|detach\s+delete|set|merge|remove|call\s+\w+\s+yield|foreach)\b'
+     return re.search(modifying_pattern, query)
+
+ def query_offset(id, offset):
+     return qb().match().node(id, 'a').where('a.ID','=',offset)
+
+ def kuzu_data_type(value):
+     match value:
+         case bool():
+             return "BOOLEAN"
+         case int():
+             return "INT64"
+         case float():
+             return "DOUBLE"
+         case str():
+             return "STRING"
+         case datetime():
+             return "TIMESTAMP"
+         case date():
+             return "DATE"
+         case bytes():
+             return "BLOB"
+         case list() | tuple():
+             return "LIST"
+         case _:
+             return ""
+
+ number_types = ["DOUBLE", "INT64"]
+
+ def dict_to_cypher_set(properties, alias = 'a'):
+     set_items = []
+     for key, value in properties.items():
+         if isinstance(value, str):
+             set_items.append(f"{alias}.{key} = '{value}'")
+         else:
+             set_items.append(f"{alias}.{key} = {value}")
+     return "SET " + ", ".join(set_items)
+
+ class Database:
+     def __init__(self, dbpath, message_logger = print) -> None:
+         self.db = kuzu.Database(dbpath)
+         self.conn = kuzu.Connection(self.db)
+         self.message_logger = message_logger
+         self.table_params = get_default_args(self.get_table)
+
+     def execute(self, query_str, ignore_exception = False):
+         query_str = str(query_str)
+         """ if not query_str.endswith(';'):
+             query_str += ';'
+         print(query_str) """
+         try:
+             result = self.conn.execute(query_str)
+         except Exception as e:
+             if not ignore_exception:
+                 self.message_logger(e)
+             return None
+         return True if result is None else result
+
+     def delete(dir_path):
+         if os.path.exists(dir_path):
+             # Remove the directory and all its contents
+             shutil.rmtree(dir_path)
+
+     @property
+     def table_names(self):
+         return self.conn._get_node_table_names()
+
+     def get_table_fields(self, table_name, remove_id = True) -> None | dict:
+         result = self.qlist(f"CALL table_info('{table_name}') RETURN *;", ignore_exception = True)
+         if result is not None:
+             return {info[1]: info[2] for info in result if not remove_id or info[1] != 'ID'}
+
+     def delete_table(self, table_name):
+         return self.execute( f'DROP TABLE {table_name};')
+
+     def get_table(self, id = None, limit = 100, headers = None, rows = None, fields = None):
+         if id:
+             if rows and fields is None:
+                 if not headers:
+                     self.message_logger(f'headers are not defined!')
+                     return None
+                 types = [None] * len(headers)
+                 for row in rows:
+                     for j, cell in enumerate(row):
+                         if cell is not None:
+                             ktype = kuzu_data_type(cell)
+                             if ktype:
+                                 if types[j] is None:
+                                     types[j] = ktype
+                                 elif types[j] != ktype:
+                                     if types[j] in number_types and ktype in number_types:
+                                         types[j] = "DOUBLE"
+                                     else:
+                                         self.message_logger(f'Conflict types for {id} table in {j} column: {types[j], ktype}!')
+                                         return None
+                 if None in types:
+                     index = types.index(None)
+                     self.message_logger(f'Rows data doesnt contain allowed values for {headers[index]} column!')
+                     return None
+                 fields = {headers[i]: type for i, type in enumerate(types)}
+
+             if (table_fields := self.get_table_fields(id)) is not None:
+                 if not equal_dicts(table_fields, fields):
+                     if self.delete_table(id):
+                         self.message_logger(f'Node table {id} was deleted because of fields contradiction!', 'warning')
+                 else:
+                     return Dbtable(id, self, limit, table_fields)
+
+             return self.create_table(id, fields, limit, rows)
+
+     def get_table_params(self, params):
+         return {k: v for k, v in params.items() if k in self.table_params}
+
+     def set_db_list(self, gui_table):
+         table = self.get_table(**self.get_table_params(gui_table.__dict__))
+         tlst = table.list
+         gui_table.rows = tlst
+         if tlst.update['type'] != 'init':
+             tlst.update = dict(type ='init', length = table.length, limit = table.limit, data = tlst.get_delta_0())
+
+     def create_table(self, id, fields : dict, limit = 100, rows = None):
+         specs = ','.join(f'{prop} {type}' for prop, type in fields.items())
+         query = f"CREATE NODE TABLE {id}({specs},ID SERIAL, PRIMARY KEY(ID))"
+         self.execute(query)
+         table = Dbtable(id, self, limit, fields)
+         if rows:
+             table.list.extend(rows)
+         return table
+
+     def update_row(self, table_id, row_id, props, in_node = True):
+         set_props = dict_to_cypher_set(props)
+         query = f'MATCH (a: {table_id}) WHERE a.ID = {row_id} {set_props}' if in_node else\
+             f'MATCH ()-[a: {table_id}]->() WHERE a.ID = {row_id} {set_props}'
+         return self.execute(query)
+
+     def qlist(self, query, func = None, ignore_exception = False):
+         if answer := self.execute(query, ignore_exception):
+             result = []
+             while answer.has_next():
+                 value = answer.get_next()
+                 result.append(func(value) if func else value)
+             return result
+
+     def qiter(self, query, func = None, ignore_exception = False):
+         answer = self.execute(query, ignore_exception)
+         while answer.has_next():
+             value = answer.get_next()
+             yield func(value) if func else value
+
+ class Dbtable:
+     def __init__(self, id, db, limit = 100, table_fields = None) -> None:
+         self.db = db
+         self.id = id
+         self.table_fields = table_fields
+         self.limit = limit
+         self.node_columns = list(db.conn._get_node_property_names(id).keys())[:-1]
+         self.init_list()
+
+     @property
+     def rel_table_names(self):
+         return self.db.conn._get_rel_table_names()
+
+     def default_index_name2(self, link_table):
+         return f'{self.id}2{link_table}'
+
+     def calc_linked_rows(self, index_name, link_ids, include_rels = False, search = ''):
+         #condition = ' OR '.join(f'b.ID = {id}' for id in link_ids) #bug in IN op!
+         condition = f'b.ID in {link_ids}'
+         rel_info = ', r.*' if include_rels else ''
+         query = f"""
+             MATCH (a:{self.id})-[r:{index_name}]->(b:User)
+             WHERE {condition}
+             RETURN a.*{rel_info}
+             ORDER BY a.ID ASC
+         """
+         lst = self.db.qlist(query)
+         return Dblist(self, cache = lst)
+
+     def get_rel_fields2(self, tname, fields : dict = None, relname = None):
+         """return name of link table and fields and its fields dict"""
+         if not relname:
+             relname = self.default_index_name2(tname)
+         rel_table_fields = self.db.get_table_fields(relname)
+         if isinstance(rel_table_fields, dict):
+             if isinstance(fields, dict):
+                 if equal_dicts(rel_table_fields, fields):
+                     return relname, rel_table_fields
+                 else:
+                     self.db.delete_table(relname)
+             else:
+                 fields = rel_table_fields
+         elif fields is None:
+             fields = {}
+
+         if not any(info['name'] == relname for info in self.rel_table_names):
+             fprops = ''.join(f', {field} {type}' for field, type in fields.items()) if fields else ''
+             fprops += ', ID SERIAL'
+             query = f"CREATE REL TABLE {relname}(FROM {self.id} TO {tname} {fprops})"
+             self.db.execute(query)
+             self.rel_table_names.append({'name' : relname})
+         return relname, fields
+
+     def add_link(self, snode_id, link_table, tnode_id, link_fields = None, link_index_name = None):
+         """return added link"""
+         if link_index_name is None:
+             link_index_name = self.default_index_name2(link_table)
+         if link_fields is None:
+             link_fields = {}
+         query = f"""
+             MATCH (a:{self.id}), (b:{link_table})
+             WHERE a.ID = {snode_id} AND b.ID = {tnode_id}
+             CREATE (a)-[r:{link_index_name} {{{Properties(link_fields)}}}]->(b)
+             RETURN r.*
+         """
+         lst = self.db.qlist(query)
+         return lst[0]
+
+     def add_links(self, link_table, snode_ids : iter, tnode_id, link_index_name = None):
+         result = []
+         for id in snode_ids:
+             result.append(self.add_link(id, link_table, tnode_id, link_index_name = link_index_name))
+         return result
+
+     def delete_link(self, link_table_id, link_id, index_name = None):
+         if not index_name:
+             index_name = self.default_index_name2(link_table_id)
+         query = f"""
+             MATCH (:{self.id})-[r:{index_name}]->(:{link_table_id})
+             WHERE r.ID = {link_id}
+             DELETE r
+         """
+         self.db.execute(query)
+
+     def delete_links(self, link_table_id, link_node_id = None, source_ids = None, link_ids = None, index_name = None):
+         if not index_name:
+             index_name = self.default_index_name2(link_table_id)
+
+         if link_ids:
+             condition = f'r.ID in {link_ids}'
+             #condition = ' OR '.join(f'r.ID = {id}' for id in link_ids) #bug in IN op!
+         else:
+             if not isinstance(source_ids, list):
+                 source_ids = list(source_ids)
+             #condition = ' OR '.join(f'a.ID = {id}' for id in source_ids) #bug in IN op!
+             condition = f'a.ID in {source_ids}'
+             condition = f'({condition}) AND b.ID = {link_node_id}'
+         query = f"""
+             MATCH (a:{self.id})-[r:{index_name}]->(b:{link_table_id})
+             WHERE {condition}
+             DELETE r
+         """
+         self.db.execute(query)
+
+     def init_list(self):
+         list = self.read_rows(limit = self.limit)
+         length = len(list)
+         #possibly the table has more rows
+         if length == self.limit:
+             #qresult = self.db.execute()
+             ql = self.db.qlist(f"MATCH (n:{self.id}) RETURN count(n)")
+             self.length = ql[0][0]
+         else:
+             self.length = length
+         self.list = Dblist(self, list)
+
+     def get_init_list(self, search_string = None):
+         lst = self.list
+         lst.update = dict(type ='init', length = self.length,
+             limit = self.limit, data = self.list.get_delta_0())
+         return lst
+
+     def read_rows(self, skip = 0, limit = 0):
+         query = qb().match().node(self.id, 'a').return_literal('a.*').order_by('a.ID')
+         if skip:
+             query = query.skip(skip)
+         query = query.limit(limit if limit else self.limit)
+         return self.db.qlist(query)
+
+     def assign_row(self, row_array):
+         return self.db.update_row(self.id, row_array[-1], {name : value for name, value in zip(self.node_columns, row_array)})
+
+     def delete_row(self, id):
+         query = query_offset(self.id, id)
+         self.length -= 1
+         return self.db.execute(query.detach_delete('a'))
+
+     def delete_rows(self, ids):
+         #condition = ' OR '.join(f'a.ID = {id}' for id in ids) #bug in IN op!
+         condition = f'a.ID in {ids}'
+         query = f"""
+             MATCH (a:{self.id})
+             WHERE {condition}
+             DELETE a
+         """
+         return self.db.execute(query)
+
+     def append_row(self, row):
+         """row can be list or dict, returns ID"""
+         if isinstance(row, list):
+             props = {name: value for name, value in zip(self.node_columns, row) if value is not None}
+         try:
+             answer = self.db.execute(qb().create().node(self.id, 'a', props).return_literal('a.ID'))
+         except Exception as e:
+             return None
+         if answer.has_next():
+             self.length += 1
+             return answer.get_next()[0]
+         return None
+
+     def append_rows(self, rows):
+         """row can be list or dict"""
+         rows_arr = []
+         for row in rows:
+             row = {name: value for name, value in zip(self.node_columns, row)} if not isinstance(row, dict) else row
+             srow = f' {{{Properties(row).to_str()}}}'
+             rows_arr.append(srow)
+         rows_arr = ','.join(rows_arr)
+
+         query = (qb().with_(f'[{rows_arr}] AS rows')
+             .unwind('rows AS row')
+             .create()
+             .node(self.id, 'n', {p: f'row.{p}' for p in self.node_columns}, escape=False)
+             .return_literal('n.*'))
+
+         self.length += len(rows)
+         return self.db.qlist(query)
+
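The new kdb.py module wraps an embedded Kùzu graph database behind two classes: Database handles the connection, query execution and node-table management, while Dbtable exposes per-table row and relationship operations backed by Dblist. Column types are inferred from sample rows via kuzu_data_type, and conflicting numeric columns are widened to DOUBLE. Below is a minimal usage sketch of the added API; the database path, table name and row values are invented for illustration, and it assumes the module is importable as unisi.kdb with the Dblist plumbing from unisi.dbelements in place.

# Illustrative only: path, table name and rows are made up.
from unisi.kdb import Database, dict_to_cypher_set, kuzu_data_type

db = Database('data/appdb')                     # opens kuzu.Database and a Connection
persons = db.get_table(
    id = 'Person',                              # node table name
    headers = ['name', 'age'],                  # column types are inferred from rows
    rows = [['Ada', 36], ['Alan', 41]])

row_id = persons.append_row(['Grace', 45])      # returns the SERIAL ID of the new node
persons.assign_row(['Grace', 46, row_id])       # the last element is the row ID
print(persons.read_rows(limit = 10))

# Helper output, as produced by the functions above:
print(kuzu_data_type(3.14))                     # 'DOUBLE'
print(dict_to_cypher_set({'name': 'Ada'}))      # "SET a.name = 'Ada'"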
unisi/multimon.py CHANGED
@@ -1,4 +1,4 @@
- import multiprocessing, time, asyncio, logging
+ import multiprocessing, time, asyncio, logging, inspect
  from .utils import start_logging
  from config import froze_time, monitor_tick, profile, pool

@@ -11,38 +11,27 @@ def read_string_from(shared_array):

  _multiprocessing_pool = None

-
-
  def multiprocessing_pool():
      global _multiprocessing_pool
      if not _multiprocessing_pool:
          _multiprocessing_pool = multiprocessing.Pool(pool)
      return _multiprocessing_pool

- # Define an asynchronous function that will run the synchronous function in a separate process
- """ argument example
- def long_running_task(queue):
-     for i in range(5):
-         time.sleep(2) # emulate long calculation
-         queue.put(f"Task is {i*20}% complete")
-     queue.put(None)
-
- async def callback(string):
-     await context_user().progress(str)
- """
- async def run_external_process(long_running_task, *args, callback = False):
-     if callback:
-         queue = multiprocessing.Manager().Queue()
-         args = *args, queue
-     result = multiprocessing_pool().apply_async(long_running_task, args)
-     if callback:
-         while not result.ready():
-             if not queue.empty():
-                 message = queue.get()
-                 if message is None:
-                     break
-                 await callback(message)
-             await asyncio.sleep(0.1)
+ async def run_external_process(long_running_task, *args, progress_callback = None, **kwargs):
+     if progress_callback:
+         if args[-1] is None:
+             queue = multiprocessing.Manager().Queue()
+             args = *args[:-1], queue
+         else:
+             queue = args[-1]
+
+     result = multiprocessing_pool().apply_async(long_running_task, args, kwargs)
+     if progress_callback:
+         while not result.ready() or not queue.empty():
+             message = queue.get()
+             if message is None:
+                 break
+             await asyncio.gather(progress_callback(message), asyncio.sleep(monitor_tick))
      return result.get()

  logging_lock = multiprocessing.Lock()
@@ -57,12 +46,11 @@ def monitor_process(monitor_shared_arr):
      while True:
          #Wait for data in the shared array
          while monitor_shared_arr[0] == b'\x00':
-             time.sleep(0.005)
+             time.sleep(monitor_tick)
              if timer is not None:
                  timer -= monitor_tick
                  if timer < 0:
-                     timer = None
-
+                     timer = None
          arr = list(session_status.items())
          arr.sort(key = lambda s: s[1][1], reverse=True)
          ct = time.time()
@@ -71,7 +59,6 @@ def monitor_process(monitor_shared_arr):
              with logging_lock:
                  logging.warning(message)
              timer = None
-
          # Read and process the data
          status = read_string_from(monitor_shared_arr).split(splitter)
          #free
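The reworked run_external_process drops the boolean callback flag in favour of an explicit progress_callback keyword: the worker's queue is now expected as the last positional argument (pass None to have a Manager queue created for you), extra keyword arguments are forwarded to apply_async, and each progress message is awaited together with a monitor_tick pause. A sketch of the new calling convention, adapted from the example that was removed from the 0.1.12 docstring (names and timings are illustrative):

import asyncio, time

def long_running_task(steps, queue):            # the queue must be the last positional argument
    for i in range(steps):
        time.sleep(2)                           # emulate a long calculation
        queue.put(f"Task is {100 * (i + 1) // steps}% complete")
    queue.put(None)                             # tells the reader loop to stop
    return 'finished'

async def report(message):                      # hypothetical progress handler
    print(message)

# Passing None as the last argument lets run_external_process create the queue itself.
result = asyncio.run(run_external_process(long_running_task, 5, None, progress_callback = report))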
unisi/proxy.py CHANGED
@@ -30,7 +30,9 @@ class Proxy:
          addr_port = f'{wss_header if ssl else ws_header}{host_port}'
          addr_port = f'{addr_port}{"" if addr_port.endswith("/") else "/"}{ws_path}'
          self.host_port = f'{"https" if ssl else "http"}://{host_port}'
-         self.conn = create_connection(addr_port, timeout = timeout, header = {'session' : session})
+         if session:
+             addr_port = f'{addr_port}?{session}'
+         self.conn = create_connection(addr_port, timeout = timeout)
          self.screen = None
          self.screens = {}
          self.dialog = None
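The Proxy change moves the session id out of the WebSocket handshake headers: 0.1.12 passed header = {'session': session} to create_connection, while 0.1.14 appends the session to the address as a query string and connects without custom headers. Assuming the default ws path and an invented session id, the two versions open their connections roughly as follows:

# 0.1.12: create_connection('ws://localhost:8000/ws', timeout = timeout, header = {'session': 'abc123'})
# 0.1.14: create_connection('ws://localhost:8000/ws?abc123', timeout = timeout)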
unisi/reloader.py CHANGED
@@ -15,8 +15,8 @@ if config.hot_reload:
      import os, sys, traceback
      from watchdog.observers import Observer
      from watchdog.events import PatternMatchingEventHandler
-     from .users import User
-     from .utils import divpath, Redesign, app_dir
+     from .users import User, Redesign
+     from .utils import divpath, app_dir
      from .autotest import check_module
      import re, collections

unisi/server.py CHANGED
@@ -49,8 +49,6 @@ async def websocket_handler(request):
      if not user:
          await ws.send_str(toJson(status))
      else:
-         user.transport = ws._writer.transport if divpath != '/' else None
-
          async def send(res):
              if type(res) != str:
                  res = toJson(user.prepare_result(res))
@@ -88,9 +86,17 @@ async def websocket_handler(request):
                  user.log(traceback.format_exc())

      await user.delete()
-     return ws #?<->
+     return ws
+
+ def ensure_directory_exists(directory_path):
+     if not os.path.exists(directory_path):
+         os.makedirs(directory_path)
+         print(f"Directory '{directory_path}' created.")

  def start(appname = None, user_type = User, http_handlers = []):
+     ensure_directory_exists(screens_dir)
+     ensure_directory_exists(blocks_dir)
+
      if appname:
          config.appname = appname

@@ -103,7 +109,7 @@ def start(appname = None, user_type = User, http_handlers = []):
      http_handlers += [web.static(f'/{config.upload_dir}', upload_dir),
                        web.get('/{tail:.*}', static_serve), web.post('/', post_handler)]

-     print(f'Start {appname} web server..')
+     #print(f'Start {appname} web server..')
      app = web.Application()
      app.add_routes(http_handlers)
      web.run_app(app, port=port)
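The server.py changes drop the user.transport assignment, comment out the startup print, and add ensure_directory_exists so that start() creates the screens and blocks directories before the aiohttp application is wired up. A minimal sketch of the effect on startup, assuming the usual public entry point and that screens_dir and blocks_dir resolve inside the application folder:

from unisi import start   # assumed public entry point

# In 0.1.14 start() first calls ensure_directory_exists(screens_dir) and
# ensure_directory_exists(blocks_dir), creating e.g. ./screens and ./blocks when missing,
# then registers the upload/static/post routes and runs the aiohttp app on the configured port.
start('Hello app')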