django-restit 4.2.173__py3-none-any.whl → 4.2.174__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- account/models/group.py +16 -19
- auditlog/models.py +3 -2
- {django_restit-4.2.173.dist-info → django_restit-4.2.174.dist-info}/METADATA +1 -1
- {django_restit-4.2.173.dist-info → django_restit-4.2.174.dist-info}/RECORD +17 -16
- incident/models/event.py +8 -9
- incident/models/incident.py +6 -6
- incident/models/ossec.py +2 -2
- metrics/models.py +11 -10
- rest/__init__.py +1 -1
- rest/models/base.py +19 -7
- rest/requestex.py +3 -4
- rest/serializers/response.py +37 -5
- rest/service.py +135 -0
- rest/urls.py +0 -4
- taskqueue/worker.py +23 -23
- {django_restit-4.2.173.dist-info → django_restit-4.2.174.dist-info}/LICENSE.md +0 -0
- {django_restit-4.2.173.dist-info → django_restit-4.2.174.dist-info}/WHEEL +0 -0
account/models/group.py
CHANGED
@@ -118,6 +118,7 @@ class Group(models.Model, RestModel, MetaDataModel):
|
|
118
118
|
"kind",
|
119
119
|
("metadata__timezone", "timezone"),
|
120
120
|
("metadata__eod", "end_of_day"),
|
121
|
+
("parent__pk", "parent_id"),
|
121
122
|
("parent__name", "parent"),
|
122
123
|
("location.line1", "address_line1"),
|
123
124
|
("location.line2", "address_line2"),
|
@@ -171,7 +172,7 @@ class Group(models.Model, RestModel, MetaDataModel):
|
|
171
172
|
if item:
|
172
173
|
return item.thumbnail_url()
|
173
174
|
return None
|
174
|
-
|
175
|
+
|
175
176
|
@classmethod
|
176
177
|
def on_rest_list_filter(cls, request, qset=None):
|
177
178
|
if not request.member.hasPermission("view_all_groups"):
|
@@ -231,7 +232,7 @@ class Group(models.Model, RestModel, MetaDataModel):
|
|
231
232
|
raise RestValidationError("cannot set self as parent", 1101)
|
232
233
|
value = Group.objects.filter(pk=value).last()
|
233
234
|
self.parent = value
|
234
|
-
|
235
|
+
|
235
236
|
def set_child_of(self, value):
|
236
237
|
# this is a helper to add this group to another group
|
237
238
|
parent = Group.objects.filter(pk=value).last()
|
@@ -321,7 +322,7 @@ class Group(models.Model, RestModel, MetaDataModel):
|
|
321
322
|
return True
|
322
323
|
return False
|
323
324
|
|
324
|
-
def notifyMembers(self, subject, message=None, template=None, context=None,
|
325
|
+
def notifyMembers(self, subject, message=None, template=None, context=None,
|
325
326
|
email_only=False, sms_msg=None, perms=None, force=False,
|
326
327
|
from_email=None, exclude_member=None):
|
327
328
|
if perms is not None:
|
@@ -436,19 +437,19 @@ class Group(models.Model, RestModel, MetaDataModel):
|
|
436
437
|
qset = self.memberships.filter(state__gte=-10)
|
437
438
|
if perms:
|
438
439
|
qset = qset.filter(
|
439
|
-
Q(properties__category="permissions",
|
440
|
-
properties__key__in=perms,
|
441
|
-
properties__value__in=TRUE_VALUES) |
|
442
|
-
Q(member__properties__category="permissions",
|
443
|
-
member__properties__key__in=perms,
|
440
|
+
Q(properties__category="permissions",
|
441
|
+
properties__key__in=perms,
|
442
|
+
properties__value__in=TRUE_VALUES) |
|
443
|
+
Q(member__properties__category="permissions",
|
444
|
+
member__properties__key__in=perms,
|
444
445
|
member__properties__value__in=TRUE_VALUES))
|
445
446
|
if notify:
|
446
447
|
qset = qset.filter(
|
447
|
-
Q(properties__category="notify",
|
448
|
-
properties__key__in=notify,
|
449
|
-
properties__value__in=TRUE_VALUES) |
|
450
|
-
Q(member__properties__category="notify",
|
451
|
-
member__properties__key__in=notify,
|
448
|
+
Q(properties__category="notify",
|
449
|
+
properties__key__in=notify,
|
450
|
+
properties__value__in=TRUE_VALUES) |
|
451
|
+
Q(member__properties__category="notify",
|
452
|
+
member__properties__key__in=notify,
|
452
453
|
member__properties__value__in=TRUE_VALUES))
|
453
454
|
|
454
455
|
if role:
|
@@ -495,7 +496,7 @@ class Group(models.Model, RestModel, MetaDataModel):
|
|
495
496
|
# enable again?
|
496
497
|
ms.set_state(0)
|
497
498
|
ms.save()
|
498
|
-
|
499
|
+
|
499
500
|
if MEMBERSHIP_ROLES:
|
500
501
|
perms = MEMBERSHIP_ROLES.get(role, [])
|
501
502
|
for k in perms:
|
@@ -601,12 +602,8 @@ class Group(models.Model, RestModel, MetaDataModel):
|
|
601
602
|
if credentials.kind == "member":
|
602
603
|
if msg.pk is not None:
|
603
604
|
return credentials.instance.isMemberOf(msg.pk)
|
604
|
-
return False
|
605
|
+
return False
|
605
606
|
|
606
607
|
|
607
608
|
class GroupMetaData(MetaDataBase):
|
608
609
|
parent = models.ForeignKey(Group, related_name="properties", on_delete=models.CASCADE)
|
609
|
-
|
610
|
-
|
611
|
-
|
612
|
-
|
auditlog/models.py
CHANGED
@@ -55,10 +55,11 @@ class PersistentLog(models.Model, RestModel):
|
|
55
55
|
class RestMeta:
|
56
56
|
DEFAULT_SORT = "-when"
|
57
57
|
CAN_SAVE = False
|
58
|
+
ESTIMATE_COUNTS = True
|
58
59
|
GROUP_FIELD = None # do this so when we sort by group it is always exact
|
59
60
|
QUERY_FIELDS = [
|
60
|
-
"level", "component", "pkey", "action",
|
61
|
-
"request_method", "request_path",
|
61
|
+
"level", "component", "pkey", "action",
|
62
|
+
"request_method", "request_path",
|
62
63
|
"session__ip", "user",
|
63
64
|
"tid", "group", "user__username"]
|
64
65
|
SEARCH_FIELDS = ["user__username", "user__last_name", "message", "session__ip", "request_path"]
|
@@ -28,7 +28,7 @@ account/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
|
|
28
28
|
account/models/__init__.py,sha256=cV_lMnT2vL_mjiYtT4hlcIHo52ocFbGSNVkOIHHLXZY,385
|
29
29
|
account/models/device.py,sha256=8D-Sbv9PZWAnX6UVpp1lNJ03P24fknNnN1VOhqY7RVg,6306
|
30
30
|
account/models/feeds.py,sha256=vI7fG4ASY1M0Zjke24RdnfDcuWeATl_yR_25jPmT64g,2011
|
31
|
-
account/models/group.py,sha256=
|
31
|
+
account/models/group.py,sha256=JVyMIakLskUuGXBJFyessw4LlD9Fl6AsHtpo1yZEYjk,22884
|
32
32
|
account/models/legacy.py,sha256=zYdtv4LC0ooxPVqWM-uToPwV-lYWQLorSE6p6yn1xDw,2720
|
33
33
|
account/models/member.py,sha256=qmLCOVbNTRr4L-E7BbOMtv4V64QN7K-0pXDgnuB-AbY,54722
|
34
34
|
account/models/membership.py,sha256=90EpAhOsGaqphDAkONP6j_qQ0OWSRaQsI8H7E7fgMkE,9249
|
@@ -69,7 +69,7 @@ auditlog/middleware.py,sha256=Q4bXg8rnm8y2fMnAsN6ha3Fz6TW8jIzLnvpu4H9SpWE,1537
|
|
69
69
|
auditlog/migrations/0001_initial.py,sha256=X171gKQZIaTO9FGNG1yKTjGSZS0ZjZj5gvimF9-_kks,3309
|
70
70
|
auditlog/migrations/0002_alter_persistentlog_session.py,sha256=DkkcIobbHdbniKg5bOlRmiF-Nc4hX55Y6KuQySrCcJ8,541
|
71
71
|
auditlog/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
72
|
-
auditlog/models.py,sha256=
|
72
|
+
auditlog/models.py,sha256=kRoWpOout2DYW70jsZLHj7GJBssqbrR75fx0_-hrsaM,16546
|
73
73
|
auditlog/periodic.py,sha256=AUhDeVsZtC47BJ-lklvYEegHoxAzj1RpIvRFSsM7g5E,363
|
74
74
|
auditlog/rpc.py,sha256=gJgj3Wiar5pVsw8tuhy0jXLkqFkOr3Z-oI2DKelMRAQ,5592
|
75
75
|
auditlog/tq.py,sha256=ROcCjQhCavOgW3-8yjLrboNTAYEC5Pu2UCoenom0Law,2478
|
@@ -114,9 +114,9 @@ incident/migrations/0015_rule_title_template_alter_incident_state.py,sha256=FPUD
|
|
114
114
|
incident/migrations/0016_rule_notify_template.py,sha256=4WGdMxiELujLIy9bzHovHWbAORupodN1Ty3vsy3mLjg,425
|
115
115
|
incident/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
116
116
|
incident/models/__init__.py,sha256=NMphuhb0RTMf7Ov4QkNv7iv6_I8Wtr3xQ54yjX_a31M,209
|
117
|
-
incident/models/event.py,sha256=
|
118
|
-
incident/models/incident.py,sha256=
|
119
|
-
incident/models/ossec.py,sha256=
|
117
|
+
incident/models/event.py,sha256=JEUdVUxqz2nRcfGC2GHo3baIg5JntK5cSL6_MesFLeA,7967
|
118
|
+
incident/models/incident.py,sha256=oxwLDJYGmk26zf7AD_e7nKcoJw3gXibjrX3DlByNAT8,22580
|
119
|
+
incident/models/ossec.py,sha256=g7cc2vYdYEB8zomohwqbo0ekyPt1v_qA67y35sBn2YY,2244
|
120
120
|
incident/models/rules.py,sha256=PPp8oJDW1gop9i_21lhP50qgt_TrdWErp2mYqZCMfd4,7065
|
121
121
|
incident/models/ticket.py,sha256=S3kqGQpYLE6Y4M9IKu_60sgW-f592xNr8uufqHnvDoU,2302
|
122
122
|
incident/parsers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
@@ -356,7 +356,7 @@ metrics/migrations/0003_metrics_expires.py,sha256=_g4oRv4NHW-4iCQx2s1SiF38LLyFf8
|
|
356
356
|
metrics/migrations/0004_eodmetrics.py,sha256=Ky6ZVMZqa0F_SUp_QFWY7ZKBgVhy9CS4wZcsEhrkSgc,3271
|
357
357
|
metrics/migrations/0005_alter_metrics_v1_alter_metrics_v10_alter_metrics_v11_and_more.py,sha256=pmwJfpPJ1RUX_CqM66l6vvV-nrAUPo_GIan0Pc9mQHs,2358
|
358
358
|
metrics/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
359
|
-
metrics/models.py,sha256=
|
359
|
+
metrics/models.py,sha256=v3SKizBcKVQ8eFIT18fPr6XveQ1cLJZqLKiitncz5-0,14175
|
360
360
|
metrics/periodic.py,sha256=IayBLLat40D8FB-A3bYBW9lxm9-IzcugQunojThQ_OU,661
|
361
361
|
metrics/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
362
362
|
metrics/providers/aws.py,sha256=dIBGFE1Fvszy6rmVrn_Fm1zUDv345q4dBsg9Iit-XCc,8358
|
@@ -379,7 +379,7 @@ pushit/utils.py,sha256=IeTCGa-164nmB1jIsK1lu1O1QzUhS3BKfuXHGjCW-ck,2121
|
|
379
379
|
rest/.gitignore,sha256=TbEvWRMnAiajCTOdhiNrd9eeCAaIjRp9PRjE_VkMM5g,118
|
380
380
|
rest/README.md,sha256=V3ETc-cJu8PZIbKr9xSe_pA4JEUpC8Dhw4bQeVCDJPw,5460
|
381
381
|
rest/RemoteEvents.py,sha256=nL46U7AuxIrlw2JunphR1tsXyqi-ep_gD9CYGpYbNgE,72
|
382
|
-
rest/__init__.py,sha256=
|
382
|
+
rest/__init__.py,sha256=j76Rqm_TIvJvezmNJkUripR1cpoqTtqZAXhwIQ0t_Uk,122
|
383
383
|
rest/arc4.py,sha256=y644IbF1ec--e4cUJ3KEYsewTCITK0gmlwa5mJruFC0,1967
|
384
384
|
rest/cache.py,sha256=1Qg0rkaCJCaVP0-l5hZg2CIblTdeBSlj_0fP6vlKUpU,83
|
385
385
|
rest/crypto/__init__.py,sha256=Tl0U11rgj1eBYqd6OXJ2_XSdNLumW_JkBZnaJqI6Ldw,72
|
@@ -412,12 +412,12 @@ rest/middleware/request.py,sha256=JchRNy5L-bGd-7h-KFYekGRvREe2eCkZXKOYqIkP2hI,41
|
|
412
412
|
rest/middleware/session.py,sha256=zHSoQpIzRLmpqr_JvW406wzpvU3W3gDbm5JhtzLAMlE,10240
|
413
413
|
rest/middleware/session_store.py,sha256=1nSdeXK8PyuYgGgIufqrS6j6QpIrQ7zbMNT0ol75e6U,1901
|
414
414
|
rest/models/__init__.py,sha256=M8pvFDq-WCF-QcM58X7pMufYYe0aaQ3U0PwGe9TKbbY,130
|
415
|
-
rest/models/base.py,sha256=
|
415
|
+
rest/models/base.py,sha256=oPj4m4-fnmjYUqd9dru17ulyh8jdFbXtWJt1CiuiBls,72633
|
416
416
|
rest/models/cacher.py,sha256=eKz8TINVhWEqKhJGMsRkKZTtBUIv5rN3NHbZwOC56Uk,578
|
417
417
|
rest/models/metadata.py,sha256=ni8-BRF07lv4CdPUWnUdfPTOClQAVEeRZvO-ic623HU,12904
|
418
418
|
rest/net.py,sha256=LcB2QV6VNRtsSdmiQvYZgwQUDwOPMn_VBdRiZ6OpI-I,2974
|
419
419
|
rest/regexes.yaml,sha256=VoGb4E-P_K9f82Yzcpltgzekpt9usRtwu9PYlo46nUw,149463
|
420
|
-
rest/requestex.py,sha256=
|
420
|
+
rest/requestex.py,sha256=hv0ss8n2wojoBD70cx1DkUj1Msuhd5Nr8bdaYQNIPC0,16123
|
421
421
|
rest/rpc.py,sha256=WXZe5CLdYMeSXRH6wuzl-_riPPRnmtFNVJB9dfY1GSo,2965
|
422
422
|
rest/search.py,sha256=QVjk2b5tZLgf1zM2MHvJTyRjwUbY5ZD7HXSTmSPXtvU,8362
|
423
423
|
rest/serializers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
@@ -429,8 +429,9 @@ rest/serializers/legacy.py,sha256=a5O-x2PqMKX8wYWrhCmdcivVbkPnru7UdyLbrhCaAdY,61
|
|
429
429
|
rest/serializers/localizers.py,sha256=BegaCvTQVaruhWzvGHq3zWeVFmtBChatquRqAtkke10,410
|
430
430
|
rest/serializers/model.py,sha256=08HJeqpmytjxvyiJFfsSRRG0uH-iK2mXCw6w0oMfWrI,8598
|
431
431
|
rest/serializers/profiler.py,sha256=OxOimhEyvCAuzUBC9Q1dz2xaakjAqmSnekMATsjduXM,997
|
432
|
-
rest/serializers/response.py,sha256=
|
432
|
+
rest/serializers/response.py,sha256=aSZ9bvhsYGHR26NA6sin7fGqFptjTsBhmIcDKrqeeoY,8656
|
433
433
|
rest/serializers/util.py,sha256=-In89fpuVTd6_Ul8nwEUt3DjVKdpeoEyAxudlyB8K6Y,2734
|
434
|
+
rest/service.py,sha256=jl8obnMDEUzB8y3LROGPvmfKKoFU_SzOvywUQjoQZpg,4046
|
434
435
|
rest/settings_helper.py,sha256=_Vn9nmL5_GPss9zIsXzacbTQkn99NbO42CqvOZC3ge4,1532
|
435
436
|
rest/ssl_check.py,sha256=kH4Pk4upUEwKTAnBLR0DIKezNJHjkW3g2TdQAObEgW4,1419
|
436
437
|
rest/static/lib/jquery.js,sha256=VAvG3sHdS5LqTT-5A_aeq_bZGa_Uj04xKxY8KM_w9EE,95786
|
@@ -445,7 +446,7 @@ rest/templates/rest_html.html,sha256=utOzvXEE6LiHFUvcAQUDOQ0yl3J_2F3-22r0K611eTQ
|
|
445
446
|
rest/ua.py,sha256=0wwOtJPWCiCxUKzWKyMApRbSaiuAxCCk0Pe3OwzYBhE,185398
|
446
447
|
rest/uberdict.py,sha256=ivDpzfchQqX8dM2_TtuyMW7NNO-j7zDmxkdKixQxvU4,17064
|
447
448
|
rest/url_docs.py,sha256=O8O_CQso3fB-7o-huidIT1BRGv5R6lDW-yKHnsGyAPk,11881
|
448
|
-
rest/urls.py,sha256=
|
449
|
+
rest/urls.py,sha256=MGMTDU_SN5ZO3HeomFsE6szKnMzE_X9vzh_J0YL0KGA,1866
|
449
450
|
rest/views.py,sha256=WUjHQMCzw2f4Te2X4Kh40ksfoVAAlrWC2pbt8nhcIYA,1115
|
450
451
|
sessionlog/.gitignore,sha256=TbEvWRMnAiajCTOdhiNrd9eeCAaIjRp9PRjE_VkMM5g,118
|
451
452
|
sessionlog/README.md,sha256=vQEVJ_8u3Vv19VwPfscjCiHFu61ZSrEM-KIuBpUXhws,62
|
@@ -471,7 +472,7 @@ taskqueue/transports/http.py,sha256=AzliUnw_LuyO2zZZOoUAJGFcTV-Gxt1iE3hCVnIiyGQ,
|
|
471
472
|
taskqueue/transports/s3.py,sha256=fMosL893u1iQdo6Y1djwb7KEoNo6TTsDPJl13OJdJP8,1913
|
472
473
|
taskqueue/transports/sftp.py,sha256=jT1_krjTHA7DCAukD85aGYRCg9m0cEH9EWzOC-wJGdk,1891
|
473
474
|
taskqueue/transports/sms.py,sha256=H1-LIGEMfbUNqJD9amRcsvKUSwtz9yBj1QNfB7EHjHE,142
|
474
|
-
taskqueue/worker.py,sha256=
|
475
|
+
taskqueue/worker.py,sha256=H7KfcIj-lNd8K5Wn7tlw4rXYPqVdkxv2B5X5kbEcIbk,16054
|
475
476
|
telephony/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
476
477
|
telephony/admin.py,sha256=iOdsBfFFbBisdqKSZ36bIrh_z5sU0Wx_PkaFi8wd1iA,243
|
477
478
|
telephony/decorators.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
@@ -515,7 +516,7 @@ ws4redis/servers/uwsgi.py,sha256=VyhoCI1DnVFqBiJYHoxqn5Idlf6uJPHvfBKgkjs34mo,172
|
|
515
516
|
ws4redis/settings.py,sha256=KKq00EwoGnz1yLwCZr5Dfoq2izivmAdsNEEM4EhZwN4,1610
|
516
517
|
ws4redis/utf8validator.py,sha256=S0OlfjeGRP75aO6CzZsF4oTjRQAgR17OWE9rgZdMBZA,5122
|
517
518
|
ws4redis/websocket.py,sha256=R0TUyPsoVRD7Y_oU7w2I6NL4fPwiz5Vl94-fUkZgLHA,14848
|
518
|
-
django_restit-4.2.
|
519
|
-
django_restit-4.2.
|
520
|
-
django_restit-4.2.
|
521
|
-
django_restit-4.2.
|
519
|
+
django_restit-4.2.174.dist-info/LICENSE.md,sha256=VHN4hhEeVOoFjtG-5fVv4jesA4SWi0Z-KgOzzN6a1ps,1068
|
520
|
+
django_restit-4.2.174.dist-info/METADATA,sha256=r1t8GFE4qKAq75b6CnYkpJCE6BbfW7-t2qt2dy-XkaA,7714
|
521
|
+
django_restit-4.2.174.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
|
522
|
+
django_restit-4.2.174.dist-info/RECORD,,
|
incident/models/event.py
CHANGED
@@ -18,8 +18,8 @@ INCIDENT_EVENT_GRANULARITY = settings.get("INCIDENT_EVENT_GRANULARITY", "hourly"
|
|
18
18
|
EVENT_TO_INCIDENT_LEVEL = settings.get("EVENT_TO_INCIDENT_LEVEL", 4)
|
19
19
|
EVENT_DETAIL_TEMPLATES = settings.get("EVENT_DETAIL_TEMPLATES", None)
|
20
20
|
EVENT_META_KEYWORDS = settings.get("EVENT_META_KEYWORDS", [
|
21
|
-
"path", "ip", "reporter_ip", "code",
|
22
|
-
"reason", "buid", "merchant", "tid",
|
21
|
+
"path", "ip", "reporter_ip", "code",
|
22
|
+
"reason", "buid", "merchant", "tid",
|
23
23
|
"group", "http_user_agent", "user_agent",
|
24
24
|
"app_url", "isp", "city", "state", "country",
|
25
25
|
"username"
|
@@ -28,7 +28,7 @@ EVENT_META_KEYWORDS = settings.get("EVENT_META_KEYWORDS", [
|
|
28
28
|
logger = log.getLogger("incident", filename="incident.log")
|
29
29
|
|
30
30
|
"""
|
31
|
-
very generic
|
31
|
+
very generic
|
32
32
|
external system can post an event
|
33
33
|
{
|
34
34
|
"description": "Critical Test Event",
|
@@ -77,15 +77,15 @@ class Event(JSONMetaData, rm.RestModel):
|
|
77
77
|
# code = models.IntegerField(default=0, db_index=True)
|
78
78
|
|
79
79
|
group = models.ForeignKey(
|
80
|
-
"account.Group", on_delete=models.SET_NULL,
|
80
|
+
"account.Group", on_delete=models.SET_NULL,
|
81
81
|
related_name="+", null=True, default=None)
|
82
|
-
|
82
|
+
|
83
83
|
component = models.SlugField(max_length=250, null=True, blank=True, default=None)
|
84
84
|
component_id = models.IntegerField(null=True, blank=True, default=None)
|
85
85
|
|
86
86
|
# this allows us to bundle multiple events to an incident
|
87
87
|
incident = models.ForeignKey(
|
88
|
-
Incident, null=True, default=None,
|
88
|
+
Incident, null=True, default=None,
|
89
89
|
related_name="events", on_delete=models.SET_NULL)
|
90
90
|
|
91
91
|
def runRules(self):
|
@@ -170,14 +170,14 @@ class Event(JSONMetaData, rm.RestModel):
|
|
170
170
|
# logger.info(f"ignore event {self.pk} {self.description}")
|
171
171
|
return
|
172
172
|
|
173
|
-
# always create an incident
|
173
|
+
# always create an incident
|
174
174
|
if incident is None:
|
175
175
|
incident = Incident(
|
176
176
|
rule=hit_rule, priority=priority,
|
177
177
|
reporter_ip=self.reporter_ip,
|
178
178
|
category=self.category,
|
179
179
|
group=self.group,
|
180
|
-
component=self.component,
|
180
|
+
component=self.component,
|
181
181
|
component_id=self.component_id,
|
182
182
|
hostname=self.hostname)
|
183
183
|
if self.group is None and hit_rule is not None:
|
@@ -211,4 +211,3 @@ class Event(JSONMetaData, rm.RestModel):
|
|
211
211
|
incident.triggerAction()
|
212
212
|
except Exception:
|
213
213
|
logger.exception()
|
214
|
-
|
incident/models/incident.py
CHANGED
@@ -92,7 +92,7 @@ class Incident(models.Model, rm.RestModel, rm.MetaDataModel):
|
|
92
92
|
|
93
93
|
group = models.ForeignKey("account.Group", on_delete=models.SET_NULL, null=True, default=None)
|
94
94
|
assigned_to = models.ForeignKey("account.Member", on_delete=models.SET_NULL, null=True, default=None)
|
95
|
-
|
95
|
+
|
96
96
|
priority = models.IntegerField(default=0) # 1-10, 1 being the highest
|
97
97
|
# 0=new, 1=opened, 2=paused, 3=ignore, 4=resolved, 5=pending
|
98
98
|
state = models.IntegerField(default=0, choices=INCIDENT_STATES)
|
@@ -160,7 +160,7 @@ class Incident(models.Model, rm.RestModel, rm.MetaDataModel):
|
|
160
160
|
|
161
161
|
if event.details:
|
162
162
|
self.setProperty("details", event.details)
|
163
|
-
|
163
|
+
|
164
164
|
if event.metadata.geoip and event.metadata.geoip.city:
|
165
165
|
self.setProperty("city", event.metadata.geoip.city)
|
166
166
|
self.setProperty("state", event.metadata.geoip.state)
|
@@ -209,7 +209,7 @@ class Incident(models.Model, rm.RestModel, rm.MetaDataModel):
|
|
209
209
|
self.save()
|
210
210
|
elif self.rule.action.startswith("firewall_block"):
|
211
211
|
if settings.FIREWALL_GLOBAL_BLOCK:
|
212
|
-
Task.Publish("incident", "firewall_block",
|
212
|
+
Task.Publish("incident", "firewall_block",
|
213
213
|
dict(ip=self.reporter_ip),
|
214
214
|
channel="tq_broadcast")
|
215
215
|
|
@@ -348,7 +348,7 @@ class Incident(models.Model, rm.RestModel, rm.MetaDataModel):
|
|
348
348
|
member.sendSMS(sms_msg)
|
349
349
|
members = group.getMembers(perms=email_perms, as_member=True)
|
350
350
|
for member in members:
|
351
|
-
member.sendEmail(subject, body)
|
351
|
+
member.sendEmail(subject, body)
|
352
352
|
|
353
353
|
def notifyWith(self, perm):
|
354
354
|
# logger.info("notifyWith", perm)
|
@@ -407,7 +407,7 @@ class Incident(models.Model, rm.RestModel, rm.MetaDataModel):
|
|
407
407
|
if request != None and "DATA" in request and "note" in request.DATA:
|
408
408
|
self.logHistory(kind="note", note=request.DATA.get("note"), request=request)
|
409
409
|
|
410
|
-
def logHistory(self, kind="history", note=None, media=None,
|
410
|
+
def logHistory(self, kind="history", note=None, media=None,
|
411
411
|
request=None, member=None, notify=True):
|
412
412
|
if request is None:
|
413
413
|
request = self.getActiveRequest()
|
@@ -493,7 +493,7 @@ class Incident(models.Model, rm.RestModel, rm.MetaDataModel):
|
|
493
493
|
email_only=True,
|
494
494
|
from_email=INCIDENT_EMAIL_FROM)
|
495
495
|
else:
|
496
|
-
# notitfy everyone but the sender
|
496
|
+
# notitfy everyone but the sender
|
497
497
|
if history.by is None:
|
498
498
|
members = Member.GetWithPermission(perm, ignore_disabled_email=True).exclude(pk=history.by.pk)
|
499
499
|
if members.count() == 0:
|
incident/models/ossec.py
CHANGED
@@ -46,8 +46,8 @@ class ServerOssecAlert(models.Model, rm.RestModel):
|
|
46
46
|
level = models.IntegerField(default=0)
|
47
47
|
title = models.CharField(max_length=200, blank=True, null=True, default=None)
|
48
48
|
geoip = models.ForeignKey("location.GeoIP", blank=True, null=True, default=None, on_delete=models.DO_NOTHING)
|
49
|
-
|
49
|
+
|
50
50
|
metadata = None
|
51
|
-
|
51
|
+
|
52
52
|
def __str__(self):
|
53
53
|
return f'{self.hostname}: {self.title}'
|
metrics/models.py
CHANGED
@@ -43,14 +43,14 @@ def metric(
|
|
43
43
|
elif granularity in ["minutes", "seconds"]:
|
44
44
|
expires = datetime.now() + timedelta(days=7)
|
45
45
|
m, created = Metrics.objects.get_or_create(
|
46
|
-
uuid=key,
|
46
|
+
uuid=key,
|
47
47
|
defaults=dict(
|
48
48
|
granularity=granularity, slug=slug, expires=expires,
|
49
49
|
group=group, start=utils.date_for_granulatiry(date, granularity)))
|
50
50
|
m.updateMetrics(keys, data, created)
|
51
51
|
|
52
52
|
|
53
|
-
def gauge(slug, keys, data, granularity="daily", group=None, date=None,
|
53
|
+
def gauge(slug, keys, data, granularity="daily", group=None, date=None,
|
54
54
|
timezone=None, slug_append=None, max_granularity=None):
|
55
55
|
# guage does not accumulate but just stores the data like a cache
|
56
56
|
# if calledf on the same time period it will just update the current numbers
|
@@ -72,7 +72,7 @@ def gauge(slug, keys, data, granularity="daily", group=None, date=None,
|
|
72
72
|
elif gran == "daily":
|
73
73
|
expires = datetime.now() + timedelta(days=METRICS_EXPIRE_DAILY)
|
74
74
|
m, created = Metrics.objects.get_or_create(
|
75
|
-
uuid=key,
|
75
|
+
uuid=key,
|
76
76
|
defaults=dict(
|
77
77
|
granularity=gran, slug=slug, expires=expires,
|
78
78
|
group=group, start=utils.date_for_granulatiry(date, gran)))
|
@@ -99,7 +99,7 @@ def get_qset(slug, granularity, start=None, end=None,
|
|
99
99
|
start = group.getLocalTime(start)
|
100
100
|
elif settings.METRICS_TIMEZONE:
|
101
101
|
start = date_util.convertToLocalTime(settings.METRICS_TIMEZONE, start)
|
102
|
-
|
102
|
+
|
103
103
|
start = utils.date_for_granulatiry(start, granularity)
|
104
104
|
if end is None:
|
105
105
|
if granularity == "hourly":
|
@@ -133,7 +133,7 @@ def get_totals(slug, keys, granularity, start=None, end=None, group=None):
|
|
133
133
|
start = group.getLocalTime(start)
|
134
134
|
elif settings.METRICS_TIMEZONE:
|
135
135
|
start = date_util.convertToLocalTime(settings.METRICS_TIMEZONE, start)
|
136
|
-
|
136
|
+
|
137
137
|
start = utils.date_for_granulatiry(start, granularity)
|
138
138
|
if end is None:
|
139
139
|
end = start + timedelta(minutes=5)
|
@@ -141,7 +141,7 @@ def get_totals(slug, keys, granularity, start=None, end=None, group=None):
|
|
141
141
|
end = group.getLocalTime(end)
|
142
142
|
elif settings.METRICS_TIMEZONE:
|
143
143
|
end = date_util.convertToLocalTime(settings.METRICS_TIMEZONE, end)
|
144
|
-
|
144
|
+
|
145
145
|
qset = Metrics.objects.filter(
|
146
146
|
slug=slug, granularity=granularity,
|
147
147
|
group=group, start__gte=start, start__lte=end)
|
@@ -162,7 +162,7 @@ def get_metric(slug, granularity, start, group=None):
|
|
162
162
|
start = group.getLocalTime(start)
|
163
163
|
elif settings.METRICS_TIMEZONE:
|
164
164
|
start = date_util.convertToLocalTime(settings.METRICS_TIMEZONE, start)
|
165
|
-
|
165
|
+
|
166
166
|
start = utils.date_for_granulatiry(start, granularity)
|
167
167
|
qset = Metrics.objects.filter(
|
168
168
|
slug=slug, granularity=granularity, start__gte=start, start__lte=start, group=group)
|
@@ -226,7 +226,7 @@ def get_metrics(slug, granularity, start, end=None, group=None, samples=None):
|
|
226
226
|
for k in keys:
|
227
227
|
data[k].append(0)
|
228
228
|
continue
|
229
|
-
for k in keys:
|
229
|
+
for k in keys:
|
230
230
|
data[k].append(result["values"].get(k, 0))
|
231
231
|
period_values.remove(result)
|
232
232
|
return objict(periods=periods, data=data)
|
@@ -241,7 +241,7 @@ def get_chart_periods(slug, granularity, start, end=None, group=None):
|
|
241
241
|
periods.append(period)
|
242
242
|
if granularity != "weekly":
|
243
243
|
periods.reverse()
|
244
|
-
return periods
|
244
|
+
return periods
|
245
245
|
|
246
246
|
|
247
247
|
def get_adjusted_date_range(granularity, start, end, group=None, samples=None):
|
@@ -282,6 +282,7 @@ def generate_uuid(slug, group, slug_append=None):
|
|
282
282
|
class Metrics(models.Model, rm.RestModel):
|
283
283
|
class RestMeta:
|
284
284
|
QUERY_FIELDS = ["group__kind", "all_fields"]
|
285
|
+
ESTIMATE_COUNTS = True
|
285
286
|
GRAPHS = {
|
286
287
|
"detailed": {
|
287
288
|
"fields": [
|
@@ -310,7 +311,7 @@ class Metrics(models.Model, rm.RestModel):
|
|
310
311
|
# allow to group metrics by a group
|
311
312
|
group = models.ForeignKey("account.Group", related_name="+", on_delete=models.CASCADE, null=True, default=None)
|
312
313
|
|
313
|
-
# now we create a set of k/v
|
314
|
+
# now we create a set of k/v
|
314
315
|
k1 = models.SlugField(max_length=64, null=True, default=None)
|
315
316
|
v1 = models.BigIntegerField(default=0)
|
316
317
|
|
rest/__init__.py
CHANGED
rest/models/base.py
CHANGED
@@ -7,6 +7,7 @@ import importlib
|
|
7
7
|
from django.db import models as dm
|
8
8
|
from django.db.transaction import atomic
|
9
9
|
from django.apps import apps
|
10
|
+
from django.db import connection
|
10
11
|
|
11
12
|
from rest import helpers as rh
|
12
13
|
from rest import errors as re
|
@@ -876,7 +877,7 @@ class RestModel(object):
|
|
876
877
|
if not graph and request is not None:
|
877
878
|
graph = request.DATA.get("graph", "default")
|
878
879
|
return GRAPH_HELPERS.restList(
|
879
|
-
request, qset, sort=sort, totals=totals,
|
880
|
+
request, qset, sort=sort, totals=totals,
|
880
881
|
return_httpresponse=return_httpresponse,
|
881
882
|
response_params=dict(graph=graph),
|
882
883
|
**cls.getGraph(graph))
|
@@ -896,6 +897,17 @@ class RestModel(object):
|
|
896
897
|
request = cls.getActiveRequest()
|
897
898
|
return cls.restList(request, qset, graph, totals, False)
|
898
899
|
|
900
|
+
@classmethod
|
901
|
+
def restListEstimatedCount(cls, request, qset):
|
902
|
+
# TODO attempt to make this work with the qset,
|
903
|
+
# right now it gets an estimated count of the entire table
|
904
|
+
if getattr(cls.RestMeta, "ESTIMATE_COUNTS", False):
|
905
|
+
if connection.vendor == 'postgresql':
|
906
|
+
with connection.cursor() as cursor:
|
907
|
+
cursor.execute(f"SELECT reltuples::bigint FROM pg_class WHERE relname = '{cls._meta.db_table}'")
|
908
|
+
return cursor.fetchone()[0]
|
909
|
+
return qset.count()
|
910
|
+
|
899
911
|
def restStatus(self, request, status, **kwargs):
|
900
912
|
RestModel._setupGraphHelpers()
|
901
913
|
return GRAPH_HELPERS.restStatus(request, status, **kwargs)
|
@@ -959,7 +971,7 @@ class RestModel(object):
|
|
959
971
|
has_perms = True
|
960
972
|
if perms is not None:
|
961
973
|
has_perms = cls.on_rest_list_has_perms(request, perms, qset)
|
962
|
-
|
974
|
+
|
963
975
|
if request.group:
|
964
976
|
qset = cls.on_rest_filter_children(request, qset)
|
965
977
|
if not has_perms:
|
@@ -1040,7 +1052,7 @@ class RestModel(object):
|
|
1040
1052
|
elif dr_offset > 0:
|
1041
1053
|
dr_start = dr_start + timedelta(minutes=dr_offset)
|
1042
1054
|
dr_end = dr_end + timedelta(minutes=dr_offset)
|
1043
|
-
|
1055
|
+
|
1044
1056
|
rh.debug("tr_end changing", str(dr_start), str(dr_end))
|
1045
1057
|
dr_field = request.DATA.get("dr_field", getattr(cls.RestMeta, "DATE_RANGE_FIELD", "created"))
|
1046
1058
|
q = dict()
|
@@ -1145,7 +1157,7 @@ class RestModel(object):
|
|
1145
1157
|
cls._boundRest()
|
1146
1158
|
if format.startswith("json"):
|
1147
1159
|
return GRAPH_HELPERS.views.restJSON(
|
1148
|
-
request, qset, fields,
|
1160
|
+
request, qset, fields,
|
1149
1161
|
name, format_size, localize=localize)
|
1150
1162
|
elif format.startswith("csv"):
|
1151
1163
|
return GRAPH_HELPERS.views.restCSV(
|
@@ -1159,7 +1171,7 @@ class RestModel(object):
|
|
1159
1171
|
return GRAPH_HELPERS.views.restFlat(
|
1160
1172
|
request, qset, fields,
|
1161
1173
|
name, format_size, localize=localize)
|
1162
|
-
|
1174
|
+
|
1163
1175
|
@classmethod
|
1164
1176
|
def on_rest_list_summary(cls, request, qset):
|
1165
1177
|
cls._boundRest()
|
@@ -1589,7 +1601,7 @@ class RestModel(object):
|
|
1589
1601
|
"group": "group__kind"
|
1590
1602
|
}
|
1591
1603
|
|
1592
|
-
QUERY_FIELDS can be used to restrict what fields can be queried
|
1604
|
+
QUERY_FIELDS can be used to restrict what fields can be queried
|
1593
1605
|
and add special fields:
|
1594
1606
|
|
1595
1607
|
?group__kind=iso
|
@@ -1600,7 +1612,7 @@ class RestModel(object):
|
|
1600
1612
|
or
|
1601
1613
|
<field_name>__<operator>=value
|
1602
1614
|
|
1603
|
-
You cannot do <field_name>__<sub_field>__<operator> unless
|
1615
|
+
You cannot do <field_name>__<sub_field>__<operator> unless
|
1604
1616
|
<field_name>__<sub_field> is
|
1605
1617
|
|
1606
1618
|
allowed <operator>s
|
rest/requestex.py
CHANGED
@@ -18,12 +18,12 @@ SAFE_ASCII = set(string.printable)
|
|
18
18
|
def createFakeRequest(method="GET", **kwargs):
|
19
19
|
member = kwargs.get("member", objict(is_authenticated=True, is_staff=True, is_superuser=True))
|
20
20
|
return objict(
|
21
|
-
META={},
|
21
|
+
META={},
|
22
22
|
user=member,
|
23
23
|
member=member,
|
24
24
|
group=kwargs.get("group", None),
|
25
|
-
method=method,
|
26
|
-
_started=0,
|
25
|
+
method=method,
|
26
|
+
_started=0,
|
27
27
|
DATA=RequestData(data=kwargs))
|
28
28
|
|
29
29
|
|
@@ -284,7 +284,6 @@ class RequestData(object):
|
|
284
284
|
if value == '':
|
285
285
|
if field_type in [int, float]:
|
286
286
|
return field_type(0)
|
287
|
-
|
288
287
|
if field_type in [int, str, float, str]:
|
289
288
|
value = field_type(value)
|
290
289
|
elif field_type is datetime:
|
rest/serializers/response.py
CHANGED
@@ -1,6 +1,8 @@
|
|
1
1
|
from django.http import HttpResponse
|
2
2
|
from django.shortcuts import render
|
3
3
|
from django.db.models.query import QuerySet
|
4
|
+
from django.core.cache import cache
|
5
|
+
import hashlib
|
4
6
|
|
5
7
|
from rest import settings
|
6
8
|
from version import VERSION
|
@@ -13,6 +15,27 @@ from . import csv
|
|
13
15
|
from . import excel
|
14
16
|
# from . import profiler
|
15
17
|
STATUS_ON_PERM_DENIED = settings.get("STATUS_ON_PERM_DENIED", 403)
|
18
|
+
REST_LIST_CACHE_COUNT = settings.get("REST_LIST_CACHE_COUNT", True)
|
19
|
+
|
20
|
+
|
21
|
+
def get_query_hash(queryset):
|
22
|
+
"""Generate a unique hash based on the queryset's SQL query and params."""
|
23
|
+
sql_query = str(queryset.query)
|
24
|
+
return hashlib.sha256(sql_query.encode()).hexdigest()
|
25
|
+
|
26
|
+
|
27
|
+
def get_cached_count(queryset, hash=None, timeout=1800):
|
28
|
+
"""
|
29
|
+
Retrieves the cached count for a queryset or calculates & caches it.
|
30
|
+
"""
|
31
|
+
if hash is None:
|
32
|
+
hash = get_query_hash(queryset)
|
33
|
+
cache_key = f"rest_hash_{hash}"
|
34
|
+
count = cache.get(cache_key)
|
35
|
+
if count is None:
|
36
|
+
count = queryset.count()
|
37
|
+
cache.set(cache_key, count, timeout=timeout)
|
38
|
+
return count
|
16
39
|
|
17
40
|
|
18
41
|
def restStatus(request, status, data={}, **kwargs):
|
@@ -41,13 +64,20 @@ def restGet(request, obj, fields=None, extra=[], exclude=[], recurse_into=[], **
|
|
41
64
|
data["elapsed"] = get_request_elapsed(request)
|
42
65
|
return restResult(request, data)
|
43
66
|
|
67
|
+
|
44
68
|
# @profiler.timeit
|
45
69
|
def restList(request, qset, size=25, start=0, sort=None, fields=None, extra=[], exclude=[], recurse_into=[], **kwargs):
|
46
|
-
count = 0
|
47
|
-
|
48
|
-
|
49
|
-
|
50
|
-
|
70
|
+
count = kwargs.get('count', 0)
|
71
|
+
hash = None
|
72
|
+
if count == 0:
|
73
|
+
if isinstance(qset, QuerySet):
|
74
|
+
if REST_LIST_CACHE_COUNT and not request.DATA.get("no_cache", False):
|
75
|
+
hash = get_query_hash(qset)
|
76
|
+
count = get_cached_count(qset, hash)
|
77
|
+
else:
|
78
|
+
count = qset.count()
|
79
|
+
elif isinstance(qset, list):
|
80
|
+
count = len(qset)
|
51
81
|
start = request.DATA.get("start", start, field_type=int)
|
52
82
|
size = request.DATA.get("size", size, field_type=int)
|
53
83
|
sort = request.DATA.get("sort", sort)
|
@@ -56,6 +86,8 @@ def restList(request, qset, size=25, start=0, sort=None, fields=None, extra=[],
|
|
56
86
|
data["datetime"] = int(time.time())
|
57
87
|
data["elapsed"] = get_request_elapsed(request)
|
58
88
|
data["count"] = count
|
89
|
+
if hash is not None:
|
90
|
+
data["hash"] = hash
|
59
91
|
if "response_params" in kwargs:
|
60
92
|
for k, v in kwargs.get("response_params").items():
|
61
93
|
data[k] = v
|
rest/service.py
ADDED
@@ -0,0 +1,135 @@
|
|
1
|
+
import os
|
2
|
+
import signal
|
3
|
+
import time
|
4
|
+
import argparse
|
5
|
+
import logging
|
6
|
+
import daemon
|
7
|
+
from daemon import pidfile
|
8
|
+
from watchdog.observers import Observer
|
9
|
+
from watchdog.events import FileSystemEventHandler
|
10
|
+
|
11
|
+
|
12
|
+
class Service(FileSystemEventHandler):
    """Daemonized service controller with PID-file lifecycle management.

    Optionally watches a file (e.g. a config file) via watchdog and
    restarts the service whenever that file is modified.
    """

    def __init__(self, name, pid_file, watch_file=None, require_user=None, logger=None):
        self.name = name
        self.pid_file = pid_file
        self.watch_file = os.path.realpath(watch_file) if watch_file else None
        self.require_user = require_user
        # BUG FIX: `logger` was accepted but never stored, so every
        # log_info() call raised AttributeError.
        self.logger = logger
        self.observer = None

    def log_info(self, *args):
        """Log an info-level message if a logger is configured."""
        if self.logger:
            # BUG FIX: previously called self.log_info(*args), recursing
            # forever whenever a logger was set.
            self.logger.info(*args)

    def log_error(self, *args):
        """Log an error-level message if a logger is configured."""
        if self.logger:
            self.logger.error(*args)

    def start(self):
        """Daemonize and run the service unless it is already running."""
        if self.is_running():
            self.log_info("Service is already running.")
            return

        self.log_info(f"Starting {self.name}...")

        with daemon.DaemonContext(
            pidfile=pidfile.TimeoutPIDLockFile(self.pid_file),
            stdout=open('/tmp/service.log', 'a+'),
            stderr=open('/tmp/service_error.log', 'a+'),
            signal_map={
                signal.SIGTERM: self.stop,
                signal.SIGHUP: self.restart
            }
        ):
            self.run()

    def stop(self, *args):
        """Stop the daemon: SIGTERM first, escalate to SIGKILL, remove the PID file."""
        pid = self.get_pid()
        if not pid:
            self.log_info("Service is not running.")
            return

        self.log_info(f"Stopping {self.name} (PID: {pid})...")
        try:
            os.kill(pid, signal.SIGTERM)
            time.sleep(1)
            if self.is_running():
                # still alive after SIGTERM; force-kill
                os.kill(pid, signal.SIGKILL)
            if os.path.exists(self.pid_file):
                os.remove(self.pid_file)
            self.log_info("Service stopped.")
        except OSError:
            # BUG FIX: used self.logger.error directly, which crashed with
            # AttributeError when no logger was configured.
            self.log_error(f"Failed to stop process {pid}")

    def restart(self, *args):
        """Stop then start the service again."""
        self.log_info("Restarting service...")
        self.stop()
        time.sleep(1)
        self.start()

    def status(self):
        """Print whether the service is currently running."""
        if self.is_running():
            print(f"{self.name} is running (PID: {self.get_pid()})")
        else:
            print(f"{self.name} is not running")

    def is_running(self):
        """Check if the process is running using os.kill(pid, 0)"""
        pid = self.get_pid()
        if pid:
            try:
                os.kill(pid, 0)  # Does not kill, just checks if process exists
                return True
            except OSError:
                return False
        return False

    def get_pid(self):
        """Return the PID recorded in the PID file, or None if absent/corrupt."""
        if os.path.exists(self.pid_file):
            try:
                with open(self.pid_file, "r") as f:
                    pid = int(f.read().strip())
                return pid
            except ValueError:
                return None
        return None

    def run(self):
        """Main daemon loop"""
        self.log_info(f"{self.name} is running in background.")
        if self.watch_file:
            self.start_watcher()
        self.on_run()

    def on_run(self):
        # Default work loop; subclasses override this with real work.
        while True:
            time.sleep(5)

    def start_watcher(self):
        """Start a watchdog Observer on the directory containing watch_file."""
        self.log_info(f"Watching file: {self.watch_file}")
        self.observer = Observer()
        self.observer.schedule(self, path=os.path.dirname(self.watch_file), recursive=False)
        self.observer.start()

    def on_modified(self, event):
        """watchdog callback: restart when the watched file changes."""
        if self.watch_file and event.src_path == self.watch_file:
            self.log_info("Config file changed, restarting service...")
            self.restart()
|
114
|
+
|
115
|
+
|
116
|
+
def main():
    """Command-line entry point: dispatch start/stop/restart/status/run."""
    parser = argparse.ArgumentParser(description="Manage the service daemon")
    parser.add_argument("command", choices=["start", "stop", "restart", "status", "run"])
    args = parser.parse_args()

    service = Service(name="MyDaemon", pid_file="/tmp/mydaemon.pid")

    # argparse's `choices` guarantees the command is one of these keys.
    dispatch = {
        "start": service.start,
        "stop": service.stop,
        "restart": service.restart,
        "status": service.status,
        "run": service.run,
    }
    dispatch[args.command]()

if __name__ == "__main__":
    main()
|
rest/urls.py
CHANGED
taskqueue/worker.py
CHANGED
@@ -46,23 +46,23 @@ class WorkManager(object):
|
|
46
46
|
self.lock = threading.RLock()
|
47
47
|
if not self.logger:
|
48
48
|
self.logger = getLogger("root", filename="tq_worker.log")
|
49
|
-
self.logger.info("starting manager, workers: {
|
50
|
-
self.logger.info("handling: {
|
49
|
+
self.logger.info(f"starting manager, workers: {self.worker_count}")
|
50
|
+
self.logger.info(f"handling: {self.subscribe_to}")
|
51
51
|
if USE_THREADS:
|
52
52
|
self._pool = futures.ThreadPoolExecutor(max_workers=self.worker_count)
|
53
53
|
else:
|
54
54
|
self._pool = futures.ProcessPoolExecutor(max_workers=self.worker_count)
|
55
55
|
|
56
56
|
def updateCounts(self):
    """Log the current running/pending task counts for this manager."""
    running, pending = self._running_count, self._pending_count
    self.logger.info(f"running: {running} --- pending: {pending}")
|
58
58
|
|
59
59
|
def addTask(self, task):
|
60
60
|
if task.is_stale:
|
61
|
-
self.logger.warning("task({}) is now stale"
|
61
|
+
self.logger.warning(f"task({task.id}) is now stale")
|
62
62
|
task.failed("stale")
|
63
63
|
return
|
64
64
|
if task.id in self._scheduled_tasks:
|
65
|
-
self.logger.error("task({}) is already scheduled"
|
65
|
+
self.logger.error(f"task({task.id}) is already scheduled")
|
66
66
|
return
|
67
67
|
task.manager = self
|
68
68
|
with self.lock:
|
@@ -76,7 +76,7 @@ class WorkManager(object):
|
|
76
76
|
# self.logger.info("processing event", event)
|
77
77
|
if event.type == "subscribe":
|
78
78
|
# confirmation we subscribed
|
79
|
-
self.logger.info("succesfully subscribed to: {
|
79
|
+
self.logger.info(f"succesfully subscribed to: {event.channel}")
|
80
80
|
return
|
81
81
|
|
82
82
|
self.logger.info(f"new_event@{event.channel}")
|
@@ -118,7 +118,7 @@ class WorkManager(object):
|
|
118
118
|
cached_task = self._scheduled_tasks.get(task.id, None)
|
119
119
|
if not cached_task:
|
120
120
|
# task is not scheduled
|
121
|
-
self.logger.warning("canceling non scheduled task({
|
121
|
+
self.logger.warning(f"canceling non scheduled task({task.id})")
|
122
122
|
task.state = -2
|
123
123
|
task.reason = reason
|
124
124
|
task.save()
|
@@ -130,11 +130,11 @@ class WorkManager(object):
|
|
130
130
|
task = cached_task
|
131
131
|
if task.future.running():
|
132
132
|
# right now we don't support canceling a running task but we will try!
|
133
|
-
self.logger.warning("attempting to stop running task({
|
133
|
+
self.logger.warning(f"attempting to stop running task({task.id})")
|
134
134
|
if self.killWorker(task._thread_id):
|
135
135
|
time.sleep(2.0)
|
136
136
|
if task.future.done():
|
137
|
-
self.logger.info("succesfully killed task({}@{
|
137
|
+
self.logger.info(f"succesfully killed task({task.id}@{task._thread_id})")
|
138
138
|
task.state = -2
|
139
139
|
task.reason = reason
|
140
140
|
task.save()
|
@@ -144,14 +144,14 @@ class WorkManager(object):
|
|
144
144
|
self.logger.warning("failed to kill worker")
|
145
145
|
else:
|
146
146
|
if task.future.cancel():
|
147
|
-
self.logger.info("succesfully canceled task({
|
147
|
+
self.logger.info(f"succesfully canceled task({task.id})")
|
148
148
|
task.state = -2
|
149
149
|
task.reason = reason
|
150
150
|
task.save()
|
151
151
|
self.removeTask(task)
|
152
152
|
return
|
153
153
|
else:
|
154
|
-
self.logger.error("failed to cancel task({
|
154
|
+
self.logger.error(f"failed to cancel task({task.id})")
|
155
155
|
|
156
156
|
def killWorker(self, thread_id):
|
157
157
|
import ctypes
|
@@ -175,16 +175,16 @@ class WorkManager(object):
|
|
175
175
|
for task in tasks:
|
176
176
|
if task.channel in self.subscribe_to:
|
177
177
|
if task.cancel_requested:
|
178
|
-
self.logger.info("task has cancel request {
|
178
|
+
self.logger.info(f"task has cancel request {task.id}")
|
179
179
|
task.state = -2
|
180
180
|
if not task.reason:
|
181
181
|
task.reason = "task canceled"
|
182
182
|
task.save()
|
183
183
|
continue
|
184
|
-
self.logger.debug("resubmitting job {
|
184
|
+
self.logger.debug(f"resubmitting job {task.id}")
|
185
185
|
self.addTask(task)
|
186
186
|
else:
|
187
|
-
self.logger.warning("ignore job {}:{
|
187
|
+
self.logger.warning(f"ignore job {task.id}:{task.channel}")
|
188
188
|
|
189
189
|
def _on_webrequest(self, task):
|
190
190
|
if http.REQUEST(task):
|
@@ -254,7 +254,7 @@ class WorkManager(object):
|
|
254
254
|
def on_run_task(self, task):
|
255
255
|
""" Handles execution of a task with structured error handling and refactored logic. """
|
256
256
|
|
257
|
-
self.logger.info("running task({
|
257
|
+
self.logger.info(f"running task({task.id})")
|
258
258
|
|
259
259
|
# Start task and handle cancel/stale conditions early
|
260
260
|
if not self._initialize_task(task):
|
@@ -275,14 +275,14 @@ class WorkManager(object):
|
|
275
275
|
self.on_task_started(task)
|
276
276
|
task.refresh_from_db()
|
277
277
|
task._thread_id = threading.current_thread().ident
|
278
|
-
self.logger.debug("running on thread:{
|
278
|
+
self.logger.debug(f"running on thread:{task._thread_id}")
|
279
279
|
|
280
280
|
if task.state not in [0, 1, 2, 10] or task.cancel_requested:
|
281
|
-
self.logger.info("task({}) was canceled?"
|
281
|
+
self.logger.info(f"task({task.id}) was canceled?")
|
282
282
|
return self._end_task(task)
|
283
283
|
|
284
284
|
if task.is_stale:
|
285
|
-
self.logger.warning("task({}) is now stale"
|
285
|
+
self.logger.warning(f"task({task.id}) is now stale")
|
286
286
|
task.failed("stale")
|
287
287
|
return self._end_task(task)
|
288
288
|
|
@@ -324,13 +324,13 @@ class WorkManager(object):
|
|
324
324
|
except Exception as err:
|
325
325
|
self._handle_task_exception(task, err)
|
326
326
|
except SystemExit:
|
327
|
-
self.logger.error("task({}) was killed"
|
327
|
+
self.logger.error(f"task({task.id}) was killed")
|
328
328
|
finally:
|
329
329
|
self._end_task(task)
|
330
330
|
|
331
331
|
def _handle_task_exception(self, task, err):
|
332
332
|
""" Handles exceptions during task execution. """
|
333
|
-
self.logger.exception("task({}) had exception: {}"
|
333
|
+
self.logger.exception(f"task({task.id}) had exception: {err}")
|
334
334
|
task.log_exception(err)
|
335
335
|
|
336
336
|
if "connection already closed" in str(err).lower():
|
@@ -342,7 +342,7 @@ class WorkManager(object):
|
|
342
342
|
def _end_task(self, task):
|
343
343
|
""" Ensures proper cleanup and logging at the end of the task. """
|
344
344
|
self.on_task_ended(task)
|
345
|
-
self.logger.info("task({}) finished with state {
|
345
|
+
self.logger.info(f"task({task.id}) finished with state {task.state}")
|
346
346
|
|
347
347
|
def run_forever(self):
|
348
348
|
self.logger.info("starting work manager...")
|
@@ -379,7 +379,7 @@ class WorkManager(object):
|
|
379
379
|
if self.host_channel not in self.subscribe_to:
|
380
380
|
self.subscribe_to.append(self.host_channel)
|
381
381
|
for key in self.subscribe_to:
|
382
|
-
self.logger.info("subscribing to: {}"
|
382
|
+
self.logger.info(f"subscribing to: {key}")
|
383
383
|
self.pubsub.subscribe(key)
|
384
384
|
self.pubsub.subscribe("tq_cancel")
|
385
385
|
self.pubsub.subscribe("tq_restart")
|
@@ -419,7 +419,7 @@ class WorkManager(object):
|
|
419
419
|
except Exception as err:
|
420
420
|
self.logger.exception(err)
|
421
421
|
|
422
|
-
self.logger.info("waiting for {} running tasks, timeout: {}"
|
422
|
+
self.logger.info(f"waiting for {self._running_count} running tasks, timeout: {timeout}")
|
423
423
|
time.sleep(1.0)
|
424
424
|
self.__close()
|
425
425
|
timeout_at = time.time() + timeout
|
File without changes
|
File without changes
|