n0s1 1.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- n0s1-1.0.1/MANIFEST.in +1 -0
- n0s1-1.0.1/PKG-INFO +69 -0
- n0s1-1.0.1/README.md +39 -0
- n0s1-1.0.1/setup.cfg +4 -0
- n0s1-1.0.1/setup.py +77 -0
- n0s1-1.0.1/src/n0s1/__init__.py +1 -0
- n0s1-1.0.1/src/n0s1/clients/__init__.py +0 -0
- n0s1-1.0.1/src/n0s1/clients/http_client.py +102 -0
- n0s1-1.0.1/src/n0s1/clients/linear_graphql_client.py +222 -0
- n0s1-1.0.1/src/n0s1/config/config.yaml +18 -0
- n0s1-1.0.1/src/n0s1/config/regex.toml +823 -0
- n0s1-1.0.1/src/n0s1/controllers/__init__.py +0 -0
- n0s1-1.0.1/src/n0s1/controllers/jira_controller.py +78 -0
- n0s1-1.0.1/src/n0s1/controllers/linear_controller.py +72 -0
- n0s1-1.0.1/src/n0s1/controllers/platform_controller.py +50 -0
- n0s1-1.0.1/src/n0s1/n0s1.py +322 -0
- n0s1-1.0.1/src/n0s1.egg-info/PKG-INFO +69 -0
- n0s1-1.0.1/src/n0s1.egg-info/SOURCES.txt +20 -0
- n0s1-1.0.1/src/n0s1.egg-info/dependency_links.txt +1 -0
- n0s1-1.0.1/src/n0s1.egg-info/entry_points.txt +3 -0
- n0s1-1.0.1/src/n0s1.egg-info/requires.txt +10 -0
- n0s1-1.0.1/src/n0s1.egg-info/top_level.txt +1 -0
n0s1-1.0.1/MANIFEST.in
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
include src/n0s1/config/*
|
n0s1-1.0.1/PKG-INFO
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: n0s1
|
|
3
|
+
Version: 1.0.1
|
|
4
|
+
Summary: n0s1 is a secret scanner for Project Management and Issue Tracker tools such as Jira and Linear.
|
|
5
|
+
Home-page: https://spark1.us/n0s1
|
|
6
|
+
Author: Spark 1
|
|
7
|
+
Author-email: contact@spark1.us
|
|
8
|
+
License: UNKNOWN
|
|
9
|
+
Project-URL: Bug Reports, https://github.com/spark1security/n0s1/issues
|
|
10
|
+
Project-URL: Funding, https://www.spark1.us
|
|
11
|
+
Project-URL: Source, https://github.com/spark1security/n0s1
|
|
12
|
+
Description: # n0s1 - Secret Scanner
|
|
13
|
+
n0s1 (pronounced as nosy, /ˈnōzē/) is an open source secret scanner for Project Management and Issue Tracker tools.
|
|
14
|
+
|
|
15
|
+
The scanner will traverse all items within the target platform (e.g. Jira or Linear) and find leaked secrets in the ticket's title, body and comments.
|
|
16
|
+
|
|
17
|
+
The secrets are matched based on an extensible configuration file (regex.toml). The scanner looks for sensitive data such as:
|
|
18
|
+
* Github Personal Access Tokens
|
|
19
|
+
* GitLab Personal Access Tokens
|
|
20
|
+
* AWS Access Tokens
|
|
21
|
+
* PKCS8 private keys
|
|
22
|
+
* RSA private keys
|
|
23
|
+
* SSH private keys
|
|
24
|
+
* npm access tokens
|
|
25
|
+
|
|
26
|
+
### Currently supported target platforms:
|
|
27
|
+
* [Jira](https://www.atlassian.com/software/jira)
|
|
28
|
+
* [Linear](https://linear.app/)
|
|
29
|
+
|
|
30
|
+
### Usage
|
|
31
|
+
```bash
|
|
32
|
+
cd src/n0s1
|
|
33
|
+
python3 -m pip install n0s1
|
|
34
|
+
n0s1 jira_scan --server "https://<YOUR_JIRA_SERVER>.atlassian.net" --api-key "<YOUR_JIRA_API_TOKEN>"
|
|
35
|
+
```
|
|
36
|
+
From source:
|
|
37
|
+
```bash
|
|
38
|
+
cd src/n0s1
|
|
39
|
+
python3 -m venv n0s1_python
|
|
40
|
+
source n0s1_python/bin/activate
|
|
41
|
+
python3 -m pip install -r ../../requirements.txt
|
|
42
|
+
python3 n0s1.py jira_scan --server "https://<YOUR_JIRA_SERVER>.atlassian.net" --api-key "<YOUR_JIRA_API_TOKEN>"
|
|
43
|
+
deactivate
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
## Community
|
|
47
|
+
|
|
48
|
+
n0s1 is a [Spark 1](https://spark1.us) open source project.
|
|
49
|
+
Learn about our open source work and portfolio [here](https://spark1.us/n0s1).
|
|
50
|
+
Contact us about any matter by opening a GitHub Issue [here](https://github.com/spark1security/n0s1/issues).
|
|
51
|
+
|
|
52
|
+
Keywords: security,cybersecurity,scanner,secret scanner,secret leak,data leak,Jira,Linear,security scanner
|
|
53
|
+
Platform: UNKNOWN
|
|
54
|
+
Classifier: Development Status :: 3 - Alpha
|
|
55
|
+
Classifier: Intended Audience :: Developers
|
|
56
|
+
Classifier: Intended Audience :: Information Technology
|
|
57
|
+
Classifier: Operating System :: OS Independent
|
|
58
|
+
Classifier: Topic :: Security
|
|
59
|
+
Classifier: Topic :: Software Development
|
|
60
|
+
Classifier: Topic :: System :: Monitoring
|
|
61
|
+
Classifier: Topic :: Utilities
|
|
62
|
+
Classifier: License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)
|
|
63
|
+
Classifier: Programming Language :: Python :: 3.7
|
|
64
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
65
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
66
|
+
Requires-Python: >=3.7, <4
|
|
67
|
+
Description-Content-Type: text/markdown
|
|
68
|
+
Provides-Extra: dev
|
|
69
|
+
Provides-Extra: test
|
n0s1-1.0.1/README.md
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
# n0s1 - Secret Scanner
|
|
2
|
+
n0s1 (pronounced as nosy, /ˈnōzē/) is an open source secret scanner for Project Management and Issue Tracker tools.
|
|
3
|
+
|
|
4
|
+
The scanner will traverse all items within the target platform (e.g. Jira or Linear) and find leaked secrets in the ticket's title, body and comments.
|
|
5
|
+
|
|
6
|
+
The secrets are matched based on an extensible configuration file (regex.toml). The scanner looks for sensitive data such as:
|
|
7
|
+
* Github Personal Access Tokens
|
|
8
|
+
* GitLab Personal Access Tokens
|
|
9
|
+
* AWS Access Tokens
|
|
10
|
+
* PKCS8 private keys
|
|
11
|
+
* RSA private keys
|
|
12
|
+
* SSH private keys
|
|
13
|
+
* npm access tokens
|
|
14
|
+
|
|
15
|
+
### Currently supported target platforms:
|
|
16
|
+
* [Jira](https://www.atlassian.com/software/jira)
|
|
17
|
+
* [Linear](https://linear.app/)
|
|
18
|
+
|
|
19
|
+
### Usage
|
|
20
|
+
```bash
|
|
21
|
+
cd src/n0s1
|
|
22
|
+
python3 -m pip install n0s1
|
|
23
|
+
n0s1 jira_scan --server "https://<YOUR_JIRA_SERVER>.atlassian.net" --api-key "<YOUR_JIRA_API_TOKEN>"
|
|
24
|
+
```
|
|
25
|
+
From source:
|
|
26
|
+
```bash
|
|
27
|
+
cd src/n0s1
|
|
28
|
+
python3 -m venv n0s1_python
|
|
29
|
+
source n0s1_python/bin/activate
|
|
30
|
+
python3 -m pip install -r ../../requirements.txt
|
|
31
|
+
python3 n0s1.py jira_scan --server "https://<YOUR_JIRA_SERVER>.atlassian.net" --api-key "<YOUR_JIRA_API_TOKEN>"
|
|
32
|
+
deactivate
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
## Community
|
|
36
|
+
|
|
37
|
+
n0s1 is a [Spark 1](https://spark1.us) open source project.
|
|
38
|
+
Learn about our open source work and portfolio [here](https://spark1.us/n0s1).
|
|
39
|
+
Contact us about any matter by opening a GitHub Issue [here](https://github.com/spark1security/n0s1/issues).
|
n0s1-1.0.1/setup.cfg
ADDED
n0s1-1.0.1/setup.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
"""A setuptools based setup module.
|
|
2
|
+
|
|
3
|
+
See:
|
|
4
|
+
https://packaging.python.org/guides/distributing-packages-using-setuptools/
|
|
5
|
+
https://github.com/pypa/sampleproject
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import pathlib
|
|
9
|
+
import re
|
|
10
|
+
|
|
11
|
+
# Always prefer setuptools over distutils
|
|
12
|
+
from setuptools import setup, find_packages
|
|
13
|
+
|
|
14
|
+
here = pathlib.Path(__file__).parent.resolve() # current path
|
|
15
|
+
long_description = (here / "README.md").read_text(encoding="utf-8")
|
|
16
|
+
with open(here / "requirements.txt") as fp:
|
|
17
|
+
install_reqs = [r.rstrip() for r in fp.readlines() if not r.startswith("#")]
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def get_version():
|
|
21
|
+
file = here / "src/n0s1/__init__.py"
|
|
22
|
+
return re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', file.read_text(), re.M).group(1)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
setup(
|
|
26
|
+
name="n0s1",
|
|
27
|
+
version=get_version(),
|
|
28
|
+
description="n0s1 is a secret scanner for Project Management and Issue Tracker tools such as Jira and Linear.",
|
|
29
|
+
long_description=long_description,
|
|
30
|
+
long_description_content_type="text/markdown",
|
|
31
|
+
url="https://spark1.us/n0s1",
|
|
32
|
+
author="Spark 1",
|
|
33
|
+
author_email="contact@spark1.us",
|
|
34
|
+
classifiers=["Development Status :: 3 - Alpha",
|
|
35
|
+
"Intended Audience :: Developers",
|
|
36
|
+
"Intended Audience :: Information Technology",
|
|
37
|
+
"Operating System :: OS Independent",
|
|
38
|
+
"Topic :: Security",
|
|
39
|
+
"Topic :: Software Development",
|
|
40
|
+
"Topic :: System :: Monitoring",
|
|
41
|
+
"Topic :: Utilities",
|
|
42
|
+
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
|
|
43
|
+
"Programming Language :: Python :: 3.7",
|
|
44
|
+
"Programming Language :: Python :: 3.8",
|
|
45
|
+
"Programming Language :: Python :: 3.9",
|
|
46
|
+
], # Classifiers help users find your project by categorizing it https://pypi.org/classifiers/
|
|
47
|
+
keywords="security, cybersecurity, scanner, secret scanner, secret leak, data leak, Jira, Linear, security scanner",
|
|
48
|
+
package_dir={"": "src"},
|
|
49
|
+
packages=find_packages(where="src"),
|
|
50
|
+
python_requires=">=3.7, <4",
|
|
51
|
+
|
|
52
|
+
# For an analysis of "install_requires" vs pip's requirements files see:
|
|
53
|
+
# https://packaging.python.org/en/latest/requirements.html
|
|
54
|
+
install_requires=install_reqs,
|
|
55
|
+
|
|
56
|
+
# List additional groups of dependencies here (e.g. development
|
|
57
|
+
# dependencies). Users will be able to install these using the "extras"
|
|
58
|
+
# syntax, for example: $ pip install sampleproject[dev]
|
|
59
|
+
# Similar to `install_requires` above, these must be valid existing projects
|
|
60
|
+
extras_require={"dev": ["check-manifest"],
|
|
61
|
+
"test": ["coverage"],
|
|
62
|
+
},
|
|
63
|
+
|
|
64
|
+
include_package_data=True,
|
|
65
|
+
package_data={"n0s1": ["src/n0s1/config/*"],
|
|
66
|
+
},
|
|
67
|
+
|
|
68
|
+
# The following would provide a command called `n0s1` which
|
|
69
|
+
# executes the function `main` from this package when invoked:
|
|
70
|
+
entry_points={"console_scripts": ["n0s1=n0s1.n0s1:main", ],
|
|
71
|
+
},
|
|
72
|
+
|
|
73
|
+
project_urls={"Bug Reports": "https://github.com/spark1security/n0s1/issues",
|
|
74
|
+
"Funding": "https://www.spark1.us",
|
|
75
|
+
"Source": "https://github.com/spark1security/n0s1",
|
|
76
|
+
},
|
|
77
|
+
)
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "1.0.1"
|
|
File without changes
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
import requests
|
|
2
|
+
from requests.models import Response
|
|
3
|
+
from types import ModuleType
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class HttpClient:
    """Minimal HTTP client wrapping ``requests``.

    Every request merges the per-call headers with the default headers
    supplied at construction time and logs the response at debug level.

    Args:
        headers: default headers attached to every request.
        logging: logging module (or compatible object) used for debug output.
        uri: base URI of the target service; mandatory.

    Raises:
        Exception: when ``uri`` is not provided.
    """

    def __init__(
        self,
        headers: dict,
        logging: ModuleType,
        uri: str = None,
    ) -> None:
        if uri is None:
            raise Exception("Must specify URI")
        self.uri = uri
        self.headers = headers
        self.logging = logging

    def _merge_headers(self, headers: dict = None) -> dict:
        """Combine per-request headers with the client defaults.

        The client defaults win on key collisions — this preserves the
        original ``{**headers, **self.headers}`` merge order.
        """
        if headers:
            return {**headers, **self.headers}
        return self.headers

    def _log_response(self, method: str, url: str, response) -> None:
        """Log a response's status code and body at debug level."""
        self.logging.debug(
            f"HTTP {method} request status: [{response.status_code}]. URL: {url}"
        )
        self.logging.debug(response.text)

    def _delete_request(self, url: str, params: dict = None, headers: dict = None, data=None) -> Response:
        """Send an HTTP DELETE request and return the raw response."""
        response = requests.delete(
            url,
            params=params,
            headers=self._merge_headers(headers),
            data=data,
        )
        self._log_response("DELETE", url, response)
        return response

    def _get_request(self, url: str, params: dict = None, headers: dict = None, data=None, timeout=None) -> Response:
        """Send an HTTP GET request (optional timeout) and return the raw response."""
        response = requests.get(
            url,
            params=params,
            headers=self._merge_headers(headers),
            data=data,
            timeout=timeout,
        )
        self._log_response("GET", url, response)
        return response

    def _post_request(
        self, url: str, params: dict = None, headers: dict = None, data=None, json: dict = None
    ) -> Response:
        """Send an HTTP POST request with optional form ``data`` and/or ``json`` body."""
        response = requests.post(
            url,
            params=params,
            headers=self._merge_headers(headers),
            data=data,
            json=json,
        )
        # Originally POST logged only the body; status is logged too now,
        # for consistency with GET/DELETE.
        self._log_response("POST", url, response)
        return response

    def _put_request(self, url: str, params: dict = None, headers: dict = None, data=None) -> Response:
        """Send an HTTP PUT request and return the raw response."""
        response = requests.put(
            url,
            params=params,
            headers=self._merge_headers(headers),
            data=data,
        )
        # Status logging added for consistency with GET/DELETE.
        self._log_response("PUT", url, response)
        return response
|
|
102
|
+
|
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import time
|
|
3
|
+
from datetime import datetime, timezone
|
|
4
|
+
from types import ModuleType
|
|
5
|
+
|
|
6
|
+
try:
|
|
7
|
+
import clients.http_client as http_client
|
|
8
|
+
except:
|
|
9
|
+
import n0s1.clients.http_client as http_client
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _generate_query_issues_with_pagination(pagination_arguments):
    """Build a GraphQL query payload that pages through Linear issues.

    Args:
        pagination_arguments: a literal argument string spliced directly into
            the query, e.g. ``(first: 100)`` or
            ``(first: 100, after: "<cursor>")``.

    Returns:
        A ``{"query": ..., "variables": {}}`` dict ready to POST to the
        GraphQL endpoint. Each issue node carries id, identifier, url, title,
        description and its comments; ``pageInfo`` exposes the cursor needed
        for the next page.
    """
    query_issues_pagination = f"""
    {{
        issues{pagination_arguments} {{
            edges {{
                node {{
                    id
                    identifier
                    url
                    title
                    description
                    comments {{
                        nodes {{
                            id
                            body
                        }}
                    }}
                }}
                cursor
            }}
            pageInfo {{
                hasNextPage
                endCursor
            }}
        }}
    }}
    """
    query = {"query": query_issues_pagination, "variables": {}}
    return query
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class LinearGraphQLClient(http_client.HttpClient):
    """GraphQL client for the Linear API.

    Adds rate-limit-aware querying on top of ``HttpClient`` and exposes
    helpers to read issues/comments and to post comments or retitle issues.
    """

    def __init__(
        self,
        headers: dict,
        logging: ModuleType,
        uri: str = None,
    ) -> None:
        # Default to Linear's public GraphQL endpoint when no URI is given.
        if not uri:
            uri = "https://api.linear.app/graphql"
        super().__init__(headers, logging, uri)
        # Cached current-user placeholder (never populated in this file).
        self._user = None

    def graphql_query(self, query):
        """POST a GraphQL payload, backing off when near the rate limit.

        Returns the raw ``requests`` Response object (not parsed JSON).
        """
        # NOTE(review): self.uri already defaults to ".../graphql", so this
        # produces ".../graphql/graphql/" — confirm this is the intended URL.
        url = f"{self.uri}/graphql/"
        r = self._post_request(url, json=query)

        # Check for rate limiting
        # https://developers.linear.app/docs/graphql/working-with-the-graphql-api/rate-limiting
        if r.status_code == 200 or 400 <= r.status_code < 500:
            rate_limit = r.headers.get("X-RateLimit-Requests-Limit", -1)
            rate_limit_remaining = r.headers.get("X-RateLimit-Requests-Remaining", -1)
            rate_limit_reset = r.headers.get("X-RateLimit-Requests-Reset", -1)
            # Fewer than 20 requests left in the window: sleep until reset.
            if int(rate_limit_remaining) < 20:
                try:
                    # Reset header is presumably epoch milliseconds (divided
                    # by 1000 here) — TODO confirm against Linear docs.
                    timestamp_reset = float(rate_limit_reset) / 1000
                    datetime_now_obj = datetime.now(timezone.utc)
                    timestamp_now = datetime_now_obj.timestamp()
                    # +5 seconds safety margin past the reported reset time.
                    retry_after = int(timestamp_reset - timestamp_now) + 5

                    # NOTE(review): datetime.utcfromtimestamp is deprecated
                    # since Python 3.12; datetime.fromtimestamp(...,
                    # tz=timezone.utc) is the modern equivalent.
                    reset_datatime = datetime.utcfromtimestamp(timestamp_reset)
                    self.logging.warning(
                        f"Approaching rate limit! There are [{rate_limit_remaining}] requests remaining out of [{rate_limit}]. Current date: [{datetime_now_obj}] Rate Limit reset time: [{reset_datatime} UTC]. Retrying after [{retry_after}] seconds..."
                    )
                    # Cap the sleep at 2 hours to avoid hanging on a bogus
                    # reset timestamp.
                    if retry_after < 7200:
                        time.sleep(retry_after)
                    else:
                        # NOTE(review): module-level `logging` is used here
                        # while `self.logging` is used above — confirm which
                        # logger is intended.
                        logging.warning(f"Retry after period is too long: [{retry_after}]. Skipping retry period. Header X-RateLimit-Requests-Reset set to {rate_limit_reset} and UTC epoch seconds now is {timestamp_now}.")
                except Exception as e:
                    # Best effort: a malformed rate-limit header must not
                    # abort the scan.
                    logging.warning(e)
                    pass
        return r

    def get_curret_user(self):
        """Return the authenticated user (GraphQL ``viewer``) as parsed JSON.

        Returns None on any non-200 response. (Public name kept as-is —
        "curret" is a typo but renaming would break callers.)
        """
        query_me = f"""
        query Me {{
          viewer {{
            id
            name
            email
          }}
        }}
        """
        query = {"query": query_me}
        response = self.graphql_query(query)
        r = None
        if response.status_code == 200:
            r = response.json()
        return r

    def get_issue(self, id):
        """Fetch one issue (with its comments) by id; None on failure."""
        query_issue = f"""
        query Issue {{
          issue(id: "{id}") {{
            id
            identifier
            url
            title
            description
            comments {{
              nodes {{
                id
                body
              }}
            }}
          }}
        }}
        """
        query = {"query": query_issue}
        response = self.graphql_query(query)
        r = None
        if response.status_code == 200:
            r = response.json()
        return r

    def set_issue_title(self, issue_id, title):
        """Update an issue's title; returns the mutation result JSON or None.

        NOTE(review): ``title`` is interpolated into the query unescaped —
        quotes or backslashes in the title would break the GraphQL document.
        """
        query_issue = f"""
        mutation IssueUpdate {{
          issueUpdate(
            id: "{issue_id}",
            input: {{
              title: "{title}"
            }}
          ) {{
            success
            issue {{
              id
              title
              state {{
                id
                name
              }}
            }}
          }}
        }}
        """
        query = {"query": query_issue}
        response = self.graphql_query(query)
        r = None
        if response.status_code == 200:
            r = response.json()
        return r

    def add_comment(self, issue_id, comment):
        """Post a comment on an issue; returns the mutation result JSON or None."""
        # Escape raw newlines so the comment stays a valid single-line
        # GraphQL string literal (other special characters are not escaped).
        comment = comment.replace("\n", "\\n")
        query_issue = f"""
        mutation commentCreate {{
          commentCreate(
            input: {{
              issueId: "{issue_id}",
              body: "{comment}"
            }}
          )
          {{
            success
            comment {{
              id
            }}
          }}
        }}
        """
        query = {"query": query_issue}
        response = self.graphql_query(query)
        r = None
        if response.status_code == 200:
            r = response.json()
        return r

    def get_issues_and_comments(self, issues_per_page=100):
        """Generator yielding one parsed JSON page of issues at a time.

        Follows Linear's cursor-based pagination until
        ``pageInfo.hasNextPage`` is False or a request fails.
        """
        pagination_arguments = f"(first: {issues_per_page})"
        query = _generate_query_issues_with_pagination(pagination_arguments)
        response = self.graphql_query(query)

        page_num = 0
        has_next_page = False
        cursor = None

        # First page: yield it, then pick up the pagination cursor.
        if response.status_code == 200:
            r = response.json()
            yield r
            has_next_page = r.get("data", {}).get("issues", {}).get("pageInfo", {}).get("hasNextPage", False)
            cursor = r.get("data", {}).get("issues", {}).get("pageInfo", {}).get("endCursor", None)

        while has_next_page and cursor:
            pagination_arguments = f"(first: {issues_per_page}, after: \"{cursor}\")"
            query = _generate_query_issues_with_pagination(pagination_arguments)
            response = self.graphql_query(query)

            page_num += 1
            # NOTE(review): this counts requested pages * page size, not the
            # issues actually returned — the final page may be short.
            scanned_issues = page_num * issues_per_page
            self.logging.info(f"Total Linear issues scanned: [{scanned_issues}]. Page number: [{page_num}]. Paginating with cursor: [{cursor}]")

            # Reset loop state; only a successful response can continue it.
            has_next_page = False
            cursor = None

            if response.status_code == 200:
                r = response.json()
                yield r
                has_next_page = r.get("data", {}).get("issues", {}).get("pageInfo", {}).get("hasNextPage", False)
                cursor = r.get("data", {}).get("issues", {}).get("pageInfo", {}).get("endCursor", None)

        # Generator return value; only observable via StopIteration.value.
        return {}

    def get_all_issues_and_comments(self):
        """Collect every page into one issues structure and return it."""
        result = {"data": {"issues": {"edges": [], "pageInfo": {}}}}
        issues_per_page = 100
        for r in self.get_issues_and_comments(issues_per_page):
            if r:
                result["data"]["issues"]["edges"] += r.get("data", {}).get("issues", {}).get("edges", [])
        return result
|
|
222
|
+
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
maintainer: "Spark 1"
|
|
2
|
+
|
|
3
|
+
general_params:
|
|
4
|
+
regex_file: "config/regex.toml"
|
|
5
|
+
report_file: "n0s1_report.json"
|
|
6
|
+
post_comment: true
|
|
7
|
+
skip_comment: false
|
|
8
|
+
|
|
9
|
+
comment_params:
|
|
10
|
+
bot_name: "n0s1 bot"
|
|
11
|
+
secret_manager: "a secret manager tool"
|
|
12
|
+
contact_help: "contact@spark1.us"
|
|
13
|
+
message_template: "Hello! I'm {bot_name}, and I'm here to assist you in preventing data leaks.\nI apologize for being nosy, but it appears that there may have been potentially sensitive data posted here in the past.\n\n.\nHere are the details:\nPlatform:[{platform}] Field:[ticket {field}] ID:[{regex_config_id}] Description:[{regex_config_description}] Regex: {regex}\n############## Sanitized Secret Leak ##############\n {leak}\n############## Sanitized Secret Leak ##############\n.\n\nIf you can verify that this is indeed an actual data leak, please take the necessary steps to rotate the credentials and remove any previously shared sensitive information.\nFor future reference, consider using {secret_manager} whenever you need to share credentials or any other sensitive data securely.\nIf you require further assistance, please don't hesitate to reach out to us at {contact_help}\n\n{label}"
|
|
14
|
+
label: "n0s1bot_auto_comment_e869dd5fa15ca0749a350aac758c7f56f56ad9be1"
|
|
15
|
+
|
|
16
|
+
jira_params:
|
|
17
|
+
server: "http://localhost:2990/jira"
|
|
18
|
+
email: ""
|