Commit 723d6ee

🎉 implement n0s1 scanner #13564 (#13580)
* implement n0s1 scanner #13564
* update
* ruff
* update
* update
* update
* demonstrate the correctness through comparison with rustyhog
1 parent 3c28fb5 commit 723d6ee

7 files changed: +1521 -0 lines changed
Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
---
title: "n0s1 Scanner"
toc_hide: true
---

### File Types
The n0s1 parser expects a JSON report produced by the n0s1 scanner.
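
A minimal report of the shape the parser reads, shown here as a Python literal for illustration. The field names are inferred from dojo/tools/n0s1/parser.py in this commit rather than from an official n0s1 schema, and all values are invented:

# Illustrative only: keys inferred from the parser, values invented.
minimal_report = {
    "tool": {"version": "1.0.0"},
    "regex_config": {
        "rules": [
            {
                "id": "generic_api_key",
                "description": "Generic API key",
                "regex": "api[_-]?key",
                "keywords": ["api", "key"],
                "tags": ["secret"],
            },
        ],
    },
    "findings": {
        "finding-1": {
            "id": "finding-1",
            "url": "https://example.atlassian.net/browse/PROJ-1",
            "secret": "api_key=REDACTED",
            "details": {
                "platform": "Jira",
                "ticket_field": "description",
                "matched_regex_config": {"id": "generic_api_key"},
            },
        },
    },
}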

### Sample Scan Data
Sample n0s1 scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/n0s1).

### Link To Tool
See n0s1 on GitHub: https://github.com/spark1security/n0s1

### Default Deduplication Hashcode Fields
By default, DefectDojo identifies duplicate Findings using these [hashcode fields](https://docs.defectdojo.com/en/working_with_findings/finding_deduplication/about_deduplication/):

- description

dojo/settings/settings.dist.py

Lines changed: 1 addition & 0 deletions
@@ -1397,6 +1397,7 @@ def saml2_attrib_map_format(din):
     "Cycognito Scan": ["title", "severity"],
     "OpenVAS Parser v2": ["title", "severity", "vuln_id_from_tool", "endpoints"],
     "Snyk Issue API Scan": ["vuln_id_from_tool", "file_path"],
+    "n0s1 Scanner": ["description"],
 }

 # Override the hardcoded settings here via the env var
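
The new entry means that, for "n0s1 Scanner" results, DefectDojo's deduplication hashes only the description field. The sketch below is a simplified stand-in for that behaviour, not DefectDojo's actual dedup code (and the settings mapping name is not shown in this hunk); it only illustrates why two n0s1 findings that render the same description collapse into one:

import hashlib

# Simplified illustration of field-based dedup hashing; not DefectDojo's implementation.
N0S1_HASHCODE_FIELDS = ["description"]

def dedup_hash(finding_dict):
    material = "|".join(str(finding_dict.get(field, "")) for field in N0S1_HASHCODE_FIELDS)
    return hashlib.sha256(material.encode("utf-8")).hexdigest()

same_description = "**URL:** https://example/PROJ-1\n**Secret:** api_key=REDACTED\n"
a = {"title": "generic_api_key", "description": same_description}
b = {"title": "slack_token", "description": same_description}
print(dedup_hash(a) == dedup_hash(b))  # True: titles differ, but the hashed field is identical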

dojo/tools/n0s1/__init__.py

Whitespace-only changes.

dojo/tools/n0s1/parser.py

Lines changed: 86 additions & 0 deletions
@@ -0,0 +1,86 @@
import json

from dojo.models import Finding
from dojo.tools.parser_test import ParserTest


class N0s1Parser:
    def get_scan_types(self):
        return ["n0s1 Scanner"]

    def get_label_for_scan_types(self, scan_type):
        return scan_type

    def get_description_for_scan_types(self, scan_type):
        return "JSON output from the n0s1 scanner."

    def get_tests(self, scan_type, handle):
        data = json.load(handle)
        subscanner = self.detect_subscanner(data)
        test = ParserTest(
            name=subscanner,
            parser_type=subscanner,
            version=data.get("tool", {}).get("version", ""),
            description=f"Scan from {subscanner}",
        )
        test.findings = self.get_findings_from_data(data)
        return [test]

    def get_findings(self, scan_file, test):
        data = json.load(scan_file)
        return self.get_findings_from_data(data)

    def detect_subscanner(self, data):
        # Derive the sub-scanner name from the platforms reported in the findings.
        platforms = {f.get("details", {}).get("platform", "") for f in data.get("findings", {}).values()}
        if "Confluence" in platforms:
            return "n0s1 Confluence"
        if "GitHub" in platforms:
            return "n0s1 GitHub"
        if "GitLab" in platforms:
            return "n0s1 GitLab"
        return "n0s1"

    def get_findings_from_data(self, data):
        dupes = {}
        # Index the report's regex rules by id so findings can be enriched with them.
        regex_configs = {}
        if "regex_config" in data and "rules" in data["regex_config"]:
            for rule in data["regex_config"]["rules"]:
                regex_configs[rule["id"]] = rule
        for finding_id, finding_data in data.get("findings", {}).items():
            details = finding_data.get("details", {})
            # Merge the regex reference attached to the finding with the full rule definition.
            regex_ref = details.get("matched_regex_config", {})
            regex_id = regex_ref.get("id")
            regex_info = regex_configs.get(regex_id, {})
            merged_regex = {
                "id": regex_id,
                "description": regex_ref.get("description", regex_info.get("description", "N/A")),
                "regex": regex_ref.get("regex", regex_info.get("regex", "N/A")),
                "keywords": regex_info.get("keywords", []),
                "tags": regex_info.get("tags", []),
            }
            title = merged_regex["id"] or "n0s1 Finding"
            description = f"**URL:** {finding_data.get('url', 'N/A')}\n"
            description += f"**Secret:** {finding_data.get('secret', 'N/A')}\n"
            description += f"**Platform:** {details.get('platform', 'N/A')}\n"
            description += f"**Ticket Field:** {details.get('ticket_field', 'N/A')}\n"
            description += f"**Regex ID:** {merged_regex['id']}\n"
            description += f"**Regex Description:** {merged_regex['description']}\n"
            description += f"**Regex Pattern:** {merged_regex['regex']}\n"
            if merged_regex["keywords"]:
                description += f"**Keywords:** {', '.join(merged_regex['keywords'])}\n"
            if merged_regex["tags"]:
                description += f"**Tags:** {', '.join(merged_regex['tags'])}\n"
            # Deduplicate within the report on the finding's own id, falling back to the dict key.
            dupe_key = finding_data.get("id", finding_id)
            if dupe_key in dupes:
                continue
            finding = Finding(
                title=title,
                description=description,
                severity="High",
                dynamic_finding=True,
                static_finding=False,
                unique_id_from_tool=dupe_key,
            )
            dupes[dupe_key] = finding
        return list(dupes.values())
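
As a quick sanity check, the parser above can be exercised directly with an in-memory report, much like the unit tests do with the sample scans. This is a sketch that assumes a configured DefectDojo development environment (dojo.models needs Django settings); the report keys mirror what the parser reads and the values are invented:

import io
import json

from dojo.tools.n0s1.parser import N0s1Parser

# Hypothetical report: keys mirror what the parser reads, values are invented.
report = {
    "tool": {"version": "1.0.0"},
    "regex_config": {"rules": []},
    "findings": {
        "finding-1": {
            "id": "finding-1",
            "url": "https://gitlab.example.com/group/project/-/issues/1",
            "secret": "api_key=REDACTED",
            "details": {
                "platform": "GitLab",
                "ticket_field": "description",
                "matched_regex_config": {"id": "generic_api_key", "regex": "api[_-]?key"},
            },
        },
    },
}

parser = N0s1Parser()
findings = parser.get_findings(io.StringIO(json.dumps(report)), test=None)
print(len(findings))                      # 1
print(findings[0].title)                  # generic_api_key
print(parser.detect_subscanner(report))   # n0s1 GitLab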
