|
| 1 | + |
| 2 | +import json |
| 3 | + |
| 4 | +from dojo.models import Finding |
| 5 | +from dojo.tools.parser_test import ParserTest |
| 6 | + |
| 7 | + |
class N0s1Parser:
    """Parser for JSON reports produced by the n0s1 secret scanner.

    n0s1 scans ticketing/wiki platforms (Confluence, GitHub, GitLab) for
    leaked secrets; each report entry carries the leak location, the matched
    secret, and a reference to the regex rule that fired.
    """

    def get_scan_types(self):
        """Return the scan-type identifiers this parser handles."""
        return ["n0s1 Scanner"]

    def get_label_for_scan_types(self, scan_type):
        """Return the UI label for the given scan type (identity mapping)."""
        return scan_type

    def get_description_for_scan_types(self, scan_type):
        """Return a short human-readable description of the expected input."""
        return "JSON output from the n0s1 scanner."

    def get_tests(self, scan_type, handle):
        """Parse *handle* (a JSON file object) into a single ParserTest.

        The test name/type reflects the platform-specific sub-scanner
        detected from the findings' platform fields.
        """
        data = json.load(handle)
        subscanner = self.detect_subscanner(data)
        test = ParserTest(
            name=subscanner,
            parser_type=subscanner,
            version=data.get("tool", {}).get("version", ""),
            description=f"Scan from {subscanner}",
        )
        test.findings = self.get_findings_from_data(data)
        return [test]

    def get_findings(self, scan_file, test):
        """Parse *scan_file* (a JSON file object) into a list of Findings."""
        data = json.load(scan_file)
        return self.get_findings_from_data(data)

    def detect_subscanner(self, data):
        """Classify the report by the platforms seen in its findings.

        Priority order (Confluence > GitHub > GitLab) is deliberate: a mixed
        report is labeled by the first matching platform. Falls back to the
        generic "n0s1" label when no known platform appears.
        """
        platforms = {f.get("details", {}).get("platform", "") for f in data.get("findings", {}).values()}
        if "Confluence" in platforms:
            return "n0s1 Confluence"
        if "GitHub" in platforms:
            return "n0s1 GitHub"
        if "GitLab" in platforms:
            return "n0s1 GitLab"
        return "n0s1"

    def get_findings_from_data(self, data):
        """Convert parsed report *data* into deduplicated Finding objects.

        Findings are keyed by the report's per-finding "id" (falling back to
        the JSON map key); only the first occurrence of each key is kept.
        Regex metadata is merged from the finding's inline reference and the
        report-level "regex_config" rule table, preferring the inline values.
        """
        # Index the report-level regex rules by id for O(1) lookup per finding.
        regex_configs = {
            rule["id"]: rule
            for rule in data.get("regex_config", {}).get("rules", [])
        }
        dupes = {}
        for finding_id, finding_data in data.get("findings", {}).items():
            # Dedupe before doing any string-building work for this entry.
            dupe_key = finding_data.get("id", finding_id)
            if dupe_key in dupes:
                continue
            details = finding_data.get("details", {})
            regex_ref = details.get("matched_regex_config", {})
            regex_id = regex_ref.get("id")
            regex_info = regex_configs.get(regex_id, {})
            # Inline values win; the rule table fills in what's missing.
            merged_regex = {
                "id": regex_id,
                "description": regex_ref.get("description", regex_info.get("description", "N/A")),
                "regex": regex_ref.get("regex", regex_info.get("regex", "N/A")),
                "keywords": regex_info.get("keywords", []),
                "tags": regex_info.get("tags", []),
            }
            title = merged_regex["id"] or "n0s1 Finding"
            parts = [
                f"**URL:** {finding_data.get('url', 'N/A')}",
                f"**Secret:** {finding_data.get('secret', 'N/A')}",
                f"**Platform:** {details.get('platform', 'N/A')}",
                f"**Ticket Field:** {details.get('ticket_field', 'N/A')}",
                f"**Regex ID:** {merged_regex['id']}",
                f"**Regex Description:** {merged_regex['description']}",
                f"**Regex Pattern:** {merged_regex['regex']}",
            ]
            if merged_regex["keywords"]:
                parts.append(f"**Keywords:** {', '.join(merged_regex['keywords'])}")
            if merged_regex["tags"]:
                parts.append(f"**Tags:** {', '.join(merged_regex['tags'])}")
            description = "\n".join(parts) + "\n"
            finding = Finding(
                title=title,
                description=description,
                severity="High",
                dynamic_finding=True,
                static_finding=False,
                unique_id_from_tool=dupe_key,
            )
            dupes[dupe_key] = finding
        return list(dupes.values())
0 commit comments