Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 10 additions & 3 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,13 @@ env:
- GO111MODULE=on
gobuild_args: -a -tags netgo -ldflags '-w'
go_import_path: github.com/adevinta/vulcan-tracker
script:
- go install ./...
- go test -short -v $(go list ./... | grep -v /vendor/)

jobs:
include:
# "test" stage.
- stage: "test"
name: "test"
script: ./_script/test -cover ./...
# script:
# - go install ./...
# - go test -short -v $(go list ./... | grep -v /vendor/)
18 changes: 18 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -33,3 +33,21 @@ For running the component locally, clone and run at the root of the repo the fol
```sh
go install ./...
```

## Test

Execute the tests:

```
_script/test -cover ./...
```

`_script/test` makes sure the testing infrastructure is up and running and then
runs `go test` with the provided arguments. It also disables test caching and
avoids running multiple test programs in parallel.

Stop the testing infrastructure:

```
_script/clean
```
10 changes: 10 additions & 0 deletions _resources/config/local.toml
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,14 @@ port = 8080
[log]
level = "DEBUG"

[kafka]
user = "user"
pass = "supersecret"
broker = "localhost:9092"
# Time between retries on failure (0 means no retries).
retry_duration="5s"
topics = {assets = "test-topic"}

[servers]
[servers.example1]
url = "http://localhost:8080"
Expand All @@ -20,3 +28,5 @@ level = "DEBUG"
project = "TEST"
fix_workflow = ["ToDo", "In Progress", "Under Review", "Fixed"]
wontfix_workflow = ["Won't Fix"]


8 changes: 8 additions & 0 deletions _script/clean
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
#!/bin/bash

# Tears down the local testing infrastructure started by _script/setup.

set -e -u

# Set working directory to /_script.
# Note: $0 must be quoted inside the command substitution, otherwise a
# repo path containing spaces breaks word splitting.
cd "$(dirname "$0")"

# Stop (-s) and remove (rm) the testing containers without prompting (-f).
docker-compose -p 'vulcan-tracker' rm -s -f
49 changes: 49 additions & 0 deletions _script/docker-compose.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
version: "2.4"

services:
setup:
image: busybox
depends_on:
# The service_healthy condition is not honored by "docker-compose run" in
# all docker-compose versions, but it is respected if the condition is in
# a transitive dependency. Thus, we have created an intermediate
# dependency with all the required conditions.
- healthchecks
entrypoint: ["echo", "setup done"]

healthchecks:
image: busybox
depends_on:
kafka:
condition: service_healthy
entrypoint: ["echo", "healthchecks done"]

kafka:
image: confluentinc/cp-kafka:7.2.2
ports:
- 127.0.0.1:9092:9092
expose:
- 29092
environment:
- KAFKA_BROKER_ID=1
- KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
- KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:29092,PLAINTEXT_HOST://127.0.0.1:9092
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME=PLAINTEXT
- KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
healthcheck:
test: ["CMD", "nc", "-z", "kafka", "9092"]
interval: 5s
timeout: 10s
retries: 6
depends_on:
zookeeper:
condition: service_started

zookeeper:
image: confluentinc/cp-zookeeper:7.2.2
expose:
- 2181
environment:
- ZOOKEEPER_CLIENT_PORT=2181
- ZOOKEEPER_TICK_TIME=2000
8 changes: 8 additions & 0 deletions _script/setup
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
#!/bin/bash

# Brings up the local testing infrastructure defined in
# _script/docker-compose.yml and waits for its healthchecks via the
# intermediate "setup" service.

set -e -u

# Set working directory to /_script.
# Note: $0 must be quoted inside the command substitution, otherwise a
# repo path containing spaces breaks word splitting.
cd "$(dirname "$0")"

docker-compose -p 'vulcan-tracker' run setup
14 changes: 14 additions & 0 deletions _script/test
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
#!/bin/bash

# Runs the test suite against the local testing infrastructure.
# Usage: _script/test [go test args...], e.g. _script/test -cover ./...

set -e -u

# Set working directory to the root of the repo.
# Note: $0 must be quoted inside the command substitution, otherwise a
# repo path containing spaces breaks word splitting.
cd "$(dirname "$0")/.."

# Start testing infrastructure (i.e. Kafka and its dependencies).
./_script/setup

# Disable the test cache (-count=1), so tests always connect to the testing
# infrastructure. Also, do not run multiple test programs in parallel (-p=1),
# so there are no race conditions between tests in different packages.
exec go test -count=1 -p=1 "$@"
141 changes: 139 additions & 2 deletions cmd/vulcan-tracker/main.go
Original file line number Diff line number Diff line change
@@ -1,19 +1,24 @@
/*
Copyright 2022 Adevinta
*/

package main

import (
"context"
"flag"
"fmt"
"log"
"strings"
"time"

"github.com/adevinta/vulcan-tracker/pkg/api"
"github.com/adevinta/vulcan-tracker/pkg/config"
"github.com/adevinta/vulcan-tracker/pkg/events"
"github.com/adevinta/vulcan-tracker/pkg/events/kafka"
"github.com/adevinta/vulcan-tracker/pkg/log"
"github.com/adevinta/vulcan-tracker/pkg/model"
"github.com/adevinta/vulcan-tracker/pkg/storage"
"github.com/adevinta/vulcan-tracker/pkg/tracking"
"github.com/adevinta/vulcan-tracker/pkg/vulcan"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
)
Expand All @@ -27,6 +32,10 @@ func main() {
log.Fatalf("Error reading configuration: %v", err)
}

if err := log.SetLevel(strings.ToLower(cfg.Log.Level)); err != nil {
log.Fatalf("error setting log level: %v", err)
}

e := echo.New()

e.Logger.SetLevel(config.ParseLogLvl(cfg.Log.Level))
Expand Down Expand Up @@ -68,7 +77,135 @@ func main() {
e.POST("/:team_id/tickets/:id/fix", a.FixTicket)
e.POST("/:team_id/tickets/:id/wontfix", a.WontFixTicket)

errs := make(chan error, 1)

go func() {
if err := runEventReader(context.Background(), cfg.Kafka, storage, trackerServers); err != nil {
errs <- fmt.Errorf("vulcan-tracker: %v", err)
}
// close(errs)
}()
if err := <-errs; err != nil {
// handle error
log.Error.Fatal(err)
}

address := fmt.Sprintf(":%d", cfg.API.Port)
e.Logger.Fatal(e.Start(address))

}

// runEventReader is invoked by main and does the actual work of reading events.
func runEventReader(ctx context.Context, kCfg config.KafkaConfig, s storage.Storage, ts map[string]tracking.TicketTracker) error {

kcfg := map[string]any{
"bootstrap.servers": kCfg.KafkaBroker,
"group.id": kCfg.KafkaGroupID,
"auto.offset.reset": "earliest",
"enable.partition.eof": true,
}

if kCfg.KafkaUsername != "" && kCfg.KafkaPassword != "" {
kcfg["security.protocol"] = "sasl_ssl"
kcfg["sasl.mechanisms"] = "SCRAM-SHA-256"
kcfg["sasl.username"] = kCfg.KafkaUsername
kcfg["sasl.password"] = kCfg.KafkaPassword
}

proc, err := kafka.NewProcessor(kcfg)
if err != nil {
return fmt.Errorf("error creating kafka processor: %w", err)
}
defer proc.Close()

vcli := vulcan.NewClient(proc)
for {
log.Info.Println("vulcan-tracker: processing findings")

select {
case <-ctx.Done():
log.Info.Println("vulcan-tracker: context is done")
return nil
default:
}

if err := vcli.ProcessFindings(ctx, findingHandler(s, ts)); err != nil {
err = fmt.Errorf("error processing findings: %w", err)
if kCfg.RetryDuration == 0 {
return err
}
log.Error.Printf("vulcan-tracker: %v", err)
}

log.Info.Printf("vulcan-tracker: retrying in %v", kCfg.RetryDuration)
time.Sleep(kCfg.RetryDuration)
}

}

// findingHandler processes finding events coming from a stream.
func findingHandler(s storage.Storage, ts map[string]tracking.TicketTracker) vulcan.FindingHandler {
return func(payload events.FindingNotification, isNil bool) error {
log.Debug.Printf("vulcan-tracker: payload=%#v isNil=%v", payload, isNil)
for _, teamId := range payload.Target.Teams {
pConfig, err := s.ProjectConfig(teamId)
if err != nil {
// The team doesn't have a configuration in vulcan-tracker. We'll skip its findings.
continue
}
if !pConfig.AutoCreate {
// This team doesn't have enable the auto create configuration.
continue
}
err = upsertTicket(pConfig, ts[pConfig.ServerName], payload)
if err != nil {
return fmt.Errorf("could not upsert finding: %w", err)
}
}
return nil
}
}

// upsertTicket creates or updates the ticket that tracks the given finding
// in the team's ticket tracker:
//   - if no ticket exists for the finding, a new one is created;
//   - if the finding status is "FIXED", the existing ticket is moved through
//     the project's fixed workflow;
//   - if the status is "FALSE POSITIVE", it is moved through the won't-fix
//     workflow with the "Won't Do" reason.
//
// Any other status leaves the existing ticket untouched.
func upsertTicket(pConfig *model.ProjectConfig, tt tracking.TicketTracker, payload events.FindingNotification) error {
	// Check whether a ticket already tracks this finding.
	ticket, err := tt.FindTicketByFindingID(pConfig.Project, pConfig.VulnerabilityIssueType, payload.ID)
	if err != nil {
		return fmt.Errorf("error searching the finding %s: %w", payload.ID, err)
	}

	if ticket == nil {
		// No ticket yet: create one. Use a distinct variable name for
		// the result to avoid shadowing ticket/err from the outer scope.
		// TODO: Define a good summary and description.
		created, err := tt.CreateTicket(&model.Ticket{
			Project:     pConfig.Project,
			TicketType:  pConfig.VulnerabilityIssueType,
			Summary:     payload.Issue.Summary,
			Description: fmt.Sprintf("FindingID: %s", payload.ID),
		})
		if err != nil {
			return fmt.Errorf("could not create a ticket: %w", err)
		}
		log.Debug.Printf("vulcan-tracker: created ticket %s ", created.Key)
		return nil
	}

	// A ticket exists: transition it according to the finding status.
	switch payload.Status {
	case "FIXED":
		if _, err := tt.FixTicket(ticket.Key, pConfig.FixedWorkflow); err != nil {
			return err
		}
	case "FALSE POSITIVE":
		if _, err := tt.WontFixTicket(ticket.Key, pConfig.WontFixWorkflow, "Won't Do"); err != nil {
			return err
		}
	}

	return nil
}
6 changes: 6 additions & 0 deletions config.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,9 @@ user = "$JIRA_USER"
token = "$JIRA_TOKEN"
vulnerability_issue_type = "$JIRA_VULNERABILITY_ISSUE_TYPE"
project = "$JIRA_PROJECT"

[kafka]
user = "user"
pass = "supersecret"
broker = "localhost:9092"
topic = "findings-v0"
1 change: 1 addition & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ require (
require (
github.com/BurntSushi/toml v1.2.0
github.com/andygrunwald/go-jira v1.16.0
github.com/confluentinc/confluent-kafka-go v1.9.2
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
github.com/google/go-cmp v0.5.9
github.com/labstack/gommon v0.3.1
Expand Down
Loading