Add CI integration (#143)

Alex Goodman 2018-12-30 14:07:56 -05:00 committed by GitHub
parent ad32c0a091
commit fc27dcd820
30 changed files with 892 additions and 204 deletions

15
.data/.dive-ci Normal file

@ -0,0 +1,15 @@
---
plugins:
- plugin1
rules:
# If the efficiency is measured below X%, mark as failed. (expressed as a percentage between 0-1)
lowestEfficiency: 0.95
# If the amount of wasted space exceeds X, mark as failed. (expressed in B, KB, MB, or GB)
highestWastedBytes: 20Mb
# If the amount of wasted space makes up more than X% of the image, mark as failed. (fail if the threshold is crossed; expressed as a percentage between 0-1)
highestUserWastedPercent: 0.10
plugin1/rule1: error
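For reference, the thresholds above are read as strings and parsed by the CI rules introduced later in this commit (runtime/ci/rules.go): percentage-style values with strconv.ParseFloat and the byte threshold with go-humanize, which is why units like B, KB, MB, and GB are accepted. A minimal parsing sketch:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/dustin/go-humanize"
)

func main() {
	// Percentage-style thresholds are plain floats between 0 and 1.
	lowestEfficiency, _ := strconv.ParseFloat("0.95", 64)

	// Byte thresholds accept human-readable sizes (SI units).
	highestWastedBytes, _ := humanize.ParseBytes("20MB")

	fmt.Println(lowestEfficiency, highestWastedBytes) // 0.95 20000000
}
```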


@ -1,4 +1,4 @@
FROM alpine:latest
FROM busybox:latest
ADD README.md /somefile.txt
RUN mkdir -p /root/example/really/nested
RUN cp /somefile.txt /root/example/somefile1.txt
@ -8,7 +8,7 @@ RUN cp /somefile.txt /root/example/somefile3.txt
RUN mv /root/example/somefile3.txt /root/saved.txt
RUN cp /root/saved.txt /root/.saved.txt
RUN rm -rf /root/example/
ADD .data/ /root/.data/
ADD .scripts/ /root/.data/
RUN cp /root/saved.txt /tmp/saved.again1.txt
RUN cp /root/saved.txt /root/.data/saved.again2.txt
RUN chmod +x /root/saved.txt


@ -0,0 +1,14 @@
FROM busybox:latest
ADD README.md /somefile.txt
RUN mkdir -p /root/example/really/nested
RUN cp /somefile.txt /root/example/somefile1.txt
RUN chmod 444 /root/example/somefile1.txt
RUN cp /somefile.txt /root/example/somefile2.txt
RUN cp /somefile.txt /root/example/somefile3.txt
RUN mv /root/example/somefile3.txt /root/saved.txt
RUN cp /root/saved.txt /root/.saved.txt
RUN rm -rf /root/example/
ADD .scripts/ /root/.data/
RUN cp /root/saved.txt /tmp/saved.again1.txt
RUN cp /root/saved.txt /root/.data/saved.again2.txt
RUN chmod +x /root/saved.txt

BIN
.data/demo-ci.png Normal file (binary file not shown; 178 KiB)

BIN
.data/test-docker-image.tar Normal file (binary file not shown)


@ -1,5 +1,5 @@
/.git
/.scripts
/.data
/dist
/ui
/utils

3
.gitignore vendored

@ -18,4 +18,5 @@
/vendor
/.image
*.log
/dist
/dist
.cover

55
.scripts/test.sh Executable file

@ -0,0 +1,55 @@
#!/bin/sh
# Generate test coverage statistics for Go packages.
#
# Works around the fact that `go test -coverprofile` currently does not work
# with multiple packages, see https://code.google.com/p/go/issues/detail?id=6909
#
# Usage: script/coverage [--html|--coveralls]
#
# --html Additionally create HTML report and open it in browser
# --coveralls Push coverage statistics to coveralls.io
#
# Source: https://github.com/mlafeldt/chef-runner/blob/v0.7.0/script/coverage
set -e
workdir=.cover
profile="$workdir/cover.out"
mode=count
generate_cover_data() {
rm -rf "$workdir"
mkdir "$workdir"
for pkg in "$@"; do
f="$workdir/$(echo $pkg | tr / -).cover"
go test -v -covermode="$mode" -coverprofile="$f" "$pkg"
done
echo "mode: $mode" >"$profile"
grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
}
show_cover_report() {
go tool cover -${1}="$profile"
}
push_to_coveralls() {
echo "Pushing coverage statistics to coveralls.io"
goveralls -coverprofile="$profile"
}
generate_cover_data $(go list ./...)
case "$1" in
"")
show_cover_report func
;;
--html)
show_cover_report html
;;
--coveralls)
push_to_coveralls
;;
*)
echo >&2 "error: invalid option: $1"; exit 1 ;;
esac


@ -3,7 +3,10 @@ BIN = dive
all: clean build
run: build
./build/$(BIN) build -t dive-test:latest -f .data/Dockerfile .
./build/$(BIN) build -t dive-example:latest -f .data/Dockerfile.example .
run-ci: build
CI=true ./build/$(BIN) dive-example:latest --ci-config .data/.dive-ci
run-large: build
./build/$(BIN) amir20/clashleaders:latest
@ -21,6 +24,9 @@ install:
test: build
go test -cover -v ./...
coverage: build
./.scripts/test.sh
validate:
@! gofmt -s -d -l . 2>&1 | grep -vE '^\.git/'
go vet ./...
@ -28,9 +34,12 @@ validate:
lint: build
golint -set_exit_status $$(go list ./...)
generate-test-data:
docker build -t dive-test:latest -f .data/Dockerfile.test-image . && docker image save -o .data/test-docker-image.tar dive-test:latest && echo "Exported test data!"
clean:
rm -rf build
rm -rf vendor
go clean
.PHONY: build install test lint clean release validate
.PHONY: build install test lint clean release validate generate-test-data


@ -16,6 +16,13 @@ or if you want to build your image then jump straight into analyzing it:
dive build -t <some-tag> .
```
Additionally, you can run dive in your CI pipeline to ensure you're keeping wasted space to a minimum (this skips the UI):
```
CI=true dive <your-image>
```
![Image](.data/demo-ci.png)
**This is beta quality!** *Feel free to submit an issue if you want a new feature or find a bug :)*
## Basic Features
@ -47,6 +54,9 @@ You can build a Docker image and do an immediate analysis with one command:
You only need to replace your `docker build` command with the same `dive build`
command.
**CI Integration**
Analyze an image and get a pass/fail result based on the image efficiency and wasted space. Simply set `CI=true` in the environment when invoking any valid dive command.
## Installation
@ -127,6 +137,26 @@ docker run --rm -it \
wagoodman/dive:latest <dive arguments...>
```
## CI Integration
When dive is run with the environment variable `CI=true`, the UI is bypassed; dive instead analyzes your docker image and gives a pass/fail indication via the return code. Currently there are three metrics supported via a `.dive-ci` file that you can put at the root of your repo:
```
rules:
# If the efficiency is measured below X%, mark as failed.
# Expressed as a percentage between 0-1.
lowestEfficiency: 0.95
# If the amount of wasted space exceeds X, mark as failed.
# Expressed in B, KB, MB, and GB.
highestWastedBytes: 20MB
# If the amount of wasted space makes up more than X% of the image, mark as failed.
# Note: the base image layer is NOT included in the total image size.
# Expressed as a percentage between 0-1; fails if the threshold is crossed.
highestUserWastedPercent: 0.20
```
You can override the CI config path with the `--ci-config` option.
## KeyBindings
Key Binding | Description
@ -144,7 +174,7 @@ Key Binding | Description
<kbd>PageUp</kbd> | Filetree view: scroll up a page
<kbd>PageDown</kbd> | Filetree view: scroll down a page
## Configuration
## UI Configuration
No configuration is necessary, however, you can create a config file and override values:
```yaml


@ -1,15 +1,10 @@
package cmd
import (
"encoding/json"
"fmt"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/wagoodman/dive/filetree"
"github.com/wagoodman/dive/image"
"github.com/wagoodman/dive/ui"
"github.com/wagoodman/dive/runtime"
"github.com/wagoodman/dive/utils"
"io/ioutil"
)
// doAnalyzeCmd takes a docker image tag, digest, or id and displays the
@ -35,117 +30,9 @@ func doAnalyzeCmd(cmd *cobra.Command, args []string) {
utils.Exit(1)
}
run(userImage)
}
type export struct {
Layer []exportLayer `json:"layer"`
Image exportImage `json:"image"`
}
type exportLayer struct {
Index int `json:"index"`
DigestID string `json:"digestId"`
SizeBytes uint64 `json:"sizeBytes"`
Command string `json:"command"`
}
type exportImage struct {
SizeBytes uint64 `json:"sizeBytes"`
InefficientBytes uint64 `json:"inefficientBytes"`
EfficiencyScore float64 `json:"efficiencyScore"`
InefficientFiles []inefficientFiles `json:"inefficientFiles"`
}
type inefficientFiles struct {
Count int `json:"count"`
SizeBytes uint64 `json:"sizeBytes"`
File string `json:"file"`
}
func newExport(analysis *image.AnalysisResult) *export {
data := export{}
data.Layer = make([]exportLayer, len(analysis.Layers))
data.Image.InefficientFiles = make([]inefficientFiles, len(analysis.Inefficiencies))
// export layers in order
for revIdx := len(analysis.Layers) - 1; revIdx >= 0; revIdx-- {
layer := analysis.Layers[revIdx]
idx := (len(analysis.Layers) - 1) - revIdx
data.Layer[idx] = exportLayer{
Index: idx,
DigestID: layer.Id(),
SizeBytes: layer.Size(),
Command: layer.Command(),
}
}
// export image info
data.Image.SizeBytes = 0
for idx := 0; idx < len(analysis.Layers); idx++ {
data.Image.SizeBytes += analysis.Layers[idx].Size()
}
data.Image.EfficiencyScore = analysis.Efficiency
for idx := 0; idx < len(analysis.Inefficiencies); idx++ {
fileData := analysis.Inefficiencies[len(analysis.Inefficiencies)-1-idx]
data.Image.InefficientBytes += uint64(fileData.CumulativeSize)
data.Image.InefficientFiles[idx] = inefficientFiles{
Count: len(fileData.Nodes),
SizeBytes: uint64(fileData.CumulativeSize),
File: fileData.Path,
}
}
return &data
}
func exportStatistics(analysis *image.AnalysisResult) {
data := newExport(analysis)
payload, err := json.MarshalIndent(&data, "", " ")
if err != nil {
panic(err)
}
err = ioutil.WriteFile(exportFile, payload, 0644)
if err != nil {
panic(err)
}
}
func fetchAndAnalyze(imageID string) *image.AnalysisResult {
analyzer := image.GetAnalyzer(imageID)
fmt.Println(" Fetching image...")
err := analyzer.Parse(imageID)
if err != nil {
fmt.Printf("cannot fetch image: %v\n", err)
utils.Exit(1)
}
fmt.Println(" Analyzing image...")
result, err := analyzer.Analyze()
if err != nil {
fmt.Printf("cannot doAnalyzeCmd image: %v\n", err)
utils.Exit(1)
}
return result
}
func run(imageID string) {
color.New(color.Bold).Println("Analyzing Image")
result := fetchAndAnalyze(imageID)
if exportFile != "" {
exportStatistics(result)
color.New(color.Bold).Println(fmt.Sprintf("Exported to %s", exportFile))
utils.Exit(0)
}
fmt.Println(" Building cache...")
cache := filetree.NewFileTreeCache(result.RefTrees)
cache.Build()
ui.Run(result, cache)
runtime.Run(runtime.Options{
ImageId: userImage,
ExportFile: exportFile,
CiConfigFile: ciConfigFile,
})
}


@ -1,11 +1,9 @@
package cmd
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/wagoodman/dive/runtime"
"github.com/wagoodman/dive/utils"
"io/ioutil"
"os"
)
// buildCmd represents the build command
@ -23,25 +21,9 @@ func init() {
// doBuildCmd implements the steps taken for the build command
func doBuildCmd(cmd *cobra.Command, args []string) {
defer utils.Cleanup()
iidfile, err := ioutil.TempFile("/tmp", "dive.*.iid")
if err != nil {
utils.Cleanup()
log.Fatal(err)
}
defer os.Remove(iidfile.Name())
allArgs := append([]string{"--iidfile", iidfile.Name()}, args...)
err = utils.RunDockerCmd("build", allArgs...)
if err != nil {
utils.Cleanup()
log.Fatal(err)
}
imageId, err := ioutil.ReadFile(iidfile.Name())
if err != nil {
utils.Cleanup()
log.Fatal(err)
}
run(string(imageId))
runtime.Run(runtime.Options{
BuildArgs: args,
ExportFile: exportFile,
})
}


@ -16,6 +16,7 @@ import (
var cfgFile string
var exportFile string
var ciConfigFile string
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
@ -46,6 +47,7 @@ func init() {
rootCmd.PersistentFlags().BoolP("version", "v", false, "display version number")
rootCmd.Flags().StringVarP(&exportFile, "json", "j", "", "Skip the interactive TUI and write the layer analysis statistics to a given file.")
rootCmd.Flags().StringVar(&ciConfigFile, "ci-config", ".dive-ci", "If CI=true in the environment, use the given yaml to drive validation rules.")
}
// initConfig reads in config file and ENV variables if set.


@ -12,7 +12,7 @@ import (
)
const (
AttributeFormat = "%s%s %10s %10s "
AttributeFormat = "%s%s %11s %10s "
)
var diffTypeColor = map[DiffType]*color.Color{


@ -154,7 +154,7 @@ func TestDirSize(t *testing.T) {
tree1.AddPath("/etc/nginx/public3/thing2", FileInfo{Size: 300})
node, _ := tree1.GetNode("/etc/nginx")
expected, actual := "---------- 0:0 600 B ", node.MetadataString()
expected, actual := "---------- 0:0 600 B ", node.MetadataString()
if expected != actual {
t.Errorf("Expected metadata '%s' got '%s'", expected, actual)
}

1
go.mod

@ -20,6 +20,7 @@ require (
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jroimartin/gocui v0.4.0
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213
github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e
github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a
github.com/mattn/go-colorable v0.0.9 // indirect
github.com/mattn/go-isatty v0.0.4 // indirect

2
go.sum

@ -42,6 +42,8 @@ github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213 h1:qGQQKEcAR99REcM
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e h1:9MlwzLdW7QSDrhDjFlsEYmxpFyIoXmYRon3dt0io31k=
github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a h1:weJVJJRzAJBFRlAiJQROKQs8oC9vOxvm4rZmBBk0ONw=
github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=


@ -16,8 +16,13 @@ import (
var dockerVersion string
func newDockerImageAnalyzer() Analyzer {
return &dockerImageAnalyzer{}
func newDockerImageAnalyzer(imageId string) Analyzer {
return &dockerImageAnalyzer{
// store discovered json files in a map so we can read the image in one pass
jsonFiles: make(map[string][]byte),
layerMap: make(map[string]*filetree.FileTree),
id: imageId,
}
}
func newDockerImageManifest(manifestBytes []byte) dockerImageManifest {
@ -49,40 +54,31 @@ func newDockerImageConfig(configBytes []byte) dockerImageConfig {
return imageConfig
}
func (image *dockerImageAnalyzer) Parse(imageID string) error {
func (image *dockerImageAnalyzer) Fetch() (io.ReadCloser, error) {
var err error
image.id = imageID
// store discovered json files in a map so we can read the image in one pass
image.jsonFiles = make(map[string][]byte)
image.layerMap = make(map[string]*filetree.FileTree)
// pull the image if it does not exist
ctx := context.Background()
image.client, err = client.NewClientWithOpts(client.WithVersion(dockerVersion), client.FromEnv)
if err != nil {
return err
return nil, err
}
_, _, err = image.client.ImageInspectWithRaw(ctx, imageID)
_, _, err = image.client.ImageInspectWithRaw(ctx, image.id)
if err != nil {
// don't use the API, the CLI has more informative output
fmt.Println("Image not available locally. Trying to pull '" + imageID + "'...")
utils.RunDockerCmd("pull", imageID)
fmt.Println("Image not available locally. Trying to pull '" + image.id + "'...")
utils.RunDockerCmd("pull", image.id)
}
tarFile, _, err := image.getReader(imageID)
readCloser, err := image.client.ImageSave(ctx, []string{image.id})
if err != nil {
return err
return nil, err
}
defer tarFile.Close()
err = image.read(tarFile)
if err != nil {
return err
}
return nil
return readCloser, nil
}
func (image *dockerImageAnalyzer) read(tarFile io.ReadCloser) error {
func (image *dockerImageAnalyzer) Parse(tarFile io.ReadCloser) error {
tarReader := tar.NewReader(tarFile)
var currentLayer uint
@ -90,7 +86,7 @@ func (image *dockerImageAnalyzer) read(tarFile io.ReadCloser) error {
header, err := tarReader.Next()
if err == io.EOF {
fmt.Println(" ╧")
fmt.Println(" ╧")
break
}
@ -167,43 +163,41 @@ func (image *dockerImageAnalyzer) Analyze() (*AnalysisResult, error) {
efficiency, inefficiencies := filetree.Efficiency(image.trees)
var sizeBytes, userSizeBytes uint64
layers := make([]Layer, len(image.layers))
for i, v := range image.layers {
layers[i] = v
sizeBytes += v.Size()
if i != 0 {
userSizeBytes += v.Size()
}
}
var wastedBytes uint64
for idx := 0; idx < len(inefficiencies); idx++ {
fileData := inefficiencies[len(inefficiencies)-1-idx]
wastedBytes += uint64(fileData.CumulativeSize)
}
return &AnalysisResult{
Layers: layers,
RefTrees: image.trees,
Efficiency: efficiency,
Inefficiencies: inefficiencies,
Layers: layers,
RefTrees: image.trees,
Efficiency: efficiency,
UserSizeByes: userSizeBytes,
SizeBytes: sizeBytes,
WastedBytes: wastedBytes,
WastedUserPercent: float64(float64(wastedBytes) / float64(userSizeBytes)),
Inefficiencies: inefficiencies,
}, nil
}
func (image *dockerImageAnalyzer) getReader(imageID string) (io.ReadCloser, int64, error) {
ctx := context.Background()
result, _, err := image.client.ImageInspectWithRaw(ctx, imageID)
if err != nil {
return nil, -1, err
}
totalSize := result.Size
readCloser, err := image.client.ImageSave(ctx, []string{imageID})
if err != nil {
return nil, -1, err
}
return readCloser, totalSize, nil
}
// todo: it is bad that this is printing out to the screen. As the interface gets more fleshed out, an event update mechanism should be built in (so the caller can format and print updates)
func (image *dockerImageAnalyzer) processLayerTar(name string, layerIdx uint, reader *tar.Reader) error {
tree := filetree.NewFileTree()
tree.Name = name
title := fmt.Sprintf("[layer: %2d]", layerIdx)
message := fmt.Sprintf(" ├─ %s %s ", title, "working...")
message := fmt.Sprintf(" ├─ %s %s ", title, "working...")
fmt.Printf("\r%s", message)
fileInfos, err := image.getFileList(reader)
@ -220,12 +214,12 @@ func (image *dockerImageAnalyzer) processLayerTar(name string, layerIdx uint, re
tree.AddPath(element.Path, element)
if pb.Update(int64(idx)) {
message = fmt.Sprintf(" ├─ %s %s : %s", title, shortName, pb.String())
message = fmt.Sprintf(" ├─ %s %s : %s", title, shortName, pb.String())
fmt.Printf("\r%s", message)
}
}
pb.Done()
message = fmt.Sprintf(" ├─ %s %s : %s", title, shortName, pb.String())
message = fmt.Sprintf(" ├─ %s %s : %s", title, shortName, pb.String())
fmt.Printf("\r%s\n", message)
image.layerMap[tree.Name] = tree


@ -1,10 +1,10 @@
package image
type AnalyzerFactory func() Analyzer
type AnalyzerFactory func(string) Analyzer
func GetAnalyzer(imageID string) Analyzer {
// todo: add ability to have multiple image formats... for the meantime only use docker
var factory AnalyzerFactory = newDockerImageAnalyzer
return factory()
return factory(imageID)
}

19
image/testing.go Normal file

@ -0,0 +1,19 @@
package image
import (
"os"
)
func TestLoadDockerImageTar(tarPath string) (*AnalysisResult, error) {
f, err := os.Open(tarPath)
if err != nil {
return nil, err
}
defer f.Close()
analyzer := newDockerImageAnalyzer("dive-test:latest")
err = analyzer.Parse(f)
if err != nil {
return nil, err
}
return analyzer.Analyze()
}
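This helper is what the new export and CI evaluator tests use to get a deterministic AnalysisResult from the checked-in tarball (regenerated with `make generate-test-data`). A small sketch of calling it from another package; the test name and assertions are illustrative only:

```go
package runtime

import (
	"testing"

	"github.com/wagoodman/dive/image"
)

func Test_FixtureLoads(t *testing.T) {
	// The tarball fixture is produced by `make generate-test-data`.
	result, err := image.TestLoadDockerImageTar("../.data/test-docker-image.tar")
	if err != nil {
		t.Fatalf("unable to load test fixture: %v", err)
	}
	if len(result.Layers) == 0 {
		t.Fatal("expected at least one layer in the fixture image")
	}
	if result.Efficiency <= 0 || result.Efficiency > 1 {
		t.Errorf("efficiency score out of range: %v", result.Efficiency)
	}
}
```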


@ -3,13 +3,15 @@ package image
import (
"github.com/docker/docker/client"
"github.com/wagoodman/dive/filetree"
"io"
)
type Parser interface {
}
type Analyzer interface {
Parse(id string) error
Fetch() (io.ReadCloser, error)
Parse(io.ReadCloser) error
Analyze() (*AnalysisResult, error)
}
@ -24,10 +26,14 @@ type Layer interface {
}
type AnalysisResult struct {
Layers []Layer
RefTrees []*filetree.FileTree
Efficiency float64
Inefficiencies filetree.EfficiencySlice
Layers []Layer
RefTrees []*filetree.FileTree
Efficiency float64
SizeBytes uint64
UserSizeByes uint64 // this is all bytes except for the base image
WastedUserPercent float64 // = wasted-bytes/user-size-bytes
WastedBytes uint64
Inefficiencies filetree.EfficiencySlice
}
type dockerImageAnalyzer struct {
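With Parse(id) split into Fetch/Parse/Analyze, a caller drives each phase explicitly (this is what runtime.Run does further down). A minimal sketch of the new call sequence; the wrapper function and image tag are placeholders:

```go
package main

import (
	"log"

	"github.com/wagoodman/dive/image"
)

func analyze(imageID string) (*image.AnalysisResult, error) {
	analyzer := image.GetAnalyzer(imageID)

	// Fetch: obtain the saved-image tar stream from the docker daemon.
	reader, err := analyzer.Fetch()
	if err != nil {
		return nil, err
	}
	defer reader.Close()

	// Parse: read the tar once and build the per-layer file trees.
	if err := analyzer.Parse(reader); err != nil {
		return nil, err
	}

	// Analyze: compute efficiency, wasted bytes, and inefficient files.
	return analyzer.Analyze()
}

func main() {
	result, err := analyze("busybox:latest")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("efficiency=%0.4f wasted=%d bytes", result.Efficiency, result.WastedBytes)
}
```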

128
runtime/ci/evaluator.go Normal file

@ -0,0 +1,128 @@
package ci
import (
"bytes"
"fmt"
"github.com/logrusorgru/aurora"
"github.com/spf13/viper"
"github.com/wagoodman/dive/image"
"io/ioutil"
"sort"
"strings"
)
func NewEvaluator() *Evaluator {
ciConfig := viper.New()
ciConfig.SetConfigType("yaml")
ciConfig.SetDefault("rules.lowestEfficiency", 0.9)
ciConfig.SetDefault("rules.highestWastedBytes", "disabled")
ciConfig.SetDefault("rules.highestUserWastedPercent", 0.1)
return &Evaluator{
Config: ciConfig,
Rules: loadCiRules(),
Results: make(map[string]RuleResult),
Pass: true,
}
}
func (ci *Evaluator) LoadConfig(configFile string) error {
fileBytes, err := ioutil.ReadFile(configFile)
if err != nil {
return err
}
err = ci.Config.ReadConfig(bytes.NewBuffer(fileBytes))
if err != nil {
return err
}
return nil
}
func (ci *Evaluator) isRuleEnabled(rule Rule) bool {
value := ci.Config.GetString(rule.Key())
if value == "disabled" {
return false
}
return true
}
func (ci *Evaluator) Evaluate(analysis *image.AnalysisResult) bool {
for _, rule := range ci.Rules {
if ci.isRuleEnabled(rule) {
value := ci.Config.GetString(rule.Key())
status, message := rule.Evaluate(analysis, value)
if _, exists := ci.Results[rule.Key()]; exists {
panic(fmt.Errorf("CI rule result recorded twice: %s", rule.Key()))
}
if status == RuleFailed {
ci.Pass = false
}
ci.Results[rule.Key()] = RuleResult{
status: status,
message: message,
}
} else {
ci.Results[rule.Key()] = RuleResult{
status: RuleDisabled,
message: "rule disabled",
}
}
}
ci.Tally.Total = len(ci.Results)
for rule, result := range ci.Results {
switch result.status {
case RulePassed:
ci.Tally.Pass++
case RuleFailed:
ci.Tally.Fail++
case RuleWarning:
ci.Tally.Warn++
case RuleDisabled:
ci.Tally.Skip++
default:
panic(fmt.Errorf("unknown test status (rule='%v'): %v", rule, result.status))
}
}
return ci.Pass
}
func (ci *Evaluator) Report() {
status := "PASS"
rules := make([]string, 0, len(ci.Results))
for name := range ci.Results {
rules = append(rules, name)
}
sort.Strings(rules)
if ci.Tally.Fail > 0 {
status = "FAIL"
}
for _, rule := range rules {
result := ci.Results[rule]
name := strings.TrimPrefix(rule, "rules.")
if result.message != "" {
fmt.Printf(" %s: %s: %s\n", result.status.String(), name, result.message)
} else {
fmt.Printf(" %s: %s\n", result.status.String(), name)
}
}
summary := fmt.Sprintf("Result:%s [Total:%d] [Passed:%d] [Failed:%d] [Warn:%d] [Skipped:%d]", status, ci.Tally.Total, ci.Tally.Pass, ci.Tally.Fail, ci.Tally.Warn, ci.Tally.Skip)
if ci.Pass && ci.Tally.Warn > 0 {
fmt.Println(aurora.Blue(summary))
} else if ci.Pass {
fmt.Println(aurora.Green(summary))
} else {
fmt.Println(aurora.Red(summary))
}
}
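A short usage sketch of the evaluator, mirroring how runtime.runCi wires it up below; if LoadConfig fails, the defaults set in NewEvaluator apply. The file paths are assumptions based on the repo layout:

```go
package main

import (
	"fmt"
	"os"

	"github.com/wagoodman/dive/image"
	"github.com/wagoodman/dive/runtime/ci"
)

func main() {
	// Analysis result for the checked-in test image (path is an assumption).
	result, err := image.TestLoadDockerImageTar(".data/test-docker-image.tar")
	if err != nil {
		fmt.Println("cannot load image:", err)
		os.Exit(1)
	}

	evaluator := ci.NewEvaluator()
	if err := evaluator.LoadConfig(".dive-ci"); err != nil {
		fmt.Println("  Using default CI config")
	}

	pass := evaluator.Evaluate(result)
	evaluator.Report()
	if !pass {
		os.Exit(1)
	}
}
```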


@ -0,0 +1,57 @@
package ci
import (
"github.com/spf13/viper"
"github.com/wagoodman/dive/image"
"strings"
"testing"
)
func Test_Evaluator(t *testing.T) {
result, err := image.TestLoadDockerImageTar("../../.data/test-docker-image.tar")
if err != nil {
t.Fatalf("Test_Export: unable to fetch analysis: %v", err)
}
table := map[string]struct {
efficiency string
wastedBytes string
wastedPercent string
expectedPass bool
expectedResult map[string]RuleStatus
}{
"allFail": {"0.99", "1B", "0.01", false, map[string]RuleStatus{"lowestEfficiency": RuleFailed, "highestWastedBytes": RuleFailed, "highestUserWastedPercent": RuleFailed}},
"allPass": {"0.9", "50kB", "0.1", true, map[string]RuleStatus{"lowestEfficiency": RulePassed, "highestWastedBytes": RulePassed, "highestUserWastedPercent": RulePassed}},
"allDisabled": {"disabled", "disabled", "disabled", true, map[string]RuleStatus{"lowestEfficiency": RuleDisabled, "highestWastedBytes": RuleDisabled, "highestUserWastedPercent": RuleDisabled}},
}
for _, test := range table {
evaluator := NewEvaluator()
ciConfig := viper.New()
ciConfig.SetDefault("rules.lowestEfficiency", test.efficiency)
ciConfig.SetDefault("rules.highestWastedBytes", test.wastedBytes)
ciConfig.SetDefault("rules.highestUserWastedPercent", test.wastedPercent)
evaluator.Config = ciConfig
pass := evaluator.Evaluate(result)
if test.expectedPass != pass {
t.Errorf("Test_Evaluator: expected pass=%v, got %v", test.expectedPass, pass)
}
if len(test.expectedResult) != len(evaluator.Results) {
t.Errorf("Test_Evaluator: expected %v results, got %v", len(test.expectedResult), len(evaluator.Results))
}
for rule, actualResult := range evaluator.Results {
expectedStatus := test.expectedResult[strings.TrimPrefix(rule, "rules.")]
if expectedStatus != actualResult.status {
t.Errorf(" %v: expected %v rule failures, got %v", rule, expectedStatus, actualResult.status)
}
}
}
}

88
runtime/ci/rules.go Normal file

@ -0,0 +1,88 @@
package ci
import (
"fmt"
"github.com/dustin/go-humanize"
"github.com/logrusorgru/aurora"
"github.com/wagoodman/dive/image"
"strconv"
)
func newGenericCiRule(key string, evaluator func(*image.AnalysisResult, string) (RuleStatus, string)) *GenericCiRule {
return &GenericCiRule{
key: key,
evaluator: evaluator,
}
}
func (rule *GenericCiRule) Key() string {
return rule.key
}
func (rule *GenericCiRule) Evaluate(result *image.AnalysisResult, value string) (RuleStatus, string) {
return rule.evaluator(result, value)
}
func (status RuleStatus) String() string {
switch status {
case RulePassed:
return "PASS"
case RuleFailed:
return aurora.Bold(aurora.Inverse(aurora.Red("FAIL"))).String()
case RuleWarning:
return aurora.Blue("WARN").String()
case RuleDisabled:
return aurora.Blue("SKIP").String()
default:
return aurora.Inverse("Unknown").String()
}
}
func loadCiRules() []Rule {
var rules = make([]Rule, 0)
rules = append(rules, newGenericCiRule(
"rules.lowestEfficiency",
func(analysis *image.AnalysisResult, value string) (RuleStatus, string) {
lowestEfficiency, err := strconv.ParseFloat(value, 64)
if err != nil {
return RuleFailed, fmt.Sprintf("invalid config value ('%v'): %v", value, err)
}
if lowestEfficiency > analysis.Efficiency {
return RuleFailed, fmt.Sprintf("image efficiency is too low (efficiency=%v < threshold=%v)", analysis.Efficiency, lowestEfficiency)
}
return RulePassed, ""
},
))
rules = append(rules, newGenericCiRule(
"rules.highestWastedBytes",
func(analysis *image.AnalysisResult, value string) (RuleStatus, string) {
highestWastedBytes, err := humanize.ParseBytes(value)
if err != nil {
return RuleFailed, fmt.Sprintf("invalid config value ('%v'): %v", value, err)
}
if analysis.WastedBytes > highestWastedBytes {
return RuleFailed, fmt.Sprintf("too many bytes wasted (wasted-bytes=%v > threshold=%v)", analysis.WastedBytes, highestWastedBytes)
}
return RulePassed, ""
},
))
rules = append(rules, newGenericCiRule(
"rules.highestUserWastedPercent",
func(analysis *image.AnalysisResult, value string) (RuleStatus, string) {
highestUserWastedPercent, err := strconv.ParseFloat(value, 64)
if err != nil {
return RuleFailed, fmt.Sprintf("invalid config value ('%v'): %v", value, err)
}
if highestUserWastedPercent < analysis.WastedUserPercent {
return RuleFailed, fmt.Sprintf("too many bytes wasted, relative to the user bytes added (%%-user-wasted-bytes=%v > threshold=%v)", analysis.WastedUserPercent, highestUserWastedPercent)
}
return RulePassed, ""
},
))
return rules
}
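Each rule pairs a viper config key with an evaluator closure over the AnalysisResult, so adding a check is mostly a matter of appending another newGenericCiRule in loadCiRules (plus a default in NewEvaluator). A hedged sketch, where the rules.highestImageSizeBytes key is hypothetical and not part of this commit:

```go
package ci

import (
	"fmt"

	"github.com/dustin/go-humanize"
	"github.com/wagoodman/dive/image"
)

// hypotheticalImageSizeRule shows the shape of an additional rule;
// the rules.highestImageSizeBytes key does not exist in this commit.
func hypotheticalImageSizeRule() Rule {
	return newGenericCiRule(
		"rules.highestImageSizeBytes",
		func(analysis *image.AnalysisResult, value string) (RuleStatus, string) {
			threshold, err := humanize.ParseBytes(value)
			if err != nil {
				return RuleFailed, fmt.Sprintf("invalid config value ('%v'): %v", value, err)
			}
			if analysis.SizeBytes > threshold {
				return RuleFailed, fmt.Sprintf("image is too large (image-size=%v > threshold=%v)", analysis.SizeBytes, threshold)
			}
			return RulePassed, ""
		},
	)
}
```

To take effect, such a rule would also need to be appended to the slice returned by loadCiRules and given a default (or "disabled") value in NewEvaluator.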

47
runtime/ci/types.go Normal file

@ -0,0 +1,47 @@
package ci
import (
"github.com/spf13/viper"
"github.com/wagoodman/dive/image"
)
type RuleStatus int
type RuleResult struct {
status RuleStatus
message string
}
const (
RuleUnknown = iota
RulePassed
RuleFailed
RuleWarning
RuleDisabled
)
type Rule interface {
Key() string
Evaluate(*image.AnalysisResult, string) (RuleStatus, string)
}
type GenericCiRule struct {
key string
evaluator func(*image.AnalysisResult, string) (RuleStatus, string)
}
type Evaluator struct {
Config *viper.Viper
Rules []Rule
Results map[string]RuleResult
Tally ResultTally
Pass bool
}
type ResultTally struct {
Pass int
Fail int
Skip int
Warn int
Total int
}

54
runtime/export.go Normal file

@ -0,0 +1,54 @@
package runtime
import (
"encoding/json"
"github.com/wagoodman/dive/image"
"io/ioutil"
)
func newExport(analysis *image.AnalysisResult) *export {
data := export{}
data.Layer = make([]exportLayer, len(analysis.Layers))
data.Image.InefficientFiles = make([]inefficientFiles, len(analysis.Inefficiencies))
// export layers in order
for revIdx := len(analysis.Layers) - 1; revIdx >= 0; revIdx-- {
layer := analysis.Layers[revIdx]
idx := (len(analysis.Layers) - 1) - revIdx
data.Layer[idx] = exportLayer{
Index: idx,
DigestID: layer.Id(),
SizeBytes: layer.Size(),
Command: layer.Command(),
}
}
data.Image.SizeBytes = analysis.SizeBytes
data.Image.EfficiencyScore = analysis.Efficiency
data.Image.InefficientBytes = analysis.WastedBytes
for idx := 0; idx < len(analysis.Inefficiencies); idx++ {
fileData := analysis.Inefficiencies[len(analysis.Inefficiencies)-1-idx]
data.Image.InefficientFiles[idx] = inefficientFiles{
Count: len(fileData.Nodes),
SizeBytes: uint64(fileData.CumulativeSize),
File: fileData.Path,
}
}
return &data
}
func (exp *export) marshal() ([]byte, error) {
return json.MarshalIndent(&exp, "", " ")
}
func (exp *export) toFile(exportFilePath string) error {
payload, err := exp.marshal()
if err != nil {
return err
}
return ioutil.WriteFile(exportFilePath, payload, 0644)
}

134
runtime/export_test.go Normal file

@ -0,0 +1,134 @@
package runtime
import (
"github.com/wagoodman/dive/image"
"testing"
)
func Test_Export(t *testing.T) {
result, err := image.TestLoadDockerImageTar("../.data/test-docker-image.tar")
if err != nil {
t.Fatalf("Test_Export: unable to fetch analysis: %v", err)
}
export := newExport(result)
payload, err := export.marshal()
if err != nil {
t.Errorf("Test_Export: unable to export analysis: %v", err)
}
expectedResult := `{
"layer": [
{
"index": 0,
"digestId": "sha256:23bc2b70b2014dec0ac22f27bb93e9babd08cdd6f1115d0c955b9ff22b382f5a",
"sizeBytes": 1154361,
"command": "#(nop) ADD file:ce026b62356eec3ad1214f92be2c9dc063fe205bd5e600be3492c4dfb17148bd in / "
},
{
"index": 1,
"digestId": "sha256:a65b7d7ac139a0e4337bc3c73ce511f937d6140ef61a0108f7d4b8aab8d67274",
"sizeBytes": 6405,
"command": "#(nop) ADD file:139c3708fb6261126453e34483abd8bf7b26ed16d952fd976994d68e72d93be2 in /somefile.txt "
},
{
"index": 2,
"digestId": "sha256:93e208d471756ffbac88cf9c25feb442007f221d3bd73231e27b747a0a68927c",
"sizeBytes": 0,
"command": "mkdir -p /root/example/really/nested"
},
{
"index": 3,
"digestId": "sha256:4abad3abe3cb99ad7a492a9d9f6b3d66287c1646843c74128bbbec4f7be5aa9e",
"sizeBytes": 6405,
"command": "cp /somefile.txt /root/example/somefile1.txt"
},
{
"index": 4,
"digestId": "sha256:14c9a6ffcb6a0f32d1035f97373b19608e2d307961d8be156321c3f1c1504cbf",
"sizeBytes": 6405,
"command": "chmod 444 /root/example/somefile1.txt"
},
{
"index": 5,
"digestId": "sha256:778fb5770ef466f314e79cc9dc418eba76bfc0a64491ce7b167b76aa52c736c4",
"sizeBytes": 6405,
"command": "cp /somefile.txt /root/example/somefile2.txt"
},
{
"index": 6,
"digestId": "sha256:f275b8a31a71deb521cc048e6021e2ff6fa52bedb25c9b7bbe129a0195ddca5f",
"sizeBytes": 6405,
"command": "cp /somefile.txt /root/example/somefile3.txt"
},
{
"index": 7,
"digestId": "sha256:dd1effc5eb19894c3e9b57411c98dd1cf30fa1de4253c7fae53c9cea67267d83",
"sizeBytes": 6405,
"command": "mv /root/example/somefile3.txt /root/saved.txt"
},
{
"index": 8,
"digestId": "sha256:8d1869a0a066cdd12e48d648222866e77b5e2814f773bb3bd8774ab4052f0f1d",
"sizeBytes": 6405,
"command": "cp /root/saved.txt /root/.saved.txt"
},
{
"index": 9,
"digestId": "sha256:bc2e36423fa31a97223fd421f22c35466220fa160769abf697b8eb58c896b468",
"sizeBytes": 0,
"command": "rm -rf /root/example/"
},
{
"index": 10,
"digestId": "sha256:7f648d45ee7b6de2292162fba498b66cbaaf181da9004fcceef824c72dbae445",
"sizeBytes": 2187,
"command": "#(nop) ADD dir:7ec14b81316baa1a31c38c97686a8f030c98cba2035c968412749e33e0c4427e in /root/.data/ "
},
{
"index": 11,
"digestId": "sha256:a4b8f95f266d5c063c9a9473c45f2f85ddc183e37941b5e6b6b9d3c00e8e0457",
"sizeBytes": 6405,
"command": "cp /root/saved.txt /tmp/saved.again1.txt"
},
{
"index": 12,
"digestId": "sha256:22a44d45780a541e593a8862d80f3e14cb80b6bf76aa42ce68dc207a35bf3a4a",
"sizeBytes": 6405,
"command": "cp /root/saved.txt /root/.data/saved.again2.txt"
},
{
"index": 13,
"digestId": "sha256:ba689cac6a98c92d121fa5c9716a1bab526b8bb1fd6d43625c575b79e97300c5",
"sizeBytes": 6405,
"command": "chmod +x /root/saved.txt"
}
],
"image": {
"sizeBytes": 1220598,
"inefficientBytes": 32025,
"efficiencyScore": 0.9844212134184309,
"inefficientFiles": [
{
"count": 2,
"sizeBytes": 12810,
"file": "/root/saved.txt"
},
{
"count": 2,
"sizeBytes": 12810,
"file": "/root/example/somefile1.txt"
},
{
"count": 2,
"sizeBytes": 6405,
"file": "/root/example/somefile3.txt"
}
]
}
}`
actualResult := string(payload)
if expectedResult != actualResult {
t.Errorf("Test_Export: unexpected export result:\n%v", actualResult)
}
}

130
runtime/run.go Normal file

@ -0,0 +1,130 @@
package runtime
import (
"fmt"
"github.com/dustin/go-humanize"
"github.com/logrusorgru/aurora"
"github.com/wagoodman/dive/filetree"
"github.com/wagoodman/dive/image"
"github.com/wagoodman/dive/runtime/ci"
"github.com/wagoodman/dive/ui"
"github.com/wagoodman/dive/utils"
"io/ioutil"
"log"
"os"
"strconv"
)
func title(s string) string {
return aurora.Bold(s).String()
}
func runCi(analysis *image.AnalysisResult, options Options) {
fmt.Printf(" efficiency: %2.4f %%\n", analysis.Efficiency*100)
fmt.Printf(" wastedBytes: %d bytes (%s)\n", analysis.WastedBytes, humanize.Bytes(analysis.WastedBytes))
fmt.Printf(" userWastedPercent: %2.4f %%\n", analysis.WastedUserPercent*100)
fmt.Println(title("Run CI Validations..."))
evaluator := ci.NewEvaluator()
err := evaluator.LoadConfig(options.CiConfigFile)
if err != nil {
fmt.Println(" Using default CI config")
} else {
fmt.Printf(" Using CI config: %s\n", options.CiConfigFile)
}
pass := evaluator.Evaluate(analysis)
evaluator.Report()
if pass {
utils.Exit(0)
}
utils.Exit(1)
}
func runBuild(buildArgs []string) string {
iidfile, err := ioutil.TempFile("/tmp", "dive.*.iid")
if err != nil {
utils.Cleanup()
log.Fatal(err)
}
defer os.Remove(iidfile.Name())
allArgs := append([]string{"--iidfile", iidfile.Name()}, buildArgs...)
err = utils.RunDockerCmd("build", allArgs...)
if err != nil {
utils.Cleanup()
log.Fatal(err)
}
imageId, err := ioutil.ReadFile(iidfile.Name())
if err != nil {
utils.Cleanup()
log.Fatal(err)
}
return string(imageId)
}
func Run(options Options) {
doExport := options.ExportFile != ""
doBuild := len(options.BuildArgs) > 0
isCi, _ := strconv.ParseBool(os.Getenv("CI"))
if doBuild {
fmt.Println(title("Building image..."))
options.ImageId = runBuild(options.BuildArgs)
}
analyzer := image.GetAnalyzer(options.ImageId)
fmt.Println(title("Fetching image..."))
reader, err := analyzer.Fetch()
if err != nil {
fmt.Printf("cannot fetch image: %v\n", err)
utils.Exit(1)
}
defer reader.Close()
fmt.Println(title("Parsing image..."))
err = analyzer.Parse(reader)
if err != nil {
fmt.Printf("cannot parse image: %v\n", err)
utils.Exit(1)
}
if doExport {
fmt.Println(title(fmt.Sprintf("Analyzing image... (export to '%s')", options.ExportFile)))
} else {
fmt.Println(title("Analyzing image..."))
}
result, err := analyzer.Analyze()
if err != nil {
fmt.Printf("cannot analyze image: %v\n", err)
utils.Exit(1)
}
if doExport {
err = newExport(result).toFile(options.ExportFile)
if err != nil {
fmt.Printf("cannot write export file: %v\n", err)
utils.Exit(1)
}
}
if isCi {
runCi(result, options)
} else {
if doExport {
utils.Exit(0)
}
fmt.Println(title("Building cache..."))
cache := filetree.NewFileTreeCache(result.RefTrees)
cache.Build()
ui.Run(result, cache)
}
}
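For completeness, the cobra commands above now reduce to building an Options value and calling Run; a sketch of driving the CI path programmatically (Run checks the CI environment variable via strconv.ParseBool). The image tag and config path match the Makefile's run-ci target:

```go
package main

import (
	"os"

	"github.com/wagoodman/dive/runtime"
)

func main() {
	// CI=true makes Run skip the TUI and exit through the CI evaluator.
	os.Setenv("CI", "true")

	runtime.Run(runtime.Options{
		ImageId:      "dive-example:latest",
		CiConfigFile: ".data/.dive-ci",
	})
}
```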

33
runtime/types.go Normal file

@ -0,0 +1,33 @@
package runtime
type Options struct {
ImageId string
ExportFile string
CiConfigFile string
BuildArgs []string
}
type export struct {
Layer []exportLayer `json:"layer"`
Image exportImage `json:"image"`
}
type exportLayer struct {
Index int `json:"index"`
DigestID string `json:"digestId"`
SizeBytes uint64 `json:"sizeBytes"`
Command string `json:"command"`
}
type exportImage struct {
SizeBytes uint64 `json:"sizeBytes"`
InefficientBytes uint64 `json:"inefficientBytes"`
EfficiencyScore float64 `json:"efficiencyScore"`
InefficientFiles []inefficientFiles `json:"inefficientFiles"`
}
type inefficientFiles struct {
Count int `json:"count"`
SizeBytes uint64 `json:"sizeBytes"`
File string `json:"file"`
}


@ -105,7 +105,7 @@ func (view *DetailsView) Render() error {
data := view.inefficiencies[len(view.inefficiencies)-1-idx]
wastedSpace += data.CumulativeSize
// todo: make this report scrollable and exportable
// todo: make this report scrollable
if idx < height {
inefficiencyReport += fmt.Sprintf(template, strconv.Itoa(len(data.Nodes)), humanize.Bytes(uint64(data.CumulativeSize)), data.Path)
}