Merge pull request #231 from wagoodman/refactor-image-analysis

Refactor package structure
Alex Goodman 2019-09-22 11:54:34 -04:00 committed by GitHub
commit 3229efafa5
59 changed files with 417 additions and 388 deletions

View file

@ -1,7 +1,7 @@
version: 2
jobs:
golang-1.11-pipeline:
golang-1.11:
working_directory: /home/circleci/app
docker:
- image: circleci/golang:1.11
@ -21,7 +21,7 @@ jobs:
name: run static analysis & tests
command: make ci
golang-1.12-pipeline:
golang-1.12:
working_directory: /home/circleci/app
docker:
- image: circleci/golang:1.12
@ -41,7 +41,7 @@ jobs:
name: run static analysis & tests
command: make ci
golang-1.13-pipeline:
golang-1.13:
working_directory: /home/circleci/app
docker:
- image: circleci/golang:1.13
@ -65,6 +65,6 @@ workflows:
version: 2
commit:
jobs:
- golang-1.11-pipeline
- golang-1.12-pipeline
- golang-1.13-pipeline
- golang-1.11
- golang-1.12
- golang-1.13

4
.gitignore vendored
View file

@ -1,4 +1,5 @@
/.idea
/bin
# Binaries for programs and plugins
*.exe
@ -21,6 +22,3 @@
/dist
.cover
coverage.txt
# ignore the binary
dive

View file

@ -30,7 +30,7 @@ test-coverage: build
./.scripts/test-coverage.sh
validate:
grep -R 'const allowTestDataCapture = false' ui/
grep -R 'const allowTestDataCapture = false' runtime/ui/
go vet ./...
@! gofmt -s -l . 2>&1 | grep -vE '^\.git/' | grep -vE '^\.cache/'
golangci-lint run

View file

@ -2,6 +2,7 @@ package cmd
import (
"fmt"
"github.com/wagoodman/dive/dive/filetree"
"io/ioutil"
"os"
"path"
@ -11,7 +12,6 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/wagoodman/dive/filetree"
"github.com/wagoodman/dive/utils"
)

40
dive/filetree/diff.go Normal file
View file

@ -0,0 +1,40 @@
package filetree
import (
"fmt"
)
const (
Unmodified DiffType = iota
Modified
Added
Removed
)
// DiffType defines the comparison result between two FileNodes
type DiffType int
// String of a DiffType
func (diff DiffType) String() string {
switch diff {
case Unmodified:
return "Unmodified"
case Modified:
return "Modified"
case Added:
return "Added"
case Removed:
return "Removed"
default:
return fmt.Sprintf("%d", int(diff))
}
}
// merge two DiffTypes into a single result. Essentially, return the given value unless the two values differ,
// in which case we can only determine that there is "a change".
func (diff DiffType) merge(other DiffType) DiffType {
if diff == other {
return diff
}
return Modified
}
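
The merge rule above collapses two per-node comparison results into one: equal inputs pass through unchanged, while any disagreement can only be summarized as Modified. A minimal sketch of that behavior, written as a hypothetical table-driven test that would sit alongside diff.go inside the filetree package (merge is unexported, so it cannot be exercised from outside):

package filetree

import "testing"

// TestDiffTypeMerge illustrates the collapse rule: identical values are preserved,
// differing values degrade to Modified.
func TestDiffTypeMerge(t *testing.T) {
    cases := []struct {
        a, b, expected DiffType
    }{
        {Unmodified, Unmodified, Unmodified}, // no change on either side
        {Added, Added, Added},                // the same change on both sides survives
        {Added, Removed, Modified},           // conflicting changes collapse to Modified
        {Unmodified, Removed, Modified},      // any disagreement collapses to Modified
    }
    for _, c := range cases {
        if got := c.a.merge(c.b); got != c.expected {
            t.Errorf("%s.merge(%s) = %s, want %s", c.a, c.b, got, c.expected)
        }
    }
}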

View file

@ -7,6 +7,17 @@ import (
"github.com/sirupsen/logrus"
)
// EfficiencyData represents the storage and reference statistics for a given file tree path.
type EfficiencyData struct {
Path string
Nodes []*FileNode
CumulativeSize int64
minDiscoveredSize int64
}
// EfficiencySlice represents an ordered set of EfficiencyData data structures.
type EfficiencySlice []*EfficiencyData
// Len is required for sorting.
func (efs EfficiencySlice) Len() int {
return len(efs)

View file

@ -2,75 +2,23 @@ package filetree
import (
"archive/tar"
"fmt"
"io"
"github.com/cespare/xxhash"
"github.com/sirupsen/logrus"
"io"
"os"
)
const (
Unmodified DiffType = iota
Modified
Added
Removed
)
var GlobalFileTreeCollapse bool
// NewNodeData creates an empty NodeData struct for a FileNode
func NewNodeData() *NodeData {
return &NodeData{
ViewInfo: *NewViewInfo(),
FileInfo: FileInfo{},
DiffType: Unmodified,
}
}
// Copy duplicates a NodeData
func (data *NodeData) Copy() *NodeData {
return &NodeData{
ViewInfo: *data.ViewInfo.Copy(),
FileInfo: *data.FileInfo.Copy(),
DiffType: data.DiffType,
}
}
// NewViewInfo creates a default ViewInfo
func NewViewInfo() (view *ViewInfo) {
return &ViewInfo{
Collapsed: GlobalFileTreeCollapse,
Hidden: false,
}
}
// Copy duplicates a ViewInfo
func (view *ViewInfo) Copy() (newView *ViewInfo) {
newView = NewViewInfo()
*newView = *view
return newView
}
func getHashFromReader(reader io.Reader) uint64 {
h := xxhash.New()
buf := make([]byte, 1024)
for {
n, err := reader.Read(buf)
if err != nil && err != io.EOF {
logrus.Panic(err)
}
if n == 0 {
break
}
_, err = h.Write(buf[:n])
if err != nil {
logrus.Panic(err)
}
}
return h.Sum64()
// FileInfo contains tar metadata for a specific FileNode
type FileInfo struct {
Path string
TypeFlag byte
Linkname string
hash uint64
Size int64
Mode os.FileMode
Uid int
Gid int
IsDir bool
}
// NewFileInfo extracts the metadata from a tar header and file contents and generates a new FileInfo object.
@ -135,27 +83,24 @@ func (data *FileInfo) Compare(other FileInfo) DiffType {
return Modified
}
// String of a DiffType
func (diff DiffType) String() string {
switch diff {
case Unmodified:
return "Unmodified"
case Modified:
return "Modified"
case Added:
return "Added"
case Removed:
return "Removed"
default:
return fmt.Sprintf("%d", int(diff))
}
}
func getHashFromReader(reader io.Reader) uint64 {
h := xxhash.New()
// merge two DiffTypes into a single result. Essentially, return the given value unless the two values differ,
// in which case we can only determine that there is "a change".
func (diff DiffType) merge(other DiffType) DiffType {
if diff == other {
return diff
buf := make([]byte, 1024)
for {
n, err := reader.Read(buf)
if err != nil && err != io.EOF {
logrus.Panic(err)
}
if n == 0 {
break
}
_, err = h.Write(buf[:n])
if err != nil {
logrus.Panic(err)
}
}
return Modified
return h.Sum64()
}
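
getHashFromReader streams the file contents through xxhash in 1 KB chunks, so the digest it produces matches hashing the whole buffer at once. A hypothetical property test sketching that equivalence, again assumed to live inside the filetree package:

package filetree

import (
    "bytes"
    "testing"

    "github.com/cespare/xxhash"
)

// TestGetHashFromReader checks that chunked reading yields the same digest as the
// one-shot package-level helper.
func TestGetHashFromReader(t *testing.T) {
    data := bytes.Repeat([]byte("dive"), 10000) // ~40 KB, forces several loop iterations
    if got, want := getHashFromReader(bytes.NewReader(data)), xxhash.Sum64(data); got != want {
        t.Errorf("chunked hash %d does not match one-shot hash %d", got, want)
    }
}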

View file

@ -24,6 +24,16 @@ var diffTypeColor = map[DiffType]*color.Color{
Unmodified: color.New(color.Reset),
}
// FileNode represents a single file, its relation to files beneath it, the tree it exists in, and the metadata of the given file.
type FileNode struct {
Tree *FileTree
Parent *FileNode
Name string
Data NodeData
Children map[string]*FileNode
path string
}
// NewNode creates a new FileNode relative to the given parent node with a payload.
func NewNode(parent *FileNode, name string, data FileInfo) (node *FileNode) {
node = new(FileNode)

View file

@ -21,6 +21,15 @@ const (
collapsedItem = "⊕ "
)
// FileTree represents a set of files, directories, and their relations.
type FileTree struct {
Root *FileNode
Size int
FileSize uint64
Name string
Id uuid.UUID
}
// NewFileTree creates an empty FileTree
func NewFileTree() (tree *FileTree) {
tree = new(FileTree)

View file

@ -0,0 +1,28 @@
package filetree
var GlobalFileTreeCollapse bool
// NodeData is the payload for a FileNode
type NodeData struct {
ViewInfo ViewInfo
FileInfo FileInfo
DiffType DiffType
}
// NewNodeData creates an empty NodeData struct for a FileNode
func NewNodeData() *NodeData {
return &NodeData{
ViewInfo: *NewViewInfo(),
FileInfo: FileInfo{},
DiffType: Unmodified,
}
}
// Copy duplicates a NodeData
func (data *NodeData) Copy() *NodeData {
return &NodeData{
ViewInfo: *data.ViewInfo.Copy(),
FileInfo: *data.FileInfo.Copy(),
DiffType: data.DiffType,
}
}

View file

@ -0,0 +1,22 @@
package filetree
// ViewInfo contains UI specific detail for a specific FileNode
type ViewInfo struct {
Collapsed bool
Hidden bool
}
// NewViewInfo creates a default ViewInfo
func NewViewInfo() (view *ViewInfo) {
return &ViewInfo{
Collapsed: GlobalFileTreeCollapse,
Hidden: false,
}
}
// Copy duplicates a ViewInfo
func (view *ViewInfo) Copy() (newView *ViewInfo) {
newView = NewViewInfo()
*newView = *view
return newView
}

12
dive/get_analyzer.go Normal file
View file

@ -0,0 +1,12 @@
package dive
import (
"github.com/wagoodman/dive/dive/image"
"github.com/wagoodman/dive/dive/image/docker"
)
func GetAnalyzer(imageID string) image.Analyzer {
// u, _ := url.Parse(imageID)
// fmt.Printf("\n\nurl: %+v\n", u.Scheme)
return docker.NewImageAnalyzer(imageID)
}

23
dive/image/analyzer.go Normal file
View file

@ -0,0 +1,23 @@
package image
import (
"github.com/wagoodman/dive/dive/filetree"
"io"
)
type Analyzer interface {
Fetch() (io.ReadCloser, error)
Parse(io.ReadCloser) error
Analyze() (*AnalysisResult, error)
}
type AnalysisResult struct {
Layers []Layer
RefTrees []*filetree.FileTree
Efficiency float64
SizeBytes uint64
UserSizeByes uint64 // this is all bytes except for the base image
WastedUserPercent float64 // = wasted-bytes/user-size-bytes
WastedBytes uint64
Inefficiencies filetree.EfficiencySlice
}
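
The Fetch → Parse → Analyze contract is exactly what runtime.Run drives later in this diff. A minimal sketch of a standalone caller, assuming the post-refactor import path github.com/wagoodman/dive/dive; the image reference "alpine:latest" is only an example:

package main

import (
    "fmt"
    "log"

    "github.com/wagoodman/dive/dive"
)

func main() {
    // GetAnalyzer currently always returns the Docker analyzer (see dive/get_analyzer.go).
    analyzer := dive.GetAnalyzer("alpine:latest")

    // Fetch saves the image to a tar stream, pulling it first if it is not available locally.
    reader, err := analyzer.Fetch()
    if err != nil {
        log.Fatalf("cannot fetch image: %v", err)
    }
    defer reader.Close()

    // Parse walks the tar once, building a FileTree per layer and collecting the json metadata files.
    if err := analyzer.Parse(reader); err != nil {
        log.Fatalf("cannot parse image: %v", err)
    }

    // Analyze assembles the layers, reference trees, and efficiency statistics.
    result, err := analyzer.Analyze()
    if err != nil {
        log.Fatalf("cannot analyze image: %v", err)
    }

    fmt.Printf("layers: %d  efficiency: %2.4f %%  wasted: %d bytes\n",
        len(result.Layers), result.Efficiency*100, result.WastedBytes)
}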

View file

@ -1,9 +1,9 @@
package image
package docker
import (
"archive/tar"
"encoding/json"
"fmt"
"github.com/wagoodman/dive/dive/image"
"io"
"io/ioutil"
"net/http"
@ -12,16 +12,24 @@ import (
"github.com/docker/cli/cli/connhelper"
"github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/wagoodman/dive/filetree"
"github.com/wagoodman/dive/dive/filetree"
"github.com/wagoodman/dive/utils"
"golang.org/x/net/context"
)
var dockerVersion string
func newDockerImageAnalyzer(imageId string) Analyzer {
return &dockerImageAnalyzer{
type imageAnalyzer struct {
id string
client *client.Client
jsonFiles map[string][]byte
trees []*filetree.FileTree
layerMap map[string]*filetree.FileTree
layers []*dockerLayer
}
func NewImageAnalyzer(imageId string) *imageAnalyzer {
return &imageAnalyzer{
// store discovered json files in a map so we can read the image in one pass
jsonFiles: make(map[string][]byte),
layerMap: make(map[string]*filetree.FileTree),
@ -29,39 +37,10 @@ func newDockerImageAnalyzer(imageId string) Analyzer {
}
}
func newDockerImageManifest(manifestBytes []byte) dockerImageManifest {
var manifest []dockerImageManifest
err := json.Unmarshal(manifestBytes, &manifest)
if err != nil {
logrus.Panic(err)
}
return manifest[0]
}
func newDockerImageConfig(configBytes []byte) dockerImageConfig {
var imageConfig dockerImageConfig
err := json.Unmarshal(configBytes, &imageConfig)
if err != nil {
logrus.Panic(err)
}
layerIdx := 0
for idx := range imageConfig.History {
if imageConfig.History[idx].EmptyLayer {
imageConfig.History[idx].ID = "<missing>"
} else {
imageConfig.History[idx].ID = imageConfig.RootFs.DiffIds[layerIdx]
layerIdx++
}
}
return imageConfig
}
func (image *dockerImageAnalyzer) Fetch() (io.ReadCloser, error) {
func (img *imageAnalyzer) Fetch() (io.ReadCloser, error) {
var err error
// pull the image if it does not exist
// pull the img if it does not exist
ctx := context.Background()
host := os.Getenv("DOCKER_HOST")
@ -94,11 +73,11 @@ func (image *dockerImageAnalyzer) Fetch() (io.ReadCloser, error) {
}
clientOpts = append(clientOpts, client.WithVersion(dockerVersion))
image.client, err = client.NewClientWithOpts(clientOpts...)
img.client, err = client.NewClientWithOpts(clientOpts...)
if err != nil {
return nil, err
}
_, _, err = image.client.ImageInspectWithRaw(ctx, image.id)
_, _, err = img.client.ImageInspectWithRaw(ctx, img.id)
if err != nil {
if !utils.IsDockerClientAvailable() {
@ -106,14 +85,14 @@ func (image *dockerImageAnalyzer) Fetch() (io.ReadCloser, error) {
}
// don't use the API, the CLI has more informative output
fmt.Println("Image not available locally. Trying to pull '" + image.id + "'...")
err = utils.RunDockerCmd("pull", image.id)
fmt.Println("Image not available locally. Trying to pull '" + img.id + "'...")
err = utils.RunDockerCmd("pull", img.id)
if err != nil {
return nil, err
}
}
readCloser, err := image.client.ImageSave(ctx, []string{image.id})
readCloser, err := img.client.ImageSave(ctx, []string{img.id})
if err != nil {
return nil, err
}
@ -121,7 +100,7 @@ func (image *dockerImageAnalyzer) Fetch() (io.ReadCloser, error) {
return readCloser, nil
}
func (image *dockerImageAnalyzer) Parse(tarFile io.ReadCloser) error {
func (img *imageAnalyzer) Parse(tarFile io.ReadCloser) error {
tarReader := tar.NewReader(tarFile)
var currentLayer uint
@ -148,7 +127,7 @@ func (image *dockerImageAnalyzer) Parse(tarFile io.ReadCloser) error {
return err
}
layerReader := tar.NewReader(tarReader)
err := image.processLayerTar(name, currentLayer, layerReader)
err := img.processLayerTar(name, currentLayer, layerReader)
if err != nil {
return err
}
@ -157,7 +136,7 @@ func (image *dockerImageAnalyzer) Parse(tarFile io.ReadCloser) error {
if err != nil {
return err
}
image.jsonFiles[name] = fileBuffer
img.jsonFiles[name] = fileBuffer
}
}
}
@ -165,31 +144,31 @@ func (image *dockerImageAnalyzer) Parse(tarFile io.ReadCloser) error {
return nil
}
func (image *dockerImageAnalyzer) Analyze() (*AnalysisResult, error) {
image.trees = make([]*filetree.FileTree, 0)
func (img *imageAnalyzer) Analyze() (*image.AnalysisResult, error) {
img.trees = make([]*filetree.FileTree, 0)
manifest := newDockerImageManifest(image.jsonFiles["manifest.json"])
config := newDockerImageConfig(image.jsonFiles[manifest.ConfigPath])
manifest := newDockerImageManifest(img.jsonFiles["manifest.json"])
config := newDockerImageConfig(img.jsonFiles[manifest.ConfigPath])
// build the content tree
for _, treeName := range manifest.LayerTarPaths {
image.trees = append(image.trees, image.layerMap[treeName])
img.trees = append(img.trees, img.layerMap[treeName])
}
// build the layers array
image.layers = make([]*dockerLayer, len(image.trees))
img.layers = make([]*dockerLayer, len(img.trees))
// note that the image config stores images in reverse chronological order, so iterate backwards through layers
// note that the img config stores images in reverse chronological order, so iterate backwards through layers
// as you iterate chronologically through history (ignoring history items that have no layer contents)
// Note: history is not required metadata in a docker image!
// Note: history is not required metadata in a docker img!
tarPathIdx := 0
histIdx := 0
for layerIdx := len(image.trees) - 1; layerIdx >= 0; layerIdx-- {
for layerIdx := len(img.trees) - 1; layerIdx >= 0; layerIdx-- {
tree := image.trees[(len(image.trees)-1)-layerIdx]
tree := img.trees[(len(img.trees)-1)-layerIdx]
// ignore empty layers, we are only observing layers with content
historyObj := dockerImageHistoryEntry{
historyObj := imageHistoryEntry{
CreatedBy: "(missing)",
}
for nextHistIdx := histIdx; nextHistIdx < len(config.History); nextHistIdx++ {
@ -203,22 +182,22 @@ func (image *dockerImageAnalyzer) Analyze() (*AnalysisResult, error) {
histIdx++
}
image.layers[layerIdx] = &dockerLayer{
img.layers[layerIdx] = &dockerLayer{
history: historyObj,
index: tarPathIdx,
tree: image.trees[layerIdx],
tree: img.trees[layerIdx],
tarPath: manifest.LayerTarPaths[tarPathIdx],
}
image.layers[layerIdx].history.Size = tree.FileSize
img.layers[layerIdx].history.Size = tree.FileSize
tarPathIdx++
}
efficiency, inefficiencies := filetree.Efficiency(image.trees)
efficiency, inefficiencies := filetree.Efficiency(img.trees)
var sizeBytes, userSizeBytes uint64
layers := make([]Layer, len(image.layers))
for i, v := range image.layers {
layers := make([]image.Layer, len(img.layers))
for i, v := range img.layers {
layers[i] = v
sizeBytes += v.Size()
if i != 0 {
@ -232,9 +211,9 @@ func (image *dockerImageAnalyzer) Analyze() (*AnalysisResult, error) {
wastedBytes += uint64(fileData.CumulativeSize)
}
return &AnalysisResult{
return &image.AnalysisResult{
Layers: layers,
RefTrees: image.trees,
RefTrees: img.trees,
Efficiency: efficiency,
UserSizeByes: userSizeBytes,
SizeBytes: sizeBytes,
@ -244,11 +223,11 @@ func (image *dockerImageAnalyzer) Analyze() (*AnalysisResult, error) {
}, nil
}
func (image *dockerImageAnalyzer) processLayerTar(name string, layerIdx uint, reader *tar.Reader) error {
func (img *imageAnalyzer) processLayerTar(name string, layerIdx uint, reader *tar.Reader) error {
tree := filetree.NewFileTree()
tree.Name = name
fileInfos, err := image.getFileList(reader)
fileInfos, err := img.getFileList(reader)
if err != nil {
return err
}
@ -262,11 +241,11 @@ func (image *dockerImageAnalyzer) processLayerTar(name string, layerIdx uint, re
}
}
image.layerMap[tree.Name] = tree
img.layerMap[tree.Name] = tree
return nil
}
func (image *dockerImageAnalyzer) getFileList(tarReader *tar.Reader) ([]filetree.FileInfo, error) {
func (img *imageAnalyzer) getFileList(tarReader *tar.Reader) ([]filetree.FileInfo, error) {
var files []filetree.FileInfo
for {

View file

@ -0,0 +1,36 @@
package docker
import (
"encoding/json"
"github.com/sirupsen/logrus"
)
type imageConfig struct {
History []imageHistoryEntry `json:"history"`
RootFs rootFs `json:"rootfs"`
}
type rootFs struct {
Type string `json:"type"`
DiffIds []string `json:"diff_ids"`
}
func newDockerImageConfig(configBytes []byte) imageConfig {
var imageConfig imageConfig
err := json.Unmarshal(configBytes, &imageConfig)
if err != nil {
logrus.Panic(err)
}
layerIdx := 0
for idx := range imageConfig.History {
if imageConfig.History[idx].EmptyLayer {
imageConfig.History[idx].ID = "<missing>"
} else {
imageConfig.History[idx].ID = imageConfig.RootFs.DiffIds[layerIdx]
layerIdx++
}
}
return imageConfig
}
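
The loop above stitches layer IDs back onto the history entries: each non-empty history item consumes the next diff_id in order, while metadata-only instructions (empty_layer) are tagged "<missing>". A hypothetical test in the same docker package illustrating that mapping, with a hand-written config blob:

package docker

import "testing"

// TestNewDockerImageConfig: the first history entry owns the only diff_id, and the
// empty layer gets the "<missing>" placeholder.
func TestNewDockerImageConfig(t *testing.T) {
    configBytes := []byte(`{
        "history": [
            {"created_by": "ADD rootfs.tar /"},
            {"created_by": "CMD [\"sh\"]", "empty_layer": true}
        ],
        "rootfs": {"type": "layers", "diff_ids": ["sha256:aaaa"]}
    }`)

    config := newDockerImageConfig(configBytes)

    if config.History[0].ID != "sha256:aaaa" {
        t.Errorf("expected the first diff_id, got %q", config.History[0].ID)
    }
    if config.History[1].ID != "<missing>" {
        t.Errorf("expected the empty layer to be marked <missing>, got %q", config.History[1].ID)
    }
}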

View file

@ -0,0 +1,21 @@
package docker
import (
"encoding/json"
"github.com/sirupsen/logrus"
)
type imageManifest struct {
ConfigPath string `json:"Config"`
RepoTags []string `json:"RepoTags"`
LayerTarPaths []string `json:"Layers"`
}
func newDockerImageManifest(manifestBytes []byte) imageManifest {
var manifest []imageManifest
err := json.Unmarshal(manifestBytes, &manifest)
if err != nil {
logrus.Panic(err)
}
return manifest[0]
}
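
manifest.json in a `docker save` tarball is a JSON array with one entry per exported image; the constructor above keeps only the first. A small hypothetical test in the same package showing the three keys the struct tags map (the paths and tag are made up):

package docker

import "testing"

// TestNewDockerImageManifest: Config, RepoTags, and Layers map onto
// ConfigPath, RepoTags, and LayerTarPaths.
func TestNewDockerImageManifest(t *testing.T) {
    manifestBytes := []byte(`[{
        "Config": "abc123.json",
        "RepoTags": ["example:latest"],
        "Layers": ["aaaa/layer.tar", "bbbb/layer.tar"]
    }]`)

    manifest := newDockerImageManifest(manifestBytes)

    if manifest.ConfigPath != "abc123.json" || len(manifest.LayerTarPaths) != 2 {
        t.Errorf("unexpected manifest: %+v", manifest)
    }
}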

View file

@ -1,17 +1,30 @@
package image
package docker
import (
"fmt"
"github.com/wagoodman/dive/dive/image"
"strings"
"github.com/dustin/go-humanize"
"github.com/wagoodman/dive/filetree"
"github.com/wagoodman/dive/dive/filetree"
)
const (
// LayerFormat = "%-15s %7s %s"
LayerFormat = "%7s %s"
)
// Layer represents a Docker image layer and metadata
type dockerLayer struct {
tarPath string
history imageHistoryEntry
index int
tree *filetree.FileTree
}
type imageHistoryEntry struct {
ID string
Size uint64
Created string `json:"created"`
Author string `json:"author"`
CreatedBy string `json:"created_by"`
EmptyLayer bool `json:"empty_layer"`
}
// ShortId returns the truncated id of the current layer.
func (layer *dockerLayer) TarId() string {
@ -60,17 +73,21 @@ func (layer *dockerLayer) ShortId() string {
return id
}
func (layer *dockerLayer) StringFormat() string {
return image.LayerFormat
}
// String represents a layer in a columnar format.
func (layer *dockerLayer) String() string {
if layer.index == 0 {
return fmt.Sprintf(LayerFormat,
return fmt.Sprintf(image.LayerFormat,
// layer.ShortId(),
// fmt.Sprintf("%d",layer.Index()),
humanize.Bytes(layer.Size()),
"FROM "+layer.ShortId())
}
return fmt.Sprintf(LayerFormat,
return fmt.Sprintf(image.LayerFormat,
// layer.ShortId(),
// fmt.Sprintf("%d",layer.Index()),
humanize.Bytes(layer.Size()),

View file

@ -1,16 +1,17 @@
package image
package docker
import (
"github.com/wagoodman/dive/dive/image"
"os"
)
func TestLoadDockerImageTar(tarPath string) (*AnalysisResult, error) {
func TestLoadDockerImageTar(tarPath string) (*image.AnalysisResult, error) {
f, err := os.Open(tarPath)
if err != nil {
return nil, err
}
defer f.Close()
analyzer := newDockerImageAnalyzer("dive-test:latest")
analyzer := NewImageAnalyzer("dive-test:latest")
err = analyzer.Parse(f)
if err != nil {
return nil, err

19
dive/image/layer.go Normal file
View file

@ -0,0 +1,19 @@
package image
import (
"github.com/wagoodman/dive/dive/filetree"
)
const (
LayerFormat = "%7s %s"
)
type Layer interface {
Id() string
ShortId() string
Index() int
Command() string
Size() uint64
Tree() *filetree.FileTree
String() string
}
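
LayerFormat now lives here so that any Layer implementation and the UI can share one column layout. A sketch of the intended use (dockerLayer.String earlier in this diff builds its rows the same way); the printLayers helper is hypothetical:

package main

import (
    "fmt"

    "github.com/dustin/go-humanize"
    "github.com/wagoodman/dive/dive/image"
)

// printLayers renders one row per layer using the shared column format ("%7s %s").
func printLayers(layers []image.Layer) {
    for _, layer := range layers {
        fmt.Printf(image.LayerFormat+"\n", humanize.Bytes(layer.Size()), layer.Command())
    }
}

func main() {
    // In practice the slice comes from image.AnalysisResult.Layers.
    printLayers(nil)
}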

View file

@ -1,66 +0,0 @@
package filetree
import (
"os"
"github.com/google/uuid"
)
// FileTree represents a set of files, directories, and their relations.
type FileTree struct {
Root *FileNode
Size int
FileSize uint64
Name string
Id uuid.UUID
}
// FileNode represents a single file, its relation to files beneath it, the tree it exists in, and the metadata of the given file.
type FileNode struct {
Tree *FileTree
Parent *FileNode
Name string
Data NodeData
Children map[string]*FileNode
path string
}
// NodeData is the payload for a FileNode
type NodeData struct {
ViewInfo ViewInfo
FileInfo FileInfo
DiffType DiffType
}
// ViewInfo contains UI specific detail for a specific FileNode
type ViewInfo struct {
Collapsed bool
Hidden bool
}
// FileInfo contains tar metadata for a specific FileNode
type FileInfo struct {
Path string
TypeFlag byte
Linkname string
hash uint64
Size int64
Mode os.FileMode
Uid int
Gid int
IsDir bool
}
// DiffType defines the comparison result between two FileNodes
type DiffType int
// EfficiencyData represents the storage and reference statistics for a given file tree path.
type EfficiencyData struct {
Path string
Nodes []*FileNode
CumulativeSize int64
minDiscoveredSize int64
}
// EfficiencySlice represents an ordered set of EfficiencyData data structures.
type EfficiencySlice []*EfficiencyData

1
go.mod
View file

@ -46,6 +46,7 @@ require (
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297
golang.org/x/sys v0.0.0-20190907184412-d223b2b6db03 // indirect
golang.org/x/text v0.3.2 // indirect
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
gotest.tools v2.2.0+incompatible // indirect
)

2
go.sum
View file

@ -99,6 +99,7 @@ github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
@ -405,6 +406,7 @@ golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd h1:7E3PabyysDSEjnaANKBgums/hyvMI/HoHQ50qZEzTrg=
golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=

View file

@ -1,10 +0,0 @@
package image
type AnalyzerFactory func(string) Analyzer
func GetAnalyzer(imageID string) Analyzer {
// todo: add ability to have multiple image formats... for the meantime only use docker
var factory AnalyzerFactory = newDockerImageAnalyzer
return factory(imageID)
}

View file

@ -1,80 +0,0 @@
package image
import (
"io"
"github.com/docker/docker/client"
"github.com/wagoodman/dive/filetree"
)
type Parser interface {
}
type Analyzer interface {
Fetch() (io.ReadCloser, error)
Parse(io.ReadCloser) error
Analyze() (*AnalysisResult, error)
}
type Layer interface {
Id() string
ShortId() string
Index() int
Command() string
Size() uint64
Tree() *filetree.FileTree
String() string
}
type AnalysisResult struct {
Layers []Layer
RefTrees []*filetree.FileTree
Efficiency float64
SizeBytes uint64
UserSizeByes uint64 // this is all bytes except for the base image
WastedUserPercent float64 // = wasted-bytes/user-size-bytes
WastedBytes uint64
Inefficiencies filetree.EfficiencySlice
}
type dockerImageAnalyzer struct {
id string
client *client.Client
jsonFiles map[string][]byte
trees []*filetree.FileTree
layerMap map[string]*filetree.FileTree
layers []*dockerLayer
}
type dockerImageHistoryEntry struct {
ID string
Size uint64
Created string `json:"created"`
Author string `json:"author"`
CreatedBy string `json:"created_by"`
EmptyLayer bool `json:"empty_layer"`
}
type dockerImageManifest struct {
ConfigPath string `json:"Config"`
RepoTags []string `json:"RepoTags"`
LayerTarPaths []string `json:"Layers"`
}
type dockerImageConfig struct {
History []dockerImageHistoryEntry `json:"history"`
RootFs dockerRootFs `json:"rootfs"`
}
type dockerRootFs struct {
Type string `json:"type"`
DiffIds []string `json:"diff_ids"`
}
// Layer represents a Docker image layer and metadata
type dockerLayer struct {
tarPath string
history dockerImageHistoryEntry
index int
tree *filetree.FileTree
}

View file

@ -1,8 +1,10 @@
package runtime
package ci
import (
"fmt"
"github.com/dustin/go-humanize"
"github.com/wagoodman/dive/dive/image"
"github.com/wagoodman/dive/utils"
"sort"
"strconv"
"strings"
@ -10,7 +12,6 @@ import (
"github.com/spf13/viper"
"github.com/logrusorgru/aurora"
"github.com/wagoodman/dive/image"
)
type CiEvaluator struct {
@ -133,7 +134,7 @@ func (ci *CiEvaluator) Evaluate(analysis *image.AnalysisResult) bool {
}
func (ci *CiEvaluator) Report() {
fmt.Println(title("Inefficient Files:"))
fmt.Println(utils.TitleFormat("Inefficient Files:"))
template := "%5s %12s %-s\n"
fmt.Printf(template, "Count", "Wasted Space", "File Path")
@ -142,11 +143,11 @@ func (ci *CiEvaluator) Report() {
fmt.Println("None")
} else {
for _, file := range ci.InefficientFiles {
fmt.Printf(template, strconv.Itoa(file.References), humanize.Bytes(uint64(file.SizeBytes)), file.Path)
fmt.Printf(template, strconv.Itoa(file.References), humanize.Bytes(file.SizeBytes), file.Path)
}
}
fmt.Println(title("Results:"))
fmt.Println(utils.TitleFormat("Results:"))
status := "PASS"

View file

@ -1,16 +1,16 @@
package runtime
package ci
import (
"github.com/wagoodman/dive/dive/image/docker"
"strings"
"testing"
"github.com/spf13/viper"
"github.com/wagoodman/dive/image"
)
func Test_Evaluator(t *testing.T) {
result, err := image.TestLoadDockerImageTar("../.data/test-docker-image.tar")
result, err := docker.TestLoadDockerImageTar("../../.data/test-docker-image.tar")
if err != nil {
t.Fatalf("Test_Export: unable to fetch analysis: %v", err)
}

View file

@ -1,4 +1,4 @@
package runtime
package ci
type ReferenceFile struct {
References int `json:"count"`

View file

@ -1,14 +1,14 @@
package runtime
package ci
import (
"fmt"
"github.com/wagoodman/dive/dive/image"
"strconv"
"github.com/spf13/viper"
"github.com/dustin/go-humanize"
"github.com/logrusorgru/aurora"
"github.com/wagoodman/dive/image"
)
const (
@ -25,7 +25,7 @@ type CiRule interface {
Key() string
Configuration() string
Validate() error
Evaluate(*image.AnalysisResult) (RuleStatus, string)
Evaluate(result *image.AnalysisResult) (RuleStatus, string)
}
type GenericCiRule struct {

View file

@ -1,10 +1,9 @@
package runtime
package export
import (
"encoding/json"
"github.com/wagoodman/dive/dive/image"
"io/ioutil"
"github.com/wagoodman/dive/image"
)
type export struct {
@ -20,16 +19,22 @@ type exportLayer struct {
}
type exportImage struct {
SizeBytes uint64 `json:"sizeBytes"`
InefficientBytes uint64 `json:"inefficientBytes"`
EfficiencyScore float64 `json:"efficiencyScore"`
InefficientFiles []ReferenceFile `json:"ReferenceFile"`
SizeBytes uint64 `json:"sizeBytes"`
InefficientBytes uint64 `json:"inefficientBytes"`
EfficiencyScore float64 `json:"efficiencyScore"`
InefficientFiles []exportReferenceFile `json:"exportReferenceFile"`
}
func newExport(analysis *image.AnalysisResult) *export {
type exportReferenceFile struct {
References int `json:"count"`
SizeBytes uint64 `json:"sizeBytes"`
Path string `json:"file"`
}
func NewExport(analysis *image.AnalysisResult) *export {
data := export{}
data.Layer = make([]exportLayer, len(analysis.Layers))
data.Image.InefficientFiles = make([]ReferenceFile, len(analysis.Inefficiencies))
data.Image.InefficientFiles = make([]exportReferenceFile, len(analysis.Inefficiencies))
// export layers in order
for revIdx := len(analysis.Layers) - 1; revIdx >= 0; revIdx-- {
@ -51,7 +56,7 @@ func newExport(analysis *image.AnalysisResult) *export {
for idx := 0; idx < len(analysis.Inefficiencies); idx++ {
fileData := analysis.Inefficiencies[len(analysis.Inefficiencies)-1-idx]
data.Image.InefficientFiles[idx] = ReferenceFile{
data.Image.InefficientFiles[idx] = exportReferenceFile{
References: len(fileData.Nodes),
SizeBytes: uint64(fileData.CumulativeSize),
Path: fileData.Path,
@ -65,7 +70,7 @@ func (exp *export) marshal() ([]byte, error) {
return json.MarshalIndent(&exp, "", " ")
}
func (exp *export) toFile(exportFilePath string) error {
func (exp *export) ToFile(exportFilePath string) error {
payload, err := exp.marshal()
if err != nil {
return err

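NewExport and ToFile are exported now so that runtime.Run (later in this diff) can call into this package directly. A minimal sketch of that call under an assumed wrapper; the WriteReport helper and its path argument are hypothetical, and the keys named in the comment are the ones visible in the struct tags and in export_test.go:

package report

import (
    "github.com/wagoodman/dive/dive/image"
    "github.com/wagoodman/dive/runtime/export"
)

// WriteReport mirrors the export branch of runtime.Run: the analysis is flattened
// to JSON whose image block carries sizeBytes, inefficientBytes, efficiencyScore,
// and an exportReferenceFile array of {count, sizeBytes, file} entries.
func WriteReport(result *image.AnalysisResult, path string) error {
    return export.NewExport(result).ToFile(path)
}
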
View file

@ -1,18 +1,17 @@
package runtime
package export
import (
"github.com/wagoodman/dive/dive/image/docker"
"testing"
"github.com/wagoodman/dive/image"
)
func Test_Export(t *testing.T) {
result, err := image.TestLoadDockerImageTar("../.data/test-docker-image.tar")
result, err := docker.TestLoadDockerImageTar("../../.data/test-docker-image.tar")
if err != nil {
t.Fatalf("Test_Export: unable to fetch analysis: %v", err)
}
export := newExport(result)
export := NewExport(result)
payload, err := export.marshal()
if err != nil {
t.Errorf("Test_Export: unable to export analysis: %v", err)
@ -109,7 +108,7 @@ func Test_Export(t *testing.T) {
"sizeBytes": 1220598,
"inefficientBytes": 32025,
"efficiencyScore": 0.9844212134184309,
"ReferenceFile": [
"exportReferenceFile": [
{
"count": 2,
"sizeBytes": 12810,

View file

@ -2,28 +2,26 @@ package runtime
import (
"fmt"
"github.com/wagoodman/dive/dive"
"github.com/wagoodman/dive/runtime/ci"
"github.com/wagoodman/dive/runtime/export"
"io/ioutil"
"log"
"os"
"github.com/dustin/go-humanize"
"github.com/logrusorgru/aurora"
"github.com/wagoodman/dive/filetree"
"github.com/wagoodman/dive/image"
"github.com/wagoodman/dive/ui"
"github.com/wagoodman/dive/dive/filetree"
"github.com/wagoodman/dive/dive/image"
"github.com/wagoodman/dive/runtime/ui"
"github.com/wagoodman/dive/utils"
)
func title(s string) string {
return aurora.Bold(s).String()
}
func runCi(analysis *image.AnalysisResult, options Options) {
fmt.Printf(" efficiency: %2.4f %%\n", analysis.Efficiency*100)
fmt.Printf(" wastedBytes: %d bytes (%s)\n", analysis.WastedBytes, humanize.Bytes(analysis.WastedBytes))
fmt.Printf(" userWastedPercent: %2.4f %%\n", analysis.WastedUserPercent*100)
evaluator := NewCiEvaluator(options.CiConfig)
evaluator := ci.NewCiEvaluator(options.CiConfig)
pass := evaluator.Evaluate(analysis)
evaluator.Report()
@ -63,13 +61,13 @@ func Run(options Options) {
doBuild := len(options.BuildArgs) > 0
if doBuild {
fmt.Println(title("Building image..."))
fmt.Println(utils.TitleFormat("Building image..."))
options.ImageId = runBuild(options.BuildArgs)
}
analyzer := image.GetAnalyzer(options.ImageId)
analyzer := dive.GetAnalyzer(options.ImageId)
fmt.Println(title("Fetching image...") + " (this can take a while with large images)")
fmt.Println(utils.TitleFormat("Fetching image...") + " (this can take a while with large images)")
reader, err := analyzer.Fetch()
if err != nil {
fmt.Printf("cannot fetch image: %v\n", err)
@ -77,7 +75,7 @@ func Run(options Options) {
}
defer reader.Close()
fmt.Println(title("Parsing image..."))
fmt.Println(utils.TitleFormat("Parsing image..."))
err = analyzer.Parse(reader)
if err != nil {
fmt.Printf("cannot parse image: %v\n", err)
@ -85,9 +83,9 @@ func Run(options Options) {
}
if doExport {
fmt.Println(title(fmt.Sprintf("Analyzing image... (export to '%s')", options.ExportFile)))
fmt.Println(utils.TitleFormat(fmt.Sprintf("Analyzing image... (export to '%s')", options.ExportFile)))
} else {
fmt.Println(title("Analyzing image..."))
fmt.Println(utils.TitleFormat("Analyzing image..."))
}
result, err := analyzer.Analyze()
@ -97,7 +95,7 @@ func Run(options Options) {
}
if doExport {
err = newExport(result).toFile(options.ExportFile)
err = export.NewExport(result).ToFile(options.ExportFile)
if err != nil {
fmt.Printf("cannot write export file: %v\n", err)
utils.Exit(1)
@ -111,7 +109,7 @@ func Run(options Options) {
utils.Exit(0)
}
fmt.Println(title("Building cache..."))
fmt.Println(utils.TitleFormat("Building cache..."))
cache := filetree.NewFileTreeCache(result.RefTrees)
cache.Build()

View file

@ -2,13 +2,13 @@ package ui
import (
"fmt"
"github.com/wagoodman/dive/dive/filetree"
"strconv"
"strings"
"github.com/dustin/go-humanize"
"github.com/jroimartin/gocui"
"github.com/lunixbochs/vtclean"
"github.com/wagoodman/dive/filetree"
)
// DetailsController holds the UI objects and data models for populating the lower-left pane. Specifically the pane that

View file

@ -11,7 +11,7 @@ import (
"github.com/wagoodman/keybinding"
"github.com/jroimartin/gocui"
"github.com/wagoodman/dive/filetree"
"github.com/wagoodman/dive/dive/filetree"
)
const (

View file

@ -11,7 +11,7 @@ import (
"github.com/wagoodman/dive/utils"
"github.com/lunixbochs/vtclean"
"github.com/wagoodman/dive/filetree"
"github.com/wagoodman/dive/dive/filetree"
)
// FileTreeViewModel holds the UI objects and data models for populating the right pane. Specifically the pane that

View file

@ -2,6 +2,7 @@ package ui
import (
"bytes"
"github.com/wagoodman/dive/dive/image/docker"
"io/ioutil"
"os"
"path/filepath"
@ -10,8 +11,7 @@ import (
"github.com/fatih/color"
"github.com/sergi/go-diff/diffmatchpatch"
"github.com/wagoodman/dive/filetree"
"github.com/wagoodman/dive/image"
"github.com/wagoodman/dive/dive/filetree"
)
const allowTestDataCapture = false
@ -73,7 +73,7 @@ func assertTestData(t *testing.T, actualBytes []byte) {
}
func initializeTestViewModel(t *testing.T) *FileTreeViewModel {
result, err := image.TestLoadDockerImageTar("../.data/test-docker-image.tar")
result, err := docker.TestLoadDockerImageTar("../../.data/test-docker-image.tar")
if err != nil {
t.Fatalf("%s: unable to fetch analysis: %v", t.Name(), err)
}

View file

@ -2,13 +2,13 @@ package ui
import (
"fmt"
"github.com/wagoodman/dive/dive/image"
"strings"
"github.com/jroimartin/gocui"
"github.com/lunixbochs/vtclean"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
"github.com/wagoodman/dive/image"
"github.com/wagoodman/dive/utils"
"github.com/wagoodman/keybinding"
)
@ -282,7 +282,6 @@ func (controller *LayerController) Render() error {
controller.header.Clear()
width, _ := g.Size()
headerStr := fmt.Sprintf("[%s]%s\n", title, strings.Repeat("─", width*2))
// headerStr += fmt.Sprintf("Cmp "+image.LayerFormat, "Layer Digest", "Size", "Command")
headerStr += fmt.Sprintf("Cmp"+image.LayerFormat, "Size", "Command")
_, _ = fmt.Fprintln(controller.header, Formatting.Header(vtclean.Clean(headerStr, false)))

View file

@ -2,13 +2,13 @@ package ui
import (
"errors"
"github.com/wagoodman/dive/dive/image"
"github.com/fatih/color"
"github.com/jroimartin/gocui"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
"github.com/wagoodman/dive/filetree"
"github.com/wagoodman/dive/image"
"github.com/wagoodman/dive/dive/filetree"
"github.com/wagoodman/dive/utils"
"github.com/wagoodman/keybinding"
)

9
utils/format.go Normal file
View file

@ -0,0 +1,9 @@
package utils
import (
"github.com/logrusorgru/aurora"
)
func TitleFormat(s string) string {
return aurora.Bold(s).String()
}