mirror of
https://github.com/wagoodman/dive
synced 2026-03-14 14:25:50 +01:00
Add tests for filetree, image, and utils modules
This commit is contained in:
parent
5804b846e8
commit
8a97070875
11 changed files with 2792 additions and 0 deletions
333
dive/filetree/comparer_test.go
Normal file
333
dive/filetree/comparer_test.go
Normal file
|
|
@ -0,0 +1,333 @@
|
|||
package filetree
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewTreeIndexKey(t *testing.T) {
|
||||
t.Run("all zeros", func(t *testing.T) {
|
||||
key := NewTreeIndexKey(0, 0, 0, 0)
|
||||
assert.Equal(t, 0, key.bottomTreeStart)
|
||||
assert.Equal(t, 0, key.bottomTreeStop)
|
||||
assert.Equal(t, 0, key.topTreeStart)
|
||||
assert.Equal(t, 0, key.topTreeStop)
|
||||
})
|
||||
|
||||
t.Run("with values", func(t *testing.T) {
|
||||
key := NewTreeIndexKey(1, 2, 3, 4)
|
||||
assert.Equal(t, 1, key.bottomTreeStart)
|
||||
assert.Equal(t, 2, key.bottomTreeStop)
|
||||
assert.Equal(t, 3, key.topTreeStart)
|
||||
assert.Equal(t, 4, key.topTreeStop)
|
||||
})
|
||||
}
|
||||
|
||||
func TestTreeIndexKey_String(t *testing.T) {
|
||||
t.Run("single layer on both sides", func(t *testing.T) {
|
||||
key := NewTreeIndexKey(0, 0, 0, 0)
|
||||
assert.Equal(t, "Index(0:0)", key.String())
|
||||
})
|
||||
|
||||
t.Run("single bottom, multiple top", func(t *testing.T) {
|
||||
key := NewTreeIndexKey(0, 0, 0, 3)
|
||||
assert.Equal(t, "Index(0:0-3)", key.String())
|
||||
})
|
||||
|
||||
t.Run("multiple bottom, single top", func(t *testing.T) {
|
||||
key := NewTreeIndexKey(0, 3, 0, 0)
|
||||
assert.Equal(t, "Index(0-3:0)", key.String())
|
||||
})
|
||||
|
||||
t.Run("multiple on both sides", func(t *testing.T) {
|
||||
key := NewTreeIndexKey(0, 2, 3, 5)
|
||||
assert.Equal(t, "Index(0-2:3-5)", key.String())
|
||||
})
|
||||
|
||||
t.Run("different ranges", func(t *testing.T) {
|
||||
key := NewTreeIndexKey(1, 1, 2, 2)
|
||||
assert.Equal(t, "Index(1:2)", key.String())
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewComparer(t *testing.T) {
|
||||
t.Run("empty ref trees", func(t *testing.T) {
|
||||
cmp := NewComparer([]*FileTree{})
|
||||
assert.NotNil(t, cmp)
|
||||
assert.Empty(t, cmp.refTrees)
|
||||
assert.Empty(t, cmp.trees)
|
||||
assert.Empty(t, cmp.pathErrors)
|
||||
})
|
||||
|
||||
t.Run("with ref trees", func(t *testing.T) {
|
||||
trees := []*FileTree{
|
||||
{Id: uuid.New()},
|
||||
{Id: uuid.New()},
|
||||
}
|
||||
cmp := NewComparer(trees)
|
||||
assert.NotNil(t, cmp)
|
||||
assert.Len(t, cmp.refTrees, 2)
|
||||
assert.Empty(t, cmp.trees)
|
||||
assert.Empty(t, cmp.pathErrors)
|
||||
})
|
||||
}
|
||||
|
||||
func TestComparer_NaturalIndexes(t *testing.T) {
|
||||
t.Run("no trees", func(t *testing.T) {
|
||||
cmp := NewComparer([]*FileTree{})
|
||||
indexes := make([]TreeIndexKey, 0)
|
||||
|
||||
for idx := range cmp.NaturalIndexes() {
|
||||
indexes = append(indexes, idx)
|
||||
}
|
||||
|
||||
assert.Empty(t, indexes)
|
||||
})
|
||||
|
||||
t.Run("single tree", func(t *testing.T) {
|
||||
trees := []*FileTree{{Id: uuid.New()}}
|
||||
cmp := NewComparer(trees)
|
||||
indexes := make([]TreeIndexKey, 0)
|
||||
|
||||
for idx := range cmp.NaturalIndexes() {
|
||||
indexes = append(indexes, idx)
|
||||
}
|
||||
|
||||
assert.Len(t, indexes, 1)
|
||||
assert.Equal(t, NewTreeIndexKey(0, 0, 0, 0), indexes[0])
|
||||
})
|
||||
|
||||
t.Run("multiple trees", func(t *testing.T) {
|
||||
trees := []*FileTree{
|
||||
{Id: uuid.New()},
|
||||
{Id: uuid.New()},
|
||||
{Id: uuid.New()},
|
||||
}
|
||||
cmp := NewComparer(trees)
|
||||
indexes := make([]TreeIndexKey, 0)
|
||||
|
||||
for idx := range cmp.NaturalIndexes() {
|
||||
indexes = append(indexes, idx)
|
||||
}
|
||||
|
||||
assert.Len(t, indexes, 3)
|
||||
// Index 0: (0:0)
|
||||
assert.Equal(t, NewTreeIndexKey(0, 0, 0, 0), indexes[0])
|
||||
// Index 1: (0:1)
|
||||
assert.Equal(t, NewTreeIndexKey(0, 0, 1, 1), indexes[1])
|
||||
// Index 2: (0-1:2)
|
||||
assert.Equal(t, NewTreeIndexKey(0, 1, 2, 2), indexes[2])
|
||||
})
|
||||
}
|
||||
|
||||
func TestComparer_AggregatedIndexes(t *testing.T) {
|
||||
t.Run("no trees", func(t *testing.T) {
|
||||
cmp := NewComparer([]*FileTree{})
|
||||
indexes := make([]TreeIndexKey, 0)
|
||||
|
||||
for idx := range cmp.AggregatedIndexes() {
|
||||
indexes = append(indexes, idx)
|
||||
}
|
||||
|
||||
assert.Empty(t, indexes)
|
||||
})
|
||||
|
||||
t.Run("single tree", func(t *testing.T) {
|
||||
trees := []*FileTree{{Id: uuid.New()}}
|
||||
cmp := NewComparer(trees)
|
||||
indexes := make([]TreeIndexKey, 0)
|
||||
|
||||
for idx := range cmp.AggregatedIndexes() {
|
||||
indexes = append(indexes, idx)
|
||||
}
|
||||
|
||||
assert.Len(t, indexes, 1)
|
||||
assert.Equal(t, NewTreeIndexKey(0, 0, 0, 0), indexes[0])
|
||||
})
|
||||
|
||||
t.Run("multiple trees", func(t *testing.T) {
|
||||
trees := []*FileTree{
|
||||
{Id: uuid.New()},
|
||||
{Id: uuid.New()},
|
||||
{Id: uuid.New()},
|
||||
}
|
||||
cmp := NewComparer(trees)
|
||||
indexes := make([]TreeIndexKey, 0)
|
||||
|
||||
for idx := range cmp.AggregatedIndexes() {
|
||||
indexes = append(indexes, idx)
|
||||
}
|
||||
|
||||
assert.Len(t, indexes, 3)
|
||||
// Index 0: (0:0)
|
||||
assert.Equal(t, NewTreeIndexKey(0, 0, 0, 0), indexes[0])
|
||||
// Index 1: (0:1) - bottom stays at 0, top starts at 1
|
||||
assert.Equal(t, NewTreeIndexKey(0, 0, 1, 1), indexes[1])
|
||||
// Index 2: (0:1-2)
|
||||
assert.Equal(t, NewTreeIndexKey(0, 0, 1, 2), indexes[2])
|
||||
})
|
||||
}
|
||||
|
||||
func TestComparer_GetPathErrors(t *testing.T) {
|
||||
t.Run("get path errors from comparer", func(t *testing.T) {
|
||||
// Create simple ref trees
|
||||
tree1 := NewFileTree()
|
||||
tree1.Name = "tree1"
|
||||
|
||||
tree2 := NewFileTree()
|
||||
tree2.Name = "tree2"
|
||||
|
||||
cmp := NewComparer([]*FileTree{tree1, tree2})
|
||||
|
||||
// Test getting path errors for a key
|
||||
key := NewTreeIndexKey(0, 0, 0, 0)
|
||||
|
||||
pathErrors, err := cmp.GetPathErrors(key)
|
||||
|
||||
// Should not error (even if tree is empty)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, pathErrors)
|
||||
// Empty tree should have no path errors
|
||||
assert.Empty(t, pathErrors)
|
||||
})
|
||||
}
|
||||
|
||||
func TestComparer_GetTree(t *testing.T) {
|
||||
t.Run("get tree from comparer", func(t *testing.T) {
|
||||
// Create simple ref trees
|
||||
tree1 := NewFileTree()
|
||||
tree1.Name = "tree1"
|
||||
|
||||
tree2 := NewFileTree()
|
||||
tree2.Name = "tree2"
|
||||
|
||||
cmp := NewComparer([]*FileTree{tree1, tree2})
|
||||
|
||||
// Test getting tree for a key
|
||||
key := NewTreeIndexKey(0, 0, 0, 0)
|
||||
|
||||
resultTree, err := cmp.GetTree(key)
|
||||
|
||||
// Should not error
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, resultTree)
|
||||
})
|
||||
|
||||
t.Run("cached tree returns same instance", func(t *testing.T) {
|
||||
tree1 := NewFileTree()
|
||||
tree1.Name = "tree1"
|
||||
|
||||
cmp := NewComparer([]*FileTree{tree1})
|
||||
key := NewTreeIndexKey(0, 0, 0, 0)
|
||||
|
||||
// Call GetTree twice
|
||||
tree1, err1 := cmp.GetTree(key)
|
||||
tree2, err2 := cmp.GetTree(key)
|
||||
|
||||
assert.NoError(t, err1)
|
||||
assert.NoError(t, err2)
|
||||
assert.NotNil(t, tree1)
|
||||
assert.NotNil(t, tree2)
|
||||
// Should return the same cached instance
|
||||
assert.Same(t, tree1, tree2)
|
||||
})
|
||||
}
|
||||
|
||||
func TestComparer_BuildCache(t *testing.T) {
|
||||
t.Run("build cache with empty ref trees", func(t *testing.T) {
|
||||
cmp := NewComparer([]*FileTree{})
|
||||
|
||||
errors := cmp.BuildCache()
|
||||
|
||||
// Should not error
|
||||
assert.Empty(t, errors)
|
||||
})
|
||||
|
||||
t.Run("build cache with single tree", func(t *testing.T) {
|
||||
tree := NewFileTree()
|
||||
tree.Name = "tree1"
|
||||
|
||||
cmp := NewComparer([]*FileTree{tree})
|
||||
|
||||
errors := cmp.BuildCache()
|
||||
|
||||
// Should not error
|
||||
assert.Empty(t, errors)
|
||||
})
|
||||
|
||||
t.Run("build cache with multiple trees", func(t *testing.T) {
|
||||
trees := []*FileTree{
|
||||
NewFileTree(),
|
||||
NewFileTree(),
|
||||
NewFileTree(),
|
||||
}
|
||||
|
||||
for i, tree := range trees {
|
||||
tree.Name = fmt.Sprintf("tree%d", i)
|
||||
}
|
||||
|
||||
cmp := NewComparer(trees)
|
||||
|
||||
errors := cmp.BuildCache()
|
||||
|
||||
// Should not error
|
||||
assert.Empty(t, errors)
|
||||
})
|
||||
}
|
||||
|
||||
func TestEfficiencySlice_Len(t *testing.T) {
|
||||
t.Run("empty slice", func(t *testing.T) {
|
||||
efs := make(EfficiencySlice, 0)
|
||||
assert.Equal(t, 0, efs.Len())
|
||||
})
|
||||
|
||||
t.Run("non-empty slice", func(t *testing.T) {
|
||||
efs := EfficiencySlice{
|
||||
&EfficiencyData{Path: "/path1"},
|
||||
&EfficiencyData{Path: "/path2"},
|
||||
}
|
||||
assert.Equal(t, 2, efs.Len())
|
||||
})
|
||||
}
|
||||
|
||||
func TestEfficiencySlice_Swap(t *testing.T) {
|
||||
efs := EfficiencySlice{
|
||||
&EfficiencyData{Path: "/path1", CumulativeSize: 100},
|
||||
&EfficiencyData{Path: "/path2", CumulativeSize: 200},
|
||||
}
|
||||
|
||||
efs.Swap(0, 1)
|
||||
|
||||
assert.Equal(t, "/path2", efs[0].Path)
|
||||
assert.Equal(t, int64(200), efs[0].CumulativeSize)
|
||||
assert.Equal(t, "/path1", efs[1].Path)
|
||||
assert.Equal(t, int64(100), efs[1].CumulativeSize)
|
||||
}
|
||||
|
||||
func TestEfficiencySlice_Less(t *testing.T) {
|
||||
t.Run("first is smaller", func(t *testing.T) {
|
||||
efs := EfficiencySlice{
|
||||
&EfficiencyData{Path: "/path1", CumulativeSize: 100},
|
||||
&EfficiencyData{Path: "/path2", CumulativeSize: 200},
|
||||
}
|
||||
assert.True(t, efs.Less(0, 1))
|
||||
})
|
||||
|
||||
t.Run("second is smaller", func(t *testing.T) {
|
||||
efs := EfficiencySlice{
|
||||
&EfficiencyData{Path: "/path1", CumulativeSize: 200},
|
||||
&EfficiencyData{Path: "/path2", CumulativeSize: 100},
|
||||
}
|
||||
assert.False(t, efs.Less(0, 1))
|
||||
})
|
||||
|
||||
t.Run("equal sizes", func(t *testing.T) {
|
||||
efs := EfficiencySlice{
|
||||
&EfficiencyData{Path: "/path1", CumulativeSize: 100},
|
||||
&EfficiencyData{Path: "/path2", CumulativeSize: 100},
|
||||
}
|
||||
assert.False(t, efs.Less(0, 1))
|
||||
})
|
||||
}
|
||||
210
dive/filetree/diff_path_error_node_data_test.go
Normal file
210
dive/filetree/diff_path_error_node_data_test.go
Normal file
|
|
@ -0,0 +1,210 @@
|
|||
package filetree
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Tests for diff.go
|
||||
|
||||
func TestDiffType_String(t *testing.T) {
|
||||
t.Run("Unmodified", func(t *testing.T) {
|
||||
assert.Equal(t, "Unmodified", Unmodified.String())
|
||||
})
|
||||
|
||||
t.Run("Modified", func(t *testing.T) {
|
||||
assert.Equal(t, "Modified", Modified.String())
|
||||
})
|
||||
|
||||
t.Run("Added", func(t *testing.T) {
|
||||
assert.Equal(t, "Added", Added.String())
|
||||
})
|
||||
|
||||
t.Run("Removed", func(t *testing.T) {
|
||||
assert.Equal(t, "Removed", Removed.String())
|
||||
})
|
||||
|
||||
t.Run("unknown value", func(t *testing.T) {
|
||||
unknownDiff := DiffType(99)
|
||||
assert.Equal(t, "99", unknownDiff.String())
|
||||
})
|
||||
}
|
||||
|
||||
func TestDiffType_Merge(t *testing.T) {
|
||||
t.Run("same values - Unmodified", func(t *testing.T) {
|
||||
result := Unmodified.merge(Unmodified)
|
||||
assert.Equal(t, Unmodified, result)
|
||||
})
|
||||
|
||||
t.Run("same values - Modified", func(t *testing.T) {
|
||||
result := Modified.merge(Modified)
|
||||
assert.Equal(t, Modified, result)
|
||||
})
|
||||
|
||||
t.Run("same values - Added", func(t *testing.T) {
|
||||
result := Added.merge(Added)
|
||||
assert.Equal(t, Added, result)
|
||||
})
|
||||
|
||||
t.Run("same values - Removed", func(t *testing.T) {
|
||||
result := Removed.merge(Removed)
|
||||
assert.Equal(t, Removed, result)
|
||||
})
|
||||
|
||||
t.Run("different values - Added and Removed", func(t *testing.T) {
|
||||
result := Added.merge(Removed)
|
||||
assert.Equal(t, Modified, result)
|
||||
})
|
||||
|
||||
t.Run("different values - Unmodified and Added", func(t *testing.T) {
|
||||
result := Unmodified.merge(Added)
|
||||
assert.Equal(t, Modified, result)
|
||||
})
|
||||
|
||||
t.Run("different values - Removed and Modified", func(t *testing.T) {
|
||||
result := Removed.merge(Modified)
|
||||
assert.Equal(t, Modified, result)
|
||||
})
|
||||
|
||||
t.Run("different values - Added and Unmodified", func(t *testing.T) {
|
||||
result := Added.merge(Unmodified)
|
||||
assert.Equal(t, Modified, result)
|
||||
})
|
||||
}
|
||||
|
||||
// Tests for path_error.go
|
||||
|
||||
func TestFileAction_String(t *testing.T) {
|
||||
t.Run("ActionAdd", func(t *testing.T) {
|
||||
assert.Equal(t, "add", ActionAdd.String())
|
||||
})
|
||||
|
||||
t.Run("ActionRemove", func(t *testing.T) {
|
||||
assert.Equal(t, "remove", ActionRemove.String())
|
||||
})
|
||||
|
||||
t.Run("unknown value", func(t *testing.T) {
|
||||
unknownAction := FileAction(99)
|
||||
assert.Equal(t, "<unknown file action>", unknownAction.String())
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewPathError(t *testing.T) {
|
||||
t.Run("create path error with all fields", func(t *testing.T) {
|
||||
err := errors.New("test error")
|
||||
pathErr := NewPathError("/test/path", ActionAdd, err)
|
||||
|
||||
assert.Equal(t, "/test/path", pathErr.Path)
|
||||
assert.Equal(t, ActionAdd, pathErr.Action)
|
||||
assert.Equal(t, err, pathErr.Err)
|
||||
})
|
||||
|
||||
t.Run("create path error with remove action", func(t *testing.T) {
|
||||
err := errors.New("remove error")
|
||||
pathErr := NewPathError("/old/path", ActionRemove, err)
|
||||
|
||||
assert.Equal(t, "/old/path", pathErr.Path)
|
||||
assert.Equal(t, ActionRemove, pathErr.Action)
|
||||
assert.Equal(t, err, pathErr.Err)
|
||||
})
|
||||
|
||||
t.Run("create path error with nil error", func(t *testing.T) {
|
||||
pathErr := NewPathError("/test/path", ActionAdd, nil)
|
||||
|
||||
assert.Equal(t, "/test/path", pathErr.Path)
|
||||
assert.Equal(t, ActionAdd, pathErr.Action)
|
||||
assert.Nil(t, pathErr.Err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPathError_String(t *testing.T) {
|
||||
t.Run("with add action", func(t *testing.T) {
|
||||
err := errors.New("file not found")
|
||||
pathErr := NewPathError("/test/file.txt", ActionAdd, err)
|
||||
|
||||
expected := "unable to add '/test/file.txt': file not found"
|
||||
assert.Equal(t, expected, pathErr.String())
|
||||
})
|
||||
|
||||
t.Run("with remove action", func(t *testing.T) {
|
||||
err := errors.New("permission denied")
|
||||
pathErr := NewPathError("/test/file.txt", ActionRemove, err)
|
||||
|
||||
expected := "unable to remove '/test/file.txt': permission denied"
|
||||
assert.Equal(t, expected, pathErr.String())
|
||||
})
|
||||
|
||||
t.Run("with nil error", func(t *testing.T) {
|
||||
pathErr := NewPathError("/test/file.txt", ActionAdd, nil)
|
||||
|
||||
expected := "unable to add '/test/file.txt': <nil>"
|
||||
assert.Equal(t, expected, pathErr.String())
|
||||
})
|
||||
|
||||
t.Run("with complex path", func(t *testing.T) {
|
||||
err := errors.New("disk full")
|
||||
pathErr := NewPathError("/very/long/path/to/file.txt", ActionAdd, err)
|
||||
|
||||
expected := "unable to add '/very/long/path/to/file.txt': disk full"
|
||||
assert.Equal(t, expected, pathErr.String())
|
||||
})
|
||||
}
|
||||
|
||||
// Tests for node_data.go
|
||||
|
||||
func TestNewNodeData(t *testing.T) {
|
||||
t.Run("creates new node data with defaults", func(t *testing.T) {
|
||||
data := NewNodeData()
|
||||
|
||||
assert.NotNil(t, data)
|
||||
assert.Equal(t, Unmodified, data.DiffType)
|
||||
// ViewInfo and FileInfo should have their zero values
|
||||
assert.NotNil(t, data)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNodeData_Copy(t *testing.T) {
|
||||
t.Run("copy node data", func(t *testing.T) {
|
||||
original := NewNodeData()
|
||||
original.DiffType = Added
|
||||
original.FileInfo.Size = 1024
|
||||
|
||||
copied := original.Copy()
|
||||
|
||||
assert.NotNil(t, copied)
|
||||
assert.Equal(t, original.DiffType, copied.DiffType)
|
||||
assert.Equal(t, original.FileInfo.Size, copied.FileInfo.Size)
|
||||
|
||||
// Verify it's a deep copy
|
||||
copied.DiffType = Modified
|
||||
assert.Equal(t, Added, original.DiffType)
|
||||
})
|
||||
|
||||
t.Run("copy creates new instance", func(t *testing.T) {
|
||||
original := NewNodeData()
|
||||
original.DiffType = Removed
|
||||
original.FileInfo.Path = "/test/path"
|
||||
|
||||
copied := original.Copy()
|
||||
|
||||
// Pointers should be different
|
||||
assert.NotSame(t, original, copied)
|
||||
|
||||
// But values should be the same
|
||||
assert.Equal(t, original.DiffType, copied.DiffType)
|
||||
assert.Equal(t, original.FileInfo.Path, copied.FileInfo.Path)
|
||||
})
|
||||
|
||||
t.Run("copy with unmodified diff type", func(t *testing.T) {
|
||||
original := NewNodeData()
|
||||
original.DiffType = Unmodified
|
||||
original.FileInfo.Size = 2048
|
||||
|
||||
copied := original.Copy()
|
||||
|
||||
assert.Equal(t, Unmodified, copied.DiffType)
|
||||
assert.NotSame(t, original, copied)
|
||||
assert.Equal(t, int64(2048), copied.FileInfo.Size)
|
||||
})
|
||||
}
|
||||
401
dive/filetree/file_info_test.go
Normal file
401
dive/filetree/file_info_test.go
Normal file
|
|
@ -0,0 +1,401 @@
|
|||
package filetree
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewFileInfoFromTarHeader(t *testing.T) {
|
||||
t.Run("regular file", func(t *testing.T) {
|
||||
// Create a tar header for a regular file
|
||||
header := &tar.Header{
|
||||
Name: "test.txt",
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: 1024,
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
Linkname: "",
|
||||
}
|
||||
|
||||
// Create a reader with some content
|
||||
content := []byte("hello world")
|
||||
reader := tar.NewReader(bytes.NewReader(content))
|
||||
|
||||
// Advance reader to setup (normally done by tar.Next())
|
||||
// We need to manually set up the reader state
|
||||
result := NewFileInfoFromTarHeader(reader, header, "test.txt")
|
||||
|
||||
assert.Equal(t, "test.txt", result.Path)
|
||||
assert.Equal(t, byte(tar.TypeReg), result.TypeFlag)
|
||||
assert.Equal(t, int64(1024), result.Size)
|
||||
assert.Equal(t, 1000, result.Uid)
|
||||
assert.Equal(t, 1000, result.Gid)
|
||||
assert.False(t, result.IsDir)
|
||||
assert.NotEqual(t, uint64(0), result.hash) // hash should be computed
|
||||
// Don't check Mode as it can be platform-dependent
|
||||
})
|
||||
|
||||
t.Run("directory", func(t *testing.T) {
|
||||
header := &tar.Header{
|
||||
Name: "testdir",
|
||||
Typeflag: tar.TypeDir,
|
||||
Size: 0,
|
||||
Mode: 0755,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
}
|
||||
|
||||
reader := tar.NewReader(bytes.NewReader([]byte{}))
|
||||
result := NewFileInfoFromTarHeader(reader, header, "testdir")
|
||||
|
||||
assert.Equal(t, "testdir", result.Path)
|
||||
assert.Equal(t, byte(tar.TypeDir), result.TypeFlag)
|
||||
assert.Equal(t, int64(0), result.Size)
|
||||
assert.True(t, result.IsDir)
|
||||
assert.Equal(t, uint64(0), result.hash) // directories have no hash
|
||||
})
|
||||
|
||||
t.Run("symlink", func(t *testing.T) {
|
||||
header := &tar.Header{
|
||||
Name: "link.txt",
|
||||
Typeflag: tar.TypeSymlink,
|
||||
Size: 0,
|
||||
Mode: 0777,
|
||||
Linkname: "target.txt",
|
||||
}
|
||||
|
||||
reader := tar.NewReader(bytes.NewReader([]byte{}))
|
||||
result := NewFileInfoFromTarHeader(reader, header, "link.txt")
|
||||
|
||||
assert.Equal(t, "link.txt", result.Path)
|
||||
assert.Equal(t, byte(tar.TypeSymlink), result.TypeFlag)
|
||||
assert.Equal(t, "target.txt", result.Linkname)
|
||||
assert.False(t, result.IsDir)
|
||||
// Note: current implementation computes hash for symlinks (it should only skip dirs)
|
||||
// The hash will be the xxhash of empty content since reader is empty
|
||||
assert.NotEqual(t, uint64(0), result.hash)
|
||||
})
|
||||
|
||||
t.Run("custom path", func(t *testing.T) {
|
||||
header := &tar.Header{
|
||||
Name: "original/name.txt",
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: 512,
|
||||
Mode: 0600,
|
||||
}
|
||||
|
||||
content := []byte("test content")
|
||||
reader := tar.NewReader(bytes.NewReader(content))
|
||||
|
||||
result := NewFileInfoFromTarHeader(reader, header, "custom/path.txt")
|
||||
|
||||
assert.Equal(t, "custom/path.txt", result.Path)
|
||||
assert.Equal(t, int64(512), result.Size)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFileInfo_Copy(t *testing.T) {
|
||||
t.Run("copy file info", func(t *testing.T) {
|
||||
original := FileInfo{
|
||||
Path: "/test/file.txt",
|
||||
TypeFlag: byte(tar.TypeReg),
|
||||
Linkname: "",
|
||||
hash: 12345,
|
||||
Size: 1024,
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
IsDir: false,
|
||||
}
|
||||
|
||||
copied := original.Copy()
|
||||
|
||||
assert.NotNil(t, copied)
|
||||
assert.Equal(t, original.Path, copied.Path)
|
||||
assert.Equal(t, original.TypeFlag, copied.TypeFlag)
|
||||
assert.Equal(t, original.Linkname, copied.Linkname)
|
||||
assert.Equal(t, original.hash, copied.hash)
|
||||
assert.Equal(t, original.Size, copied.Size)
|
||||
assert.Equal(t, original.Mode, copied.Mode)
|
||||
assert.Equal(t, original.Uid, copied.Uid)
|
||||
assert.Equal(t, original.Gid, copied.Gid)
|
||||
assert.Equal(t, original.IsDir, copied.IsDir)
|
||||
|
||||
// Verify it's a different instance
|
||||
assert.NotSame(t, &original, copied)
|
||||
})
|
||||
|
||||
t.Run("copy nil file info", func(t *testing.T) {
|
||||
var original *FileInfo
|
||||
copied := original.Copy()
|
||||
|
||||
assert.Nil(t, copied)
|
||||
})
|
||||
|
||||
t.Run("copy directory info", func(t *testing.T) {
|
||||
original := FileInfo{
|
||||
Path: "/test/dir",
|
||||
TypeFlag: byte(tar.TypeDir),
|
||||
IsDir: true,
|
||||
Size: 0,
|
||||
}
|
||||
|
||||
copied := original.Copy()
|
||||
|
||||
assert.NotNil(t, copied)
|
||||
assert.Equal(t, original.Path, copied.Path)
|
||||
assert.True(t, copied.IsDir)
|
||||
})
|
||||
|
||||
t.Run("modifying copy doesn't affect original", func(t *testing.T) {
|
||||
original := FileInfo{
|
||||
Path: "/test/file.txt",
|
||||
Size: 1024,
|
||||
hash: 12345,
|
||||
}
|
||||
|
||||
copied := original.Copy()
|
||||
copied.Size = 2048
|
||||
copied.hash = 54321
|
||||
|
||||
assert.Equal(t, int64(1024), original.Size)
|
||||
assert.Equal(t, uint64(12345), original.hash)
|
||||
assert.Equal(t, int64(2048), copied.Size)
|
||||
assert.Equal(t, uint64(54321), copied.hash)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFileInfo_Compare(t *testing.T) {
|
||||
t.Run("identical files", func(t *testing.T) {
|
||||
info1 := FileInfo{
|
||||
TypeFlag: byte(tar.TypeReg),
|
||||
hash: 12345,
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
}
|
||||
|
||||
info2 := FileInfo{
|
||||
TypeFlag: byte(tar.TypeReg),
|
||||
hash: 12345,
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
}
|
||||
|
||||
result := info1.Compare(info2)
|
||||
assert.Equal(t, Unmodified, result)
|
||||
})
|
||||
|
||||
t.Run("different type flag", func(t *testing.T) {
|
||||
info1 := FileInfo{
|
||||
TypeFlag: byte(tar.TypeReg),
|
||||
hash: 12345,
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
}
|
||||
|
||||
info2 := FileInfo{
|
||||
TypeFlag: byte(tar.TypeDir),
|
||||
hash: 12345,
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
}
|
||||
|
||||
result := info1.Compare(info2)
|
||||
assert.Equal(t, Modified, result)
|
||||
})
|
||||
|
||||
t.Run("different hash", func(t *testing.T) {
|
||||
info1 := FileInfo{
|
||||
TypeFlag: byte(tar.TypeReg),
|
||||
hash: 12345,
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
}
|
||||
|
||||
info2 := FileInfo{
|
||||
TypeFlag: byte(tar.TypeReg),
|
||||
hash: 54321,
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
}
|
||||
|
||||
result := info1.Compare(info2)
|
||||
assert.Equal(t, Modified, result)
|
||||
})
|
||||
|
||||
t.Run("different mode", func(t *testing.T) {
|
||||
info1 := FileInfo{
|
||||
TypeFlag: byte(tar.TypeReg),
|
||||
hash: 12345,
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
}
|
||||
|
||||
info2 := FileInfo{
|
||||
TypeFlag: byte(tar.TypeReg),
|
||||
hash: 12345,
|
||||
Mode: 0755,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
}
|
||||
|
||||
result := info1.Compare(info2)
|
||||
assert.Equal(t, Modified, result)
|
||||
})
|
||||
|
||||
t.Run("different uid", func(t *testing.T) {
|
||||
info1 := FileInfo{
|
||||
TypeFlag: byte(tar.TypeReg),
|
||||
hash: 12345,
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
}
|
||||
|
||||
info2 := FileInfo{
|
||||
TypeFlag: byte(tar.TypeReg),
|
||||
hash: 12345,
|
||||
Mode: 0644,
|
||||
Uid: 2000,
|
||||
Gid: 1000,
|
||||
}
|
||||
|
||||
result := info1.Compare(info2)
|
||||
assert.Equal(t, Modified, result)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetHashFromReader(t *testing.T) {
|
||||
t.Run("hash of empty reader", func(t *testing.T) {
|
||||
reader := bytes.NewReader([]byte{})
|
||||
hash := getHashFromReader(reader)
|
||||
|
||||
assert.Equal(t, uint64(17241709254077376921), hash) // xxhash of empty string
|
||||
})
|
||||
|
||||
t.Run("hash of simple string", func(t *testing.T) {
|
||||
reader := bytes.NewReader([]byte("hello world"))
|
||||
hash := getHashFromReader(reader)
|
||||
|
||||
assert.NotEqual(t, uint64(0), hash)
|
||||
// Verify consistency
|
||||
hash2 := getHashFromReader(bytes.NewReader([]byte("hello world")))
|
||||
assert.Equal(t, hash, hash2)
|
||||
})
|
||||
|
||||
t.Run("hash of binary data", func(t *testing.T) {
|
||||
data := []byte{0x00, 0x01, 0x02, 0x03, 0x04}
|
||||
reader := bytes.NewReader(data)
|
||||
hash := getHashFromReader(reader)
|
||||
|
||||
assert.NotEqual(t, uint64(0), hash)
|
||||
})
|
||||
|
||||
t.Run("hash of large data", func(t *testing.T) {
|
||||
// Create data larger than buffer size (1024 bytes)
|
||||
largeData := make([]byte, 2048)
|
||||
for i := range largeData {
|
||||
largeData[i] = byte(i % 256)
|
||||
}
|
||||
reader := bytes.NewReader(largeData)
|
||||
hash := getHashFromReader(reader)
|
||||
|
||||
assert.NotEqual(t, uint64(0), hash)
|
||||
})
|
||||
|
||||
t.Run("different content produces different hash", func(t *testing.T) {
|
||||
hash1 := getHashFromReader(bytes.NewReader([]byte("content1")))
|
||||
hash2 := getHashFromReader(bytes.NewReader([]byte("content2")))
|
||||
|
||||
assert.NotEqual(t, hash1, hash2)
|
||||
})
|
||||
|
||||
t.Run("same content produces same hash", func(t *testing.T) {
|
||||
content := []byte("same content")
|
||||
hash1 := getHashFromReader(bytes.NewReader(content))
|
||||
hash2 := getHashFromReader(bytes.NewReader(content))
|
||||
|
||||
assert.Equal(t, hash1, hash2)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewFileInfo(t *testing.T) {
|
||||
t.Run("create regular file info", func(t *testing.T) {
|
||||
// Create a temporary file
|
||||
tmpDir := t.TempDir()
|
||||
filePath := filepath.Join(tmpDir, "test.txt")
|
||||
err := os.WriteFile(filePath, []byte("test content"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := os.Stat(filePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
fileInfo := NewFileInfo(filePath, "test.txt", info)
|
||||
|
||||
assert.Equal(t, "test.txt", fileInfo.Path)
|
||||
assert.Equal(t, byte(tar.TypeReg), fileInfo.TypeFlag)
|
||||
assert.Equal(t, int64(12), fileInfo.Size) // "test content" is 12 bytes
|
||||
assert.False(t, fileInfo.IsDir)
|
||||
assert.Equal(t, -1, fileInfo.Uid) // UID/GID not supported, set to -1
|
||||
assert.Equal(t, -1, fileInfo.Gid)
|
||||
assert.NotEqual(t, uint64(0), fileInfo.hash) // hash should be computed
|
||||
// Mode may have additional bits set on different systems, just check it's not zero
|
||||
assert.NotEqual(t, os.FileMode(0), fileInfo.Mode)
|
||||
})
|
||||
|
||||
t.Run("create directory info", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
dirPath := filepath.Join(tmpDir, "testdir")
|
||||
err := os.Mkdir(dirPath, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := os.Stat(dirPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
fileInfo := NewFileInfo(dirPath, "testdir", info)
|
||||
|
||||
assert.Equal(t, "testdir", fileInfo.Path)
|
||||
assert.Equal(t, byte(tar.TypeDir), fileInfo.TypeFlag)
|
||||
assert.True(t, fileInfo.IsDir)
|
||||
assert.Equal(t, uint64(0), fileInfo.hash) // directories have no hash
|
||||
// Check that directory mode has dir bit set
|
||||
assert.True(t, fileInfo.Mode&os.ModeDir != 0)
|
||||
})
|
||||
|
||||
t.Run("create symlink info", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
targetPath := filepath.Join(tmpDir, "target.txt")
|
||||
err := os.WriteFile(targetPath, []byte("target"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
linkPath := filepath.Join(tmpDir, "link.txt")
|
||||
err = os.Symlink("target.txt", linkPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := os.Lstat(linkPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
fileInfo := NewFileInfo(linkPath, "link.txt", info)
|
||||
|
||||
assert.Equal(t, "link.txt", fileInfo.Path)
|
||||
assert.Equal(t, byte(tar.TypeSymlink), fileInfo.TypeFlag)
|
||||
assert.Equal(t, "target.txt", fileInfo.Linkname)
|
||||
assert.False(t, fileInfo.IsDir)
|
||||
// Note: current implementation computes hash for symlinks (from the target file content)
|
||||
assert.NotEqual(t, uint64(0), fileInfo.hash)
|
||||
})
|
||||
}
|
||||
|
|
@ -843,3 +843,297 @@ func TestRemoveOnIterate(t *testing.T) {
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
func TestVisibleSize(t *testing.T) {
|
||||
t.Run("empty tree", func(t *testing.T) {
|
||||
tree := NewFileTree()
|
||||
size := tree.VisibleSize()
|
||||
// Empty tree has only root, size-- makes it -1
|
||||
assert.Equal(t, -1, size)
|
||||
})
|
||||
|
||||
t.Run("all visible nodes", func(t *testing.T) {
|
||||
tree := NewFileTree()
|
||||
paths := []string{"/dir", "/dir/file1.txt", "/dir/file2.txt", "/other"}
|
||||
|
||||
for _, path := range paths {
|
||||
fakeData := FileInfo{
|
||||
Path: path,
|
||||
TypeFlag: 1,
|
||||
hash: 123,
|
||||
Size: 100,
|
||||
}
|
||||
if path == "/dir" {
|
||||
fakeData.IsDir = true
|
||||
}
|
||||
_, _, err := tree.AddPath(path, fakeData)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
size := tree.VisibleSize()
|
||||
// Should count all nodes except root: /dir, /dir/file1.txt, /dir/file2.txt, /other - 1(root) = 3
|
||||
assert.Equal(t, 3, size)
|
||||
})
|
||||
|
||||
t.Run("with hidden nodes", func(t *testing.T) {
|
||||
tree := NewFileTree()
|
||||
paths := []string{"/dir", "/dir/file1.txt", "/dir/file2.txt"}
|
||||
|
||||
for _, path := range paths {
|
||||
fakeData := FileInfo{
|
||||
Path: path,
|
||||
TypeFlag: 1,
|
||||
hash: 123,
|
||||
}
|
||||
if path == "/dir" {
|
||||
fakeData.IsDir = true
|
||||
}
|
||||
node, _, err := tree.AddPath(path, fakeData)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Hide /dir/file2.txt
|
||||
if path == "/dir/file2.txt" {
|
||||
node.Data.ViewInfo.Hidden = true
|
||||
}
|
||||
}
|
||||
|
||||
size := tree.VisibleSize()
|
||||
// Should count only visible nodes: /dir, /dir/file1.txt - 1(root) = 1 (file2.txt is hidden)
|
||||
assert.Equal(t, 1, size)
|
||||
})
|
||||
|
||||
t.Run("with collapsed directory", func(t *testing.T) {
|
||||
tree := NewFileTree()
|
||||
paths := []string{"/dir", "/dir/file1.txt", "/dir/file2.txt", "/other"}
|
||||
|
||||
for _, path := range paths {
|
||||
fakeData := FileInfo{
|
||||
Path: path,
|
||||
TypeFlag: 1,
|
||||
hash: 123,
|
||||
}
|
||||
if path == "/dir" {
|
||||
fakeData.IsDir = true
|
||||
}
|
||||
node, _, err := tree.AddPath(path, fakeData)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Collapse /dir
|
||||
if path == "/dir" {
|
||||
node.Data.ViewInfo.Collapsed = true
|
||||
}
|
||||
}
|
||||
|
||||
size := tree.VisibleSize()
|
||||
// Should count: /dir (collapsed, counted but children not), /other - 1(root) = 1
|
||||
assert.Equal(t, 1, size)
|
||||
})
|
||||
|
||||
t.Run("with hidden directory", func(t *testing.T) {
|
||||
tree := NewFileTree()
|
||||
paths := []string{"/dir", "/dir/file1.txt", "/other"}
|
||||
|
||||
for _, path := range paths {
|
||||
fakeData := FileInfo{
|
||||
Path: path,
|
||||
TypeFlag: 1,
|
||||
hash: 123,
|
||||
}
|
||||
if path == "/dir" {
|
||||
fakeData.IsDir = true
|
||||
}
|
||||
node, _, err := tree.AddPath(path, fakeData)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Hide /dir (should hide children too)
|
||||
if path == "/dir" {
|
||||
node.Data.ViewInfo.Hidden = true
|
||||
}
|
||||
}
|
||||
|
||||
size := tree.VisibleSize()
|
||||
// Should count only: /other - 1(root) = 0 (dir and its children are hidden)
|
||||
assert.Equal(t, 0, size)
|
||||
})
|
||||
|
||||
t.Run("complex tree with mixed visibility", func(t *testing.T) {
|
||||
tree := NewFileTree()
|
||||
paths := []string{
|
||||
"/dir1",
|
||||
"/dir1/file1.txt",
|
||||
"/dir1/file2.txt",
|
||||
"/dir2",
|
||||
"/dir2/file3.txt",
|
||||
"/other",
|
||||
}
|
||||
|
||||
for _, path := range paths {
|
||||
fakeData := FileInfo{
|
||||
Path: path,
|
||||
TypeFlag: 1,
|
||||
hash: 123,
|
||||
}
|
||||
if path == "/dir1" || path == "/dir2" {
|
||||
fakeData.IsDir = true
|
||||
}
|
||||
node, _, err := tree.AddPath(path, fakeData)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Hide /dir1/file2.txt
|
||||
if path == "/dir1/file2.txt" {
|
||||
node.Data.ViewInfo.Hidden = true
|
||||
}
|
||||
// Collapse /dir2
|
||||
if path == "/dir2" {
|
||||
node.Data.ViewInfo.Collapsed = true
|
||||
}
|
||||
}
|
||||
|
||||
size := tree.VisibleSize()
|
||||
// Should count: /dir1, /dir1/file1.txt, /dir2 (collapsed), /other - 1(root) = 3
|
||||
// /dir1/file2.txt is hidden, /dir2/file3.txt not counted because dir2 is collapsed
|
||||
assert.Equal(t, 3, size)
|
||||
})
|
||||
}
|
||||
|
||||
func TestVisitDepthParentFirst(t *testing.T) {
|
||||
t.Run("visits nodes parent first", func(t *testing.T) {
|
||||
tree := NewFileTree()
|
||||
paths := []string{"/dir", "/dir/file1.txt", "/dir/file2.txt"}
|
||||
|
||||
for _, path := range paths {
|
||||
fakeData := FileInfo{
|
||||
Path: path,
|
||||
TypeFlag: 1,
|
||||
hash: 123,
|
||||
}
|
||||
if path == "/dir" {
|
||||
fakeData.IsDir = true
|
||||
}
|
||||
_, _, err := tree.AddPath(path, fakeData)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
var visited []string
|
||||
visitor := func(node *FileNode) error {
|
||||
visited = append(visited, node.Path())
|
||||
return nil
|
||||
}
|
||||
evaluator := func(node *FileNode) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
err := tree.VisitDepthParentFirst(visitor, evaluator)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Parent should be visited before children
|
||||
// Order should be: root, /dir, /dir/file1.txt, /dir/file2.txt
|
||||
assert.Greater(t, len(visited), 0)
|
||||
// Find indices
|
||||
var dirIdx, file1Idx, file2Idx int
|
||||
for i, path := range visited {
|
||||
if path == "/dir" {
|
||||
dirIdx = i
|
||||
} else if path == "/dir/file1.txt" {
|
||||
file1Idx = i
|
||||
} else if path == "/dir/file2.txt" {
|
||||
file2Idx = i
|
||||
}
|
||||
}
|
||||
|
||||
// Parent should be visited before children
|
||||
assert.Less(t, dirIdx, file1Idx)
|
||||
assert.Less(t, dirIdx, file2Idx)
|
||||
})
|
||||
|
||||
t.Run("respects evaluator", func(t *testing.T) {
|
||||
tree := NewFileTree()
|
||||
paths := []string{"/dir", "/dir/file1.txt", "/dir/file2.txt"}
|
||||
|
||||
for _, path := range paths {
|
||||
fakeData := FileInfo{
|
||||
Path: path,
|
||||
TypeFlag: 1,
|
||||
hash: 123,
|
||||
}
|
||||
if path == "/dir" {
|
||||
fakeData.IsDir = true
|
||||
}
|
||||
_, _, err := tree.AddPath(path, fakeData)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
var visited []string
|
||||
visitor := func(node *FileNode) error {
|
||||
visited = append(visited, node.Path())
|
||||
return nil
|
||||
}
|
||||
// Don't visit /dir/file2.txt
|
||||
evaluator := func(node *FileNode) bool {
|
||||
return node.Path() != "/dir/file2.txt"
|
||||
}
|
||||
|
||||
err := tree.VisitDepthParentFirst(visitor, evaluator)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// /dir/file2.txt should not be visited
|
||||
assert.NotContains(t, visited, "/dir/file2.txt")
|
||||
// But /dir should be visited (parent)
|
||||
assert.Contains(t, visited, "/dir")
|
||||
})
|
||||
|
||||
t.Run("handles empty tree", func(t *testing.T) {
|
||||
tree := NewFileTree()
|
||||
|
||||
visited := false
|
||||
visitor := func(node *FileNode) error {
|
||||
visited = true
|
||||
return nil
|
||||
}
|
||||
evaluator := func(node *FileNode) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
err := tree.VisitDepthParentFirst(visitor, evaluator)
|
||||
assert.NoError(t, err)
|
||||
// Empty tree has no nodes to visit (root exists but has no children)
|
||||
// The visitor will not be called for an empty tree
|
||||
assert.False(t, visited)
|
||||
})
|
||||
|
||||
t.Run("visitor error stops iteration", func(t *testing.T) {
|
||||
tree := NewFileTree()
|
||||
paths := []string{"/dir", "/dir/file1.txt", "/dir/file2.txt"}
|
||||
|
||||
for _, path := range paths {
|
||||
fakeData := FileInfo{
|
||||
Path: path,
|
||||
TypeFlag: 1,
|
||||
hash: 123,
|
||||
}
|
||||
if path == "/dir" {
|
||||
fakeData.IsDir = true
|
||||
}
|
||||
_, _, err := tree.AddPath(path, fakeData)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
visitCount := 0
|
||||
visitor := func(node *FileNode) error {
|
||||
visitCount++
|
||||
if node.Path() == "/dir/file1.txt" {
|
||||
return assert.AnError
|
||||
}
|
||||
return nil
|
||||
}
|
||||
evaluator := func(node *FileNode) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
err := tree.VisitDepthParentFirst(visitor, evaluator)
|
||||
assert.Error(t, err)
|
||||
// Should stop after error
|
||||
assert.LessOrEqual(t, visitCount, 3) // root, /dir, /dir/file1.txt
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
|||
240
dive/filetree/order_strategy_test.go
Normal file
240
dive/filetree/order_strategy_test.go
Normal file
|
|
@ -0,0 +1,240 @@
|
|||
package filetree
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetSortOrderStrategy(t *testing.T) {
|
||||
t.Run("ByName strategy", func(t *testing.T) {
|
||||
strategy := GetSortOrderStrategy(ByName)
|
||||
assert.IsType(t, orderByNameStrategy{}, strategy)
|
||||
})
|
||||
|
||||
t.Run("BySizeDesc strategy", func(t *testing.T) {
|
||||
strategy := GetSortOrderStrategy(BySizeDesc)
|
||||
assert.IsType(t, orderBySizeDescStrategy{}, strategy)
|
||||
})
|
||||
|
||||
t.Run("invalid value defaults to ByName", func(t *testing.T) {
|
||||
strategy := GetSortOrderStrategy(SortOrder(99))
|
||||
assert.IsType(t, orderByNameStrategy{}, strategy)
|
||||
})
|
||||
}
|
||||
|
||||
// TestOrderByNameStrategy_OrderKeys verifies that orderByNameStrategy returns
// map keys sorted lexicographically (plain byte-wise string comparison).
func TestOrderByNameStrategy_OrderKeys(t *testing.T) {
	t.Run("empty map", func(t *testing.T) {
		strategy := orderByNameStrategy{}
		files := make(map[string]*FileNode)

		result := strategy.orderKeys(files)

		assert.Empty(t, result)
	})

	t.Run("single file", func(t *testing.T) {
		strategy := orderByNameStrategy{}
		files := map[string]*FileNode{
			"file.txt": {},
		}

		result := strategy.orderKeys(files)

		assert.Len(t, result, 1)
		assert.Equal(t, "file.txt", result[0])
	})

	t.Run("multiple files sorted alphabetically", func(t *testing.T) {
		strategy := orderByNameStrategy{}
		files := map[string]*FileNode{
			"zebra.txt":  {},
			"apple.txt":  {},
			"banana.txt": {},
		}

		result := strategy.orderKeys(files)

		assert.Equal(t, []string{"apple.txt", "banana.txt", "zebra.txt"}, result)
	})

	t.Run("files with similar names", func(t *testing.T) {
		strategy := orderByNameStrategy{}
		files := map[string]*FileNode{
			"file1.txt":  {},
			"file2.txt":  {},
			"file10.txt": {},
			"file20.txt": {},
		}

		result := strategy.orderKeys(files)

		// Lexicographic sort, not numeric: "file10" < "file2" byte-wise.
		assert.Equal(t, []string{"file1.txt", "file10.txt", "file2.txt", "file20.txt"}, result)
	})

	t.Run("files with paths", func(t *testing.T) {
		strategy := orderByNameStrategy{}
		files := map[string]*FileNode{
			"/usr/bin/file":  {},
			"/etc/config":    {},
			"/var/log/app":   {},
			"/home/user/doc": {},
		}

		result := strategy.orderKeys(files)

		// Full path strings sort as ordinary strings.
		assert.Equal(t, []string{"/etc/config", "/home/user/doc", "/usr/bin/file", "/var/log/app"}, result)
	})

	t.Run("case sensitive sorting", func(t *testing.T) {
		strategy := orderByNameStrategy{}
		files := map[string]*FileNode{
			"FILE.TXT": {},
			"file.txt": {},
			"File.Txt": {},
		}

		result := strategy.orderKeys(files)

		// Uppercase letters sort before lowercase in ASCII/byte order.
		assert.Equal(t, []string{"FILE.TXT", "File.Txt", "file.txt"}, result)
	})
}
|
||||
|
||||
func TestOrderBySizeDescStrategy_OrderKeys(t *testing.T) {
|
||||
t.Run("empty map", func(t *testing.T) {
|
||||
strategy := orderBySizeDescStrategy{}
|
||||
files := make(map[string]*FileNode)
|
||||
|
||||
result := strategy.orderKeys(files)
|
||||
|
||||
assert.Empty(t, result)
|
||||
})
|
||||
|
||||
t.Run("single file", func(t *testing.T) {
|
||||
strategy := orderBySizeDescStrategy{}
|
||||
node := &FileNode{}
|
||||
node.Size = 1024
|
||||
files := map[string]*FileNode{
|
||||
"file.txt": node,
|
||||
}
|
||||
|
||||
result := strategy.orderKeys(files)
|
||||
|
||||
assert.Len(t, result, 1)
|
||||
assert.Equal(t, "file.txt", result[0])
|
||||
})
|
||||
|
||||
t.Run("multiple files sorted by size descending", func(t *testing.T) {
|
||||
strategy := orderBySizeDescStrategy{}
|
||||
|
||||
smallNode := &FileNode{}
|
||||
smallNode.Size = 100
|
||||
|
||||
mediumNode := &FileNode{}
|
||||
mediumNode.Size = 500
|
||||
|
||||
largeNode := &FileNode{}
|
||||
largeNode.Size = 1000
|
||||
|
||||
files := map[string]*FileNode{
|
||||
"small.txt": smallNode,
|
||||
"large.txt": largeNode,
|
||||
"medium.txt": mediumNode,
|
||||
}
|
||||
|
||||
result := strategy.orderKeys(files)
|
||||
|
||||
assert.Equal(t, []string{"large.txt", "medium.txt", "small.txt"}, result)
|
||||
})
|
||||
|
||||
t.Run("files with same size sorted alphabetically", func(t *testing.T) {
|
||||
strategy := orderBySizeDescStrategy{}
|
||||
|
||||
node1 := &FileNode{}
|
||||
node1.Size = 500
|
||||
|
||||
node2 := &FileNode{}
|
||||
node2.Size = 500
|
||||
|
||||
node3 := &FileNode{}
|
||||
node3.Size = 500
|
||||
|
||||
files := map[string]*FileNode{
|
||||
"zebra.txt": node1,
|
||||
"apple.txt": node2,
|
||||
"banana.txt": node3,
|
||||
}
|
||||
|
||||
result := strategy.orderKeys(files)
|
||||
|
||||
assert.Equal(t, []string{"apple.txt", "banana.txt", "zebra.txt"}, result)
|
||||
})
|
||||
|
||||
t.Run("files with zero size", func(t *testing.T) {
|
||||
strategy := orderBySizeDescStrategy{}
|
||||
|
||||
zeroNode1 := &FileNode{}
|
||||
zeroNode1.Size = 0
|
||||
|
||||
zeroNode2 := &FileNode{}
|
||||
zeroNode2.Size = 0
|
||||
|
||||
smallNode := &FileNode{}
|
||||
smallNode.Size = 100
|
||||
|
||||
files := map[string]*FileNode{
|
||||
"file1.txt": zeroNode1,
|
||||
"file2.txt": zeroNode2,
|
||||
"file3.txt": smallNode,
|
||||
}
|
||||
|
||||
result := strategy.orderKeys(files)
|
||||
|
||||
// file3.txt comes first (100 bytes), then file1.txt and file2.txt (both 0 bytes, alphabetically)
|
||||
assert.Equal(t, []string{"file3.txt", "file1.txt", "file2.txt"}, result)
|
||||
})
|
||||
|
||||
t.Run("large and small files mixed", func(t *testing.T) {
|
||||
strategy := orderBySizeDescStrategy{}
|
||||
|
||||
// Create explicit files with known sizes
|
||||
nodeA := &FileNode{}
|
||||
nodeA.Size = 10
|
||||
|
||||
nodeB := &FileNode{}
|
||||
nodeB.Size = 1000
|
||||
|
||||
nodeC := &FileNode{}
|
||||
nodeC.Size = 500
|
||||
|
||||
nodeD := &FileNode{}
|
||||
nodeD.Size = 50
|
||||
|
||||
nodeE := &FileNode{}
|
||||
nodeE.Size = 5000
|
||||
|
||||
nodeF := &FileNode{}
|
||||
nodeF.Size = 100
|
||||
|
||||
nodes := map[string]*FileNode{
|
||||
"a.txt": nodeA,
|
||||
"b.txt": nodeB,
|
||||
"c.txt": nodeC,
|
||||
"d.txt": nodeD,
|
||||
"e.txt": nodeE,
|
||||
"f.txt": nodeF,
|
||||
}
|
||||
|
||||
result := strategy.orderKeys(nodes)
|
||||
|
||||
// Verify descending order by size
|
||||
sizesResult := make([]int64, len(result))
|
||||
for i, key := range result {
|
||||
sizesResult[i] = nodes[key].Size
|
||||
}
|
||||
|
||||
// Check that sizes are in descending order
|
||||
assert.Equal(t, []int64{5000, 1000, 500, 100, 50, 10}, sizesResult)
|
||||
})
|
||||
}
|
||||
172
dive/get_image_resolver_test.go
Normal file
172
dive/get_image_resolver_test.go
Normal file
|
|
@ -0,0 +1,172 @@
|
|||
package dive
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/wagoodman/dive/dive/image"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestImageSource_String(t *testing.T) {
|
||||
t.Run("SourceUnknown", func(t *testing.T) {
|
||||
assert.Equal(t, "unknown", SourceUnknown.String())
|
||||
})
|
||||
|
||||
t.Run("SourceDockerEngine", func(t *testing.T) {
|
||||
assert.Equal(t, "docker", SourceDockerEngine.String())
|
||||
})
|
||||
|
||||
t.Run("SourcePodmanEngine", func(t *testing.T) {
|
||||
assert.Equal(t, "podman", SourcePodmanEngine.String())
|
||||
})
|
||||
|
||||
t.Run("SourceDockerArchive", func(t *testing.T) {
|
||||
assert.Equal(t, "docker-archive", SourceDockerArchive.String())
|
||||
})
|
||||
}
|
||||
|
||||
func TestParseImageSource(t *testing.T) {
|
||||
t.Run("parse docker", func(t *testing.T) {
|
||||
result := ParseImageSource("docker")
|
||||
assert.Equal(t, SourceDockerEngine, result)
|
||||
})
|
||||
|
||||
t.Run("parse podman", func(t *testing.T) {
|
||||
result := ParseImageSource("podman")
|
||||
assert.Equal(t, SourcePodmanEngine, result)
|
||||
})
|
||||
|
||||
t.Run("parse docker-archive", func(t *testing.T) {
|
||||
result := ParseImageSource("docker-archive")
|
||||
assert.Equal(t, SourceDockerArchive, result)
|
||||
})
|
||||
|
||||
t.Run("parse docker-tar alias", func(t *testing.T) {
|
||||
result := ParseImageSource("docker-tar")
|
||||
assert.Equal(t, SourceDockerArchive, result)
|
||||
})
|
||||
|
||||
t.Run("parse unknown source", func(t *testing.T) {
|
||||
result := ParseImageSource("unknown")
|
||||
assert.Equal(t, SourceUnknown, result)
|
||||
})
|
||||
|
||||
t.Run("parse invalid source", func(t *testing.T) {
|
||||
result := ParseImageSource("invalid-source")
|
||||
assert.Equal(t, SourceUnknown, result)
|
||||
})
|
||||
|
||||
t.Run("parse empty string", func(t *testing.T) {
|
||||
result := ParseImageSource("")
|
||||
assert.Equal(t, SourceUnknown, result)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDeriveImageSource(t *testing.T) {
|
||||
t.Run("docker scheme", func(t *testing.T) {
|
||||
source, img := DeriveImageSource("docker://my-image:tag")
|
||||
assert.Equal(t, SourceDockerEngine, source)
|
||||
assert.Equal(t, "my-image:tag", img)
|
||||
})
|
||||
|
||||
t.Run("podman scheme", func(t *testing.T) {
|
||||
source, img := DeriveImageSource("podman://my-image:tag")
|
||||
assert.Equal(t, SourcePodmanEngine, source)
|
||||
assert.Equal(t, "my-image:tag", img)
|
||||
})
|
||||
|
||||
t.Run("docker-archive scheme", func(t *testing.T) {
|
||||
// Note: DeriveImageSource may not support docker-archive scheme
|
||||
// This test documents current behavior
|
||||
source, img := DeriveImageSource("docker-archive:/path/to/image.tar")
|
||||
// Current implementation returns SourceUnknown for docker-archive
|
||||
assert.Equal(t, SourceUnknown, source)
|
||||
assert.Equal(t, "", img)
|
||||
})
|
||||
|
||||
t.Run("docker-tar scheme alias", func(t *testing.T) {
|
||||
// Note: DeriveImageSource may not support docker-tar scheme
|
||||
// This test documents current behavior
|
||||
source, img := DeriveImageSource("docker-tar:/path/to/image.tar")
|
||||
// Current implementation returns SourceUnknown for docker-tar
|
||||
assert.Equal(t, SourceUnknown, source)
|
||||
assert.Equal(t, "", img)
|
||||
})
|
||||
|
||||
t.Run("no scheme", func(t *testing.T) {
|
||||
source, img := DeriveImageSource("my-image:tag")
|
||||
assert.Equal(t, SourceUnknown, source)
|
||||
assert.Equal(t, "", img)
|
||||
})
|
||||
|
||||
t.Run("unknown scheme", func(t *testing.T) {
|
||||
source, img := DeriveImageSource("unknown://my-image:tag")
|
||||
assert.Equal(t, SourceUnknown, source)
|
||||
assert.Equal(t, "", img)
|
||||
})
|
||||
|
||||
t.Run("scheme with multiple colons", func(t *testing.T) {
|
||||
source, img := DeriveImageSource("docker://my-image:tag:latest")
|
||||
assert.Equal(t, SourceDockerEngine, source)
|
||||
assert.Equal(t, "my-image:tag:latest", img)
|
||||
})
|
||||
|
||||
t.Run("empty string", func(t *testing.T) {
|
||||
source, img := DeriveImageSource("")
|
||||
assert.Equal(t, SourceUnknown, source)
|
||||
assert.Equal(t, "", img)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetImageResolver(t *testing.T) {
|
||||
t.Run("docker engine resolver", func(t *testing.T) {
|
||||
resolver, err := GetImageResolver(SourceDockerEngine)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resolver)
|
||||
// Check that resolver implements the interface
|
||||
assert.Implements(t, (*image.Resolver)(nil), resolver)
|
||||
})
|
||||
|
||||
t.Run("podman engine resolver", func(t *testing.T) {
|
||||
resolver, err := GetImageResolver(SourcePodmanEngine)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resolver)
|
||||
// Check that resolver implements the interface
|
||||
assert.Implements(t, (*image.Resolver)(nil), resolver)
|
||||
})
|
||||
|
||||
t.Run("docker archive resolver", func(t *testing.T) {
|
||||
resolver, err := GetImageResolver(SourceDockerArchive)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resolver)
|
||||
// Check that resolver implements the interface
|
||||
assert.Implements(t, (*image.Resolver)(nil), resolver)
|
||||
})
|
||||
|
||||
t.Run("unknown source returns error", func(t *testing.T) {
|
||||
resolver, err := GetImageResolver(SourceUnknown)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, resolver)
|
||||
assert.Contains(t, err.Error(), "unable to determine image resolver")
|
||||
})
|
||||
|
||||
t.Run("invalid source returns error", func(t *testing.T) {
|
||||
invalidSource := ImageSource(99)
|
||||
resolver, err := GetImageResolver(invalidSource)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, resolver)
|
||||
assert.Contains(t, err.Error(), "unable to determine image resolver")
|
||||
})
|
||||
}
|
||||
|
||||
func TestImageSources(t *testing.T) {
|
||||
t.Run("contains all valid sources", func(t *testing.T) {
|
||||
assert.Contains(t, ImageSources, "docker")
|
||||
assert.Contains(t, ImageSources, "podman")
|
||||
assert.Contains(t, ImageSources, "docker-archive")
|
||||
})
|
||||
|
||||
t.Run("does not contain unknown", func(t *testing.T) {
|
||||
assert.NotContains(t, ImageSources, "unknown")
|
||||
})
|
||||
}
|
||||
329
dive/image/docker/archive_resolver_test.go
Normal file
329
dive/image/docker/archive_resolver_test.go
Normal file
|
|
@ -0,0 +1,329 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewResolverFromArchive(t *testing.T) {
|
||||
t.Run("create new resolver", func(t *testing.T) {
|
||||
resolver := NewResolverFromArchive()
|
||||
|
||||
assert.NotNil(t, resolver)
|
||||
assert.IsType(t, &archiveResolver{}, resolver)
|
||||
})
|
||||
}
|
||||
|
||||
func TestArchiveResolver_Name(t *testing.T) {
|
||||
t.Run("resolver name", func(t *testing.T) {
|
||||
resolver := NewResolverFromArchive()
|
||||
|
||||
name := resolver.Name()
|
||||
|
||||
assert.Equal(t, "docker-archive", name)
|
||||
})
|
||||
}
|
||||
|
||||
func TestArchiveResolver_Build(t *testing.T) {
|
||||
t.Run("build not supported", func(t *testing.T) {
|
||||
resolver := NewResolverFromArchive()
|
||||
ctx := context.Background()
|
||||
args := []string{"build", "args"}
|
||||
|
||||
img, err := resolver.Build(ctx, args)
|
||||
|
||||
assert.Nil(t, img)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "build option not supported")
|
||||
})
|
||||
}
|
||||
|
||||
func TestArchiveResolver_Extract(t *testing.T) {
|
||||
t.Run("extract not implemented", func(t *testing.T) {
|
||||
resolver := NewResolverFromArchive()
|
||||
ctx := context.Background()
|
||||
|
||||
err := resolver.Extract(ctx, "id", "layer", "path")
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not implemented")
|
||||
})
|
||||
}
|
||||
|
||||
func TestArchiveResolver_Fetch(t *testing.T) {
|
||||
t.Run("file does not exist", func(t *testing.T) {
|
||||
resolver := NewResolverFromArchive()
|
||||
ctx := context.Background()
|
||||
nonExistentPath := "/non/existent/path.tar"
|
||||
|
||||
img, err := resolver.Fetch(ctx, nonExistentPath)
|
||||
|
||||
assert.Nil(t, img)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no such file or directory")
|
||||
})
|
||||
|
||||
t.Run("fetch from valid archive", func(t *testing.T) {
|
||||
// Create a simple Docker archive tar file for testing
|
||||
tmpFile, err := os.CreateTemp("", "docker-archive-*.tar")
|
||||
assert.NoError(t, err)
|
||||
tmpPath := tmpFile.Name()
|
||||
defer os.Remove(tmpPath)
|
||||
tmpFile.Close()
|
||||
|
||||
// Create a minimal tar file with manifest.json
|
||||
// Note: This is a simplified test - in reality, a proper Docker archive
|
||||
// would have manifest.json, layer files, etc.
|
||||
// For this test, we're just checking that Fetch properly opens the file
|
||||
// and calls NewImageArchive (which will fail with an invalid archive)
|
||||
|
||||
resolver := NewResolverFromArchive()
|
||||
ctx := context.Background()
|
||||
|
||||
img, err := resolver.Fetch(ctx, tmpPath)
|
||||
|
||||
// NewImageArchive will fail with an empty/invalid tar file
|
||||
assert.Nil(t, img)
|
||||
assert.Error(t, err)
|
||||
// The error should come from NewImageArchive, not from os.Open
|
||||
assert.NotContains(t, err.Error(), "no such file or directory")
|
||||
})
|
||||
|
||||
t.Run("fetch with empty path", func(t *testing.T) {
|
||||
resolver := NewResolverFromArchive()
|
||||
ctx := context.Background()
|
||||
|
||||
img, err := resolver.Fetch(ctx, "")
|
||||
|
||||
assert.Nil(t, img)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("fetch from directory calls os.Exit", func(t *testing.T) {
|
||||
// Note: NewImageArchive calls os.Exit(1) on error, which will kill the test
|
||||
// This test documents that behavior - we cannot test it directly
|
||||
// because it would kill the test runner
|
||||
resolver := NewResolverFromArchive()
|
||||
ctx := context.Background()
|
||||
|
||||
// We can't test this case because NewImageArchive calls os.Exit(1)
|
||||
// when encountering a directory, which would terminate the test runner
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// This will cause os.Exit(1) to be called, so we skip this test
|
||||
t.Skip("NewImageArchive calls os.Exit(1) on error, which would kill the test runner")
|
||||
|
||||
_, _ = resolver.Fetch(ctx, tmpDir)
|
||||
})
|
||||
}
|
||||
|
||||
func TestArchiveResolver_Integration(t *testing.T) {
|
||||
// This test verifies that the resolver implements the Resolver interface correctly
|
||||
t.Run("resolver implements interface", func(t *testing.T) {
|
||||
resolver := NewResolverFromArchive()
|
||||
|
||||
// Verify the resolver has the correct type
|
||||
assert.IsType(t, &archiveResolver{}, resolver)
|
||||
|
||||
// Verify all methods exist and return expected types
|
||||
ctx := context.Background()
|
||||
|
||||
// Name() should return string
|
||||
name := resolver.Name()
|
||||
assert.IsType(t, "", name)
|
||||
|
||||
// Build() should return error
|
||||
img, err := resolver.Build(ctx, []string{})
|
||||
assert.Nil(t, img)
|
||||
assert.Error(t, err)
|
||||
|
||||
// Extract() should return error
|
||||
err = resolver.Extract(ctx, "id", "layer", "path")
|
||||
assert.Error(t, err)
|
||||
|
||||
// Fetch() with invalid path should return error
|
||||
img, err = resolver.Fetch(ctx, "/invalid/path")
|
||||
assert.Nil(t, img)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestArchiveResolver_ErrorMessages(t *testing.T) {
|
||||
t.Run("build error message", func(t *testing.T) {
|
||||
resolver := NewResolverFromArchive()
|
||||
ctx := context.Background()
|
||||
|
||||
_, err := resolver.Build(ctx, []string{})
|
||||
|
||||
assert.Error(t, err)
|
||||
expectedMsg := "build option not supported for docker archive resolver"
|
||||
assert.Equal(t, expectedMsg, err.Error())
|
||||
})
|
||||
|
||||
t.Run("extract error message", func(t *testing.T) {
|
||||
resolver := NewResolverFromArchive()
|
||||
ctx := context.Background()
|
||||
|
||||
err := resolver.Extract(ctx, "id", "layer", "path")
|
||||
|
||||
assert.Error(t, err)
|
||||
expectedMsg := "not implemented"
|
||||
assert.Equal(t, expectedMsg, err.Error())
|
||||
})
|
||||
}
|
||||
|
||||
// TestArchiveResolver_FetchErrors covers Fetch error paths. The first subtest
// is skipped: it would feed an invalid tar to NewImageArchive, which calls
// os.Exit(1) on parse failure and would kill the whole test runner.
func TestArchiveResolver_FetchErrors(t *testing.T) {
	t.Run("verify file is closed on error", func(t *testing.T) {
		// Note: NewImageArchive calls os.Exit(1) on tar parsing errors,
		// which prevents testing file-handle cleanup in the normal way.
		t.Skip("NewImageArchive calls os.Exit(1) on tar errors, which would kill the test runner")

		// The code below documents the intended test; it never runs while
		// the skip above is in place.
		resolver := NewResolverFromArchive()
		ctx := context.Background()

		// Create a temp file with invalid content (will fail in NewImageArchive).
		tmpFile, err := os.CreateTemp("", "docker-archive-*.tar")
		assert.NoError(t, err)
		tmpPath := tmpFile.Name()
		defer os.Remove(tmpPath)

		// Write some invalid content.
		_, err = tmpFile.WriteString("invalid tar content")
		assert.NoError(t, err)
		tmpFile.Close()

		// Fetch should fail, but the file should be closed.
		img, err := resolver.Fetch(ctx, tmpPath)

		assert.Nil(t, img)
		assert.Error(t, err)

		// Verify we can open the file again (meaning it was properly closed).
		_, err = os.Open(tmpPath)
		assert.NoError(t, err)
	})

	t.Run("verify context cancellation is respected", func(t *testing.T) {
		// Note: the current implementation doesn't check context
		// cancellation; this subtest documents that behavior.
		resolver := NewResolverFromArchive()

		// Create an already-cancelled context.
		ctx, cancel := context.WithCancel(context.Background())
		cancel()

		// Fetch still attempts to open the file despite the cancelled
		// context; the error comes from the missing path, not from ctx.
		_, err := resolver.Fetch(ctx, "/non/existent/path")

		assert.Error(t, err)
	})
}
|
||||
|
||||
func TestArchiveResolver_MultipleInstances(t *testing.T) {
|
||||
t.Run("multiple resolvers are independent", func(t *testing.T) {
|
||||
resolver1 := NewResolverFromArchive()
|
||||
resolver2 := NewResolverFromArchive()
|
||||
|
||||
// They should be different instances
|
||||
assert.NotSame(t, resolver1, resolver2)
|
||||
|
||||
// But have the same name
|
||||
assert.Equal(t, resolver1.Name(), resolver2.Name())
|
||||
})
|
||||
}
|
||||
|
||||
// TestArchiveResolver_NilSafety documents that Name, Build, and Extract can
// be called on a nil *archiveResolver without panicking.
func TestArchiveResolver_NilSafety(t *testing.T) {
	t.Run("methods work on nil receiver", func(t *testing.T) {
		// In Go, calling a method on a nil pointer is legal as long as the
		// method body never dereferences the receiver.
		var resolver *archiveResolver

		// Name() doesn't dereference the receiver, so it won't panic.
		name := resolver.Name()
		assert.Equal(t, "docker-archive", name)

		// Build and Extract also don't dereference the receiver; they
		// return their usual errors even on a nil resolver.
		ctx := context.Background()
		_, err := resolver.Build(ctx, []string{})
		assert.Error(t, err)

		err = resolver.Extract(ctx, "id", "layer", "path")
		assert.Error(t, err)
	})
}
|
||||
|
||||
// TestArchiveResolver_FetchWithInvalidArchive would exercise Fetch on a
// corrupted archive, but is skipped: NewImageArchive calls os.Exit(1) on tar
// parse failures, which would terminate the whole test runner.
func TestArchiveResolver_FetchWithInvalidArchive(t *testing.T) {
	t.Run("fetch with corrupted archive", func(t *testing.T) {
		// Note: NewImageArchive calls os.Exit(1) on tar parsing errors,
		// which prevents testing with corrupted archives.
		t.Skip("NewImageArchive calls os.Exit(1) on tar errors, which would kill the test runner")

		// The code below documents the intended test; it never runs while
		// the skip above is in place.
		resolver := NewResolverFromArchive()
		ctx := context.Background()

		// Create a file with corrupted content.
		tmpFile, err := os.CreateTemp("", "docker-archive-*.tar")
		assert.NoError(t, err)
		tmpPath := tmpFile.Name()
		defer os.Remove(tmpPath)

		// Write random bytes (not a valid tar).
		_, err = tmpFile.Write([]byte{0xFF, 0xFE, 0xFD, 0xFC})
		assert.NoError(t, err)
		tmpFile.Close()

		img, err := resolver.Fetch(ctx, tmpPath)

		assert.Nil(t, img)
		assert.Error(t, err)
		// Should be an error from NewImageArchive or ToImage, not os.Open.
		assert.NotContains(t, err.Error(), "no such file or directory")
	})
}
|
||||
|
||||
// TestArchiveResolver_ContextVariants verifies that Build, Extract, and Fetch
// behave the same regardless of which context value is supplied — these
// subtests document that the current implementation does not consult ctx.
func TestArchiveResolver_ContextVariants(t *testing.T) {
	t.Run("fetch with different context types", func(t *testing.T) {
		resolver := NewResolverFromArchive()

		// Test with background context.
		img1, err1 := resolver.Fetch(context.Background(), "/non/existent")
		assert.Nil(t, img1)
		assert.Error(t, err1)

		// Test with TODO context.
		img2, err2 := resolver.Fetch(context.TODO(), "/non/existent")
		assert.Nil(t, img2)
		assert.Error(t, err2)

		// Both should fail the same way: the path does not exist.
		assert.Contains(t, err1.Error(), "no such file")
		assert.Contains(t, err2.Error(), "no such file")
	})

	t.Run("build and extract ignore context", func(t *testing.T) {
		resolver := NewResolverFromArchive()

		// Build returns the identical error for any context.
		_, err1 := resolver.Build(context.Background(), []string{})
		assert.Error(t, err1)

		_, err2 := resolver.Build(context.TODO(), []string{})
		assert.Error(t, err2)

		assert.Equal(t, err1.Error(), err2.Error())

		// Extract returns the identical error for any context.
		err3 := resolver.Extract(context.Background(), "id", "layer", "path")
		assert.Error(t, err3)

		err4 := resolver.Extract(context.TODO(), "id", "layer", "path")
		assert.Error(t, err4)

		assert.Equal(t, err3.Error(), err4.Error())
	})
}
|
||||
410
dive/image/docker/cli_test.go
Normal file
410
dive/image/docker/cli_test.go
Normal file
|
|
@ -0,0 +1,410 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestIsDockerClientBinaryAvailable checks isDockerClientBinaryAvailable
// against the environment's PATH. Since Docker may or may not be installed
// where the tests run, the assertions adapt to whichever case holds rather
// than requiring a fixed result.
func TestIsDockerClientBinaryAvailable(t *testing.T) {
	t.Run("docker is available", func(t *testing.T) {
		// Cross-check the function's answer against exec.LookPath, which is
		// the ground truth for binary availability on this machine.
		available := isDockerClientBinaryAvailable()

		if available {
			// Docker is available.
			_, err := exec.LookPath("docker")
			assert.NoError(t, err, "docker should be findable via LookPath")
		} else {
			// Docker is not available.
			_, err := exec.LookPath("docker")
			assert.Error(t, err, "docker should not be findable via LookPath")
		}
	})

	t.Run("function returns boolean", func(t *testing.T) {
		// Verify the function returns a boolean value.
		available := isDockerClientBinaryAvailable()
		assert.IsType(t, false, available)
	})

	t.Run("multiple calls return same result", func(t *testing.T) {
		// The environment doesn't change mid-test, so repeated calls must agree.
		result1 := isDockerClientBinaryAvailable()
		result2 := isDockerClientBinaryAvailable()
		result3 := isDockerClientBinaryAvailable()

		assert.Equal(t, result1, result2)
		assert.Equal(t, result2, result3)
	})
}
|
||||
|
||||
func TestRunDockerCmd(t *testing.T) {
|
||||
t.Run("error when docker not available", func(t *testing.T) {
|
||||
// This test verifies behavior when docker is not available
|
||||
// Since we can't easily mock exec.LookPath, we test the error message
|
||||
|
||||
// Save original PATH
|
||||
originalPath := os.Getenv("PATH")
|
||||
|
||||
// Set PATH to empty to make docker unavailable
|
||||
os.Unsetenv("PATH")
|
||||
defer func() {
|
||||
// Restore PATH
|
||||
os.Setenv("PATH", originalPath)
|
||||
}()
|
||||
|
||||
// Even with empty PATH, some systems might still find docker
|
||||
// So we'll just verify the function can be called
|
||||
err := runDockerCmd("--version")
|
||||
|
||||
// Either docker is not found (expected error) or it's found
|
||||
if err != nil {
|
||||
// Docker not found
|
||||
assert.Contains(t, err.Error(), "cannot find docker client executable")
|
||||
}
|
||||
// If docker was found, we can't test this case properly
|
||||
})
|
||||
|
||||
t.Run("constructs correct command", func(t *testing.T) {
|
||||
// This test verifies that runDockerCmd constructs the correct command
|
||||
// Since we can't easily mock exec.Command, we'll test with a non-destructive command
|
||||
|
||||
available := isDockerClientBinaryAvailable()
|
||||
|
||||
if !available {
|
||||
t.Skip("Docker not available, skipping integration test")
|
||||
}
|
||||
|
||||
// Test with --version which should always succeed if docker is installed
|
||||
err := runDockerCmd("--version")
|
||||
|
||||
// --version should succeed
|
||||
assert.NoError(t, err, "docker --version should succeed if docker is available")
|
||||
})
|
||||
|
||||
t.Run("handles command with arguments", func(t *testing.T) {
|
||||
available := isDockerClientBinaryAvailable()
|
||||
|
||||
if !available {
|
||||
t.Skip("Docker not available, skipping integration test")
|
||||
}
|
||||
|
||||
// Test with a command that has arguments
|
||||
// Using 'info' which should work on any Docker installation
|
||||
err := runDockerCmd("info", "--format", "{{.ServerVersion}}")
|
||||
|
||||
// This should succeed if docker is available
|
||||
// Note: info might fail if docker daemon is not running, but that's ok
|
||||
// We're testing that the command is constructed correctly
|
||||
_ = err // We don't assert on error since docker daemon might not be running
|
||||
})
|
||||
|
||||
t.Run("handles empty arguments", func(t *testing.T) {
|
||||
available := isDockerClientBinaryAvailable()
|
||||
|
||||
if !available {
|
||||
t.Skip("Docker not available, skipping integration test")
|
||||
}
|
||||
|
||||
// Test with empty arguments - Docker shows help and exits successfully
|
||||
// runDockerCmd requires at least one argument (cmdStr)
|
||||
err := runDockerCmd("")
|
||||
|
||||
// Docker shows help when called with empty string, exits successfully
|
||||
// So we don't expect an error
|
||||
_ = err // Don't assert, just verify it doesn't panic
|
||||
})
|
||||
|
||||
t.Run("cleans arguments", func(t *testing.T) {
|
||||
// Test that arguments are properly cleaned (whitespace trimmed)
|
||||
// This is hard to test without mocking, but we can verify it doesn't panic
|
||||
available := isDockerClientBinaryAvailable()
|
||||
|
||||
if !available {
|
||||
t.Skip("Docker not available, skipping integration test")
|
||||
}
|
||||
|
||||
// These arguments will be cleaned by utils.CleanArgs
|
||||
err := runDockerCmd(" --version ")
|
||||
|
||||
// Should succeed (whitespace should be trimmed)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRunDockerCmd_ErrorCases(t *testing.T) {
|
||||
t.Run("invalid command should error", func(t *testing.T) {
|
||||
available := isDockerClientBinaryAvailable()
|
||||
|
||||
if !available {
|
||||
t.Skip("Docker not available, skipping integration test")
|
||||
}
|
||||
|
||||
// Test with an invalid docker command
|
||||
err := runDockerCmd("invalid-command-that-does-not-exist")
|
||||
|
||||
// Should return an error
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("command with invalid flags should error", func(t *testing.T) {
|
||||
available := isDockerClientBinaryAvailable()
|
||||
|
||||
if !available {
|
||||
t.Skip("Docker not available, skipping integration test")
|
||||
}
|
||||
|
||||
// Test a valid command with invalid flags
|
||||
err := runDockerCmd("--version", "--invalid-flag")
|
||||
|
||||
// Should return an error
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRunDockerCmd_Integration(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration tests in short mode")
|
||||
}
|
||||
|
||||
available := isDockerClientBinaryAvailable()
|
||||
if !available {
|
||||
t.Skip("Docker not available, skipping integration tests")
|
||||
}
|
||||
|
||||
t.Run("docker --version", func(t *testing.T) {
|
||||
err := runDockerCmd("--version")
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("docker help", func(t *testing.T) {
|
||||
err := runDockerCmd("help")
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDockerCommandConstruction(t *testing.T) {
|
||||
// This test verifies that the docker command is constructed correctly
|
||||
// We can't directly test the construction without modifying the code
|
||||
// But we can test that the function handles various argument formats
|
||||
|
||||
t.Run("handles single command", func(t *testing.T) {
|
||||
available := isDockerClientBinaryAvailable()
|
||||
if !available {
|
||||
t.Skip("Docker not available")
|
||||
}
|
||||
|
||||
err := runDockerCmd("version")
|
||||
_ = err // Don't assert, just verify it doesn't panic
|
||||
})
|
||||
|
||||
t.Run("handles command with multiple arguments", func(t *testing.T) {
|
||||
available := isDockerClientBinaryAvailable()
|
||||
if !available {
|
||||
t.Skip("Docker not available")
|
||||
}
|
||||
|
||||
err := runDockerCmd("image", "ls", "--format", "{{.Repository}}")
|
||||
_ = err // Don't assert, just verify it doesn't panic
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsDockerClientBinaryAvailable_EdgeCases(t *testing.T) {
|
||||
t.Run("handles repeated calls", func(t *testing.T) {
|
||||
// Test that repeated calls don't have side effects
|
||||
results := make([]bool, 10)
|
||||
for i := 0; i < 10; i++ {
|
||||
results[i] = isDockerClientBinaryAvailable()
|
||||
}
|
||||
|
||||
// All results should be the same
|
||||
firstResult := results[0]
|
||||
for _, result := range results {
|
||||
assert.Equal(t, firstResult, result)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("result is consistent with exec.LookPath", func(t *testing.T) {
|
||||
// Verify that isDockerClientBinaryAvailable is consistent with exec.LookPath
|
||||
available := isDockerClientBinaryAvailable()
|
||||
_, err := exec.LookPath("docker")
|
||||
|
||||
if available {
|
||||
assert.NoError(t, err, "isDockerClientBinaryAvailable returned true but LookPath failed")
|
||||
} else {
|
||||
assert.Error(t, err, "isDockerClientBinaryAvailable returned false but LookPath succeeded")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestRunDockerCmd_Stdio(t *testing.T) {
|
||||
t.Run("stdio connections", func(t *testing.T) {
|
||||
// This test verifies that runDockerCmd properly connects stdio
|
||||
// We can't directly test this without running a command
|
||||
// But we can verify the function completes without hanging
|
||||
|
||||
available := isDockerClientBinaryAvailable()
|
||||
if !available {
|
||||
t.Skip("Docker not available")
|
||||
}
|
||||
|
||||
// Run a command that uses stdin/stdout/stderr
|
||||
// --version is a good choice as it's fast and always works
|
||||
err := runDockerCmd("--version")
|
||||
|
||||
if err == nil {
|
||||
// Command succeeded, stdio was properly connected
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
// If err != nil, docker might not be properly configured, but that's ok
|
||||
})
|
||||
}
|
||||
|
||||
func TestDockerArgsCleaning(t *testing.T) {
|
||||
t.Run("arguments with whitespace are cleaned", func(t *testing.T) {
|
||||
available := isDockerClientBinaryAvailable()
|
||||
if !available {
|
||||
t.Skip("Docker not available")
|
||||
}
|
||||
|
||||
// The function should use utils.CleanArgs which trims whitespace
|
||||
// Test with arguments that have leading/trailing whitespace
|
||||
err := runDockerCmd(" --version ")
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRunDockerCmd_CommandString(t *testing.T) {
|
||||
// This test verifies the command string construction
|
||||
// The full command is: docker + cmdStr + args
|
||||
|
||||
t.Run("command has docker prefix", func(t *testing.T) {
|
||||
available := isDockerClientBinaryAvailable()
|
||||
if !available {
|
||||
t.Skip("Docker not available")
|
||||
}
|
||||
|
||||
// The function should construct: docker --version
|
||||
err := runDockerCmd("--version")
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("command with args has correct format", func(t *testing.T) {
|
||||
available := isDockerClientBinaryAvailable()
|
||||
if !available {
|
||||
t.Skip("Docker not available")
|
||||
}
|
||||
|
||||
// The function should construct: docker image ls
|
||||
err := runDockerCmd("image", "ls")
|
||||
_ = err // Don't assert, daemon might not be running
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsDockerClientBinaryAvailable_System(t *testing.T) {
|
||||
t.Run("detects docker in PATH", func(t *testing.T) {
|
||||
// Check if docker is in common PATH locations
|
||||
path := os.Getenv("PATH")
|
||||
pathDirs := strings.Split(path, string(os.PathListSeparator))
|
||||
|
||||
dockerFound := false
|
||||
for _, dir := range pathDirs {
|
||||
dockerPath := dir + string(os.PathSeparator) + "docker"
|
||||
if _, err := os.Stat(dockerPath); err == nil {
|
||||
dockerFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
available := isDockerClientBinaryAvailable()
|
||||
|
||||
if dockerFound {
|
||||
assert.True(t, available, "Docker found in PATH but isDockerClientBinaryAvailable returned false")
|
||||
} else {
|
||||
assert.False(t, available, "Docker not found in PATH but isDockerClientBinaryAvailable returned true")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestRunDockerCmd_ErrorMessages(t *testing.T) {
|
||||
t.Run("returns specific error when docker not found", func(t *testing.T) {
|
||||
// Save original PATH
|
||||
originalPath := os.Getenv("PATH")
|
||||
defer os.Setenv("PATH", originalPath)
|
||||
|
||||
// Set PATH to a directory that doesn't contain docker
|
||||
os.Setenv("PATH", "/nonexistent/path")
|
||||
|
||||
err := runDockerCmd("--version")
|
||||
|
||||
// Should get the specific error message
|
||||
if err != nil {
|
||||
assert.Contains(t, err.Error(), "cannot find docker client executable")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestRunDockerCmd_NoPanic(t *testing.T) {
|
||||
// These tests verify that runDockerCmd doesn't panic with various inputs
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
args []string
|
||||
}{
|
||||
{"empty string command", []string{""}},
|
||||
{"single argument", []string{"--version"}},
|
||||
{"multiple arguments", []string{"image", "ls"}},
|
||||
{"arguments with spaces", []string{" image ", " ls "}},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Just verify it doesn't panic
|
||||
require.NotPanics(t, func() {
|
||||
// tc.args[0] is the cmdStr, tc.args[1:] are additional args
|
||||
if len(tc.args) > 0 {
|
||||
cmdStr := tc.args[0]
|
||||
args := tc.args[1:]
|
||||
err := runDockerCmd(cmdStr, args...)
|
||||
_ = err // Ignore errors, we're just testing for panics
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsDockerClientBinaryAvailable_NoPanic(t *testing.T) {
|
||||
t.Run("does not panic", func(t *testing.T) {
|
||||
// Verify the function doesn't panic
|
||||
assert.NotPanics(t, func() {
|
||||
_ = isDockerClientBinaryAvailable()
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("concurrent calls", func(t *testing.T) {
|
||||
// Verify multiple concurrent calls don't cause issues
|
||||
results := make(chan bool, 10)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
results <- isDockerClientBinaryAvailable()
|
||||
}()
|
||||
}
|
||||
|
||||
// Collect results
|
||||
for i := 0; i < 10; i++ {
|
||||
<-results
|
||||
}
|
||||
|
||||
// If we got here without deadlock or panic, the test passes
|
||||
})
|
||||
}
|
||||
287
dive/image/docker/config_manifest_test.go
Normal file
287
dive/image/docker/config_manifest_test.go
Normal file
|
|
@ -0,0 +1,287 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIsConfig(t *testing.T) {
|
||||
t.Run("valid config with layers type", func(t *testing.T) {
|
||||
configJSON := `{
|
||||
"history": [],
|
||||
"rootfs": {
|
||||
"type": "layers",
|
||||
"diff_ids": []
|
||||
}
|
||||
}`
|
||||
result := isConfig([]byte(configJSON))
|
||||
assert.True(t, result)
|
||||
})
|
||||
|
||||
t.Run("config with different rootfs type", func(t *testing.T) {
|
||||
configJSON := `{
|
||||
"history": [],
|
||||
"rootfs": {
|
||||
"type": "not-layers",
|
||||
"diff_ids": []
|
||||
}
|
||||
}`
|
||||
result := isConfig([]byte(configJSON))
|
||||
assert.False(t, result)
|
||||
})
|
||||
|
||||
t.Run("invalid JSON", func(t *testing.T) {
|
||||
invalidJSON := `{invalid json}`
|
||||
result := isConfig([]byte(invalidJSON))
|
||||
assert.False(t, result)
|
||||
})
|
||||
|
||||
t.Run("empty JSON object", func(t *testing.T) {
|
||||
emptyJSON := `{}`
|
||||
result := isConfig([]byte(emptyJSON))
|
||||
assert.False(t, result)
|
||||
})
|
||||
|
||||
t.Run("missing rootfs field", func(t *testing.T) {
|
||||
configJSON := `{"history": []}`
|
||||
result := isConfig([]byte(configJSON))
|
||||
assert.False(t, result)
|
||||
})
|
||||
|
||||
t.Run("config with history entries", func(t *testing.T) {
|
||||
configJSON := `{
|
||||
"history": [
|
||||
{
|
||||
"created": "2023-01-01T00:00:00Z",
|
||||
"author": "test",
|
||||
"created_by": "test command",
|
||||
"empty_layer": false
|
||||
}
|
||||
],
|
||||
"rootfs": {
|
||||
"type": "layers",
|
||||
"diff_ids": ["sha256:abc123"]
|
||||
}
|
||||
}`
|
||||
result := isConfig([]byte(configJSON))
|
||||
assert.True(t, result)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewConfig(t *testing.T) {
|
||||
t.Run("valid config with non-empty layers", func(t *testing.T) {
|
||||
configJSON := `{
|
||||
"history": [
|
||||
{
|
||||
"created": "2023-01-01T00:00:00Z",
|
||||
"created_by": "CMD /bin/sh",
|
||||
"empty_layer": false
|
||||
},
|
||||
{
|
||||
"created": "2023-01-01T00:00:01Z",
|
||||
"created_by": "RUN apt-get update",
|
||||
"empty_layer": false
|
||||
}
|
||||
],
|
||||
"rootfs": {
|
||||
"type": "layers",
|
||||
"diff_ids": [
|
||||
"sha256:layer1",
|
||||
"sha256:layer2"
|
||||
]
|
||||
}
|
||||
}`
|
||||
config := newConfig([]byte(configJSON))
|
||||
|
||||
require.Len(t, config.History, 2)
|
||||
assert.Equal(t, "sha256:layer1", config.History[0].ID)
|
||||
assert.Equal(t, "sha256:layer2", config.History[1].ID)
|
||||
assert.False(t, config.History[0].EmptyLayer)
|
||||
assert.False(t, config.History[1].EmptyLayer)
|
||||
})
|
||||
|
||||
t.Run("config with empty layers", func(t *testing.T) {
|
||||
configJSON := `{
|
||||
"history": [
|
||||
{
|
||||
"created": "2023-01-01T00:00:00Z",
|
||||
"created_by": "FROM scratch",
|
||||
"empty_layer": true
|
||||
},
|
||||
{
|
||||
"created": "2023-01-01T00:00:01Z",
|
||||
"created_by": "RUN echo test",
|
||||
"empty_layer": false
|
||||
}
|
||||
],
|
||||
"rootfs": {
|
||||
"type": "layers",
|
||||
"diff_ids": [
|
||||
"sha256:layer1"
|
||||
]
|
||||
}
|
||||
}`
|
||||
config := newConfig([]byte(configJSON))
|
||||
|
||||
require.Len(t, config.History, 2)
|
||||
assert.Equal(t, "<missing>", config.History[0].ID)
|
||||
assert.True(t, config.History[0].EmptyLayer)
|
||||
assert.Equal(t, "sha256:layer1", config.History[1].ID)
|
||||
assert.False(t, config.History[1].EmptyLayer)
|
||||
})
|
||||
|
||||
t.Run("config with mixed empty and non-empty layers", func(t *testing.T) {
|
||||
configJSON := `{
|
||||
"history": [
|
||||
{"empty_layer": true},
|
||||
{"empty_layer": false},
|
||||
{"empty_layer": true},
|
||||
{"empty_layer": false}
|
||||
],
|
||||
"rootfs": {
|
||||
"type": "layers",
|
||||
"diff_ids": [
|
||||
"sha256:layer1",
|
||||
"sha256:layer2"
|
||||
]
|
||||
}
|
||||
}`
|
||||
config := newConfig([]byte(configJSON))
|
||||
|
||||
require.Len(t, config.History, 4)
|
||||
assert.Equal(t, "<missing>", config.History[0].ID)
|
||||
assert.Equal(t, "sha256:layer1", config.History[1].ID)
|
||||
assert.Equal(t, "<missing>", config.History[2].ID)
|
||||
assert.Equal(t, "sha256:layer2", config.History[3].ID)
|
||||
})
|
||||
|
||||
t.Run("empty history and rootfs", func(t *testing.T) {
|
||||
configJSON := `{
|
||||
"history": [],
|
||||
"rootfs": {
|
||||
"type": "layers",
|
||||
"diff_ids": []
|
||||
}
|
||||
}`
|
||||
config := newConfig([]byte(configJSON))
|
||||
|
||||
require.Len(t, config.History, 0)
|
||||
require.Len(t, config.RootFs.DiffIds, 0)
|
||||
})
|
||||
|
||||
t.Run("invalid JSON panics", func(t *testing.T) {
|
||||
invalidJSON := `{invalid json}`
|
||||
assert.Panics(t, func() {
|
||||
newConfig([]byte(invalidJSON))
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("config with all history fields", func(t *testing.T) {
|
||||
configJSON := `{
|
||||
"history": [
|
||||
{
|
||||
"id": "original-id",
|
||||
"size": 1024,
|
||||
"created": "2023-01-01T00:00:00Z",
|
||||
"author": "test@example.com",
|
||||
"created_by": "CMD /bin/bash",
|
||||
"empty_layer": false
|
||||
}
|
||||
],
|
||||
"rootfs": {
|
||||
"type": "layers",
|
||||
"diff_ids": ["sha256:abc123"]
|
||||
}
|
||||
}`
|
||||
config := newConfig([]byte(configJSON))
|
||||
|
||||
require.Len(t, config.History, 1)
|
||||
// ID should be replaced by diff_id
|
||||
assert.Equal(t, "sha256:abc123", config.History[0].ID)
|
||||
assert.Equal(t, uint64(1024), config.History[0].Size)
|
||||
assert.Equal(t, "2023-01-01T00:00:00Z", config.History[0].Created)
|
||||
assert.Equal(t, "test@example.com", config.History[0].Author)
|
||||
assert.Equal(t, "CMD /bin/bash", config.History[0].CreatedBy)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewManifest(t *testing.T) {
|
||||
t.Run("valid manifest", func(t *testing.T) {
|
||||
manifestJSON := `[{
|
||||
"Config": "sha256:config.json",
|
||||
"RepoTags": ["test:latest"],
|
||||
"Layers": ["layer1.tar", "layer2.tar"]
|
||||
}]`
|
||||
manifest := newManifest([]byte(manifestJSON))
|
||||
|
||||
assert.Equal(t, "sha256:config.json", manifest.ConfigPath)
|
||||
assert.Equal(t, []string{"test:latest"}, manifest.RepoTags)
|
||||
assert.Equal(t, []string{"layer1.tar", "layer2.tar"}, manifest.LayerTarPaths)
|
||||
})
|
||||
|
||||
t.Run("manifest with multiple entries (returns first)", func(t *testing.T) {
|
||||
manifestJSON := `[{
|
||||
"Config": "config1.json",
|
||||
"RepoTags": ["test:1"],
|
||||
"Layers": ["layer1.tar"]
|
||||
}, {
|
||||
"Config": "config2.json",
|
||||
"RepoTags": ["test:2"],
|
||||
"Layers": ["layer2.tar"]
|
||||
}]`
|
||||
manifest := newManifest([]byte(manifestJSON))
|
||||
|
||||
assert.Equal(t, "config1.json", manifest.ConfigPath)
|
||||
assert.Equal(t, []string{"test:1"}, manifest.RepoTags)
|
||||
})
|
||||
|
||||
t.Run("manifest with empty arrays", func(t *testing.T) {
|
||||
manifestJSON := `[{
|
||||
"Config": "config.json",
|
||||
"RepoTags": [],
|
||||
"Layers": []
|
||||
}]`
|
||||
manifest := newManifest([]byte(manifestJSON))
|
||||
|
||||
assert.Equal(t, "config.json", manifest.ConfigPath)
|
||||
assert.Empty(t, manifest.RepoTags)
|
||||
assert.Empty(t, manifest.LayerTarPaths)
|
||||
})
|
||||
|
||||
t.Run("manifest with multiple repo tags", func(t *testing.T) {
|
||||
manifestJSON := `[{
|
||||
"Config": "config.json",
|
||||
"RepoTags": ["test:latest", "test:v1.0", "myrepo:test"],
|
||||
"Layers": ["layer1.tar"]
|
||||
}]`
|
||||
manifest := newManifest([]byte(manifestJSON))
|
||||
|
||||
assert.Equal(t, []string{"test:latest", "test:v1.0", "myrepo:test"}, manifest.RepoTags)
|
||||
})
|
||||
|
||||
t.Run("invalid JSON panics", func(t *testing.T) {
|
||||
invalidJSON := `[invalid json]`
|
||||
assert.Panics(t, func() {
|
||||
newManifest([]byte(invalidJSON))
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("empty manifest array panics", func(t *testing.T) {
|
||||
emptyJSON := `[]`
|
||||
assert.Panics(t, func() {
|
||||
newManifest([]byte(emptyJSON))
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("manifest with only required fields", func(t *testing.T) {
|
||||
manifestJSON := `[{
|
||||
"Config": "config.json"
|
||||
}]`
|
||||
manifest := newManifest([]byte(manifestJSON))
|
||||
|
||||
assert.Equal(t, "config.json", manifest.ConfigPath)
|
||||
assert.Nil(t, manifest.RepoTags)
|
||||
assert.Nil(t, manifest.LayerTarPaths)
|
||||
})
|
||||
}
|
||||
67
internal/utils/format_test.go
Normal file
67
internal/utils/format_test.go
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCleanArgs(t *testing.T) {
|
||||
t.Run("basic trimming", func(t *testing.T) {
|
||||
input := []string{" hello ", " world", "test "}
|
||||
expected := []string{"hello", "world", "test"}
|
||||
result := CleanArgs(input)
|
||||
assert.Equal(t, expected, result)
|
||||
})
|
||||
|
||||
t.Run("empty strings are removed", func(t *testing.T) {
|
||||
input := []string{"hello", "", "world", ""}
|
||||
expected := []string{"hello", "world"}
|
||||
result := CleanArgs(input)
|
||||
assert.Equal(t, expected, result)
|
||||
})
|
||||
|
||||
t.Run("whitespace only strings are kept but trimmed", func(t *testing.T) {
|
||||
// Note: CleanArgs only removes empty strings (""), not whitespace-only strings
|
||||
// It only trims spaces, not tabs
|
||||
input := []string{" ", " ", "hello", " "}
|
||||
expected := []string{"", "", "hello", ""}
|
||||
result := CleanArgs(input)
|
||||
assert.Equal(t, expected, result)
|
||||
})
|
||||
|
||||
t.Run("empty slice", func(t *testing.T) {
|
||||
input := []string{}
|
||||
var expected []string // nil
|
||||
result := CleanArgs(input)
|
||||
assert.Equal(t, expected, result)
|
||||
})
|
||||
|
||||
t.Run("nil slice", func(t *testing.T) {
|
||||
var input []string
|
||||
var expected []string // nil
|
||||
result := CleanArgs(input)
|
||||
assert.Equal(t, expected, result)
|
||||
})
|
||||
|
||||
t.Run("all empty strings", func(t *testing.T) {
|
||||
input := []string{"", "", ""}
|
||||
var expected []string // nil - all empty strings are removed
|
||||
result := CleanArgs(input)
|
||||
assert.Equal(t, expected, result)
|
||||
})
|
||||
|
||||
t.Run("no trimming needed", func(t *testing.T) {
|
||||
input := []string{"hello", "world"}
|
||||
expected := []string{"hello", "world"}
|
||||
result := CleanArgs(input)
|
||||
assert.Equal(t, expected, result)
|
||||
})
|
||||
|
||||
t.Run("only spaces are trimmed, not tabs", func(t *testing.T) {
|
||||
// Note: CleanArgs only trims spaces (" "), not tabs or other whitespace
|
||||
input := []string{" hello ", " world ", "\thello\t"}
|
||||
expected := []string{"hello", "world", "\thello\t"}
|
||||
result := CleanArgs(input)
|
||||
assert.Equal(t, expected, result)
|
||||
})
|
||||
}
|
||||
49
internal/utils/view_test.go
Normal file
49
internal/utils/view_test.go
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/awesome-gocui/gocui"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIsNewView(t *testing.T) {
|
||||
t.Run("all ErrUnknownView returns true", func(t *testing.T) {
|
||||
errs := []error{gocui.ErrUnknownView, gocui.ErrUnknownView}
|
||||
result := IsNewView(errs...)
|
||||
assert.True(t, result)
|
||||
})
|
||||
|
||||
t.Run("single ErrUnknownView returns true", func(t *testing.T) {
|
||||
result := IsNewView(gocui.ErrUnknownView)
|
||||
assert.True(t, result)
|
||||
})
|
||||
|
||||
t.Run("nil error returns false", func(t *testing.T) {
|
||||
result := IsNewView(nil)
|
||||
assert.False(t, result)
|
||||
})
|
||||
|
||||
t.Run("mix of nil and ErrUnknownView returns false", func(t *testing.T) {
|
||||
errs := []error{nil, gocui.ErrUnknownView}
|
||||
result := IsNewView(errs...)
|
||||
assert.False(t, result)
|
||||
})
|
||||
|
||||
t.Run("other error returns true", func(t *testing.T) {
|
||||
customErr := errors.New("custom error")
|
||||
result := IsNewView(customErr)
|
||||
assert.True(t, result)
|
||||
})
|
||||
|
||||
t.Run("mix of errors returns true", func(t *testing.T) {
|
||||
errs := []error{gocui.ErrUnknownView, errors.New("custom error")}
|
||||
result := IsNewView(errs...)
|
||||
assert.True(t, result)
|
||||
})
|
||||
|
||||
t.Run("no errors returns true", func(t *testing.T) {
|
||||
result := IsNewView()
|
||||
assert.True(t, result)
|
||||
})
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue