fix(security): close audit #4 gaps in workspace, local, s3, datanode

Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
Virgil 2026-03-23 14:30:08 +00:00
parent 7819925cb6
commit 06d7800895
8 changed files with 176 additions and 70 deletions

View file

@ -233,7 +233,7 @@ func (m *Medium) Delete(p string) error {
}
// Remove the file by creating a new DataNode without it
if err := m.removeFileLocked(p); err != nil {
if err := m.removeFilesLocked(map[string]struct{}{p: {}}); err != nil {
return coreerr.E("datanode.Delete", "failed to delete file: "+p, err)
}
return nil
@ -253,10 +253,9 @@ func (m *Medium) DeleteAll(p string) error {
// Check if p itself is a file
info, err := m.dn.Stat(p)
toDelete := make(map[string]struct{})
if err == nil && !info.IsDir() {
if err := m.removeFileLocked(p); err != nil {
return coreerr.E("datanode.DeleteAll", "failed to delete file: "+p, err)
}
toDelete[p] = struct{}{}
found = true
}
@ -267,13 +266,17 @@ func (m *Medium) DeleteAll(p string) error {
}
for _, name := range entries {
if name == p || strings.HasPrefix(name, prefix) {
if err := m.removeFileLocked(name); err != nil {
return coreerr.E("datanode.DeleteAll", "failed to delete file: "+name, err)
}
toDelete[name] = struct{}{}
found = true
}
}
if found {
if err := m.removeFilesLocked(toDelete); err != nil {
return coreerr.E("datanode.DeleteAll", "failed to delete files", err)
}
}
// Remove explicit dirs under prefix
for d := range m.dirs {
if d == p || strings.HasPrefix(d, prefix) {
@ -303,15 +306,10 @@ func (m *Medium) Rename(oldPath, newPath string) error {
if !info.IsDir() {
// Read old, write new, delete old
data, err := m.readFileLocked(oldPath)
if err != nil {
if err := m.rewriteDataNodeLocked(map[string]string{oldPath: newPath}); err != nil {
return coreerr.E("datanode.Rename", "failed to read source file: "+oldPath, err)
}
m.dn.AddData(newPath, data)
m.ensureDirsLocked(path.Dir(newPath))
if err := m.removeFileLocked(oldPath); err != nil {
return coreerr.E("datanode.Rename", "failed to remove source file: "+oldPath, err)
}
return nil
}
@ -323,19 +321,15 @@ func (m *Medium) Rename(oldPath, newPath string) error {
if err != nil {
return coreerr.E("datanode.Rename", "failed to inspect tree: "+oldPath, err)
}
renames := make(map[string]string)
for _, name := range entries {
if strings.HasPrefix(name, oldPrefix) {
newName := newPrefix + strings.TrimPrefix(name, oldPrefix)
data, err := m.readFileLocked(name)
if err != nil {
return coreerr.E("datanode.Rename", "failed to read source file: "+name, err)
}
m.dn.AddData(newName, data)
if err := m.removeFileLocked(name); err != nil {
return coreerr.E("datanode.Rename", "failed to remove source file: "+name, err)
}
renames[name] = newPrefix + strings.TrimPrefix(name, oldPrefix)
}
}
if err := m.rewriteDataNodeLocked(renames); err != nil {
return coreerr.E("datanode.Rename", "failed to move source files", err)
}
// Move explicit dirs
dirsToMove := make(map[string]string)
@ -560,20 +554,38 @@ func (m *Medium) readFileLocked(name string) ([]byte, error) {
// removeFileLocked deletes a single file by rebuilding the DataNode
// without the target entry.
// This is necessary because Borg's DataNode doesn't expose a Remove method.
// Caller must hold m.mu write lock.
func (m *Medium) removeFileLocked(target string) error {
	return m.removeFilesLocked(map[string]struct{}{target: {}})
}
// removeFilesLocked deletes every path in targets from the DataNode in a
// single rewrite pass.
// Caller must hold m.mu write lock.
func (m *Medium) removeFilesLocked(targets map[string]struct{}) error {
	drop := make(map[string]string, len(targets))
	for name := range targets {
		// An empty destination marks the entry for removal during rewrite.
		drop[name] = ""
	}
	return m.rewriteDataNodeLocked(drop)
}
func (m *Medium) rewriteDataNodeLocked(renames map[string]string) error {
entries, err := m.collectAllLocked()
if err != nil {
return err
}
newDN := borgdatanode.New()
for _, name := range entries {
if name == target {
targetName, ok := renames[name]
if ok && targetName == "" {
continue
}
writeName := name
if ok {
writeName = targetName
}
data, err := m.readFileLocked(name)
if err != nil {
return err
}
newDN.AddData(name, data)
newDN.AddData(writeName, data)
}
m.dn = newDN
return nil

View file

@ -215,7 +215,34 @@ func TestRenameDir_Bad_ReadFailure(t *testing.T) {
err := m.Rename("src", "dst")
require.Error(t, err)
assert.Contains(t, err.Error(), "failed to read source file")
assert.Contains(t, err.Error(), "failed to move source files")
}
// TestRenameDir_Bad_AtomicFailure verifies that a read failure partway
// through a directory rename leaves the tree untouched: the source files
// survive and no destination files appear.
func TestRenameDir_Bad_AtomicFailure(t *testing.T) {
	m := New()
	require.NoError(t, m.Write("src/a.txt", "one"))
	require.NoError(t, m.Write("src/b.txt", "two"))

	realReadAll := dataNodeReadAll
	t.Cleanup(func() {
		dataNodeReadAll = realReadAll
	})
	calls := 0
	// Fail the second read to simulate a mid-rename I/O error.
	dataNodeReadAll = func(r io.Reader) ([]byte, error) {
		calls++
		if calls == 2 {
			return nil, errors.New("read failed")
		}
		return realReadAll(r)
	}

	err := m.Rename("src", "dst")
	require.Error(t, err)
	for _, p := range []string{"src/a.txt", "src/b.txt"} {
		assert.True(t, m.IsFile(p))
	}
	for _, p := range []string{"dst/a.txt", "dst/b.txt"} {
		assert.False(t, m.IsFile(p))
	}
}
func TestList_Good(t *testing.T) {

View file

@ -154,11 +154,22 @@ func canonicalPath(p string) string {
return absolutePath(p)
}
func osUserHomeDir() string {
home, err := os.UserHomeDir()
if err != nil {
return ""
}
return home
}
func isProtectedPath(full string) bool {
full = canonicalPath(full)
protected := map[string]struct{}{
canonicalPath(dirSeparator()): {},
}
if home := osUserHomeDir(); home != "" {
protected[canonicalPath(home)] = struct{}{}
}
for _, home := range []string{core.Env("HOME"), core.Env("DIR_HOME")} {
if home == "" {
continue

View file

@ -198,6 +198,21 @@ func TestDeleteAll_ProtectedHomeViaEnv(t *testing.T) {
assert.DirExists(t, tempHome)
}
// TestDelete_ProtectedHomeBypassesEnvHijack checks that pointing HOME at a
// scratch directory does not make the real user home deletable.
func TestDelete_ProtectedHomeBypassesEnvHijack(t *testing.T) {
	home, err := os.UserHomeDir()
	require.NoError(t, err)
	require.NotEmpty(t, home)

	t.Setenv("HOME", t.TempDir())

	m, err := New("/")
	require.NoError(t, err)
	require.Error(t, m.Delete(home))
	assert.DirExists(t, home)
}
func TestRename(t *testing.T) {
root := t.TempDir()
m, _ := New(root)

View file

@ -195,6 +195,28 @@ func (m *Medium) FileSet(p, content string) error {
return m.Write(p, content)
}
// deleteObjectBatch removes the given keys from the bucket with a single
// DeleteObjects call. prefix is only used to label partial-failure errors.
// An empty key list is a no-op.
func (m *Medium) deleteObjectBatch(prefix string, keys []string) error {
	if len(keys) == 0 {
		return nil
	}
	ids := make([]types.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, types.ObjectIdentifier{Key: aws.String(k)})
	}
	out, err := m.client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{
		Bucket: aws.String(m.bucket),
		Delete: &types.Delete{Objects: ids, Quiet: aws.Bool(true)},
	})
	if err != nil {
		return coreerr.E("s3.DeleteAll", "failed to delete objects", err)
	}
	// Quiet mode still reports per-key failures in out.Errors.
	return deleteObjectsError(prefix, out.Errors)
}
// Delete removes a single object.
func (m *Medium) Delete(p string) error {
key := m.key(p)
@ -219,16 +241,7 @@ func (m *Medium) DeleteAll(p string) error {
return coreerr.E("s3.DeleteAll", "path is required", os.ErrInvalid)
}
// First, try deleting the exact key
_, err := m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
Bucket: aws.String(m.bucket),
Key: aws.String(key),
})
if err != nil {
return coreerr.E("s3.DeleteAll", "failed to delete object: "+key, err)
}
// Then delete all objects under the prefix
deleteKeys := []string{key}
prefix := key
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
@ -247,26 +260,20 @@ func (m *Medium) DeleteAll(p string) error {
return coreerr.E("s3.DeleteAll", "failed to list objects: "+prefix, err)
}
for _, obj := range listOut.Contents {
deleteKeys = append(deleteKeys, aws.ToString(obj.Key))
if len(deleteKeys) == 1000 {
if err := m.deleteObjectBatch(prefix, deleteKeys); err != nil {
return err
}
deleteKeys = nil
}
}
if len(listOut.Contents) == 0 {
break
}
objects := make([]types.ObjectIdentifier, len(listOut.Contents))
for i, obj := range listOut.Contents {
objects[i] = types.ObjectIdentifier{Key: obj.Key}
}
deleteOut, err := m.client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{
Bucket: aws.String(m.bucket),
Delete: &types.Delete{Objects: objects, Quiet: aws.Bool(true)},
})
if err != nil {
return coreerr.E("s3.DeleteAll", "failed to delete objects", err)
}
if err := deleteObjectsError(prefix, deleteOut.Errors); err != nil {
return err
}
if listOut.IsTruncated != nil && *listOut.IsTruncated {
continuationToken = listOut.NextContinuationToken
} else {
@ -274,6 +281,12 @@ func (m *Medium) DeleteAll(p string) error {
}
}
if len(deleteKeys) > 0 {
if err := m.deleteObjectBatch(prefix, deleteKeys); err != nil {
return err
}
}
return nil
}

View file

@ -3,7 +3,6 @@ package s3
import (
"bytes"
"context"
"errors"
"fmt"
goio "io"
"io/fs"
@ -365,11 +364,18 @@ func TestDeleteAll_Bad_EmptyPath(t *testing.T) {
func TestDeleteAll_Bad_DeleteObjectError(t *testing.T) {
m, mock := newTestMedium(t)
mock.deleteObjectErrors["dir"] = errors.New("boom")
require.NoError(t, m.Write("dir", "metadata"))
mock.deleteObjectsErrs["dir"] = types.Error{
Key: aws.String("dir"),
Code: aws.String("AccessDenied"),
Message: aws.String("blocked"),
}
err := m.DeleteAll("dir")
require.Error(t, err)
assert.Contains(t, err.Error(), "failed to delete object: dir")
assert.Contains(t, err.Error(), "partial delete failed")
assert.Contains(t, err.Error(), "dir: AccessDenied blocked")
assert.True(t, m.IsFile("dir"))
}
func TestDeleteAll_Bad_PartialDelete(t *testing.T) {

View file

@ -197,15 +197,6 @@ func workspaceHome() string {
return core.Env("DIR_HOME")
}
// joinWithinRoot joins parts onto root and returns the result only when it
// stays inside root; otherwise it returns os.ErrPermission.
func joinWithinRoot(root string, parts ...string) (string, error) {
	candidate := core.Path(append([]string{root}, parts...)...)
	// NOTE(review): "DS" presumably holds the platform directory separator —
	// confirm against core.Env's configuration.
	sep := core.Env("DS")
	// Accept root itself, or anything strictly below root + separator.
	if candidate == root || strings.HasPrefix(candidate, root+sep) {
		return candidate, nil
	}
	return "", os.ErrPermission
}
func resolveWorkspacePath(rootPath, workspacePath string) error {
resolvedRoot, err := filepath.EvalSymlinks(rootPath)
if err != nil {
@ -234,23 +225,46 @@ func resolveWorkspacePath(rootPath, workspacePath string) error {
return nil
}
// joinWithinRoot joins parts onto root, cleans the result, and returns it
// only when it stays inside root; otherwise it returns os.ErrPermission.
func joinWithinRoot(root string, parts ...string) (string, error) {
	joined := filepath.Clean(core.Path(append([]string{root}, parts...)...))
	inside := joined == root ||
		strings.HasPrefix(joined, root+string(os.PathSeparator))
	if !inside {
		return "", os.ErrPermission
	}
	return joined, nil
}
func (s *Service) workspacePath(op, name string) (string, error) {
if name == "" {
return "", coreerr.E(op, "workspace name is required", os.ErrInvalid)
}
path, err := joinWithinRoot(s.rootPath, name)
if err != nil {
return "", coreerr.E(op, "workspace path escapes root", err)
}
if core.PathDir(path) != s.rootPath {
return "", coreerr.E(op, "invalid workspace name: "+name, os.ErrPermission)
}
path := filepath.Clean(core.Path(s.rootPath, name))
if err := resolveWorkspacePath(s.rootPath, path); err != nil {
if errors.Is(err, os.ErrPermission) {
return "", coreerr.E(op, "workspace path escapes root", err)
}
return "", coreerr.E(op, "failed to resolve workspace path", err)
}
rel, err := filepath.Rel(s.rootPath, path)
if err != nil {
return "", coreerr.E(op, "failed to resolve workspace path", err)
}
if rel == "." {
return "", coreerr.E(op, "invalid workspace name: "+name, os.ErrInvalid)
}
if rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return "", coreerr.E(op, "workspace path escapes root", os.ErrPermission)
}
if strings.Contains(rel, string(os.PathSeparator)) {
return "", coreerr.E(op, "invalid workspace name: "+name, os.ErrInvalid)
}
if core.PathDir(path) != s.rootPath {
return "", coreerr.E(op, "invalid workspace name: "+name, os.ErrPermission)
}
if path == s.rootPath {
return "", coreerr.E(op, "invalid workspace name: "+name, os.ErrInvalid)
}
return path, nil
}

View file

@ -67,6 +67,14 @@ func TestSwitchWorkspace_TraversalBlocked(t *testing.T) {
assert.Empty(t, s.activeWorkspace)
}
// TestSwitchWorkspace_DotNameBlocked ensures "." is rejected as a workspace
// name and that no workspace becomes active.
func TestSwitchWorkspace_DotNameBlocked(t *testing.T) {
	s, _ := newTestService(t)
	require.Error(t, s.SwitchWorkspace("."))
	assert.Empty(t, s.activeWorkspace)
}
func TestSwitchWorkspace_SymlinkEscapeBlocked(t *testing.T) {
s, tempHome := newTestService(t)