[agent/codex:review] Review the last commit. Check for bugs, security, missing te... #7

Closed
Virgil wants to merge 4 commits from agent/deep-audit-per-issue--4--read-claude-md into dev
12 changed files with 882 additions and 171 deletions

View file

@ -17,14 +17,26 @@ import (
"sync"
"time"
borgdatanode "forge.lthn.ai/Snider/Borg/pkg/datanode"
coreerr "forge.lthn.ai/core/go-log"
"forge.lthn.ai/Snider/Borg/pkg/datanode"
)
// Test seams: package-level function variables wrapping the fs/io calls
// used by Medium, so tests can stub out failures (walk, open, read).
// NOTE(review): tests that reassign these must restore them via t.Cleanup
// and must not run in parallel, since this is shared package state.
var (
	// dataNodeWalkDir wraps fs.WalkDir for tree traversal.
	dataNodeWalkDir = func(fsys fs.FS, root string, fn fs.WalkDirFunc) error {
		return fs.WalkDir(fsys, root, fn)
	}
	// dataNodeOpen wraps DataNode.Open for reading a single file.
	dataNodeOpen = func(dn *borgdatanode.DataNode, name string) (fs.File, error) {
		return dn.Open(name)
	}
	// dataNodeReadAll wraps io.ReadAll for draining an opened file.
	dataNodeReadAll = func(r goio.Reader) ([]byte, error) {
		return goio.ReadAll(r)
	}
)
// Medium is an in-memory storage backend backed by a Borg DataNode.
// All paths are relative (no leading slash). Thread-safe via RWMutex.
type Medium struct {
dn *datanode.DataNode
dn *borgdatanode.DataNode
dirs map[string]bool // explicit directory tracking
mu sync.RWMutex
}
@ -32,14 +44,14 @@ type Medium struct {
// New creates a new empty DataNode Medium.
func New() *Medium {
return &Medium{
dn: datanode.New(),
dn: borgdatanode.New(),
dirs: make(map[string]bool),
}
}
// FromTar creates a Medium from a tarball, restoring all files.
func FromTar(data []byte) (*Medium, error) {
dn, err := datanode.FromTar(data)
dn, err := borgdatanode.FromTar(data)
if err != nil {
return nil, coreerr.E("datanode.FromTar", "failed to restore", err)
}
@ -63,7 +75,7 @@ func (m *Medium) Snapshot() ([]byte, error) {
// Restore replaces the filesystem contents from a tarball.
func (m *Medium) Restore(data []byte) error {
dn, err := datanode.FromTar(data)
dn, err := borgdatanode.FromTar(data)
if err != nil {
return coreerr.E("datanode.Restore", "tar failed", err)
}
@ -76,7 +88,7 @@ func (m *Medium) Restore(data []byte) error {
// DataNode returns the underlying Borg DataNode.
// Use this to wrap the filesystem in a TIM container.
func (m *Medium) DataNode() *datanode.DataNode {
func (m *Medium) DataNode() *borgdatanode.DataNode {
m.mu.RLock()
defer m.mu.RUnlock()
return m.dn
@ -195,7 +207,11 @@ func (m *Medium) Delete(p string) error {
// Check explicit dirs
if m.dirs[p] {
// Check if dir is empty
if m.hasPrefixLocked(p + "/") {
hasChildren, err := m.hasPrefixLocked(p + "/")
if err != nil {
return coreerr.E("datanode.Delete", "failed to inspect directory: "+p, err)
}
if hasChildren {
return coreerr.E("datanode.Delete", "directory not empty: "+p, os.ErrExist)
}
delete(m.dirs, p)
@ -205,7 +221,11 @@ func (m *Medium) Delete(p string) error {
}
if info.IsDir() {
if m.hasPrefixLocked(p + "/") {
hasChildren, err := m.hasPrefixLocked(p + "/")
if err != nil {
return coreerr.E("datanode.Delete", "failed to inspect directory: "+p, err)
}
if hasChildren {
return coreerr.E("datanode.Delete", "directory not empty: "+p, os.ErrExist)
}
delete(m.dirs, p)
@ -213,7 +233,9 @@ func (m *Medium) Delete(p string) error {
}
// Remove the file by creating a new DataNode without it
m.removeFileLocked(p)
if err := m.removeFilesLocked(map[string]struct{}{p: {}}); err != nil {
return coreerr.E("datanode.Delete", "failed to delete file: "+p, err)
}
return nil
}
@ -231,20 +253,30 @@ func (m *Medium) DeleteAll(p string) error {
// Check if p itself is a file
info, err := m.dn.Stat(p)
toDelete := make(map[string]struct{})
if err == nil && !info.IsDir() {
m.removeFileLocked(p)
toDelete[p] = struct{}{}
found = true
}
// Remove all files under prefix
entries, _ := m.collectAllLocked()
entries, err := m.collectAllLocked()
if err != nil {
return coreerr.E("datanode.DeleteAll", "failed to inspect tree: "+p, err)
}
for _, name := range entries {
if name == p || strings.HasPrefix(name, prefix) {
m.removeFileLocked(name)
toDelete[name] = struct{}{}
found = true
}
}
if found {
if err := m.removeFilesLocked(toDelete); err != nil {
return coreerr.E("datanode.DeleteAll", "failed to delete files", err)
}
}
// Remove explicit dirs under prefix
for d := range m.dirs {
if d == p || strings.HasPrefix(d, prefix) {
@ -274,18 +306,10 @@ func (m *Medium) Rename(oldPath, newPath string) error {
if !info.IsDir() {
// Read old, write new, delete old
f, err := m.dn.Open(oldPath)
if err != nil {
return coreerr.E("datanode.Rename", "open failed: "+oldPath, err)
if err := m.rewriteDataNodeLocked(map[string]string{oldPath: newPath}); err != nil {
return coreerr.E("datanode.Rename", "failed to read source file: "+oldPath, err)
}
data, err := goio.ReadAll(f)
f.Close()
if err != nil {
return coreerr.E("datanode.Rename", "read failed: "+oldPath, err)
}
m.dn.AddData(newPath, data)
m.ensureDirsLocked(path.Dir(newPath))
m.removeFileLocked(oldPath)
return nil
}
@ -293,20 +317,19 @@ func (m *Medium) Rename(oldPath, newPath string) error {
oldPrefix := oldPath + "/"
newPrefix := newPath + "/"
entries, _ := m.collectAllLocked()
entries, err := m.collectAllLocked()
if err != nil {
return coreerr.E("datanode.Rename", "failed to inspect tree: "+oldPath, err)
}
renames := make(map[string]string)
for _, name := range entries {
if strings.HasPrefix(name, oldPrefix) {
newName := newPrefix + strings.TrimPrefix(name, oldPrefix)
f, err := m.dn.Open(name)
if err != nil {
continue
}
data, _ := goio.ReadAll(f)
f.Close()
m.dn.AddData(newName, data)
m.removeFileLocked(name)
renames[name] = newPrefix + strings.TrimPrefix(name, oldPrefix)
}
}
if err := m.rewriteDataNodeLocked(renames); err != nil {
return coreerr.E("datanode.Rename", "failed to move source files", err)
}
// Move explicit dirs
dirsToMove := make(map[string]string)
@ -416,10 +439,13 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) {
// Read existing content
var existing []byte
m.mu.RLock()
f, err := m.dn.Open(p)
if err == nil {
existing, _ = goio.ReadAll(f)
f.Close()
if m.IsFile(p) {
data, err := m.readFileLocked(p)
if err != nil {
m.mu.RUnlock()
return nil, coreerr.E("datanode.Append", "failed to read existing content: "+p, err)
}
existing = data
}
m.mu.RUnlock()
@ -475,27 +501,30 @@ func (m *Medium) IsDir(p string) bool {
// --- internal helpers ---
// hasPrefixLocked checks if any file path starts with prefix. Caller holds lock.
func (m *Medium) hasPrefixLocked(prefix string) bool {
entries, _ := m.collectAllLocked()
func (m *Medium) hasPrefixLocked(prefix string) (bool, error) {
entries, err := m.collectAllLocked()
if err != nil {
return false, err
}
for _, name := range entries {
if strings.HasPrefix(name, prefix) {
return true
return true, nil
}
}
for d := range m.dirs {
if strings.HasPrefix(d, prefix) {
return true
return true, nil
}
}
return false
return false, nil
}
// collectAllLocked returns all file paths in the DataNode. Caller holds lock.
func (m *Medium) collectAllLocked() ([]string, error) {
var names []string
err := fs.WalkDir(m.dn, ".", func(p string, d fs.DirEntry, err error) error {
err := dataNodeWalkDir(m.dn, ".", func(p string, d fs.DirEntry, err error) error {
if err != nil {
return nil
return err
}
if !d.IsDir() {
names = append(names, p)
@ -505,28 +534,61 @@ func (m *Medium) collectAllLocked() ([]string, error) {
return names, err
}
// readFileLocked opens name from the DataNode and returns its full contents.
// Caller must hold m.mu (read or write lock).
// The file is always closed; a read error takes precedence over a close
// error, and either one fails the call so partial data is never returned.
func (m *Medium) readFileLocked(name string) ([]byte, error) {
	f, err := dataNodeOpen(m.dn, name)
	if err != nil {
		return nil, err
	}
	data, readErr := dataNodeReadAll(f)
	closeErr := f.Close()
	if readErr != nil {
		return nil, readErr
	}
	if closeErr != nil {
		return nil, closeErr
	}
	return data, nil
}
// removeFileLocked removes a single file by rebuilding the DataNode.
// This is necessary because Borg's DataNode doesn't expose a Remove method.
// Caller must hold m.mu write lock.
func (m *Medium) removeFileLocked(target string) {
entries, _ := m.collectAllLocked()
newDN := datanode.New()
func (m *Medium) removeFileLocked(target string) error {
exclude := map[string]struct{}{target: {}}
return m.removeFilesLocked(exclude)
}
func (m *Medium) removeFilesLocked(targets map[string]struct{}) error {
renames := make(map[string]string)
for target := range targets {
renames[target] = ""
}
return m.rewriteDataNodeLocked(renames)
}
func (m *Medium) rewriteDataNodeLocked(renames map[string]string) error {
entries, err := m.collectAllLocked()
if err != nil {
return err
}
newDN := borgdatanode.New()
for _, name := range entries {
if name == target {
targetName, ok := renames[name]
if ok && targetName == "" {
continue
}
f, err := m.dn.Open(name)
writeName := name
if ok {
writeName = targetName
}
data, err := m.readFileLocked(name)
if err != nil {
continue
return err
}
data, err := goio.ReadAll(f)
f.Close()
if err != nil {
continue
}
newDN.AddData(name, data)
newDN.AddData(writeName, data)
}
m.dn = newDN
return nil
}
// --- writeCloser buffers writes and flushes to DataNode on Close ---

View file

@ -1,7 +1,9 @@
package datanode
import (
"errors"
"io"
"io/fs"
"testing"
coreio "dappco.re/go/core/io"
@ -102,6 +104,23 @@ func TestDelete_Bad(t *testing.T) {
assert.Error(t, m.Delete("dir"))
}
// TestDelete_Bad_DirectoryInspectionFailure verifies that Delete surfaces a
// walk error raised while checking whether a tracked directory is empty.
func TestDelete_Bad_DirectoryInspectionFailure(t *testing.T) {
	m := New()
	require.NoError(t, m.Write("dir/file.txt", "content"))
	// Stub the walk hook so the emptiness check fails; restore on cleanup.
	original := dataNodeWalkDir
	dataNodeWalkDir = func(_ fs.FS, _ string, _ fs.WalkDirFunc) error {
		return errors.New("walk failed")
	}
	t.Cleanup(func() {
		dataNodeWalkDir = original
	})
	err := m.Delete("dir")
	require.Error(t, err)
	assert.Contains(t, err.Error(), "failed to inspect directory")
}
func TestDeleteAll_Good(t *testing.T) {
m := New()
@ -116,6 +135,41 @@ func TestDeleteAll_Good(t *testing.T) {
assert.True(t, m.Exists("keep.txt"))
}
// TestDeleteAll_Bad_WalkFailure verifies that DeleteAll surfaces a walk
// error raised while collecting the file tree under the target prefix.
func TestDeleteAll_Bad_WalkFailure(t *testing.T) {
	m := New()
	require.NoError(t, m.Write("tree/a.txt", "a"))
	// Stub the walk hook so tree collection fails; restore on cleanup.
	original := dataNodeWalkDir
	dataNodeWalkDir = func(_ fs.FS, _ string, _ fs.WalkDirFunc) error {
		return errors.New("walk failed")
	}
	t.Cleanup(func() {
		dataNodeWalkDir = original
	})
	err := m.DeleteAll("tree")
	require.Error(t, err)
	assert.Contains(t, err.Error(), "failed to inspect tree")
}
// TestDelete_Bad_RemoveFailure verifies that Delete returns an error when
// the DataNode rebuild cannot read a file, and that the failure is atomic:
// no files are lost from the medium.
func TestDelete_Bad_RemoveFailure(t *testing.T) {
	m := New()
	require.NoError(t, m.Write("keep.txt", "keep"))
	require.NoError(t, m.Write("bad.txt", "bad"))
	// Force every read during the rebuild to fail; restore on cleanup.
	original := dataNodeReadAll
	dataNodeReadAll = func(_ io.Reader) ([]byte, error) {
		return nil, errors.New("read failed")
	}
	t.Cleanup(func() {
		dataNodeReadAll = original
	})
	err := m.Delete("bad.txt")
	require.Error(t, err)
	assert.Contains(t, err.Error(), "failed to delete file")
	// The failed delete must leave the medium untouched. "keep.txt" was
	// written for exactly this check but was previously never asserted.
	assert.True(t, m.IsFile("keep.txt"))
	assert.True(t, m.IsFile("bad.txt"))
}
func TestRename_Good(t *testing.T) {
m := New()
@ -147,6 +201,50 @@ func TestRenameDir_Good(t *testing.T) {
assert.Equal(t, "package b", got)
}
// TestRenameDir_Bad_ReadFailure verifies that a directory rename fails
// cleanly when the read hook errors, and that the source tree survives.
func TestRenameDir_Bad_ReadFailure(t *testing.T) {
	m := New()
	require.NoError(t, m.Write("src/a.go", "package a"))
	// Force every read during the rebuild to fail; restore on cleanup.
	original := dataNodeReadAll
	dataNodeReadAll = func(_ io.Reader) ([]byte, error) {
		return nil, errors.New("read failed")
	}
	t.Cleanup(func() {
		dataNodeReadAll = original
	})
	err := m.Rename("src", "dst")
	require.Error(t, err)
	assert.Contains(t, err.Error(), "failed to move source files")
	// Consistent with TestRenameDir_Bad_AtomicFailure: a failed rename must
	// leave the source intact and create nothing at the destination.
	assert.True(t, m.IsFile("src/a.go"))
	assert.False(t, m.IsFile("dst/a.go"))
}
// TestRenameDir_Bad_AtomicFailure verifies that a directory rename which
// fails midway (the second read errors) leaves the medium unchanged: all
// source files survive and no destination files are created.
func TestRenameDir_Bad_AtomicFailure(t *testing.T) {
	m := New()
	require.NoError(t, m.Write("src/a.txt", "one"))
	require.NoError(t, m.Write("src/b.txt", "two"))
	// Fail only the second read so the rebuild aborts partway through.
	readCalls := 0
	originalReadAll := dataNodeReadAll
	dataNodeReadAll = func(r io.Reader) ([]byte, error) {
		readCalls++
		if readCalls == 2 {
			return nil, errors.New("read failed")
		}
		return originalReadAll(r)
	}
	t.Cleanup(func() {
		dataNodeReadAll = originalReadAll
	})
	err := m.Rename("src", "dst")
	require.Error(t, err)
	// Atomicity: the original DataNode must still be in place.
	assert.True(t, m.IsFile("src/a.txt"))
	assert.True(t, m.IsFile("src/b.txt"))
	assert.False(t, m.IsFile("dst/a.txt"))
	assert.False(t, m.IsFile("dst/b.txt"))
}
func TestList_Good(t *testing.T) {
m := New()
@ -230,6 +328,23 @@ func TestCreateAppend_Good(t *testing.T) {
assert.Equal(t, "hello world", got)
}
// TestAppend_Bad_ReadFailure verifies that Append propagates a read error
// encountered while loading the existing file contents.
// NOTE(review): the Append implementation calls m.IsFile while already
// holding m.mu.RLock; if IsFile re-acquires the read lock, a writer queued
// between the two acquisitions can deadlock — confirm IsFile's locking.
func TestAppend_Bad_ReadFailure(t *testing.T) {
	m := New()
	require.NoError(t, m.Write("new.txt", "hello"))
	// Force the existing-content read to fail; restore on cleanup.
	original := dataNodeReadAll
	dataNodeReadAll = func(_ io.Reader) ([]byte, error) {
		return nil, errors.New("read failed")
	}
	t.Cleanup(func() {
		dataNodeReadAll = original
	})
	_, err := m.Append("new.txt")
	require.Error(t, err)
	assert.Contains(t, err.Error(), "failed to read existing content")
}
func TestStreams_Good(t *testing.T) {
m := New()

112
docs/api-contract-scan.md Normal file
View file

@ -0,0 +1,112 @@
# API Contract Scan
`CODEX.md` was not present under `/workspace`; conventions were taken from `CLAUDE.md`.
Coverage is `yes` when package tests either execute the exported function/method (`go test -coverprofile`) or reference the exported type name directly.
| Name | Signature | Package Path | Description | Test Coverage |
| --- | --- | --- | --- | --- |
| FromTar | `func FromTar(data []byte) (*Medium, error)` | `dappco.re/go/core/io/datanode` | FromTar creates a Medium from a tarball, restoring all files. | yes |
| New | `func New() *Medium` | `dappco.re/go/core/io/datanode` | New creates a new empty DataNode Medium. | yes |
| Medium.Append | `func (m *Medium) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/datanode` | Opens a file for appending and returns a writer. | yes |
| Medium.Create | `func (m *Medium) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/datanode` | Creates or truncates a file and returns a writer. | yes |
| Medium.DataNode | `func (m *Medium) DataNode() *borgdatanode.DataNode` | `dappco.re/go/core/io/datanode` | DataNode returns the underlying Borg DataNode. | yes |
| Medium.Delete | `func (m *Medium) Delete(p string) error` | `dappco.re/go/core/io/datanode` | Deletes a file or empty directory. | yes |
| Medium.DeleteAll | `func (m *Medium) DeleteAll(p string) error` | `dappco.re/go/core/io/datanode` | Deletes a file or directory tree recursively. | yes |
| Medium.EnsureDir | `func (m *Medium) EnsureDir(p string) error` | `dappco.re/go/core/io/datanode` | Ensures a directory exists. | yes |
| Medium.Exists | `func (m *Medium) Exists(p string) bool` | `dappco.re/go/core/io/datanode` | Reports whether a path exists. | yes |
| Medium.FileGet | `func (m *Medium) FileGet(p string) (string, error)` | `dappco.re/go/core/io/datanode` | Alias for Read. | yes |
| Medium.FileSet | `func (m *Medium) FileSet(p, content string) error` | `dappco.re/go/core/io/datanode` | Alias for Write. | yes |
| Medium.IsDir | `func (m *Medium) IsDir(p string) bool` | `dappco.re/go/core/io/datanode` | Reports whether a path exists and is a directory. | yes |
| Medium.IsFile | `func (m *Medium) IsFile(p string) bool` | `dappco.re/go/core/io/datanode` | Reports whether a path exists and is a regular file. | yes |
| Medium.List | `func (m *Medium) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/datanode` | Lists directory entries under the path. | yes |
| Medium.Open | `func (m *Medium) Open(p string) (fs.File, error)` | `dappco.re/go/core/io/datanode` | Opens a file for reading. | yes |
| Medium.Read | `func (m *Medium) Read(p string) (string, error)` | `dappco.re/go/core/io/datanode` | Reads file contents as a string. | yes |
| Medium.ReadStream | `func (m *Medium) ReadStream(p string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/datanode` | Opens a streaming reader for a file. | yes |
| Medium.Rename | `func (m *Medium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/datanode` | Renames or moves a file or directory. | yes |
| Medium.Restore | `func (m *Medium) Restore(data []byte) error` | `dappco.re/go/core/io/datanode` | Restore replaces the filesystem contents from a tarball. | yes |
| Medium.Snapshot | `func (m *Medium) Snapshot() ([]byte, error)` | `dappco.re/go/core/io/datanode` | Snapshot serializes the entire filesystem to a tarball. | yes |
| Medium.Stat | `func (m *Medium) Stat(p string) (fs.FileInfo, error)` | `dappco.re/go/core/io/datanode` | Returns file metadata for the path. | yes |
| Medium.Write | `func (m *Medium) Write(p, content string) error` | `dappco.re/go/core/io/datanode` | Writes string content to a file. | yes |
| Medium.WriteMode | `func (m *Medium) WriteMode(p, content string, mode os.FileMode) error` | `dappco.re/go/core/io/datanode` | Writes content to a file with an explicit mode. | no |
| Medium.WriteStream | `func (m *Medium) WriteStream(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/datanode` | Opens a streaming writer for a file. | yes |
| Medium | `type Medium struct` | `dappco.re/go/core/io/datanode` | Medium is an in-memory storage backend backed by a Borg DataNode. | yes |
| New | `func New(root string) (*Medium, error)` | `dappco.re/go/core/io/local` | New creates a new local Medium rooted at the given directory. | yes |
| Medium.Append | `func (m *Medium) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/local` | Append opens the named file for appending, creating it if it doesn't exist. | no |
| Medium.Create | `func (m *Medium) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/local` | Create creates or truncates the named file. | yes |
| Medium.Delete | `func (m *Medium) Delete(p string) error` | `dappco.re/go/core/io/local` | Delete removes a file or empty directory. | yes |
| Medium.DeleteAll | `func (m *Medium) DeleteAll(p string) error` | `dappco.re/go/core/io/local` | DeleteAll removes a file or directory recursively. | yes |
| Medium.EnsureDir | `func (m *Medium) EnsureDir(p string) error` | `dappco.re/go/core/io/local` | EnsureDir creates directory if it doesn't exist. | yes |
| Medium.Exists | `func (m *Medium) Exists(p string) bool` | `dappco.re/go/core/io/local` | Exists returns true if path exists. | yes |
| Medium.FileGet | `func (m *Medium) FileGet(p string) (string, error)` | `dappco.re/go/core/io/local` | FileGet is an alias for Read. | yes |
| Medium.FileSet | `func (m *Medium) FileSet(p, content string) error` | `dappco.re/go/core/io/local` | FileSet is an alias for Write. | yes |
| Medium.IsDir | `func (m *Medium) IsDir(p string) bool` | `dappco.re/go/core/io/local` | IsDir returns true if path is a directory. | yes |
| Medium.IsFile | `func (m *Medium) IsFile(p string) bool` | `dappco.re/go/core/io/local` | IsFile returns true if path is a regular file. | yes |
| Medium.List | `func (m *Medium) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/local` | List returns directory entries. | yes |
| Medium.Open | `func (m *Medium) Open(p string) (fs.File, error)` | `dappco.re/go/core/io/local` | Open opens the named file for reading. | yes |
| Medium.Read | `func (m *Medium) Read(p string) (string, error)` | `dappco.re/go/core/io/local` | Read returns file contents as string. | yes |
| Medium.ReadStream | `func (m *Medium) ReadStream(path string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/local` | ReadStream returns a reader for the file content. | yes |
| Medium.Rename | `func (m *Medium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/local` | Rename moves a file or directory. | yes |
| Medium.Stat | `func (m *Medium) Stat(p string) (fs.FileInfo, error)` | `dappco.re/go/core/io/local` | Stat returns file info. | yes |
| Medium.Write | `func (m *Medium) Write(p, content string) error` | `dappco.re/go/core/io/local` | Write saves content to file, creating parent directories as needed. | yes |
| Medium.WriteMode | `func (m *Medium) WriteMode(p, content string, mode os.FileMode) error` | `dappco.re/go/core/io/local` | WriteMode saves content to file with explicit permissions. | yes |
| Medium.WriteStream | `func (m *Medium) WriteStream(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/local` | WriteStream returns a writer for the file content. | yes |
| Medium | `type Medium struct` | `dappco.re/go/core/io/local` | Medium is a local filesystem storage backend. | yes |
| New | `func New(bucket string, opts ...Option) (*Medium, error)` | `dappco.re/go/core/io/s3` | New creates a new S3 Medium for the given bucket. | yes |
| WithClient | `func WithClient(client *s3.Client) Option` | `dappco.re/go/core/io/s3` | WithClient sets the S3 client for dependency injection. | no |
| WithPrefix | `func WithPrefix(prefix string) Option` | `dappco.re/go/core/io/s3` | WithPrefix sets an optional key prefix for all operations. | yes |
| Medium.Append | `func (m *Medium) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/s3` | Append opens the named file for appending. | yes |
| Medium.Create | `func (m *Medium) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/s3` | Create creates or truncates the named file. | yes |
| Medium.Delete | `func (m *Medium) Delete(p string) error` | `dappco.re/go/core/io/s3` | Delete removes a single object. | yes |
| Medium.DeleteAll | `func (m *Medium) DeleteAll(p string) error` | `dappco.re/go/core/io/s3` | DeleteAll removes all objects under the given prefix. | yes |
| Medium.EnsureDir | `func (m *Medium) EnsureDir(_ string) error` | `dappco.re/go/core/io/s3` | EnsureDir is a no-op for S3 (S3 has no real directories). | yes |
| Medium.Exists | `func (m *Medium) Exists(p string) bool` | `dappco.re/go/core/io/s3` | Exists checks if a path exists (file or directory prefix). | yes |
| Medium.FileGet | `func (m *Medium) FileGet(p string) (string, error)` | `dappco.re/go/core/io/s3` | FileGet is a convenience function that reads a file from the medium. | yes |
| Medium.FileSet | `func (m *Medium) FileSet(p, content string) error` | `dappco.re/go/core/io/s3` | FileSet is a convenience function that writes a file to the medium. | yes |
| Medium.IsDir | `func (m *Medium) IsDir(p string) bool` | `dappco.re/go/core/io/s3` | IsDir checks if a path exists and is a directory (has objects under it as a prefix). | yes |
| Medium.IsFile | `func (m *Medium) IsFile(p string) bool` | `dappco.re/go/core/io/s3` | IsFile checks if a path exists and is a regular file (not a "directory" prefix). | yes |
| Medium.List | `func (m *Medium) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/s3` | List returns directory entries for the given path using ListObjectsV2 with delimiter. | yes |
| Medium.Open | `func (m *Medium) Open(p string) (fs.File, error)` | `dappco.re/go/core/io/s3` | Open opens the named file for reading. | yes |
| Medium.Read | `func (m *Medium) Read(p string) (string, error)` | `dappco.re/go/core/io/s3` | Read retrieves the content of a file as a string. | yes |
| Medium.ReadStream | `func (m *Medium) ReadStream(p string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/s3` | ReadStream returns a reader for the file content. | yes |
| Medium.Rename | `func (m *Medium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/s3` | Rename moves an object by copying then deleting the original. | yes |
| Medium.Stat | `func (m *Medium) Stat(p string) (fs.FileInfo, error)` | `dappco.re/go/core/io/s3` | Stat returns file information for the given path using HeadObject. | yes |
| Medium.Write | `func (m *Medium) Write(p, content string) error` | `dappco.re/go/core/io/s3` | Write saves the given content to a file, overwriting it if it exists. | yes |
| Medium.WriteStream | `func (m *Medium) WriteStream(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/s3` | WriteStream returns a writer for the file content. | yes |
| Medium | `type Medium struct` | `dappco.re/go/core/io/s3` | Medium is an S3-backed storage backend implementing the io.Medium interface. | yes |
| Option | `type Option func(*Medium)` | `dappco.re/go/core/io/s3` | Option configures a Medium. | no |
| New | `func New(dbPath string, opts ...Option) (*Medium, error)` | `dappco.re/go/core/io/sqlite` | New creates a new SQLite Medium at the given database path. | yes |
| WithTable | `func WithTable(table string) Option` | `dappco.re/go/core/io/sqlite` | WithTable sets the table name (default: "files"). | yes |
| Medium.Append | `func (m *Medium) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/sqlite` | Append opens the named file for appending, creating it if it doesn't exist. | yes |
| Medium.Close | `func (m *Medium) Close() error` | `dappco.re/go/core/io/sqlite` | Close closes the underlying database connection. | yes |
| Medium.Create | `func (m *Medium) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/sqlite` | Create creates or truncates the named file. | yes |
| Medium.Delete | `func (m *Medium) Delete(p string) error` | `dappco.re/go/core/io/sqlite` | Delete removes a file or empty directory. | yes |
| Medium.DeleteAll | `func (m *Medium) DeleteAll(p string) error` | `dappco.re/go/core/io/sqlite` | DeleteAll removes a file or directory and all its contents recursively. | yes |
| Medium.EnsureDir | `func (m *Medium) EnsureDir(p string) error` | `dappco.re/go/core/io/sqlite` | EnsureDir makes sure a directory exists, creating it if necessary. | yes |
| Medium.Exists | `func (m *Medium) Exists(p string) bool` | `dappco.re/go/core/io/sqlite` | Exists checks if a path exists (file or directory). | yes |
| Medium.FileGet | `func (m *Medium) FileGet(p string) (string, error)` | `dappco.re/go/core/io/sqlite` | FileGet is a convenience function that reads a file from the medium. | yes |
| Medium.FileSet | `func (m *Medium) FileSet(p, content string) error` | `dappco.re/go/core/io/sqlite` | FileSet is a convenience function that writes a file to the medium. | yes |
| Medium.IsDir | `func (m *Medium) IsDir(p string) bool` | `dappco.re/go/core/io/sqlite` | IsDir checks if a path exists and is a directory. | yes |
| Medium.IsFile | `func (m *Medium) IsFile(p string) bool` | `dappco.re/go/core/io/sqlite` | IsFile checks if a path exists and is a regular file. | yes |
| Medium.List | `func (m *Medium) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/sqlite` | List returns the directory entries for the given path. | yes |
| Medium.Open | `func (m *Medium) Open(p string) (fs.File, error)` | `dappco.re/go/core/io/sqlite` | Open opens the named file for reading. | yes |
| Medium.Read | `func (m *Medium) Read(p string) (string, error)` | `dappco.re/go/core/io/sqlite` | Read retrieves the content of a file as a string. | yes |
| Medium.ReadStream | `func (m *Medium) ReadStream(p string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/sqlite` | ReadStream returns a reader for the file content. | yes |
| Medium.Rename | `func (m *Medium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/sqlite` | Rename moves a file or directory from oldPath to newPath. | yes |
| Medium.Stat | `func (m *Medium) Stat(p string) (fs.FileInfo, error)` | `dappco.re/go/core/io/sqlite` | Stat returns file information for the given path. | yes |
| Medium.Write | `func (m *Medium) Write(p, content string) error` | `dappco.re/go/core/io/sqlite` | Write saves the given content to a file, overwriting it if it exists. | yes |
| Medium.WriteStream | `func (m *Medium) WriteStream(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/sqlite` | WriteStream returns a writer for the file content. | yes |
| Medium | `type Medium struct` | `dappco.re/go/core/io/sqlite` | Medium is a SQLite-backed storage backend implementing the io.Medium interface. | yes |
| Option | `type Option func(*Medium)` | `dappco.re/go/core/io/sqlite` | Option configures a Medium. | no |
| New | `func New(c *core.Core, crypt ...cryptProvider) (any, error)` | `dappco.re/go/core/io/workspace` | New creates a new Workspace service instance. | yes |
| Workspace.CreateWorkspace | `func (Workspace) CreateWorkspace(identifier, password string) (string, error)` | `dappco.re/go/core/io/workspace` | Creates a new encrypted workspace. | yes |
| Workspace.SwitchWorkspace | `func (Workspace) SwitchWorkspace(name string) error` | `dappco.re/go/core/io/workspace` | Switches the active workspace. | yes |
| Workspace.WorkspaceFileGet | `func (Workspace) WorkspaceFileGet(filename string) (string, error)` | `dappco.re/go/core/io/workspace` | Reads a file from the active workspace. | yes |
| Workspace.WorkspaceFileSet | `func (Workspace) WorkspaceFileSet(filename, content string) error` | `dappco.re/go/core/io/workspace` | Writes a file into the active workspace. | yes |
| Service.CreateWorkspace | `func (s *Service) CreateWorkspace(identifier, password string) (string, error)` | `dappco.re/go/core/io/workspace` | CreateWorkspace creates a new encrypted workspace. | yes |
| Service.HandleIPCEvents | `func (s *Service) HandleIPCEvents(c *core.Core, msg core.Message) core.Result` | `dappco.re/go/core/io/workspace` | HandleIPCEvents handles workspace-related IPC messages. | no |
| Service.SwitchWorkspace | `func (s *Service) SwitchWorkspace(name string) error` | `dappco.re/go/core/io/workspace` | SwitchWorkspace changes the active workspace. | yes |
| Service.WorkspaceFileGet | `func (s *Service) WorkspaceFileGet(filename string) (string, error)` | `dappco.re/go/core/io/workspace` | WorkspaceFileGet retrieves the content of a file from the active workspace. | yes |
| Service.WorkspaceFileSet | `func (s *Service) WorkspaceFileSet(filename, content string) error` | `dappco.re/go/core/io/workspace` | WorkspaceFileSet saves content to a file in the active workspace. | yes |
| Service | `type Service struct` | `dappco.re/go/core/io/workspace` | Service implements the Workspace interface. | yes |
| Workspace | `type Workspace interface` | `dappco.re/go/core/io/workspace` | Workspace provides management for encrypted user workspaces. | no |

6
go.mod
View file

@ -3,9 +3,8 @@ module dappco.re/go/core/io
go 1.26.0
require (
dappco.re/go/core v0.4.7
dappco.re/go/core v0.6.0
forge.lthn.ai/Snider/Borg v0.3.1
forge.lthn.ai/core/go-crypt v0.1.6
forge.lthn.ai/core/go-log v0.0.4
github.com/aws/aws-sdk-go-v2 v1.41.4
github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1
@ -15,8 +14,6 @@ require (
)
require (
forge.lthn.ai/core/go v0.3.0 // indirect
github.com/ProtonMail/go-crypto v1.4.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 // indirect
@ -26,7 +23,6 @@ require (
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20 // indirect
github.com/aws/smithy-go v1.24.2 // indirect
github.com/cloudflare/circl v1.6.3 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/google/uuid v1.6.0 // indirect

12
go.sum
View file

@ -1,15 +1,9 @@
dappco.re/go/core v0.4.7 h1:KmIA/2lo6rl1NMtLrKqCWfMlUqpDZYH3q0/d10dTtGA=
dappco.re/go/core v0.4.7/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A=
dappco.re/go/core v0.6.0 h1:0wmuO/UmCWXxJkxQ6XvVLnqkAuWitbd49PhxjCsplyk=
dappco.re/go/core v0.6.0/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A=
forge.lthn.ai/Snider/Borg v0.3.1 h1:gfC1ZTpLoZai07oOWJiVeQ8+qJYK8A795tgVGJHbVL8=
forge.lthn.ai/Snider/Borg v0.3.1/go.mod h1:Z7DJD0yHXsxSyM7Mjl6/g4gH1NBsIz44Bf5AFlV76Wg=
forge.lthn.ai/core/go v0.3.0 h1:mOG97ApMprwx9Ked62FdWVwXTGSF6JO6m0DrVpoH2Q4=
forge.lthn.ai/core/go v0.3.0/go.mod h1:gE6c8h+PJ2287qNhVUJ5SOe1kopEwHEquvinstpuyJc=
forge.lthn.ai/core/go-crypt v0.1.6 h1:jB7L/28S1NR+91u3GcOYuKfBLzPhhBUY1fRe6WkGVns=
forge.lthn.ai/core/go-crypt v0.1.6/go.mod h1:4VZAGqxlbadhSB66sJkdj54/HSJ+bSxVgwWK5kMMYDo=
forge.lthn.ai/core/go-log v0.0.4 h1:KTuCEPgFmuM8KJfnyQ8vPOU1Jg654W74h8IJvfQMfv0=
forge.lthn.ai/core/go-log v0.0.4/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw=
github.com/ProtonMail/go-crypto v1.4.0 h1:Zq/pbM3F5DFgJiMouxEdSVY44MVoQNEKp5d5QxIQceQ=
github.com/ProtonMail/go-crypto v1.4.0/go.mod h1:e1OaTyu5SYVrO9gKOEhTc+5UcXtTUa+P3uLudwcgPqo=
github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k=
github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 h1:3kGOqnh1pPeddVa/E37XNTaWJ8W6vrbYV9lJEkCnhuY=
@ -32,8 +26,6 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 h1:csi9NLpFZXb9fxY7rS1xVzgPRGMt7
github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1/go.mod h1:qXVal5H0ChqXP63t6jze5LmFalc7+ZE7wOdLtZ0LCP0=
github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng=
github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc=
github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=
github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

10
io.go
View file

@ -4,12 +4,12 @@ import (
goio "io"
"io/fs"
"os"
"path/filepath"
"strings"
"time"
coreerr "forge.lthn.ai/core/go-log"
core "dappco.re/go/core"
"dappco.re/go/core/io/local"
coreerr "forge.lthn.ai/core/go-log"
)
// Medium defines the standard interface for a storage backend.
@ -361,7 +361,7 @@ func (m *MockMedium) Open(path string) (fs.File, error) {
return nil, coreerr.E("io.MockMedium.Open", "file not found: "+path, os.ErrNotExist)
}
return &MockFile{
name: filepath.Base(path),
name: core.PathBase(path),
content: []byte(content),
}, nil
}
@ -556,7 +556,7 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) {
modTime = time.Now()
}
return FileInfo{
name: filepath.Base(path),
name: core.PathBase(path),
size: int64(len(content)),
mode: 0644,
modTime: modTime,
@ -564,7 +564,7 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) {
}
if _, ok := m.Dirs[path]; ok {
return FileInfo{
name: filepath.Base(path),
name: core.PathBase(path),
isDir: true,
mode: fs.ModeDir | 0755,
}, nil

View file

@ -6,11 +6,10 @@ import (
goio "io"
"io/fs"
"os"
"os/user"
"path/filepath"
"strings"
"time"
core "dappco.re/go/core"
coreerr "forge.lthn.ai/core/go-log"
)
@ -22,20 +21,174 @@ type Medium struct {
// New creates a new local Medium rooted at the given directory.
// Pass "/" for full filesystem access, or a specific path to sandbox.
func New(root string) (*Medium, error) {
abs, err := filepath.Abs(root)
if err != nil {
return nil, err
}
abs := absolutePath(root)
// Resolve symlinks so sandbox checks compare like-for-like.
// On macOS, /var is a symlink to /private/var — without this,
// EvalSymlinks on child paths resolves to /private/var/... while
// resolving child paths resolves to /private/var/... while
// root stays /var/..., causing false sandbox escape detections.
if resolved, err := filepath.EvalSymlinks(abs); err == nil {
if resolved, err := resolveSymlinksPath(abs); err == nil {
abs = resolved
}
return &Medium{root: abs}, nil
}
// dirSeparator returns the directory separator in use: the value of the
// "DS" environment variable when set, otherwise the OS default.
func dirSeparator() string {
	if override := core.Env("DS"); override != "" {
		return override
	}
	return string(os.PathSeparator)
}
// normalisePath rewrites p so every slash or backslash matches the
// active directory separator.
func normalisePath(p string) string {
	switch sep := dirSeparator(); sep {
	case "/":
		return strings.ReplaceAll(p, "\\", sep)
	default:
		return strings.ReplaceAll(p, "/", sep)
	}
}
// currentWorkingDir returns the process working directory, falling back
// to the DIR_CWD environment variable and finally "." when neither is
// available.
func currentWorkingDir() string {
	wd, err := os.Getwd()
	if err == nil && wd != "" {
		return wd
	}
	if fromEnv := core.Env("DIR_CWD"); fromEnv != "" {
		return fromEnv
	}
	return "."
}
// absolutePath normalises p and, when it is relative, anchors it at the
// current working directory.
func absolutePath(p string) string {
	normalised := normalisePath(p)
	if !core.PathIsAbs(normalised) {
		return core.Path(currentWorkingDir(), normalised)
	}
	return core.Path(normalised)
}
func cleanSandboxPath(p string) string {
return core.Path(dirSeparator() + normalisePath(p))
}
// splitPathParts breaks an absolute path into its non-empty segments.
// A bare separator (or empty string) yields nil.
func splitPathParts(p string) []string {
	sep := dirSeparator()
	remainder := strings.TrimPrefix(p, sep)
	if remainder == "" {
		return nil
	}
	var segments []string
	for _, segment := range strings.Split(remainder, sep) {
		if segment != "" {
			segments = append(segments, segment)
		}
	}
	return segments
}
// resolveSymlinksPath resolves every symlink component of p, guarding
// against cycles, and returns the canonical absolute path.
func resolveSymlinksPath(p string) (string, error) {
	seen := map[string]struct{}{}
	return resolveSymlinksRecursive(absolutePath(p), seen)
}
// resolveSymlinksRecursive walks p segment by segment, resolving each
// symlink it meets. Unlike filepath.EvalSymlinks it tolerates missing
// trailing components: once a segment does not exist, the remaining
// segments are appended verbatim. seen tracks symlink targets on the
// current resolution chain to detect cycles; entries are removed after
// the recursive call so the same target may legitimately appear on
// distinct, non-cyclic chains.
func resolveSymlinksRecursive(p string, seen map[string]struct{}) (string, error) {
	p = core.Path(p)
	if p == dirSeparator() {
		return p, nil
	}
	// current accumulates the resolved path, starting at the filesystem root.
	current := dirSeparator()
	for _, part := range splitPathParts(p) {
		next := core.Path(current, part)
		// Lstat (not Stat) so a symlink is observed rather than followed.
		info, err := os.Lstat(next)
		if err != nil {
			if os.IsNotExist(err) {
				// Non-existent segment: keep it literally and continue.
				current = next
				continue
			}
			return "", err
		}
		if info.Mode()&os.ModeSymlink == 0 {
			current = next
			continue
		}
		target, err := os.Readlink(next)
		if err != nil {
			return "", err
		}
		target = normalisePath(target)
		if !core.PathIsAbs(target) {
			// Relative targets are resolved against the symlink's parent dir.
			target = core.Path(current, target)
		} else {
			target = core.Path(target)
		}
		if _, ok := seen[target]; ok {
			return "", coreerr.E("local.resolveSymlinksPath", "symlink cycle: "+target, os.ErrInvalid)
		}
		seen[target] = struct{}{}
		resolved, err := resolveSymlinksRecursive(target, seen)
		// Pop the target so sibling chains may revisit it (only cycles are fatal).
		delete(seen, target)
		if err != nil {
			return "", err
		}
		current = resolved
	}
	return current, nil
}
// isWithinRoot reports whether target equals root or lives underneath it.
// A root equal to the bare separator admits every path.
func isWithinRoot(root, target string) bool {
	cleanRoot := core.Path(root)
	cleanTarget := core.Path(target)
	switch {
	case cleanRoot == dirSeparator():
		return true
	case cleanTarget == cleanRoot:
		return true
	}
	return strings.HasPrefix(cleanTarget, cleanRoot+dirSeparator())
}
// canonicalPath returns the symlink-resolved form of p when resolution
// succeeds, otherwise the normalised absolute form. Empty input stays empty.
func canonicalPath(p string) string {
	if p == "" {
		return ""
	}
	resolved, err := resolveSymlinksPath(p)
	if err != nil {
		return absolutePath(p)
	}
	return resolved
}
func osUserHomeDir() string {
home, err := os.UserHomeDir()
if err != nil {
return ""
}
return home
}
// isProtectedPath reports whether full resolves to a path that must never
// be deleted: the filesystem root or any known home directory (reported
// by the OS, or named by the HOME / DIR_HOME environment variables).
// All candidates are canonicalised so symlinked homes are caught too.
func isProtectedPath(full string) bool {
	guarded := map[string]struct{}{
		canonicalPath(dirSeparator()): {},
	}
	candidates := []string{osUserHomeDir(), core.Env("HOME"), core.Env("DIR_HOME")}
	for _, home := range candidates {
		if home != "" {
			guarded[canonicalPath(home)] = struct{}{}
		}
	}
	_, protected := guarded[canonicalPath(full)]
	return protected
}
// logSandboxEscape writes a timestamped security warning to stderr when a
// path resolves outside the sandbox root.
func logSandboxEscape(root, path, attempted string) {
	who := core.Env("USER")
	if who == "" {
		who = "unknown"
	}
	stamp := time.Now().Format(time.RFC3339)
	fmt.Fprintf(os.Stderr, "[%s] SECURITY sandbox escape detected root=%s path=%s attempted=%s user=%s\n",
		stamp, root, path, attempted, who)
}
// path sanitises and returns the full path.
// Absolute paths are sandboxed under root (unless root is "/").
func (m *Medium) path(p string) string {
@ -46,41 +199,36 @@ func (m *Medium) path(p string) string {
// If the path is relative and the medium is rooted at "/",
// treat it as relative to the current working directory.
// This makes io.Local behave more like the standard 'os' package.
if m.root == "/" && !filepath.IsAbs(p) {
cwd, _ := os.Getwd()
return filepath.Join(cwd, p)
if m.root == dirSeparator() && !core.PathIsAbs(normalisePath(p)) {
return core.Path(currentWorkingDir(), normalisePath(p))
}
// Use filepath.Clean with a leading slash to resolve all .. and . internally
// Use a cleaned absolute path to resolve all .. and . internally
// before joining with the root. This is a standard way to sandbox paths.
clean := filepath.Clean("/" + p)
clean := cleanSandboxPath(p)
// If root is "/", allow absolute paths through
if m.root == "/" {
if m.root == dirSeparator() {
return clean
}
// Join cleaned relative path with root
return filepath.Join(m.root, clean)
return core.Path(m.root, strings.TrimPrefix(clean, dirSeparator()))
}
// validatePath ensures the path is within the sandbox, following symlinks if they exist.
func (m *Medium) validatePath(p string) (string, error) {
if m.root == "/" {
if m.root == dirSeparator() {
return m.path(p), nil
}
// Split the cleaned path into components
parts := strings.Split(filepath.Clean("/"+p), string(os.PathSeparator))
parts := splitPathParts(cleanSandboxPath(p))
current := m.root
for _, part := range parts {
if part == "" {
continue
}
next := filepath.Join(current, part)
realNext, err := filepath.EvalSymlinks(next)
next := core.Path(current, part)
realNext, err := resolveSymlinksPath(next)
if err != nil {
if os.IsNotExist(err) {
// Part doesn't exist, we can't follow symlinks anymore.
@ -93,15 +241,9 @@ func (m *Medium) validatePath(p string) (string, error) {
}
// Verify the resolved part is still within the root
rel, err := filepath.Rel(m.root, realNext)
if err != nil || strings.HasPrefix(rel, "..") {
if !isWithinRoot(m.root, realNext) {
// Security event: sandbox escape attempt
username := "unknown"
if u, err := user.Current(); err == nil {
username = u.Username
}
fmt.Fprintf(os.Stderr, "[%s] SECURITY sandbox escape detected root=%s path=%s attempted=%s user=%s\n",
time.Now().Format(time.RFC3339), m.root, p, realNext, username)
logSandboxEscape(m.root, p, realNext)
return "", os.ErrPermission // Path escapes sandbox
}
current = realNext
@ -137,7 +279,7 @@ func (m *Medium) WriteMode(p, content string, mode os.FileMode) error {
if err != nil {
return err
}
if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil {
if err := os.MkdirAll(core.PathDir(full), 0755); err != nil {
return err
}
return os.WriteFile(full, []byte(content), mode)
@ -221,7 +363,7 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) {
if err != nil {
return nil, err
}
if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil {
if err := os.MkdirAll(core.PathDir(full), 0755); err != nil {
return nil, err
}
return os.Create(full)
@ -233,7 +375,7 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) {
if err != nil {
return nil, err
}
if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil {
if err := os.MkdirAll(core.PathDir(full), 0755); err != nil {
return nil, err
}
return os.OpenFile(full, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
@ -265,7 +407,7 @@ func (m *Medium) Delete(p string) error {
if err != nil {
return err
}
if full == "/" || full == os.Getenv("HOME") {
if isProtectedPath(full) {
return coreerr.E("local.Delete", "refusing to delete protected path: "+full, nil)
}
return os.Remove(full)
@ -277,7 +419,7 @@ func (m *Medium) DeleteAll(p string) error {
if err != nil {
return err
}
if full == "/" || full == os.Getenv("HOME") {
if isProtectedPath(full) {
return coreerr.E("local.DeleteAll", "refusing to delete protected path: "+full, nil)
}
return os.RemoveAll(full)

View file

@ -8,6 +8,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNew(t *testing.T) {
@ -170,6 +171,48 @@ func TestDeleteAll(t *testing.T) {
assert.False(t, m.Exists("dir"))
}
// HOME pointing at a symlink must still protect the real directory behind it.
func TestDelete_ProtectedHomeViaSymlinkEnv(t *testing.T) {
	actualHome := t.TempDir()
	symlink := filepath.Join(t.TempDir(), "home-link")
	require.NoError(t, os.Symlink(actualHome, symlink))
	t.Setenv("HOME", symlink)

	m, err := New("/")
	require.NoError(t, err)

	require.Error(t, m.Delete(actualHome))
	assert.DirExists(t, actualHome)
}
// DeleteAll must refuse to remove the directory named by HOME.
func TestDeleteAll_ProtectedHomeViaEnv(t *testing.T) {
	home := t.TempDir()
	t.Setenv("HOME", home)

	m, err := New("/")
	require.NoError(t, err)

	require.Error(t, m.DeleteAll(home))
	assert.DirExists(t, home)
}
// Even when HOME is redirected elsewhere, the OS-reported home directory
// must stay protected.
func TestDelete_ProtectedHomeBypassesEnvHijack(t *testing.T) {
	realHome, err := os.UserHomeDir()
	require.NoError(t, err)
	require.NotEmpty(t, realHome)
	t.Setenv("HOME", t.TempDir())

	m, err := New("/")
	require.NoError(t, err)

	require.Error(t, m.Delete(realHome))
	assert.DirExists(t, realHome)
}
func TestRename(t *testing.T) {
root := t.TempDir()
m, _ := New(root)

View file

@ -37,6 +37,29 @@ type Medium struct {
prefix string
}
// deleteObjectsError folds the per-object errors returned by a
// DeleteObjects call into a single error listing every failed key, or
// returns nil when the batch fully succeeded.
func deleteObjectsError(prefix string, errs []types.Error) error {
	if len(errs) == 0 {
		return nil
	}
	details := make([]string, 0, len(errs))
	for _, failure := range errs {
		key := aws.ToString(failure.Key)
		code := aws.ToString(failure.Code)
		msg := aws.ToString(failure.Message)
		detail := key
		if code != "" {
			detail += ": " + code
			if msg != "" {
				detail += " " + msg
			}
		} else if msg != "" {
			detail += ": " + msg
		}
		details = append(details, detail)
	}
	return coreerr.E("s3.DeleteAll", "partial delete failed under "+prefix+": "+strings.Join(details, "; "), nil)
}
// Option configures a Medium.
type Option func(*Medium)
@ -172,6 +195,28 @@ func (m *Medium) FileSet(p, content string) error {
return m.Write(p, content)
}
// deleteObjectBatch removes the given keys in a single quiet DeleteObjects
// request and surfaces any per-object failures as one error. An empty key
// list is a no-op.
func (m *Medium) deleteObjectBatch(prefix string, keys []string) error {
	if len(keys) == 0 {
		return nil
	}
	identifiers := make([]types.ObjectIdentifier, 0, len(keys))
	for _, key := range keys {
		identifiers = append(identifiers, types.ObjectIdentifier{Key: aws.String(key)})
	}
	out, err := m.client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{
		Bucket: aws.String(m.bucket),
		Delete: &types.Delete{Objects: identifiers, Quiet: aws.Bool(true)},
	})
	if err != nil {
		return coreerr.E("s3.DeleteAll", "failed to delete objects", err)
	}
	return deleteObjectsError(prefix, out.Errors)
}
// Delete removes a single object.
func (m *Medium) Delete(p string) error {
key := m.key(p)
@ -196,13 +241,7 @@ func (m *Medium) DeleteAll(p string) error {
return coreerr.E("s3.DeleteAll", "path is required", os.ErrInvalid)
}
// First, try deleting the exact key
_, _ = m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
Bucket: aws.String(m.bucket),
Key: aws.String(key),
})
// Then delete all objects under the prefix
deleteKeys := []string{key}
prefix := key
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
@ -221,23 +260,20 @@ func (m *Medium) DeleteAll(p string) error {
return coreerr.E("s3.DeleteAll", "failed to list objects: "+prefix, err)
}
for _, obj := range listOut.Contents {
deleteKeys = append(deleteKeys, aws.ToString(obj.Key))
if len(deleteKeys) == 1000 {
if err := m.deleteObjectBatch(prefix, deleteKeys); err != nil {
return err
}
deleteKeys = nil
}
}
if len(listOut.Contents) == 0 {
break
}
objects := make([]types.ObjectIdentifier, len(listOut.Contents))
for i, obj := range listOut.Contents {
objects[i] = types.ObjectIdentifier{Key: obj.Key}
}
_, err = m.client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{
Bucket: aws.String(m.bucket),
Delete: &types.Delete{Objects: objects, Quiet: aws.Bool(true)},
})
if err != nil {
return coreerr.E("s3.DeleteAll", "failed to delete objects", err)
}
if listOut.IsTruncated != nil && *listOut.IsTruncated {
continuationToken = listOut.NextContinuationToken
} else {
@ -245,6 +281,12 @@ func (m *Medium) DeleteAll(p string) error {
}
}
if len(deleteKeys) > 0 {
if err := m.deleteObjectBatch(prefix, deleteKeys); err != nil {
return err
}
}
return nil
}

View file

@ -21,15 +21,19 @@ import (
// mockS3 is an in-memory mock implementing the s3API interface.
type mockS3 struct {
mu sync.RWMutex
objects map[string][]byte
mtimes map[string]time.Time
mu sync.RWMutex
objects map[string][]byte
mtimes map[string]time.Time
deleteObjectErrors map[string]error
deleteObjectsErrs map[string]types.Error
}
func newMockS3() *mockS3 {
return &mockS3{
objects: make(map[string][]byte),
mtimes: make(map[string]time.Time),
objects: make(map[string][]byte),
mtimes: make(map[string]time.Time),
deleteObjectErrors: make(map[string]error),
deleteObjectsErrs: make(map[string]types.Error),
}
}
@ -69,6 +73,9 @@ func (m *mockS3) DeleteObject(_ context.Context, params *s3.DeleteObjectInput, _
defer m.mu.Unlock()
key := aws.ToString(params.Key)
if err, ok := m.deleteObjectErrors[key]; ok {
return nil, err
}
delete(m.objects, key)
delete(m.mtimes, key)
return &s3.DeleteObjectOutput{}, nil
@ -78,12 +85,17 @@ func (m *mockS3) DeleteObjects(_ context.Context, params *s3.DeleteObjectsInput,
m.mu.Lock()
defer m.mu.Unlock()
var outErrs []types.Error
for _, obj := range params.Delete.Objects {
key := aws.ToString(obj.Key)
if errInfo, ok := m.deleteObjectsErrs[key]; ok {
outErrs = append(outErrs, errInfo)
continue
}
delete(m.objects, key)
delete(m.mtimes, key)
}
return &s3.DeleteObjectsOutput{}, nil
return &s3.DeleteObjectsOutput{Errors: outErrs}, nil
}
func (m *mockS3) HeadObject(_ context.Context, params *s3.HeadObjectInput, _ ...func(*s3.Options)) (*s3.HeadObjectOutput, error) {
@ -350,6 +362,41 @@ func TestDeleteAll_Bad_EmptyPath(t *testing.T) {
assert.Error(t, err)
}
// A per-object error on the exact key must surface as a partial-delete
// error and leave the object in place.
func TestDeleteAll_Bad_DeleteObjectError(t *testing.T) {
	medium, backend := newTestMedium(t)
	require.NoError(t, medium.Write("dir", "metadata"))
	backend.deleteObjectsErrs["dir"] = types.Error{
		Key:     aws.String("dir"),
		Code:    aws.String("AccessDenied"),
		Message: aws.String("blocked"),
	}

	err := medium.DeleteAll("dir")
	require.Error(t, err)
	assert.Contains(t, err.Error(), "partial delete failed")
	assert.Contains(t, err.Error(), "dir: AccessDenied blocked")
	assert.True(t, medium.IsFile("dir"))
}
// When only one object in a batch fails, DeleteAll must report the failure
// while the other deletions still take effect.
func TestDeleteAll_Bad_PartialDelete(t *testing.T) {
	medium, backend := newTestMedium(t)
	require.NoError(t, medium.Write("dir/file1.txt", "a"))
	require.NoError(t, medium.Write("dir/file2.txt", "b"))
	backend.deleteObjectsErrs["dir/file2.txt"] = types.Error{
		Key:     aws.String("dir/file2.txt"),
		Code:    aws.String("AccessDenied"),
		Message: aws.String("blocked"),
	}

	err := medium.DeleteAll("dir")
	require.Error(t, err)
	assert.Contains(t, err.Error(), "partial delete failed")
	assert.Contains(t, err.Error(), "dir/file2.txt")
	assert.True(t, medium.IsFile("dir/file2.txt"))
	assert.False(t, medium.IsFile("dir/file1.txt"))
}
func TestRename_Good(t *testing.T) {
m, _ := newTestMedium(t)

View file

@ -3,8 +3,10 @@ package workspace
import (
"crypto/sha256"
"encoding/hex"
"errors"
"os"
"path/filepath"
"strings"
"sync"
core "dappco.re/go/core"
@ -39,11 +41,11 @@ type Service struct {
// New creates a new Workspace service instance.
// An optional cryptProvider can be passed to supply PGP key generation.
func New(c *core.Core, crypt ...cryptProvider) (any, error) {
home, err := os.UserHomeDir()
if err != nil {
return nil, coreerr.E("workspace.New", "failed to determine home directory", err)
home := workspaceHome()
if home == "" {
return nil, coreerr.E("workspace.New", "failed to determine home directory", os.ErrNotExist)
}
rootPath := filepath.Join(home, ".core", "workspaces")
rootPath := core.Path(home, ".core", "workspaces")
s := &Service{
core: c,
@ -75,14 +77,17 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) {
hash := sha256.Sum256([]byte(identifier))
wsID := hex.EncodeToString(hash[:])
wsPath := filepath.Join(s.rootPath, wsID)
wsPath, err := s.workspacePath("workspace.CreateWorkspace", wsID)
if err != nil {
return "", err
}
if s.medium.Exists(wsPath) {
return "", coreerr.E("workspace.CreateWorkspace", "workspace already exists", nil)
}
for _, d := range []string{"config", "log", "data", "files", "keys"} {
if err := s.medium.EnsureDir(filepath.Join(wsPath, d)); err != nil {
if err := s.medium.EnsureDir(core.Path(wsPath, d)); err != nil {
return "", coreerr.E("workspace.CreateWorkspace", "failed to create directory: "+d, err)
}
}
@ -92,7 +97,7 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) {
return "", coreerr.E("workspace.CreateWorkspace", "failed to generate keys", err)
}
if err := s.medium.WriteMode(filepath.Join(wsPath, "keys", "private.key"), privKey, 0600); err != nil {
if err := s.medium.WriteMode(core.Path(wsPath, "keys", "private.key"), privKey, 0600); err != nil {
return "", coreerr.E("workspace.CreateWorkspace", "failed to save private key", err)
}
@ -104,12 +109,15 @@ func (s *Service) SwitchWorkspace(name string) error {
s.mu.Lock()
defer s.mu.Unlock()
wsPath := filepath.Join(s.rootPath, name)
wsPath, err := s.workspacePath("workspace.SwitchWorkspace", name)
if err != nil {
return err
}
if !s.medium.IsDir(wsPath) {
return coreerr.E("workspace.SwitchWorkspace", "workspace not found: "+name, nil)
}
s.activeWorkspace = name
s.activeWorkspace = core.PathBase(wsPath)
return nil
}
@ -119,7 +127,15 @@ func (s *Service) activeFilePath(op, filename string) (string, error) {
if s.activeWorkspace == "" {
return "", coreerr.E(op, "no active workspace", nil)
}
return filepath.Join(s.rootPath, s.activeWorkspace, "files", filename), nil
filesRoot := core.Path(s.rootPath, s.activeWorkspace, "files")
path, err := joinWithinRoot(filesRoot, filename)
if err != nil {
return "", coreerr.E(op, "file path escapes workspace files", os.ErrPermission)
}
if path == filesRoot {
return "", coreerr.E(op, "filename is required", os.ErrInvalid)
}
return path, nil
}
// WorkspaceFileGet retrieves the content of a file from the active workspace.
@ -171,5 +187,86 @@ func (s *Service) HandleIPCEvents(c *core.Core, msg core.Message) core.Result {
return core.Result{OK: true}
}
// workspaceHome picks the directory workspaces live under, preferring
// CORE_HOME, then HOME, then DIR_HOME. Returns "" when none is set.
func workspaceHome() string {
	for _, key := range []string{"CORE_HOME", "HOME", "DIR_HOME"} {
		if home := core.Env(key); home != "" {
			return home
		}
	}
	return ""
}
func resolveWorkspacePath(rootPath, workspacePath string) error {
resolvedRoot, err := filepath.EvalSymlinks(rootPath)
if err != nil {
return err
}
resolvedPath, err := filepath.EvalSymlinks(workspacePath)
if err != nil {
if !os.IsNotExist(err) {
return err
}
// The workspace may not exist yet during creation. Resolve the root and
// re-anchor the final entry under it so containment checks still compare
// canonical paths.
resolvedPath = filepath.Join(resolvedRoot, filepath.Base(workspacePath))
}
rel, err := filepath.Rel(resolvedRoot, resolvedPath)
if err != nil {
return err
}
if rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return os.ErrPermission
}
return nil
}
// joinWithinRoot joins parts onto root and confirms the cleaned result
// does not escape root; os.ErrPermission is returned when it does.
func joinWithinRoot(root string, parts ...string) (string, error) {
	joined := append([]string{root}, parts...)
	candidate := filepath.Clean(core.Path(joined...))
	switch {
	case candidate == root:
		return candidate, nil
	case strings.HasPrefix(candidate, root+string(os.PathSeparator)):
		return candidate, nil
	}
	return "", os.ErrPermission
}
// workspacePath validates name and returns the absolute path of that
// workspace under s.rootPath. It rejects empty names, the root itself,
// traversal ("..", nested separators), and symlink escapes. op is used as
// the error-code prefix for all returned errors.
func (s *Service) workspacePath(op, name string) (string, error) {
	if name == "" {
		return "", coreerr.E(op, "workspace name is required", os.ErrInvalid)
	}
	path := filepath.Clean(core.Path(s.rootPath, name))
	// Follow symlinks so an entry linking outside the root is refused.
	if err := resolveWorkspacePath(s.rootPath, path); err != nil {
		if errors.Is(err, os.ErrPermission) {
			return "", coreerr.E(op, "workspace path escapes root", err)
		}
		return "", coreerr.E(op, "failed to resolve workspace path", err)
	}
	// Lexical containment checks on the cleaned (unresolved) path.
	rel, err := filepath.Rel(s.rootPath, path)
	if err != nil {
		return "", coreerr.E(op, "failed to resolve workspace path", err)
	}
	if rel == "." {
		// Names like "." collapse to the root itself.
		return "", coreerr.E(op, "invalid workspace name: "+name, os.ErrInvalid)
	}
	if rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
		return "", coreerr.E(op, "workspace path escapes root", os.ErrPermission)
	}
	// A workspace must be a direct child of rootPath (no nested paths).
	if strings.Contains(rel, string(os.PathSeparator)) {
		return "", coreerr.E(op, "invalid workspace name: "+name, os.ErrInvalid)
	}
	// Belt-and-braces: confirm the parent really is the workspaces root.
	if core.PathDir(path) != s.rootPath {
		return "", coreerr.E(op, "invalid workspace name: "+name, os.ErrPermission)
	}
	if path == s.rootPath {
		return "", coreerr.E(op, "invalid workspace name: "+name, os.ErrInvalid)
	}
	return path, nil
}
// Ensure Service implements Workspace.
var _ Workspace = (*Service)(nil)

View file

@ -1,48 +1,111 @@
package workspace
import (
"path/filepath"
"os"
"testing"
core "dappco.re/go/core"
"forge.lthn.ai/core/go-crypt/crypt/openpgp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestWorkspace(t *testing.T) {
c := core.New()
pgpSvc, err := openpgp.New(nil)
assert.NoError(t, err)
// stubCrypt is a cryptProvider test double that returns either a canned
// key or a canned error.
type stubCrypt struct {
	key string
	err error
}

// CreateKeyPair returns the configured error when set, otherwise the
// configured key.
func (s stubCrypt) CreateKeyPair(_, _ string) (string, error) {
	if s.err != nil {
		return "", s.err
	}
	return s.key, nil
}
// newTestService builds a workspace Service rooted in a throwaway HOME
// and returns it together with that HOME path.
func newTestService(t *testing.T) (*Service, string) {
	t.Helper()
	home := t.TempDir()
	t.Setenv("HOME", home)

	svc, err := New(core.New(), stubCrypt{key: "private-key"})
	require.NoError(t, err)
	return svc.(*Service), home
}
func TestWorkspace(t *testing.T) {
s, tempHome := newTestService(t)
// Test CreateWorkspace
id, err := s.CreateWorkspace("test-user", "pass123")
assert.NoError(t, err)
require.NoError(t, err)
assert.NotEmpty(t, id)
wsPath := filepath.Join(tempHome, ".core", "workspaces", id)
wsPath := core.Path(tempHome, ".core", "workspaces", id)
assert.DirExists(t, wsPath)
assert.DirExists(t, filepath.Join(wsPath, "keys"))
assert.FileExists(t, filepath.Join(wsPath, "keys", "private.key"))
assert.DirExists(t, core.Path(wsPath, "keys"))
assert.FileExists(t, core.Path(wsPath, "keys", "private.key"))
// Test SwitchWorkspace
err = s.SwitchWorkspace(id)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, id, s.activeWorkspace)
// Test File operations
filename := "secret.txt"
content := "top secret info"
err = s.WorkspaceFileSet(filename, content)
assert.NoError(t, err)
err = s.WorkspaceFileSet("secret.txt", "top secret info")
require.NoError(t, err)
got, err := s.WorkspaceFileGet(filename)
assert.NoError(t, err)
assert.Equal(t, content, got)
got, err := s.WorkspaceFileGet("secret.txt")
require.NoError(t, err)
assert.Equal(t, "top secret info", got)
}
// Switching to a "../" name must not escape the workspaces root, even
// when the target directory exists.
func TestSwitchWorkspace_TraversalBlocked(t *testing.T) {
	s, home := newTestService(t)
	escaped := core.Path(home, ".core", "escaped")
	require.NoError(t, os.MkdirAll(escaped, 0755))

	require.Error(t, s.SwitchWorkspace("../escaped"))
	assert.Empty(t, s.activeWorkspace)
}
// The "." name collapses to the root itself and must be rejected.
func TestSwitchWorkspace_DotNameBlocked(t *testing.T) {
	s, _ := newTestService(t)
	require.Error(t, s.SwitchWorkspace("."))
	assert.Empty(t, s.activeWorkspace)
}
// A symlinked workspace entry pointing outside the root must be refused
// with a permission error.
func TestSwitchWorkspace_SymlinkEscapeBlocked(t *testing.T) {
	s, home := newTestService(t)
	target := t.TempDir()
	link := core.Path(home, ".core", "workspaces", "escaped-link")
	require.NoError(t, os.Symlink(target, link))

	err := s.SwitchWorkspace("escaped-link")
	require.Error(t, err)
	assert.ErrorIs(t, err, os.ErrPermission)
	assert.Empty(t, s.activeWorkspace)
}
// File operations must not be able to climb out of the workspace "files"
// directory; the private key outside it must stay untouched.
func TestWorkspaceFileSet_TraversalBlocked(t *testing.T) {
	s, home := newTestService(t)
	id, err := s.CreateWorkspace("test-user", "pass123")
	require.NoError(t, err)
	require.NoError(t, s.SwitchWorkspace(id))

	keyPath := core.Path(home, ".core", "workspaces", id, "keys", "private.key")
	original, err := os.ReadFile(keyPath)
	require.NoError(t, err)

	require.Error(t, s.WorkspaceFileSet("../keys/private.key", "hijack"))

	current, err := os.ReadFile(keyPath)
	require.NoError(t, err)
	assert.Equal(t, string(original), string(current))

	_, err = s.WorkspaceFileGet("../keys/private.key")
	require.Error(t, err)
}