From e9aebf757b25100f42890f042d5d17ad8b1e1726 Mon Sep 17 00:00:00 2001 From: Snider Date: Sun, 22 Mar 2026 01:28:41 +0000 Subject: [PATCH 1/6] chore(deps): migrate go-log import to dappco.re/go/core/log v0.1.0 Update go.mod require lines from forge.lthn.ai to dappco.re paths where vanity redirects exist. Bump core to v0.5.0 and log to v0.1.0. Borg and go-crypt remain at forge.lthn.ai until their vanity paths are published. Co-Authored-By: Virgil --- CLAUDE.md | 16 ++++++++-------- CONSUMERS.md | 34 ++++++++++++++++++++++++++++++++++ datanode/client.go | 2 +- go.mod | 5 +++-- go.sum | 10 ++++++---- io.go | 2 +- local/client.go | 2 +- s3/s3.go | 2 +- sigil/sigils.go | 2 +- sqlite/sqlite.go | 2 +- store/medium.go | 2 +- store/store.go | 2 +- workspace/service.go | 2 +- 13 files changed, 60 insertions(+), 23 deletions(-) create mode 100644 CONSUMERS.md diff --git a/CLAUDE.md b/CLAUDE.md index 9a27f7a..009e835 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -4,7 +4,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co ## Project Overview -`forge.lthn.ai/core/go-io` is the **mandatory I/O abstraction layer** for the CoreGO ecosystem. All data access — files, configs, journals, state — MUST go through the `io.Medium` interface. Never use raw `os`, `filepath`, or `ioutil` calls. +`dappco.re/go/core/io` is the **mandatory I/O abstraction layer** for the CoreGO ecosystem. All data access — files, configs, journals, state — MUST go through the `io.Medium` interface. Never use raw `os`, `filepath`, or `ioutil` calls. 
### The Premise @@ -103,13 +103,13 @@ Sigils can be created by name via `sigil.NewSigil("hex")`, `sigil.NewSigil("sha2 Standard `io` is always aliased to avoid collision with this package: ```go goio "io" -coreerr "forge.lthn.ai/core/go-log" -coreio "forge.lthn.ai/core/go-io" // when imported from subpackages +coreerr "dappco.re/go/core/log" +coreio "dappco.re/go/core/io" // when imported from subpackages ``` ### Error Handling -All errors use `coreerr.E("pkg.Method", "description", wrappedErr)` from `forge.lthn.ai/core/go-log`. Follow this pattern in new code. +All errors use `coreerr.E("pkg.Method", "description", wrappedErr)` from `dappco.re/go/core/log`. Follow this pattern in new code. ### Compile-Time Interface Checks @@ -117,10 +117,10 @@ Backend packages use `var _ io.Medium = (*Medium)(nil)` to verify interface comp ## Dependencies -- `forge.lthn.ai/Snider/Borg` — DataNode container -- `forge.lthn.ai/core/go-log` — error handling (`coreerr.E()`) -- `forge.lthn.ai/core/go` — Core DI (workspace service only) -- `forge.lthn.ai/core/go-crypt` — PGP key generation (workspace service only) +- `forge.lthn.ai/Snider/Borg` — DataNode container (pending dappco.re migration) +- `dappco.re/go/core/log` — error handling (`coreerr.E()`) +- `dappco.re/go/core` — Core DI (workspace service only) +- `forge.lthn.ai/core/go-crypt` — PGP key generation (workspace service only, pending dappco.re migration) - `aws-sdk-go-v2` — S3 backend - `golang.org/x/crypto` — XChaCha20-Poly1305, BLAKE2, SHA-3 (sigil package) - `modernc.org/sqlite` — SQLite backends (pure Go, no CGO) diff --git a/CONSUMERS.md b/CONSUMERS.md new file mode 100644 index 0000000..cd0c00f --- /dev/null +++ b/CONSUMERS.md @@ -0,0 +1,34 @@ +# Consumers of go-io + +These modules import `dappco.re/go/core/io`: + +- agent +- core +- config +- go-ai +- go-ansible +- go-blockchain +- go-build +- go-cache +- go-container +- go-crypt +- go-forge +- go-html +- go-infra +- go-ml +- go-mlx +- go-netops +- go-p2p +- 
go-process +- go-rag +- go-ratelimit +- go-scm +- gui +- ide +- lint +- mcp +- php +- ts +- LEM + +**Breaking change risk: 28 consumers.** diff --git a/datanode/client.go b/datanode/client.go index fcfe524..6a9a4a0 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -17,7 +17,7 @@ import ( "sync" "time" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" "forge.lthn.ai/Snider/Borg/pkg/datanode" ) diff --git a/go.mod b/go.mod index 6c77560..b204ef9 100644 --- a/go.mod +++ b/go.mod @@ -3,10 +3,10 @@ module dappco.re/go/core/io go 1.26.0 require ( - dappco.re/go/core v0.4.7 + dappco.re/go/core v0.5.0 + dappco.re/go/core/log v0.1.0 forge.lthn.ai/Snider/Borg v0.3.1 forge.lthn.ai/core/go-crypt v0.1.6 - forge.lthn.ai/core/go-log v0.0.4 github.com/aws/aws-sdk-go-v2 v1.41.4 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 github.com/stretchr/testify v1.11.1 @@ -16,6 +16,7 @@ require ( require ( forge.lthn.ai/core/go v0.3.0 // indirect + forge.lthn.ai/core/go-log v0.0.1 // indirect github.com/ProtonMail/go-crypto v1.4.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 // indirect diff --git a/go.sum b/go.sum index d25c96d..5ebee45 100644 --- a/go.sum +++ b/go.sum @@ -1,13 +1,15 @@ -dappco.re/go/core v0.4.7 h1:KmIA/2lo6rl1NMtLrKqCWfMlUqpDZYH3q0/d10dTtGA= -dappco.re/go/core v0.4.7/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= +dappco.re/go/core v0.5.0 h1:P5DJoaCiK5Q+af5UiTdWqUIW4W4qYKzpgGK50thm21U= +dappco.re/go/core v0.5.0/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= +dappco.re/go/core/log v0.1.0 h1:pa71Vq2TD2aoEUQWFKwNcaJ3GBY8HbaNGqtE688Unyc= +dappco.re/go/core/log v0.1.0/go.mod h1:Nkqb8gsXhZAO8VLpx7B8i1iAmohhzqA20b9Zr8VUcJs= forge.lthn.ai/Snider/Borg v0.3.1 h1:gfC1ZTpLoZai07oOWJiVeQ8+qJYK8A795tgVGJHbVL8= forge.lthn.ai/Snider/Borg v0.3.1/go.mod h1:Z7DJD0yHXsxSyM7Mjl6/g4gH1NBsIz44Bf5AFlV76Wg= forge.lthn.ai/core/go v0.3.0 
h1:mOG97ApMprwx9Ked62FdWVwXTGSF6JO6m0DrVpoH2Q4= forge.lthn.ai/core/go v0.3.0/go.mod h1:gE6c8h+PJ2287qNhVUJ5SOe1kopEwHEquvinstpuyJc= forge.lthn.ai/core/go-crypt v0.1.6 h1:jB7L/28S1NR+91u3GcOYuKfBLzPhhBUY1fRe6WkGVns= forge.lthn.ai/core/go-crypt v0.1.6/go.mod h1:4VZAGqxlbadhSB66sJkdj54/HSJ+bSxVgwWK5kMMYDo= -forge.lthn.ai/core/go-log v0.0.4 h1:KTuCEPgFmuM8KJfnyQ8vPOU1Jg654W74h8IJvfQMfv0= -forge.lthn.ai/core/go-log v0.0.4/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw= +forge.lthn.ai/core/go-log v0.0.1 h1:x/E6EfF9vixzqiLHQOl2KT25HyBcMc9qiBkomqVlpPg= +forge.lthn.ai/core/go-log v0.0.1/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw= github.com/ProtonMail/go-crypto v1.4.0 h1:Zq/pbM3F5DFgJiMouxEdSVY44MVoQNEKp5d5QxIQceQ= github.com/ProtonMail/go-crypto v1.4.0/go.mod h1:e1OaTyu5SYVrO9gKOEhTc+5UcXtTUa+P3uLudwcgPqo= github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k= diff --git a/io.go b/io.go index c31592f..21d95a0 100644 --- a/io.go +++ b/io.go @@ -8,7 +8,7 @@ import ( "strings" "time" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" "dappco.re/go/core/io/local" ) diff --git a/local/client.go b/local/client.go index 22fd769..61b89eb 100644 --- a/local/client.go +++ b/local/client.go @@ -11,7 +11,7 @@ import ( "strings" "time" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" ) // Medium is a local filesystem storage backend. diff --git a/s3/s3.go b/s3/s3.go index 86443fe..cd121de 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -15,7 +15,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" ) // s3API is the subset of the S3 client API used by this package. 
diff --git a/sigil/sigils.go b/sigil/sigils.go index 2baffff..54bfb74 100644 --- a/sigil/sigils.go +++ b/sigil/sigils.go @@ -13,7 +13,7 @@ import ( "encoding/json" "io" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" "golang.org/x/crypto/blake2b" "golang.org/x/crypto/blake2s" "golang.org/x/crypto/md4" diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index fe1642d..93aa9a2 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -11,7 +11,7 @@ import ( "strings" "time" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" _ "modernc.org/sqlite" // Pure Go SQLite driver ) diff --git a/store/medium.go b/store/medium.go index 4363ca4..31eeae7 100644 --- a/store/medium.go +++ b/store/medium.go @@ -8,7 +8,7 @@ import ( "strings" "time" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" ) // Medium wraps a Store to satisfy the io.Medium interface. diff --git a/store/store.go b/store/store.go index 7c531fb..10bdce2 100644 --- a/store/store.go +++ b/store/store.go @@ -6,7 +6,7 @@ import ( "strings" "text/template" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" _ "modernc.org/sqlite" ) diff --git a/workspace/service.go b/workspace/service.go index c1978a1..570e6d0 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -8,7 +8,7 @@ import ( "sync" core "dappco.re/go/core" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" "dappco.re/go/core/io" ) -- 2.45.3 From 2acfc3d54803e657f0ddd0f12508431c2d22a632 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 23 Mar 2026 07:26:09 +0000 Subject: [PATCH 2/6] fix(io): address audit issue 4 findings Co-Authored-By: Virgil --- datanode/client.go | 144 ++++++++++++++++++--------- datanode/client_test.go | 88 +++++++++++++++++ go.mod | 6 +- go.sum | 12 +-- io.go | 10 +- local/client.go | 203 +++++++++++++++++++++++++++++++------- local/client_test.go | 28 ++++++ s3/s3.go | 33 ++++++- s3/s3_test.go | 53 ++++++++-- 
workspace/service.go | 69 ++++++++++--- workspace/service_test.go | 90 ++++++++++++----- 11 files changed, 590 insertions(+), 146 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index fcfe524..c4f09ad 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -17,14 +17,26 @@ import ( "sync" "time" + borgdatanode "forge.lthn.ai/Snider/Borg/pkg/datanode" coreerr "forge.lthn.ai/core/go-log" - "forge.lthn.ai/Snider/Borg/pkg/datanode" +) + +var ( + dataNodeWalkDir = func(fsys fs.FS, root string, fn fs.WalkDirFunc) error { + return fs.WalkDir(fsys, root, fn) + } + dataNodeOpen = func(dn *borgdatanode.DataNode, name string) (fs.File, error) { + return dn.Open(name) + } + dataNodeReadAll = func(r goio.Reader) ([]byte, error) { + return goio.ReadAll(r) + } ) // Medium is an in-memory storage backend backed by a Borg DataNode. // All paths are relative (no leading slash). Thread-safe via RWMutex. type Medium struct { - dn *datanode.DataNode + dn *borgdatanode.DataNode dirs map[string]bool // explicit directory tracking mu sync.RWMutex } @@ -32,14 +44,14 @@ type Medium struct { // New creates a new empty DataNode Medium. func New() *Medium { return &Medium{ - dn: datanode.New(), + dn: borgdatanode.New(), dirs: make(map[string]bool), } } // FromTar creates a Medium from a tarball, restoring all files. func FromTar(data []byte) (*Medium, error) { - dn, err := datanode.FromTar(data) + dn, err := borgdatanode.FromTar(data) if err != nil { return nil, coreerr.E("datanode.FromTar", "failed to restore", err) } @@ -63,7 +75,7 @@ func (m *Medium) Snapshot() ([]byte, error) { // Restore replaces the filesystem contents from a tarball. func (m *Medium) Restore(data []byte) error { - dn, err := datanode.FromTar(data) + dn, err := borgdatanode.FromTar(data) if err != nil { return coreerr.E("datanode.Restore", "tar failed", err) } @@ -76,7 +88,7 @@ func (m *Medium) Restore(data []byte) error { // DataNode returns the underlying Borg DataNode. 
// Use this to wrap the filesystem in a TIM container. -func (m *Medium) DataNode() *datanode.DataNode { +func (m *Medium) DataNode() *borgdatanode.DataNode { m.mu.RLock() defer m.mu.RUnlock() return m.dn @@ -195,7 +207,11 @@ func (m *Medium) Delete(p string) error { // Check explicit dirs if m.dirs[p] { // Check if dir is empty - if m.hasPrefixLocked(p + "/") { + hasChildren, err := m.hasPrefixLocked(p + "/") + if err != nil { + return coreerr.E("datanode.Delete", "failed to inspect directory: "+p, err) + } + if hasChildren { return coreerr.E("datanode.Delete", "directory not empty: "+p, os.ErrExist) } delete(m.dirs, p) @@ -205,7 +221,11 @@ func (m *Medium) Delete(p string) error { } if info.IsDir() { - if m.hasPrefixLocked(p + "/") { + hasChildren, err := m.hasPrefixLocked(p + "/") + if err != nil { + return coreerr.E("datanode.Delete", "failed to inspect directory: "+p, err) + } + if hasChildren { return coreerr.E("datanode.Delete", "directory not empty: "+p, os.ErrExist) } delete(m.dirs, p) @@ -213,7 +233,9 @@ func (m *Medium) Delete(p string) error { } // Remove the file by creating a new DataNode without it - m.removeFileLocked(p) + if err := m.removeFileLocked(p); err != nil { + return coreerr.E("datanode.Delete", "failed to delete file: "+p, err) + } return nil } @@ -232,15 +254,22 @@ func (m *Medium) DeleteAll(p string) error { // Check if p itself is a file info, err := m.dn.Stat(p) if err == nil && !info.IsDir() { - m.removeFileLocked(p) + if err := m.removeFileLocked(p); err != nil { + return coreerr.E("datanode.DeleteAll", "failed to delete file: "+p, err) + } found = true } // Remove all files under prefix - entries, _ := m.collectAllLocked() + entries, err := m.collectAllLocked() + if err != nil { + return coreerr.E("datanode.DeleteAll", "failed to inspect tree: "+p, err) + } for _, name := range entries { if name == p || strings.HasPrefix(name, prefix) { - m.removeFileLocked(name) + if err := m.removeFileLocked(name); err != nil { + return 
coreerr.E("datanode.DeleteAll", "failed to delete file: "+name, err) + } found = true } } @@ -274,18 +303,15 @@ func (m *Medium) Rename(oldPath, newPath string) error { if !info.IsDir() { // Read old, write new, delete old - f, err := m.dn.Open(oldPath) + data, err := m.readFileLocked(oldPath) if err != nil { - return coreerr.E("datanode.Rename", "open failed: "+oldPath, err) - } - data, err := goio.ReadAll(f) - f.Close() - if err != nil { - return coreerr.E("datanode.Rename", "read failed: "+oldPath, err) + return coreerr.E("datanode.Rename", "failed to read source file: "+oldPath, err) } m.dn.AddData(newPath, data) m.ensureDirsLocked(path.Dir(newPath)) - m.removeFileLocked(oldPath) + if err := m.removeFileLocked(oldPath); err != nil { + return coreerr.E("datanode.Rename", "failed to remove source file: "+oldPath, err) + } return nil } @@ -293,18 +319,21 @@ func (m *Medium) Rename(oldPath, newPath string) error { oldPrefix := oldPath + "/" newPrefix := newPath + "/" - entries, _ := m.collectAllLocked() + entries, err := m.collectAllLocked() + if err != nil { + return coreerr.E("datanode.Rename", "failed to inspect tree: "+oldPath, err) + } for _, name := range entries { if strings.HasPrefix(name, oldPrefix) { newName := newPrefix + strings.TrimPrefix(name, oldPrefix) - f, err := m.dn.Open(name) + data, err := m.readFileLocked(name) if err != nil { - continue + return coreerr.E("datanode.Rename", "failed to read source file: "+name, err) } - data, _ := goio.ReadAll(f) - f.Close() m.dn.AddData(newName, data) - m.removeFileLocked(name) + if err := m.removeFileLocked(name); err != nil { + return coreerr.E("datanode.Rename", "failed to remove source file: "+name, err) + } } } @@ -416,10 +445,13 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { // Read existing content var existing []byte m.mu.RLock() - f, err := m.dn.Open(p) - if err == nil { - existing, _ = goio.ReadAll(f) - f.Close() + if m.IsFile(p) { + data, err := m.readFileLocked(p) + if err != nil 
{ + m.mu.RUnlock() + return nil, coreerr.E("datanode.Append", "failed to read existing content: "+p, err) + } + existing = data } m.mu.RUnlock() @@ -475,27 +507,30 @@ func (m *Medium) IsDir(p string) bool { // --- internal helpers --- // hasPrefixLocked checks if any file path starts with prefix. Caller holds lock. -func (m *Medium) hasPrefixLocked(prefix string) bool { - entries, _ := m.collectAllLocked() +func (m *Medium) hasPrefixLocked(prefix string) (bool, error) { + entries, err := m.collectAllLocked() + if err != nil { + return false, err + } for _, name := range entries { if strings.HasPrefix(name, prefix) { - return true + return true, nil } } for d := range m.dirs { if strings.HasPrefix(d, prefix) { - return true + return true, nil } } - return false + return false, nil } // collectAllLocked returns all file paths in the DataNode. Caller holds lock. func (m *Medium) collectAllLocked() ([]string, error) { var names []string - err := fs.WalkDir(m.dn, ".", func(p string, d fs.DirEntry, err error) error { + err := dataNodeWalkDir(m.dn, ".", func(p string, d fs.DirEntry, err error) error { if err != nil { - return nil + return err } if !d.IsDir() { names = append(names, p) @@ -505,28 +540,43 @@ func (m *Medium) collectAllLocked() ([]string, error) { return names, err } +func (m *Medium) readFileLocked(name string) ([]byte, error) { + f, err := dataNodeOpen(m.dn, name) + if err != nil { + return nil, err + } + data, readErr := dataNodeReadAll(f) + closeErr := f.Close() + if readErr != nil { + return nil, readErr + } + if closeErr != nil { + return nil, closeErr + } + return data, nil +} + // removeFileLocked removes a single file by rebuilding the DataNode. // This is necessary because Borg's DataNode doesn't expose a Remove method. // Caller must hold m.mu write lock. 
-func (m *Medium) removeFileLocked(target string) { - entries, _ := m.collectAllLocked() - newDN := datanode.New() +func (m *Medium) removeFileLocked(target string) error { + entries, err := m.collectAllLocked() + if err != nil { + return err + } + newDN := borgdatanode.New() for _, name := range entries { if name == target { continue } - f, err := m.dn.Open(name) + data, err := m.readFileLocked(name) if err != nil { - continue - } - data, err := goio.ReadAll(f) - f.Close() - if err != nil { - continue + return err } newDN.AddData(name, data) } m.dn = newDN + return nil } // --- writeCloser buffers writes and flushes to DataNode on Close --- diff --git a/datanode/client_test.go b/datanode/client_test.go index 651d322..8beb6cd 100644 --- a/datanode/client_test.go +++ b/datanode/client_test.go @@ -1,7 +1,9 @@ package datanode import ( + "errors" "io" + "io/fs" "testing" coreio "dappco.re/go/core/io" @@ -102,6 +104,23 @@ func TestDelete_Bad(t *testing.T) { assert.Error(t, m.Delete("dir")) } +func TestDelete_Bad_DirectoryInspectionFailure(t *testing.T) { + m := New() + require.NoError(t, m.Write("dir/file.txt", "content")) + + original := dataNodeWalkDir + dataNodeWalkDir = func(_ fs.FS, _ string, _ fs.WalkDirFunc) error { + return errors.New("walk failed") + } + t.Cleanup(func() { + dataNodeWalkDir = original + }) + + err := m.Delete("dir") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to inspect directory") +} + func TestDeleteAll_Good(t *testing.T) { m := New() @@ -116,6 +135,41 @@ func TestDeleteAll_Good(t *testing.T) { assert.True(t, m.Exists("keep.txt")) } +func TestDeleteAll_Bad_WalkFailure(t *testing.T) { + m := New() + require.NoError(t, m.Write("tree/a.txt", "a")) + + original := dataNodeWalkDir + dataNodeWalkDir = func(_ fs.FS, _ string, _ fs.WalkDirFunc) error { + return errors.New("walk failed") + } + t.Cleanup(func() { + dataNodeWalkDir = original + }) + + err := m.DeleteAll("tree") + require.Error(t, err) + assert.Contains(t, 
err.Error(), "failed to inspect tree") +} + +func TestDelete_Bad_RemoveFailure(t *testing.T) { + m := New() + require.NoError(t, m.Write("keep.txt", "keep")) + require.NoError(t, m.Write("bad.txt", "bad")) + + original := dataNodeReadAll + dataNodeReadAll = func(_ io.Reader) ([]byte, error) { + return nil, errors.New("read failed") + } + t.Cleanup(func() { + dataNodeReadAll = original + }) + + err := m.Delete("bad.txt") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to delete file") +} + func TestRename_Good(t *testing.T) { m := New() @@ -147,6 +201,23 @@ func TestRenameDir_Good(t *testing.T) { assert.Equal(t, "package b", got) } +func TestRenameDir_Bad_ReadFailure(t *testing.T) { + m := New() + require.NoError(t, m.Write("src/a.go", "package a")) + + original := dataNodeReadAll + dataNodeReadAll = func(_ io.Reader) ([]byte, error) { + return nil, errors.New("read failed") + } + t.Cleanup(func() { + dataNodeReadAll = original + }) + + err := m.Rename("src", "dst") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to read source file") +} + func TestList_Good(t *testing.T) { m := New() @@ -230,6 +301,23 @@ func TestCreateAppend_Good(t *testing.T) { assert.Equal(t, "hello world", got) } +func TestAppend_Bad_ReadFailure(t *testing.T) { + m := New() + require.NoError(t, m.Write("new.txt", "hello")) + + original := dataNodeReadAll + dataNodeReadAll = func(_ io.Reader) ([]byte, error) { + return nil, errors.New("read failed") + } + t.Cleanup(func() { + dataNodeReadAll = original + }) + + _, err := m.Append("new.txt") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to read existing content") +} + func TestStreams_Good(t *testing.T) { m := New() diff --git a/go.mod b/go.mod index 6c77560..9135ed8 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,8 @@ module dappco.re/go/core/io go 1.26.0 require ( - dappco.re/go/core v0.4.7 + dappco.re/go/core v0.6.0 forge.lthn.ai/Snider/Borg v0.3.1 - forge.lthn.ai/core/go-crypt v0.1.6 
forge.lthn.ai/core/go-log v0.0.4 github.com/aws/aws-sdk-go-v2 v1.41.4 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 @@ -15,8 +14,6 @@ require ( ) require ( - forge.lthn.ai/core/go v0.3.0 // indirect - github.com/ProtonMail/go-crypto v1.4.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 // indirect @@ -26,7 +23,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20 // indirect github.com/aws/smithy-go v1.24.2 // indirect - github.com/cloudflare/circl v1.6.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/google/uuid v1.6.0 // indirect diff --git a/go.sum b/go.sum index d25c96d..87d11bc 100644 --- a/go.sum +++ b/go.sum @@ -1,15 +1,9 @@ -dappco.re/go/core v0.4.7 h1:KmIA/2lo6rl1NMtLrKqCWfMlUqpDZYH3q0/d10dTtGA= -dappco.re/go/core v0.4.7/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= +dappco.re/go/core v0.6.0 h1:0wmuO/UmCWXxJkxQ6XvVLnqkAuWitbd49PhxjCsplyk= +dappco.re/go/core v0.6.0/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= forge.lthn.ai/Snider/Borg v0.3.1 h1:gfC1ZTpLoZai07oOWJiVeQ8+qJYK8A795tgVGJHbVL8= forge.lthn.ai/Snider/Borg v0.3.1/go.mod h1:Z7DJD0yHXsxSyM7Mjl6/g4gH1NBsIz44Bf5AFlV76Wg= -forge.lthn.ai/core/go v0.3.0 h1:mOG97ApMprwx9Ked62FdWVwXTGSF6JO6m0DrVpoH2Q4= -forge.lthn.ai/core/go v0.3.0/go.mod h1:gE6c8h+PJ2287qNhVUJ5SOe1kopEwHEquvinstpuyJc= -forge.lthn.ai/core/go-crypt v0.1.6 h1:jB7L/28S1NR+91u3GcOYuKfBLzPhhBUY1fRe6WkGVns= -forge.lthn.ai/core/go-crypt v0.1.6/go.mod h1:4VZAGqxlbadhSB66sJkdj54/HSJ+bSxVgwWK5kMMYDo= forge.lthn.ai/core/go-log v0.0.4 h1:KTuCEPgFmuM8KJfnyQ8vPOU1Jg654W74h8IJvfQMfv0= forge.lthn.ai/core/go-log v0.0.4/go.mod 
h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw= -github.com/ProtonMail/go-crypto v1.4.0 h1:Zq/pbM3F5DFgJiMouxEdSVY44MVoQNEKp5d5QxIQceQ= -github.com/ProtonMail/go-crypto v1.4.0/go.mod h1:e1OaTyu5SYVrO9gKOEhTc+5UcXtTUa+P3uLudwcgPqo= github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k= github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 h1:3kGOqnh1pPeddVa/E37XNTaWJ8W6vrbYV9lJEkCnhuY= @@ -32,8 +26,6 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 h1:csi9NLpFZXb9fxY7rS1xVzgPRGMt7 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1/go.mod h1:qXVal5H0ChqXP63t6jze5LmFalc7+ZE7wOdLtZ0LCP0= github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= -github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= -github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/io.go b/io.go index c31592f..25fe801 100644 --- a/io.go +++ b/io.go @@ -4,12 +4,12 @@ import ( goio "io" "io/fs" "os" - "path/filepath" "strings" "time" - coreerr "forge.lthn.ai/core/go-log" + core "dappco.re/go/core" "dappco.re/go/core/io/local" + coreerr "forge.lthn.ai/core/go-log" ) // Medium defines the standard interface for a storage backend. 
@@ -361,7 +361,7 @@ func (m *MockMedium) Open(path string) (fs.File, error) { return nil, coreerr.E("io.MockMedium.Open", "file not found: "+path, os.ErrNotExist) } return &MockFile{ - name: filepath.Base(path), + name: core.PathBase(path), content: []byte(content), }, nil } @@ -556,7 +556,7 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { modTime = time.Now() } return FileInfo{ - name: filepath.Base(path), + name: core.PathBase(path), size: int64(len(content)), mode: 0644, modTime: modTime, @@ -564,7 +564,7 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { } if _, ok := m.Dirs[path]; ok { return FileInfo{ - name: filepath.Base(path), + name: core.PathBase(path), isDir: true, mode: fs.ModeDir | 0755, }, nil diff --git a/local/client.go b/local/client.go index 22fd769..d4aaafc 100644 --- a/local/client.go +++ b/local/client.go @@ -6,11 +6,10 @@ import ( goio "io" "io/fs" "os" - "os/user" - "path/filepath" "strings" "time" + core "dappco.re/go/core" coreerr "forge.lthn.ai/core/go-log" ) @@ -22,20 +21,163 @@ type Medium struct { // New creates a new local Medium rooted at the given directory. // Pass "/" for full filesystem access, or a specific path to sandbox. func New(root string) (*Medium, error) { - abs, err := filepath.Abs(root) - if err != nil { - return nil, err - } + abs := absolutePath(root) // Resolve symlinks so sandbox checks compare like-for-like. // On macOS, /var is a symlink to /private/var — without this, - // EvalSymlinks on child paths resolves to /private/var/... while + // resolving child paths resolves to /private/var/... while // root stays /var/..., causing false sandbox escape detections. 
- if resolved, err := filepath.EvalSymlinks(abs); err == nil { + if resolved, err := resolveSymlinksPath(abs); err == nil { abs = resolved } return &Medium{root: abs}, nil } +func dirSeparator() string { + if sep := core.Env("DS"); sep != "" { + return sep + } + return string(os.PathSeparator) +} + +func normalisePath(p string) string { + sep := dirSeparator() + if sep == "/" { + return strings.ReplaceAll(p, "\\", sep) + } + return strings.ReplaceAll(p, "/", sep) +} + +func currentWorkingDir() string { + if cwd, err := os.Getwd(); err == nil && cwd != "" { + return cwd + } + if cwd := core.Env("DIR_CWD"); cwd != "" { + return cwd + } + return "." +} + +func absolutePath(p string) string { + p = normalisePath(p) + if core.PathIsAbs(p) { + return core.Path(p) + } + return core.Path(currentWorkingDir(), p) +} + +func cleanSandboxPath(p string) string { + return core.Path(dirSeparator() + normalisePath(p)) +} + +func splitPathParts(p string) []string { + trimmed := strings.TrimPrefix(p, dirSeparator()) + if trimmed == "" { + return nil + } + var parts []string + for _, part := range strings.Split(trimmed, dirSeparator()) { + if part == "" { + continue + } + parts = append(parts, part) + } + return parts +} + +func resolveSymlinksPath(p string) (string, error) { + return resolveSymlinksRecursive(absolutePath(p), map[string]struct{}{}) +} + +func resolveSymlinksRecursive(p string, seen map[string]struct{}) (string, error) { + p = core.Path(p) + if p == dirSeparator() { + return p, nil + } + + current := dirSeparator() + for _, part := range splitPathParts(p) { + next := core.Path(current, part) + info, err := os.Lstat(next) + if err != nil { + if os.IsNotExist(err) { + current = next + continue + } + return "", err + } + if info.Mode()&os.ModeSymlink == 0 { + current = next + continue + } + + target, err := os.Readlink(next) + if err != nil { + return "", err + } + target = normalisePath(target) + if !core.PathIsAbs(target) { + target = core.Path(current, target) + } 
else { + target = core.Path(target) + } + if _, ok := seen[target]; ok { + return "", coreerr.E("local.resolveSymlinksPath", "symlink cycle: "+target, os.ErrInvalid) + } + seen[target] = struct{}{} + resolved, err := resolveSymlinksRecursive(target, seen) + delete(seen, target) + if err != nil { + return "", err + } + current = resolved + } + + return current, nil +} + +func isWithinRoot(root, target string) bool { + root = core.Path(root) + target = core.Path(target) + if root == dirSeparator() { + return true + } + return target == root || strings.HasPrefix(target, root+dirSeparator()) +} + +func canonicalPath(p string) string { + if p == "" { + return "" + } + if resolved, err := resolveSymlinksPath(p); err == nil { + return resolved + } + return absolutePath(p) +} + +func isProtectedPath(full string) bool { + full = canonicalPath(full) + protected := map[string]struct{}{ + canonicalPath(dirSeparator()): {}, + } + for _, home := range []string{core.Env("HOME"), core.Env("DIR_HOME")} { + if home == "" { + continue + } + protected[canonicalPath(home)] = struct{}{} + } + _, ok := protected[full] + return ok +} + +func logSandboxEscape(root, path, attempted string) { + username := core.Env("USER") + if username == "" { + username = "unknown" + } + fmt.Fprintf(os.Stderr, "[%s] SECURITY sandbox escape detected root=%s path=%s attempted=%s user=%s\n", + time.Now().Format(time.RFC3339), root, path, attempted, username) +} + // path sanitises and returns the full path. // Absolute paths are sandboxed under root (unless root is "/"). func (m *Medium) path(p string) string { @@ -46,41 +188,36 @@ func (m *Medium) path(p string) string { // If the path is relative and the medium is rooted at "/", // treat it as relative to the current working directory. // This makes io.Local behave more like the standard 'os' package. 
- if m.root == "/" && !filepath.IsAbs(p) { - cwd, _ := os.Getwd() - return filepath.Join(cwd, p) + if m.root == dirSeparator() && !core.PathIsAbs(normalisePath(p)) { + return core.Path(currentWorkingDir(), normalisePath(p)) } - // Use filepath.Clean with a leading slash to resolve all .. and . internally + // Use a cleaned absolute path to resolve all .. and . internally // before joining with the root. This is a standard way to sandbox paths. - clean := filepath.Clean("/" + p) + clean := cleanSandboxPath(p) // If root is "/", allow absolute paths through - if m.root == "/" { + if m.root == dirSeparator() { return clean } // Join cleaned relative path with root - return filepath.Join(m.root, clean) + return core.Path(m.root, strings.TrimPrefix(clean, dirSeparator())) } // validatePath ensures the path is within the sandbox, following symlinks if they exist. func (m *Medium) validatePath(p string) (string, error) { - if m.root == "/" { + if m.root == dirSeparator() { return m.path(p), nil } // Split the cleaned path into components - parts := strings.Split(filepath.Clean("/"+p), string(os.PathSeparator)) + parts := splitPathParts(cleanSandboxPath(p)) current := m.root for _, part := range parts { - if part == "" { - continue - } - - next := filepath.Join(current, part) - realNext, err := filepath.EvalSymlinks(next) + next := core.Path(current, part) + realNext, err := resolveSymlinksPath(next) if err != nil { if os.IsNotExist(err) { // Part doesn't exist, we can't follow symlinks anymore. 
@@ -93,15 +230,9 @@ func (m *Medium) validatePath(p string) (string, error) { } // Verify the resolved part is still within the root - rel, err := filepath.Rel(m.root, realNext) - if err != nil || strings.HasPrefix(rel, "..") { + if !isWithinRoot(m.root, realNext) { // Security event: sandbox escape attempt - username := "unknown" - if u, err := user.Current(); err == nil { - username = u.Username - } - fmt.Fprintf(os.Stderr, "[%s] SECURITY sandbox escape detected root=%s path=%s attempted=%s user=%s\n", - time.Now().Format(time.RFC3339), m.root, p, realNext, username) + logSandboxEscape(m.root, p, realNext) return "", os.ErrPermission // Path escapes sandbox } current = realNext @@ -137,7 +268,7 @@ func (m *Medium) WriteMode(p, content string, mode os.FileMode) error { if err != nil { return err } - if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil { + if err := os.MkdirAll(core.PathDir(full), 0755); err != nil { return err } return os.WriteFile(full, []byte(content), mode) @@ -221,7 +352,7 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) { if err != nil { return nil, err } - if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil { + if err := os.MkdirAll(core.PathDir(full), 0755); err != nil { return nil, err } return os.Create(full) @@ -233,7 +364,7 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { if err != nil { return nil, err } - if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil { + if err := os.MkdirAll(core.PathDir(full), 0755); err != nil { return nil, err } return os.OpenFile(full, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) @@ -265,7 +396,7 @@ func (m *Medium) Delete(p string) error { if err != nil { return err } - if full == "/" || full == os.Getenv("HOME") { + if isProtectedPath(full) { return coreerr.E("local.Delete", "refusing to delete protected path: "+full, nil) } return os.Remove(full) @@ -277,7 +408,7 @@ func (m *Medium) DeleteAll(p string) error { if err != nil { return err } - if full == 
"/" || full == os.Getenv("HOME") { + if isProtectedPath(full) { return coreerr.E("local.DeleteAll", "refusing to delete protected path: "+full, nil) } return os.RemoveAll(full) diff --git a/local/client_test.go b/local/client_test.go index f3deb15..120ee0e 100644 --- a/local/client_test.go +++ b/local/client_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNew(t *testing.T) { @@ -170,6 +171,33 @@ func TestDeleteAll(t *testing.T) { assert.False(t, m.Exists("dir")) } +func TestDelete_ProtectedHomeViaSymlinkEnv(t *testing.T) { + realHome := t.TempDir() + linkParent := t.TempDir() + homeLink := filepath.Join(linkParent, "home-link") + require.NoError(t, os.Symlink(realHome, homeLink)) + t.Setenv("HOME", homeLink) + + m, err := New("/") + require.NoError(t, err) + + err = m.Delete(realHome) + require.Error(t, err) + assert.DirExists(t, realHome) +} + +func TestDeleteAll_ProtectedHomeViaEnv(t *testing.T) { + tempHome := t.TempDir() + t.Setenv("HOME", tempHome) + + m, err := New("/") + require.NoError(t, err) + + err = m.DeleteAll(tempHome) + require.Error(t, err) + assert.DirExists(t, tempHome) +} + func TestRename(t *testing.T) { root := t.TempDir() m, _ := New(root) diff --git a/s3/s3.go b/s3/s3.go index 86443fe..3ca4ab9 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -37,6 +37,29 @@ type Medium struct { prefix string } +func deleteObjectsError(prefix string, errs []types.Error) error { + if len(errs) == 0 { + return nil + } + details := make([]string, 0, len(errs)) + for _, item := range errs { + key := aws.ToString(item.Key) + code := aws.ToString(item.Code) + msg := aws.ToString(item.Message) + switch { + case code != "" && msg != "": + details = append(details, key+": "+code+" "+msg) + case code != "": + details = append(details, key+": "+code) + case msg != "": + details = append(details, key+": "+msg) + default: + details = append(details, key) + } + } + return coreerr.E("s3.DeleteAll", 
"partial delete failed under "+prefix+": "+strings.Join(details, "; "), nil) +} + // Option configures a Medium. type Option func(*Medium) @@ -197,10 +220,13 @@ func (m *Medium) DeleteAll(p string) error { } // First, try deleting the exact key - _, _ = m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ + _, err := m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(key), }) + if err != nil { + return coreerr.E("s3.DeleteAll", "failed to delete object: "+key, err) + } // Then delete all objects under the prefix prefix := key @@ -230,13 +256,16 @@ func (m *Medium) DeleteAll(p string) error { objects[i] = types.ObjectIdentifier{Key: obj.Key} } - _, err = m.client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{ + deleteOut, err := m.client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{ Bucket: aws.String(m.bucket), Delete: &types.Delete{Objects: objects, Quiet: aws.Bool(true)}, }) if err != nil { return coreerr.E("s3.DeleteAll", "failed to delete objects", err) } + if err := deleteObjectsError(prefix, deleteOut.Errors); err != nil { + return err + } if listOut.IsTruncated != nil && *listOut.IsTruncated { continuationToken = listOut.NextContinuationToken diff --git a/s3/s3_test.go b/s3/s3_test.go index 1f226e7..a81efff 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -3,6 +3,7 @@ package s3 import ( "bytes" "context" + "errors" "fmt" goio "io" "io/fs" @@ -21,15 +22,19 @@ import ( // mockS3 is an in-memory mock implementing the s3API interface. 
type mockS3 struct { - mu sync.RWMutex - objects map[string][]byte - mtimes map[string]time.Time + mu sync.RWMutex + objects map[string][]byte + mtimes map[string]time.Time + deleteObjectErrors map[string]error + deleteObjectsErrs map[string]types.Error } func newMockS3() *mockS3 { return &mockS3{ - objects: make(map[string][]byte), - mtimes: make(map[string]time.Time), + objects: make(map[string][]byte), + mtimes: make(map[string]time.Time), + deleteObjectErrors: make(map[string]error), + deleteObjectsErrs: make(map[string]types.Error), } } @@ -69,6 +74,9 @@ func (m *mockS3) DeleteObject(_ context.Context, params *s3.DeleteObjectInput, _ defer m.mu.Unlock() key := aws.ToString(params.Key) + if err, ok := m.deleteObjectErrors[key]; ok { + return nil, err + } delete(m.objects, key) delete(m.mtimes, key) return &s3.DeleteObjectOutput{}, nil @@ -78,12 +86,17 @@ func (m *mockS3) DeleteObjects(_ context.Context, params *s3.DeleteObjectsInput, m.mu.Lock() defer m.mu.Unlock() + var outErrs []types.Error for _, obj := range params.Delete.Objects { key := aws.ToString(obj.Key) + if errInfo, ok := m.deleteObjectsErrs[key]; ok { + outErrs = append(outErrs, errInfo) + continue + } delete(m.objects, key) delete(m.mtimes, key) } - return &s3.DeleteObjectsOutput{}, nil + return &s3.DeleteObjectsOutput{Errors: outErrs}, nil } func (m *mockS3) HeadObject(_ context.Context, params *s3.HeadObjectInput, _ ...func(*s3.Options)) (*s3.HeadObjectOutput, error) { @@ -350,6 +363,34 @@ func TestDeleteAll_Bad_EmptyPath(t *testing.T) { assert.Error(t, err) } +func TestDeleteAll_Bad_DeleteObjectError(t *testing.T) { + m, mock := newTestMedium(t) + mock.deleteObjectErrors["dir"] = errors.New("boom") + + err := m.DeleteAll("dir") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to delete object: dir") +} + +func TestDeleteAll_Bad_PartialDelete(t *testing.T) { + m, mock := newTestMedium(t) + + require.NoError(t, m.Write("dir/file1.txt", "a")) + require.NoError(t, 
m.Write("dir/file2.txt", "b")) + mock.deleteObjectsErrs["dir/file2.txt"] = types.Error{ + Key: aws.String("dir/file2.txt"), + Code: aws.String("AccessDenied"), + Message: aws.String("blocked"), + } + + err := m.DeleteAll("dir") + require.Error(t, err) + assert.Contains(t, err.Error(), "partial delete failed") + assert.Contains(t, err.Error(), "dir/file2.txt") + assert.True(t, m.IsFile("dir/file2.txt")) + assert.False(t, m.IsFile("dir/file1.txt")) +} + func TestRename_Good(t *testing.T) { m, _ := newTestMedium(t) diff --git a/workspace/service.go b/workspace/service.go index c1978a1..9e81764 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -4,7 +4,7 @@ import ( "crypto/sha256" "encoding/hex" "os" - "path/filepath" + "strings" "sync" core "dappco.re/go/core" @@ -39,11 +39,11 @@ type Service struct { // New creates a new Workspace service instance. // An optional cryptProvider can be passed to supply PGP key generation. func New(c *core.Core, crypt ...cryptProvider) (any, error) { - home, err := os.UserHomeDir() - if err != nil { - return nil, coreerr.E("workspace.New", "failed to determine home directory", err) + home := workspaceHome() + if home == "" { + return nil, coreerr.E("workspace.New", "failed to determine home directory", os.ErrNotExist) } - rootPath := filepath.Join(home, ".core", "workspaces") + rootPath := core.Path(home, ".core", "workspaces") s := &Service{ core: c, @@ -75,14 +75,17 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) { hash := sha256.Sum256([]byte(identifier)) wsID := hex.EncodeToString(hash[:]) - wsPath := filepath.Join(s.rootPath, wsID) + wsPath, err := s.workspacePath("workspace.CreateWorkspace", wsID) + if err != nil { + return "", err + } if s.medium.Exists(wsPath) { return "", coreerr.E("workspace.CreateWorkspace", "workspace already exists", nil) } for _, d := range []string{"config", "log", "data", "files", "keys"} { - if err := s.medium.EnsureDir(filepath.Join(wsPath, d)); err != 
nil { + if err := s.medium.EnsureDir(core.Path(wsPath, d)); err != nil { return "", coreerr.E("workspace.CreateWorkspace", "failed to create directory: "+d, err) } } @@ -92,7 +95,7 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) { return "", coreerr.E("workspace.CreateWorkspace", "failed to generate keys", err) } - if err := s.medium.WriteMode(filepath.Join(wsPath, "keys", "private.key"), privKey, 0600); err != nil { + if err := s.medium.WriteMode(core.Path(wsPath, "keys", "private.key"), privKey, 0600); err != nil { return "", coreerr.E("workspace.CreateWorkspace", "failed to save private key", err) } @@ -104,12 +107,15 @@ func (s *Service) SwitchWorkspace(name string) error { s.mu.Lock() defer s.mu.Unlock() - wsPath := filepath.Join(s.rootPath, name) + wsPath, err := s.workspacePath("workspace.SwitchWorkspace", name) + if err != nil { + return err + } if !s.medium.IsDir(wsPath) { return coreerr.E("workspace.SwitchWorkspace", "workspace not found: "+name, nil) } - s.activeWorkspace = name + s.activeWorkspace = core.PathBase(wsPath) return nil } @@ -119,7 +125,15 @@ func (s *Service) activeFilePath(op, filename string) (string, error) { if s.activeWorkspace == "" { return "", coreerr.E(op, "no active workspace", nil) } - return filepath.Join(s.rootPath, s.activeWorkspace, "files", filename), nil + filesRoot := core.Path(s.rootPath, s.activeWorkspace, "files") + path, err := joinWithinRoot(filesRoot, filename) + if err != nil { + return "", coreerr.E(op, "file path escapes workspace files", os.ErrPermission) + } + if path == filesRoot { + return "", coreerr.E(op, "filename is required", os.ErrInvalid) + } + return path, nil } // WorkspaceFileGet retrieves the content of a file from the active workspace. 
@@ -171,5 +185,38 @@ func (s *Service) HandleIPCEvents(c *core.Core, msg core.Message) core.Result { return core.Result{OK: true} } +func workspaceHome() string { + if home := core.Env("CORE_HOME"); home != "" { + return home + } + if home := core.Env("HOME"); home != "" { + return home + } + return core.Env("DIR_HOME") +} + +func joinWithinRoot(root string, parts ...string) (string, error) { + candidate := core.Path(append([]string{root}, parts...)...) + sep := core.Env("DS") + if candidate == root || strings.HasPrefix(candidate, root+sep) { + return candidate, nil + } + return "", os.ErrPermission +} + +func (s *Service) workspacePath(op, name string) (string, error) { + if name == "" { + return "", coreerr.E(op, "workspace name is required", os.ErrInvalid) + } + path, err := joinWithinRoot(s.rootPath, name) + if err != nil { + return "", coreerr.E(op, "workspace path escapes root", err) + } + if core.PathDir(path) != s.rootPath { + return "", coreerr.E(op, "invalid workspace name: "+name, os.ErrPermission) + } + return path, nil +} + // Ensure Service implements Workspace. 
var _ Workspace = (*Service)(nil) diff --git a/workspace/service_test.go b/workspace/service_test.go index 1cab667..1fc7abe 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -1,48 +1,90 @@ package workspace import ( - "path/filepath" + "os" "testing" core "dappco.re/go/core" - "forge.lthn.ai/core/go-crypt/crypt/openpgp" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestWorkspace(t *testing.T) { - c := core.New() - pgpSvc, err := openpgp.New(nil) - assert.NoError(t, err) +type stubCrypt struct { + key string + err error +} + +func (s stubCrypt) CreateKeyPair(_, _ string) (string, error) { + if s.err != nil { + return "", s.err + } + return s.key, nil +} + +func newTestService(t *testing.T) (*Service, string) { + t.Helper() tempHome := t.TempDir() t.Setenv("HOME", tempHome) - svc, err := New(c, pgpSvc.(cryptProvider)) - assert.NoError(t, err) - s := svc.(*Service) + svc, err := New(core.New(), stubCrypt{key: "private-key"}) + require.NoError(t, err) + return svc.(*Service), tempHome +} + +func TestWorkspace(t *testing.T) { + s, tempHome := newTestService(t) - // Test CreateWorkspace id, err := s.CreateWorkspace("test-user", "pass123") - assert.NoError(t, err) + require.NoError(t, err) assert.NotEmpty(t, id) - wsPath := filepath.Join(tempHome, ".core", "workspaces", id) + wsPath := core.Path(tempHome, ".core", "workspaces", id) assert.DirExists(t, wsPath) - assert.DirExists(t, filepath.Join(wsPath, "keys")) - assert.FileExists(t, filepath.Join(wsPath, "keys", "private.key")) + assert.DirExists(t, core.Path(wsPath, "keys")) + assert.FileExists(t, core.Path(wsPath, "keys", "private.key")) - // Test SwitchWorkspace err = s.SwitchWorkspace(id) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, id, s.activeWorkspace) - // Test File operations - filename := "secret.txt" - content := "top secret info" - err = s.WorkspaceFileSet(filename, content) - assert.NoError(t, err) + err = 
s.WorkspaceFileSet("secret.txt", "top secret info") + require.NoError(t, err) - got, err := s.WorkspaceFileGet(filename) - assert.NoError(t, err) - assert.Equal(t, content, got) + got, err := s.WorkspaceFileGet("secret.txt") + require.NoError(t, err) + assert.Equal(t, "top secret info", got) +} + +func TestSwitchWorkspace_TraversalBlocked(t *testing.T) { + s, tempHome := newTestService(t) + + outside := core.Path(tempHome, ".core", "escaped") + require.NoError(t, os.MkdirAll(outside, 0755)) + + err := s.SwitchWorkspace("../escaped") + require.Error(t, err) + assert.Empty(t, s.activeWorkspace) +} + +func TestWorkspaceFileSet_TraversalBlocked(t *testing.T) { + s, tempHome := newTestService(t) + + id, err := s.CreateWorkspace("test-user", "pass123") + require.NoError(t, err) + require.NoError(t, s.SwitchWorkspace(id)) + + keyPath := core.Path(tempHome, ".core", "workspaces", id, "keys", "private.key") + before, err := os.ReadFile(keyPath) + require.NoError(t, err) + + err = s.WorkspaceFileSet("../keys/private.key", "hijack") + require.Error(t, err) + + after, err := os.ReadFile(keyPath) + require.NoError(t, err) + assert.Equal(t, string(before), string(after)) + + _, err = s.WorkspaceFileGet("../keys/private.key") + require.Error(t, err) } -- 2.45.3 From 19c43392291effbf9bee7f0b7c9004bc01255b62 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 23 Mar 2026 13:20:41 +0000 Subject: [PATCH 3/6] docs: add security attack vector mapping --- docs/security-attack-vector-mapping.md | 168 +++++++++++++++++++++++++ 1 file changed, 168 insertions(+) create mode 100644 docs/security-attack-vector-mapping.md diff --git a/docs/security-attack-vector-mapping.md b/docs/security-attack-vector-mapping.md new file mode 100644 index 0000000..4db0e57 --- /dev/null +++ b/docs/security-attack-vector-mapping.md @@ -0,0 +1,168 @@ +# Security Attack Vector Mapping + +`CODEX.md` was not present under `/workspace`, so this mapping follows [`CLAUDE.md`](/workspace/CLAUDE.md) and the current source 
tree. + +Scope: +- Included: exported functions and methods that accept caller-controlled data or parse external payloads, plus public writer types returned from those APIs. +- Omitted: zero-argument accessors and teardown helpers such as `Close`, `Snapshot`, `Store`, `AsMedium`, `DataNode`, and `fs.FileInfo` getters because they are not ingress points. + +Notes: +- `local` is the in-repo filesystem containment layer. Its protection depends on `validatePath`, but most mutating operations still have a post-validation TOCTOU window before the final `os.*` call. +- `workspace.Service` uses `io.Local` rooted at `/`, so its path joins are not sandboxed by this repository. +- `datanode.FromTar` and `datanode.Restore` inherit Borg `datanode.FromTar` behavior from `forge.lthn.ai/Snider/Borg` v0.3.1: it trims leading `/`, preserves symlink tar entries, and does not reject `..` segments or large archives. + +## `io` Facade And `MockMedium` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `io.NewSandboxed` | `io.go:126` | Caller-supplied sandbox root | Delegates to `local.New(root)` and stores the resolved root in a `local.Medium` | `local.New` absolutizes and best-effort resolves root symlinks; no policy check on `/` or broad roots | Misconfiguration can disable containment entirely by choosing `/` or an overly broad root | +| `io.Read` | `io.go:133` | Caller path plus chosen backend | Direct `m.Read(path)` dispatch | No facade-level validation | Inherits backend read, enumeration, and path-handling attack surface | +| `io.Write` | `io.go:138` | Caller path/content plus chosen backend | Direct `m.Write(path, content)` dispatch | No facade-level validation | Inherits backend overwrite, creation, and storage-exhaustion attack surface | +| `io.ReadStream` | `io.go:143` | Caller path plus chosen backend | Direct `m.ReadStream(path)` dispatch | No facade-level validation | 
Inherits backend streaming-read surface and any unbounded downstream consumption risk | +| `io.WriteStream` | `io.go:148` | Caller path plus chosen backend; later streamed bytes from returned writer | Direct `m.WriteStream(path)` dispatch | No facade-level validation | Inherits backend streaming-write surface, including arbitrary object/file creation and unbounded buffering/disk growth | +| `io.EnsureDir` | `io.go:153` | Caller path plus chosen backend | Direct `m.EnsureDir(path)` dispatch | No facade-level validation | Inherits backend directory-creation semantics; on no-op backends this can create false assumptions about isolation | +| `io.IsFile` | `io.go:158` | Caller path plus chosen backend | Direct `m.IsFile(path)` dispatch | No facade-level validation | Inherits backend existence-oracle and metadata-disclosure surface | +| `io.Copy` | `io.go:163` | Caller-selected source/destination mediums and paths | `src.Read(srcPath)` loads full content into memory, then `dst.Write(dstPath, content)` | Validation delegated to both backends | Large source content can exhaust memory; can bridge trust zones and copy attacker-controlled names/content across backends | +| `(*io.MockMedium).Read`, `FileGet`, `Open`, `ReadStream`, `List`, `Stat`, `Exists`, `IsFile`, `IsDir` | `io.go:193`, `225`, `358`, `388`, `443`, `552`, `576`, `219`, `587` | Caller path | Direct map lookup and prefix scans in in-memory maps | No normalization, auth, or path restrictions | If reused outside tests, it becomes a trivial key/value disclosure and enumeration surface | +| `(*io.MockMedium).Write`, `WriteMode`, `FileSet`, `EnsureDir` | `io.go:202`, `208`, `230`, `213` | Caller path/content/mode | Direct map writes; `WriteMode` ignores `mode` | No validation; permissions are ignored | Arbitrary overwrite/creation of entries and silent permission-policy bypass | +| `(*io.MockMedium).Create`, `Append`, `WriteStream`, `(*io.MockWriteCloser).Write` | `io.go:370`, `378`, `393`, `431` | Caller path; 
streamed caller bytes | Buffers bytes in memory until `Close`, then commits to `Files[path]` | No validation or size limits | Memory exhaustion and arbitrary entry overwrite if used as anything other than a test double | +| `(*io.MockMedium).Delete`, `DeleteAll`, `Rename` | `io.go:235`, `263`, `299` | Caller path(s) | Direct map mutation and prefix scans | No normalization or authorization | Arbitrary delete/rename of entries; prefix-based operations can remove more than a caller expects | + +## `local` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `local.New` | `local/client.go:24` | Caller-supplied root path | `filepath.Abs`, optional `filepath.EvalSymlinks`, stored as `Medium.root` | Absolutizes root and resolves root symlink when possible | Passing `/` creates unsandboxed host filesystem access; broad roots widen blast radius | +| `(*local.Medium).Read`, `FileGet` | `local/client.go:114`, `300` | Caller path | `validatePath` then `os.ReadFile` | `validatePath` cleans path, walks symlinks component-by-component, and blocks resolved escapes from `root` | Arbitrary read of anything reachable inside the sandbox; TOCTOU symlink swap remains possible after validation and before the final read | +| `(*local.Medium).Open`, `ReadStream` | `local/client.go:210`, `248` | Caller path | `validatePath` then `os.Open`; `ReadStream` delegates to `Open` | Same `validatePath` containment check | Same read/disclosure surface as `Read`, plus a validated path can still be swapped before `os.Open` | +| `(*local.Medium).List`, `Stat`, `Exists`, `IsFile`, `IsDir` | `local/client.go:192`, `201`, `182`, `169`, `156` | Caller path | `validatePath` then `os.ReadDir` or `os.Stat` | Same `validatePath` containment check | Metadata enumeration for any path inside the sandbox; TOCTOU can still skew the checked object before the final syscall | +| `(*local.Medium).Write`, `FileSet` | 
`local/client.go:129`, `305` | Caller path/content | Delegates to `WriteMode(..., 0644)` | Path containment only | Arbitrary overwrite inside the sandbox; default `0644` can expose secrets if higher layers use it for sensitive data | +| `(*local.Medium).WriteMode` | `local/client.go:135` | Caller path/content/mode | `validatePath`, `os.MkdirAll`, `os.WriteFile` | Path containment only; caller controls file mode | Arbitrary file write inside the sandbox; caller can choose overly broad modes; TOCTOU after validation can retarget the write | +| `(*local.Medium).Create`, `WriteStream`, `Append` | `local/client.go:219`, `258`, `231` | Caller path; later bytes written through the returned `*os.File` | `validatePath`, `os.MkdirAll`, `os.Create` or `os.OpenFile(..., O_APPEND)` | Path containment only | Arbitrary truncate/append within the sandbox, unbounded disk growth, and the same post-validation race window | +| `(*local.Medium).EnsureDir` | `local/client.go:147` | Caller path | `validatePath` then `os.MkdirAll` | Path containment only | Arbitrary directory creation inside the sandbox; TOCTOU race can still redirect the mkdir target | +| `(*local.Medium).Delete` | `local/client.go:263` | Caller path | `validatePath` then `os.Remove` | Path containment; explicit guard blocks `/` and `$HOME` | Arbitrary file or empty-dir deletion inside the sandbox; guard does not protect other critical paths if root is too broad; TOCTOU applies | +| `(*local.Medium).DeleteAll` | `local/client.go:275` | Caller path | `validatePath` then `os.RemoveAll` | Path containment; explicit guard blocks `/` and `$HOME` | Recursive delete of any sandboxed subtree; if the medium root is broad, the blast radius is broad too | +| `(*local.Medium).Rename` | `local/client.go:287` | Caller old/new paths | `validatePath` on both sides, then `os.Rename` | Path containment on both paths | Arbitrary move/overwrite inside the sandbox; attacker-controlled rename targets can be swapped after validation | + +## 
`sqlite` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `sqlite.WithTable` | `sqlite/sqlite.go:29` | Caller-supplied table name option | Stored on `Medium.table` and concatenated into every SQL statement | No quoting or identifier validation | SQL injection or malformed SQL if an attacker can choose the table name | +| `sqlite.New` | `sqlite/sqlite.go:37` | Caller DB path/URI and options | `sql.Open("sqlite", dbPath)`, `PRAGMA`, `CREATE TABLE` using concatenated table name | Rejects empty `dbPath`; no table-name validation | Arbitrary SQLite file/URI selection and inherited SQL injection risk from `WithTable` | +| `(*sqlite.Medium).Read`, `FileGet`, `Open`, `ReadStream` | `sqlite/sqlite.go:94`, `172`, `455`, `521` | Caller path | `cleanPath` then parameterized `SELECT`; `Open`/`ReadStream` materialize the whole BLOB in memory | Leading-slash `path.Clean` collapses traversal and rejects empty/root keys; path value is parameterized, table name is not | Arbitrary logical-key read, existence disclosure, canonicalization collisions such as `../x -> x`, and memory exhaustion on large BLOBs | +| `(*sqlite.Medium).Write`, `FileSet` | `sqlite/sqlite.go:118`, `177` | Caller path/content | `cleanPath` then parameterized upsert | Same path normalization; table name still concatenated | Arbitrary logical-key overwrite and unbounded DB growth; different raw paths can alias to the same normalized key | +| `(*sqlite.Medium).Create`, `WriteStream`, `Append`, `(*sqlite.sqliteWriteCloser).Write` | `sqlite/sqlite.go:487`, `546`, `499`, `654` | Caller path; streamed caller bytes | `cleanPath`, optional preload of existing BLOB, in-memory buffering, then upsert on `Close` | Non-empty normalized key only | Memory exhaustion from buffering and append preloads; arbitrary overwrite/append of normalized keys | +| `(*sqlite.Medium).EnsureDir` | `sqlite/sqlite.go:136` | Caller path | 
`cleanPath` then inserts a directory marker row | Root becomes a no-op; other paths are normalized only | Arbitrary logical directory creation and aliasing through normalized names | +| `(*sqlite.Medium).List`, `Stat`, `Exists`, `IsFile`, `IsDir` | `sqlite/sqlite.go:349`, `424`, `551`, `155`, `569` | Caller path | `cleanPath` then parameterized listing/count/stat queries | Same normalized key handling; table name still concatenated | Namespace enumeration and metadata disclosure; canonicalization collisions can hide the caller's original path spelling | +| `(*sqlite.Medium).Delete` | `sqlite/sqlite.go:182` | Caller path | `cleanPath`, directory-child count, then `DELETE` | Rejects empty/root path and non-empty dirs | Arbitrary logical-key deletion | +| `(*sqlite.Medium).DeleteAll` | `sqlite/sqlite.go:227` | Caller path | `cleanPath` then `DELETE WHERE path = ? OR path LIKE ?` | Rejects empty/root path | Bulk deletion of any logical subtree | +| `(*sqlite.Medium).Rename` | `sqlite/sqlite.go:251` | Caller old/new paths | `cleanPath` both paths, then transactional copy/delete of entry and children | Requires non-empty normalized source and destination | Arbitrary move/overwrite of logical subtrees; normalized-path aliasing can redirect or collapse entries | + +## `s3` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `s3.WithPrefix` | `s3/s3.go:44` | Caller-supplied prefix | Stored on `Medium.prefix` and prepended to every key | Only ensures a trailing `/` when non-empty | Cross-tenant namespace expansion or contraction if untrusted callers can choose the prefix; empty prefix exposes the whole bucket | +| `s3.WithClient` | `s3/s3.go:55` | Caller-supplied S3 client | Stored as `Medium.client` and trusted for all I/O | No validation | Malicious or wrapped clients can exfiltrate data, fake results, or bypass expected transport controls | +| `s3.New` | `s3/s3.go:69` | 
Caller bucket name and options | Stores bucket/prefix/client on `Medium` | Rejects empty bucket and missing client only | Redirecting operations to attacker-chosen buckets or prefixes if config is not trusted | +| `(*s3.Medium).EnsureDir` | `s3/s3.go:144` | Caller path (ignored) | No-op | Input is ignored entirely | Semantic mismatch: callers may believe a directory boundary now exists when S3 still has only object keys | +| `(*s3.Medium).Read`, `FileGet`, `Open` | `s3/s3.go:103`, `166`, `388` | Caller path | `key(p)` then `GetObject`; `Read`/`Open` read the whole body into memory | Leading-slash `path.Clean` keeps the key under `prefix`; rejects empty key | Arbitrary read inside the configured bucket/prefix, canonicalization collisions, and memory exhaustion on large objects | +| `(*s3.Medium).ReadStream` | `s3/s3.go:464` | Caller path | `key(p)` then `GetObject`, returning the raw response body | Same normalized key handling; no size/content checks | Delivers arbitrary remote object bodies to downstream consumers without integrity, type, or size enforcement | +| `(*s3.Medium).Write`, `FileSet` | `s3/s3.go:126`, `171` | Caller path/content | `key(p)` then `PutObject` | Same normalized key handling | Arbitrary object overwrite or creation within the configured prefix | +| `(*s3.Medium).Create`, `WriteStream`, `Append`, `(*s3.s3WriteCloser).Write` | `s3/s3.go:427`, `481`, `440`, `609` | Caller path; streamed caller bytes | `key(p)`, optional preload of existing object for append, in-memory buffer, then `PutObject` on `Close` | Non-empty normalized key only | Memory exhaustion from buffering and append preloads; arbitrary overwrite/append of objects under the prefix | +| `(*s3.Medium).List`, `Stat`, `Exists`, `IsFile`, `IsDir` | `s3/s3.go:282`, `355`, `486`, `149`, `518` | Caller path | `key(p)` then `ListObjectsV2` or `HeadObject` | Normalized key stays under `prefix`; no authz or tenancy checks beyond config | Namespace enumeration and metadata disclosure across 
any objects reachable by the configured prefix | +| `(*s3.Medium).Delete` | `s3/s3.go:176` | Caller path | `key(p)` then `DeleteObject` | Non-empty normalized key only | Arbitrary object deletion inside the configured prefix | +| `(*s3.Medium).DeleteAll` | `s3/s3.go:193` | Caller path | `key(p)`, then exact delete plus prefix-based `ListObjectsV2` and batched `DeleteObjects` | Non-empty normalized key only | Bulk deletion of every object under a caller-chosen logical subtree | +| `(*s3.Medium).Rename` | `s3/s3.go:252` | Caller old/new paths | `key(p)` on both paths, then `CopyObject` followed by `DeleteObject` | Non-empty normalized keys only | Arbitrary move/overwrite of objects within the configured prefix; special characters in `oldPath` can also make `CopySource` handling fragile | + +## `store` + +### `store.Store` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `store.New` | `store/store.go:22` | Caller DB path/URI | `sql.Open("sqlite", dbPath)`, `PRAGMA`, schema creation | No validation beyond driver errors | Arbitrary SQLite file/URI selection if configuration is attacker-controlled | +| `(*store.Store).Get` | `store/store.go:49` | Caller group/key | Parameterized `SELECT value FROM kv WHERE grp = ? 
AND key = ?` | Uses placeholders; no group/key policy | Arbitrary secret/config disclosure for any reachable group/key | +| `(*store.Store).Set` | `store/store.go:62` | Caller group/key/value | Parameterized upsert into `kv` | Uses placeholders; no group/key policy | Arbitrary overwrite or creation of stored values | +| `(*store.Store).Delete`, `DeleteGroup` | `store/store.go:75`, `94` | Caller group and optional key | Parameterized `DELETE` statements | Uses placeholders; no authorization or namespace policy | Single-key or whole-group deletion | +| `(*store.Store).Count`, `GetAll` | `store/store.go:84`, `103` | Caller group | Parameterized count or full scan of the group | Uses placeholders; no access control | Group enumeration and bulk disclosure of every key/value in a group | +| `(*store.Store).Render` | `store/store.go:125` | Caller template string and group name | Loads all `group` values into a map, then `template.Parse` and `template.Execute` | No template allowlist or output escaping; template funcs are default-only | Template-driven exfiltration of all values in the chosen group; downstream output injection if rendered text is later used in HTML, shell, or config sinks | + +### `store.Medium` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `store.NewMedium` | `store/medium.go:23` | Caller DB path/URI | Delegates to `store.New(dbPath)` | No extra validation | Same arbitrary-DB selection risk as `store.New` | +| `(*store.Medium).EnsureDir` | `store/medium.go:80` | Caller path (ignored) | No-op | Input is ignored | Semantic mismatch: callers may assume they created a boundary when the store still treats group creation as implicit | +| `(*store.Medium).Read`, `FileGet`, `Open`, `ReadStream` | `store/medium.go:62`, `95`, `214`, `246` | Caller medium path | `splitPath` then `Store.Get`; `Open`/`ReadStream` materialize value bytes or a string reader | 
`path.Clean`, strip leading `/`, require `group/key`; does not forbid odd group names like `..` | Arbitrary logical-key disclosure and group/key aliasing if higher layers treat raw paths as identity | +| `(*store.Medium).Write`, `FileSet` | `store/medium.go:71`, `100` | Caller path/content | `splitPath` then `Store.Set` | Same `group/key` check only | Arbitrary overwrite of any reachable group/key | +| `(*store.Medium).Create`, `WriteStream`, `Append`, `(*store.kvWriteCloser).Write` | `store/medium.go:227`, `259`, `236`, `343` | Caller path; streamed caller bytes | `splitPath`, optional preload of existing value for append, in-memory buffer, then `Store.Set` on `Close` | Requires `group/key`; no size limit | Memory exhaustion and arbitrary value overwrite/append | +| `(*store.Medium).Delete` | `store/medium.go:105` | Caller path | `splitPath`; group-only paths call `Count`, group/key paths call `Store.Delete` | Rejects empty path; refuses non-empty group deletes | Arbitrary single-key deletion and group-existence probing | +| `(*store.Medium).DeleteAll` | `store/medium.go:124` | Caller path | `splitPath`; group-only paths call `DeleteGroup`, group/key paths call `Delete` | Rejects empty path | Whole-group deletion or single-key deletion | +| `(*store.Medium).Rename` | `store/medium.go:136` | Caller old/new paths | `splitPath`, `Store.Get`, `Store.Set`, `Store.Delete` | Requires both paths to include `group/key` | Arbitrary cross-group data movement and destination overwrite | +| `(*store.Medium).List` | `store/medium.go:154` | Caller path | Empty path lists groups; group path loads all keys via `GetAll` | `splitPath` only; no auth | Group and key enumeration; value lengths leak through returned file info sizes | +| `(*store.Medium).Stat`, `Exists`, `IsFile`, `IsDir` | `store/medium.go:191`, `264`, `85`, `278` | Caller path | `splitPath`, then `Count` or `Get` | Same `splitPath` behavior | Existence oracle and metadata disclosure for groups and keys | + +## `node` + 
+| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `node.AddData` | `node/node.go:40` | Caller file name and content | Stores `name` as a map key and `content` as in-memory bytes | Strips a leading `/`; ignores empty names and trailing `/`; does not clean `.` or `..` | Path-confusion payloads such as `../x` or `./x` persist verbatim and can later become traversal gadgets when copied out or tarred | +| `node.FromTar`, `(*node.Node).LoadTar` | `node/node.go:84`, `93` | Caller-supplied tar archive bytes | `archive/tar` reader, `io.ReadAll` per regular file, then `newFiles[name] = ...` | Trims a leading `/`; ignores empty names and directory entries; no `path.Clean`, no `..` rejection, no size limits | Tar-slip-style names survive in memory and can be exported later; huge or duplicate entries can exhaust memory or overwrite earlier entries | +| `(*node.Node).Read`, `FileGet`, `ReadFile`, `Open`, `ReadStream` | `node/node.go:349`, `370`, `187`, `259`, `491` | Caller path/name | Direct map lookup or directory inference; `Read` and `ReadFile` copy/convert content to memory | Only strips a leading `/` | Arbitrary access to weird literal names and confusion if callers assume canonical path handling | +| `(*node.Node).Write`, `WriteMode`, `FileSet` | `node/node.go:359`, `365`, `375` | Caller path/content/mode | Delegates to `AddData`; `WriteMode` ignores `mode` | Same minimal trimming as `AddData` | Arbitrary overwrite of any key, including attacker-planted `../` names; false sense of permission control | +| `(*node.Node).Create`, `WriteStream`, `Append`, `(*node.nodeWriter).Write` | `node/node.go:473`, `500`, `480`, `513` | Caller path; streamed caller bytes | Buffer bytes in memory and commit them as a map entry on `Close` | Only strips a leading `/`; no size limit | Memory exhaustion and creation of path-confusion payloads that can escape on later export | +| 
`(*node.Node).Delete`, `DeleteAll`, `Rename` | `node/node.go:411`, `421`, `445` | Caller path(s) | Direct map mutation keyed by caller-supplied names | Only strips a leading `/` | Arbitrary delete/rename of any key, including `../`-style names; no directory-safe rename logic | +| `(*node.Node).Stat`, `List`, `ReadDir`, `Exists`, `IsFile`, `IsDir` | `node/node.go:278`, `461`, `297`, `387`, `393`, `400` | Caller path/name | Directory inference from map keys and `fs` adapter methods | Only strips a leading `/` | Namespace enumeration and ambiguity around equivalent-looking path spellings | +| `(*node.Node).WalkNode`, `Walk` | `node/node.go:128`, `145` | Caller root path, callback, filters | `fs.WalkDir` over the in-memory tree | No root normalization beyond whatever `Node` already does | Attackers who can plant names can force callback traversal over weird paths; `SkipErrors` can suppress unexpected failures | +| `(*node.Node).CopyFile` | `node/node.go:200` | Caller source key, destination host path, permissions | Reads node content and calls `os.WriteFile(dst, ...)` directly | Only checks that `src` exists and is not a directory | Arbitrary host filesystem write to a caller-chosen `dst` path | +| `(*node.Node).CopyTo` | `node/node.go:218` | Caller target medium, source path, destination path | Reads node entries and calls `target.Write(destPath or destPath/rel, content)` | Only checks that the source exists | Stored `../`-style node keys can propagate into destination paths, enabling traversal or overwrite depending on the target backend | +| `(*node.Node).EnsureDir` | `node/node.go:380` | Caller path (ignored) | No-op | Input is ignored | Semantic mismatch: callers may assume a directory boundary was created when directories remain implicit | + +## `datanode` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `datanode.FromTar`, `(*datanode.Medium).Restore` | 
`datanode/client.go:41`, `65` | Caller-supplied tar archive bytes | Delegates to Borg `datanode.FromTar(data)` and replaces the in-memory filesystem | Wrapper adds no checks; inherited Borg behavior trims leading `/` only and accepts symlink tar entries | Archive bombs, preserved symlink entries, and `../`-style names can be restored into the in-memory tree | +| `(*datanode.Medium).Read`, `FileGet`, `Open`, `ReadStream` | `datanode/client.go:97`, `175`, `394`, `429` | Caller path | `clean(p)` then `dn.Open`/`dn.Stat`; `Read` loads the full file into memory | `clean` strips a leading `/` and runs `path.Clean`, but it does not sandbox `..` at the start of the path | Arbitrary logical-key reads, including odd names such as `../x`; full reads can exhaust memory on large files | +| `(*datanode.Medium).Write`, `WriteMode`, `FileSet` | `datanode/client.go:123`, `138`, `179` | Caller path/content/mode | `clean(p)`, then `dn.AddData` and explicit parent-dir tracking | Rejects empty path only; `WriteMode` ignores `mode` | Arbitrary overwrite/creation of logical entries, including `../`-style names; canonicalization can also collapse some raw paths onto the same key | +| `(*datanode.Medium).Create`, `WriteStream`, `Append`, `(*datanode.writeCloser).Write` | `datanode/client.go:402`, `441`, `410`, `540` | Caller path; streamed caller bytes | `clean(p)`, optional preload of existing data for append, in-memory buffer, then `dn.AddData` on `Close` | Rejects empty path; no size limit | Memory exhaustion and arbitrary overwrite/append of logical entries | +| `(*datanode.Medium).EnsureDir` | `datanode/client.go:142` | Caller path | `clean(p)` then marks explicit directories in `m.dirs` | Empty path becomes a no-op; no policy on `..`-style names | Arbitrary logical directory creation and enumeration under attacker-chosen names | +| `(*datanode.Medium).Delete` | `datanode/client.go:183` | Caller path | `clean(p)`, then file removal or explicit-dir removal | Blocks deleting the 
empty/root path; otherwise no path policy | Arbitrary logical deletion of files or empty directories | +| `(*datanode.Medium).DeleteAll` | `datanode/client.go:220` | Caller path | `clean(p)`, then subtree walk and removal | Blocks deleting the empty/root path | Recursive deletion of any logical subtree | +| `(*datanode.Medium).Rename` | `datanode/client.go:262` | Caller old/new paths | `clean` both paths, then read-add-delete for files or subtree move for dirs | Existence checks only; no destination restrictions | Arbitrary subtree move/overwrite, including `../`-style names that later escape on export or copy-out | +| `(*datanode.Medium).List`, `Stat`, `Exists`, `IsFile`, `IsDir` | `datanode/client.go:327`, `374`, `445`, `166`, `460` | Caller path | `clean(p)`, then `dn.ReadDir`/`dn.Stat`/explicit-dir map lookups | Same non-sandboxing `clean` behavior | Namespace enumeration and metadata disclosure for weird or traversal-looking logical names | + +## `workspace` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `workspace.New` | `workspace/service.go:41` | Caller `core.Core` and optional `cryptProvider` | Resolves `$HOME`, sets `rootPath = ~/.core/workspaces`, and binds `medium = io.Local` | Ensures the root directory exists; no sandboxing because `io.Local` is rooted at `/` | All later workspace path joins operate on the real host filesystem, not a project sandbox | +| `(*workspace.Service).CreateWorkspace` | `workspace/service.go:68` | Caller identifier and password | SHA-256 hashes `identifier` into `wsID`, creates directories under `rootPath`, calls `crypt.CreateKeyPair`, writes `keys/private.key` | Requires `crypt` to exist, checks for workspace existence, writes key with mode `0600`; no password policy or identifier validation | Predictable unsalted workspace IDs can leak identifier privacy through offline guessing; creates real host directories/files if 
exposed remotely | +| `(*workspace.Service).SwitchWorkspace` | `workspace/service.go:103` | Caller workspace name | `filepath.Join(rootPath, name)` then `medium.IsDir`, stores `activeWorkspace = name` | Only checks that the joined path currently exists as a directory | Path traversal via `name` can escape `rootPath` and bind the service to arbitrary host directories | +| `(*workspace.Service).WorkspaceFileGet` | `workspace/service.go:126` | Caller filename | `activeFilePath` uses `filepath.Join(rootPath, activeWorkspace, "files", filename)`, then `medium.Read` | Only checks that an active workspace is set; no filename containment check | `filename` can escape the `files/` directory, and a malicious `activeWorkspace` can turn reads into arbitrary host-file access | +| `(*workspace.Service).WorkspaceFileSet` | `workspace/service.go:138` | Caller filename and content | Same `activeFilePath` join, then `medium.Write` | Only checks that an active workspace is set; no filename containment check | Arbitrary host-file write if `activeWorkspace` or `filename` contains traversal segments | +| `(*workspace.Service).HandleIPCEvents` | `workspace/service.go:150` | Untrusted `core.Message` payload, typically `map[string]any` from IPC | Extracts `"action"` and dispatches to `CreateWorkspace` or `SwitchWorkspace` | Only loose type assertions; no schema, authz, or audit response on failure | Remote IPC callers can trigger workspace creation or retarget the service to arbitrary directories because downstream helpers do not enforce containment | + +## `sigil` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `sigil.Transmute` | `sigil/sigil.go:46` | Caller data bytes and sigil chain | Sequential `Sigil.In` calls | No chain policy; relies on each sigil | Attacker-chosen chains can trigger expensive transforms or weaken policy if callers let the attacker choose the sigils | +| 
`sigil.Untransmute` | `sigil/sigil.go:62` | Caller data bytes and sigil chain | Reverse-order `Sigil.Out` calls | No chain policy; relies on each sigil | Expensive or mismatched reverse chains can become a CPU/memory DoS surface | +| `(*sigil.ReverseSigil).In`, `Out` | `sigil/sigils.go:29`, `41` | Caller data bytes | Allocates a new buffer and reverses it | Nil-safe only | Large inputs allocate a second full-sized buffer; otherwise low risk | +| `(*sigil.HexSigil).In`, `Out` | `sigil/sigils.go:50`, `60` | Caller data bytes | Hex encode/decode into fresh buffers | Nil-safe only; decode returns errors from `hex.Decode` | Large or malformed input can still drive allocation and CPU usage | +| `(*sigil.Base64Sigil).In`, `Out` | `sigil/sigils.go:74`, `84` | Caller data bytes | Base64 encode/decode into fresh buffers | Nil-safe only; decode returns errors from `StdEncoding.Decode` | Large or malformed input can still drive allocation and CPU usage | +| `(*sigil.GzipSigil).In` | `sigil/sigils.go:100` | Caller data bytes | `gzip.NewWriter`, compression into a `bytes.Buffer` | Nil-safe only | Large input can consume significant CPU and memory while compressing | +| `(*sigil.GzipSigil).Out` | `sigil/sigils.go:120` | Caller compressed bytes | `gzip.NewReader` then `io.ReadAll` | Nil-safe only; malformed gzip errors out | Zip-bomb style payloads can decompress to unbounded memory | +| `(*sigil.JSONSigil).In`, `Out` | `sigil/sigils.go:137`, `149` | Caller JSON bytes | `json.Compact`/`json.Indent`; `Out` is a pass-through | No schema validation; `Out` does nothing | Large inputs can consume CPU/memory; callers may wrongly assume `Out` validates or normalizes JSON | +| `sigil.NewHashSigil`, `(*sigil.HashSigil).In`, `Out` | `sigil/sigils.go:161`, `166`, `215` | Caller hash enum and data bytes | Selects a hash implementation, hashes input, and leaves `Out` as pass-through | Unsupported hashes error out; weak algorithms are still allowed | If algorithm choice is attacker-controlled, 
callers can be downgraded to weak digests such as MD4/MD5/SHA1; large inputs can still be CPU-heavy | +| `sigil.NewSigil` | `sigil/sigils.go:221` | Caller sigil name | Factory switch returning encoding, compression, formatting, hashing, or weak hash sigils | Fixed allowlist only | If exposed as user config, attackers can select weak or semantically wrong transforms and bypass higher-level crypto expectations | +| `(*sigil.XORObfuscator).Obfuscate`, `Deobfuscate` | `sigil/crypto_sigil.go:65`, `73` | Caller data and entropy bytes | SHA-256-derived keystream then XOR over a full-size output buffer | No validation | Safe only as a subroutine; if misused as standalone protection, it is merely obfuscation and still a CPU/memory surface on large input | +| `(*sigil.ShuffleMaskObfuscator).Obfuscate`, `Deobfuscate` | `sigil/crypto_sigil.go:127`, `154` | Caller data and entropy bytes | Deterministic permutation and XOR-mask over full-size buffers | No validation | Large inputs drive multiple full-size allocations and CPU work; still only obfuscation if used outside authenticated encryption | +| `sigil.NewChaChaPolySigil` | `sigil/crypto_sigil.go:247` | Caller key bytes | Copies key into `ChaChaPolySigil` state | Validates only that the key is exactly 32 bytes | Weak but correctly-sized keys are accepted; long-lived key material stays resident in process memory | +| `sigil.NewChaChaPolySigilWithObfuscator` | `sigil/crypto_sigil.go:263` | Caller key bytes and custom obfuscator | Builds a `ChaChaPolySigil` and optionally swaps the obfuscator | Key length is validated; obfuscator is trusted if non-nil | Malicious or buggy obfuscators can break the intended defense-in-depth model or leak patterns | +| `(*sigil.ChaChaPolySigil).In` | `sigil/crypto_sigil.go:276` | Caller plaintext bytes | `rand.Reader` nonce, optional obfuscation, then `chacha20poly1305.Seal` | Requires a configured key; nil input is allowed | Large plaintexts allocate full ciphertexts; if `randReader` is replaced 
in tests or DI, nonce quality becomes attacker-influenced | +| `(*sigil.ChaChaPolySigil).Out` | `sigil/crypto_sigil.go:315` | Caller ciphertext bytes | Nonce extraction, `aead.Open`, optional deobfuscation | Requires a configured key, checks minimum length, and relies on AEAD authentication | Primarily a CPU DoS surface on repeated bogus ciphertext; integrity is otherwise strong | +| `sigil.GetNonceFromCiphertext` | `sigil/crypto_sigil.go:359` | Caller ciphertext bytes | Copies the first 24 bytes as a nonce | Length check only | Low-risk parser surface; malformed short inputs just error | -- 2.45.3 From 39d5ca848037562b4e315ac2fa9185679743ff17 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 23 Mar 2026 14:48:12 +0000 Subject: [PATCH 4/6] docs: add convention drift audit --- docs/convention-drift-2026-03-23.md | 125 ++++++++++++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 docs/convention-drift-2026-03-23.md diff --git a/docs/convention-drift-2026-03-23.md b/docs/convention-drift-2026-03-23.md new file mode 100644 index 0000000..8a76d14 --- /dev/null +++ b/docs/convention-drift-2026-03-23.md @@ -0,0 +1,125 @@ + + +# Convention Drift Audit + +Date: 2026-03-23 + +Scope: tracked module files in the main repo surface (`*.go`, `*.md`), excluding `.core/`, `.github/`, `.idea/`, `go.mod`, `go.sum`, and generated coverage output. + +Conventions used: `CLAUDE.md`, `docs/development.md`, `docs/index.md`, and `docs/architecture.md`. + +Limitation: `CODEX.md` is not present in this repository. The `stdlib -> core.*` and usage-example findings below are therefore inferred from the documented guidance already in-tree. 
+
+## Missing SPDX Headers
+
+- `CLAUDE.md:1`
+- `bench_test.go:1`
+- `client_test.go:1`
+- `datanode/client.go:1`
+- `datanode/client_test.go:1`
+- `docs/architecture.md:1`
+- `docs/development.md:1`
+- `docs/index.md:1`
+- `io.go:1`
+- `local/client.go:1`
+- `local/client_test.go:1`
+- `node/node.go:1`
+- `node/node_test.go:1`
+- `s3/s3.go:1`
+- `s3/s3_test.go:1`
+- `sigil/crypto_sigil.go:1`
+- `sigil/crypto_sigil_test.go:1`
+- `sigil/sigil.go:1`
+- `sigil/sigil_test.go:1`
+- `sigil/sigils.go:1`
+- `sqlite/sqlite.go:1`
+- `sqlite/sqlite_test.go:1`
+- `store/medium.go:1`
+- `store/medium_test.go:1`
+- `store/store.go:1`
+- `store/store_test.go:1`
+- `workspace/service.go:1`
+- `workspace/service_test.go:1`
+
+## `stdlib -> core.*` Drift
+
+Interpretation note: `CLAUDE.md` only makes one direct stdlib replacement rule explicit: do not use raw `os` / `filepath` outside the backend boundary. The concrete drift in this repo therefore falls into two buckets: documentation that still references the pre-migration `forge.lthn.ai` import paths (the module and code now use the migrated `dappco.re` paths), and direct host-filesystem/path handling in non-backend production code.
+
+- `go.mod:1` declares `module dappco.re/go/core/io`, while parts of the repo documentation still identify the module by its pre-migration path `forge.lthn.ai/core/go-io`.
+- `go.mod:6` depends on `dappco.re/go/core`, while the docs still list the pre-migration `forge.lthn.ai/core/go` as the Core dependency.
+- `io.go:12` imports `dappco.re/go/core/io/local`; the docs still reference the pre-migration `forge.lthn.ai/core/go-io/local`.
+- `node/node.go:18` imports `dappco.re/go/core/io`; the docs still reference the pre-migration `forge.lthn.ai/core/go-io`.
+- `workspace/service.go:10` imports `dappco.re/go/core`; the docs still reference the pre-migration Core package path.
+- `workspace/service.go:13` imports `dappco.re/go/core/io`; the docs still reference the pre-migration `forge.lthn.ai/core/go-io`.
+- `workspace/service_test.go:7` imports the migrated `dappco.re/go/core` path; the docs referencing it have not yet been aligned.
+- `datanode/client_test.go:7` imports the migrated `dappco.re/go/core/io` path; the docs referencing it have not yet been aligned.
+- `workspace/service.go:6` uses raw `os.UserHomeDir()` in non-backend production code, despite the repo guidance that filesystem access must go through the `io.Medium` abstraction. +- `workspace/service.go:7` builds runtime filesystem paths with `filepath.Join()` in non-backend production code, again bypassing the documented abstraction boundary. + +## UK English Drift + +- `datanode/client.go:3` uses `serializes`; `docs/development.md` calls for UK English (`serialises`). +- `datanode/client.go:52` uses `serializes`; `docs/development.md` calls for UK English (`serialises`). +- `sigil/crypto_sigil.go:3` uses `defense-in-depth`; `docs/development.md` calls for UK English (`defence-in-depth`). +- `sigil/crypto_sigil.go:38` uses `defense`; `docs/development.md` calls for UK English (`defence`). + +## Missing Tests + +Basis: `GOWORK=off go test -coverprofile=coverage.out ./...` and `go tool cover -func=coverage.out` on 2026-03-23. This list focuses on public or semantically meaningful API entrypoints at `0.0%` coverage and omits trivial one-line accessor helpers. 
+ +- `io.go:126` `NewSandboxed` +- `io.go:143` `ReadStream` +- `io.go:148` `WriteStream` +- `io.go:208` `(*MockMedium).WriteMode` +- `io.go:358` `(*MockMedium).Open` +- `io.go:370` `(*MockMedium).Create` +- `io.go:378` `(*MockMedium).Append` +- `io.go:388` `(*MockMedium).ReadStream` +- `io.go:393` `(*MockMedium).WriteStream` +- `datanode/client.go:138` `(*Medium).WriteMode` +- `local/client.go:231` `(*Medium).Append` +- `node/node.go:128` `(*Node).WalkNode` +- `node/node.go:218` `(*Node).CopyTo` +- `node/node.go:349` `(*Node).Read` +- `node/node.go:359` `(*Node).Write` +- `node/node.go:365` `(*Node).WriteMode` +- `node/node.go:370` `(*Node).FileGet` +- `node/node.go:375` `(*Node).FileSet` +- `node/node.go:380` `(*Node).EnsureDir` +- `node/node.go:393` `(*Node).IsFile` +- `node/node.go:400` `(*Node).IsDir` +- `node/node.go:411` `(*Node).Delete` +- `node/node.go:421` `(*Node).DeleteAll` +- `node/node.go:445` `(*Node).Rename` +- `node/node.go:461` `(*Node).List` +- `node/node.go:473` `(*Node).Create` +- `node/node.go:480` `(*Node).Append` +- `node/node.go:491` `(*Node).ReadStream` +- `node/node.go:500` `(*Node).WriteStream` +- `s3/s3.go:55` `WithClient` +- `store/medium.go:37` `(*Medium).Store` +- `store/medium.go:80` `(*Medium).EnsureDir` +- `store/medium.go:95` `(*Medium).FileGet` +- `store/medium.go:100` `(*Medium).FileSet` +- `store/medium.go:246` `(*Medium).ReadStream` +- `store/medium.go:259` `(*Medium).WriteStream` +- `workspace/service.go:150` `(*Service).HandleIPCEvents` + +## Missing Usage-Example Comments + +Interpretation note: because `CODEX.md` is absent, this section flags public entrypoints that expose the package's main behaviour but do not have a nearby comment block showing concrete usage. `sigil/sigil.go` is the only production file in the repo that currently includes an explicit `Example usage:` comment block. 
+ +- `io.go:123` `NewSandboxed` +- `local/client.go:22` `New` +- `s3/s3.go:68` `New` +- `sqlite/sqlite.go:35` `New` +- `node/node.go:32` `New` +- `node/node.go:217` `CopyTo` +- `datanode/client.go:32` `New` +- `datanode/client.go:40` `FromTar` +- `store/store.go:21` `New` +- `store/store.go:124` `Render` +- `store/medium.go:22` `NewMedium` +- `workspace/service.go:39` `New` +- `sigil/crypto_sigil.go:247` `NewChaChaPolySigil` +- `sigil/crypto_sigil.go:263` `NewChaChaPolySigilWithObfuscator` -- 2.45.3 From e208589493b138e17748564ed47f47688e642aae Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 23 Mar 2026 14:53:51 +0000 Subject: [PATCH 5/6] docs: add API contract report --- docs/api-contract.md | 285 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 285 insertions(+) create mode 100644 docs/api-contract.md diff --git a/docs/api-contract.md b/docs/api-contract.md new file mode 100644 index 0000000..05b1b4f --- /dev/null +++ b/docs/api-contract.md @@ -0,0 +1,285 @@ +# API Contract + +Descriptions use doc comments when present; otherwise they are short code-based summaries. +Test coverage is `Yes` when same-package tests directly execute or reference the exported symbol; otherwise `No`. +`CODEX.md` was not present in the repository at generation time. + +| Name | Signature | Package Path | Description | Test Coverage | +| --- | --- | --- | --- | --- | +| `DirEntry` | `type DirEntry struct` | `dappco.re/go/core/io` | DirEntry provides a simple implementation of fs.DirEntry for mock testing. | Yes | +| `FileInfo` | `type FileInfo struct` | `dappco.re/go/core/io` | FileInfo provides a simple implementation of fs.FileInfo for mock testing. | Yes | +| `Medium` | `type Medium interface` | `dappco.re/go/core/io` | Medium defines the standard interface for a storage backend. | Yes | +| `MockFile` | `type MockFile struct` | `dappco.re/go/core/io` | MockFile implements fs.File for MockMedium. 
| No | +| `MockMedium` | `type MockMedium struct` | `dappco.re/go/core/io` | MockMedium is an in-memory implementation of Medium for testing. | Yes | +| `MockWriteCloser` | `type MockWriteCloser struct` | `dappco.re/go/core/io` | MockWriteCloser implements WriteCloser for MockMedium. | No | +| `Copy` | `func Copy(src Medium, srcPath string, dst Medium, dstPath string) error` | `dappco.re/go/core/io` | Copy copies a file from one medium to another. | Yes | +| `EnsureDir` | `func EnsureDir(m Medium, path string) error` | `dappco.re/go/core/io` | EnsureDir makes sure a directory exists in the given medium. | Yes | +| `IsFile` | `func IsFile(m Medium, path string) bool` | `dappco.re/go/core/io` | IsFile checks if a path exists and is a regular file in the given medium. | Yes | +| `NewMockMedium` | `func NewMockMedium() *MockMedium` | `dappco.re/go/core/io` | NewMockMedium creates a new MockMedium instance. | Yes | +| `NewSandboxed` | `func NewSandboxed(root string) (Medium, error)` | `dappco.re/go/core/io` | NewSandboxed creates a new Medium sandboxed to the given root directory. | No | +| `Read` | `func Read(m Medium, path string) (string, error)` | `dappco.re/go/core/io` | Read retrieves the content of a file from the given medium. | Yes | +| `ReadStream` | `func ReadStream(m Medium, path string) (goio.ReadCloser, error)` | `dappco.re/go/core/io` | ReadStream returns a reader for the file content from the given medium. | No | +| `Write` | `func Write(m Medium, path, content string) error` | `dappco.re/go/core/io` | Write saves the given content to a file in the given medium. | Yes | +| `WriteStream` | `func WriteStream(m Medium, path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io` | WriteStream returns a writer for the file content in the given medium. | No | +| `DirEntry.Info` | `func (DirEntry) Info() (fs.FileInfo, error)` | `dappco.re/go/core/io` | Returns file info for the entry. 
| No | +| `DirEntry.IsDir` | `func (DirEntry) IsDir() bool` | `dappco.re/go/core/io` | Reports whether the entry represents a directory. | No | +| `DirEntry.Name` | `func (DirEntry) Name() string` | `dappco.re/go/core/io` | Returns the stored entry name. | Yes | +| `DirEntry.Type` | `func (DirEntry) Type() fs.FileMode` | `dappco.re/go/core/io` | Returns the entry type bits. | No | +| `FileInfo.IsDir` | `func (FileInfo) IsDir() bool` | `dappco.re/go/core/io` | Reports whether the entry represents a directory. | Yes | +| `FileInfo.ModTime` | `func (FileInfo) ModTime() time.Time` | `dappco.re/go/core/io` | Returns the stored modification time. | No | +| `FileInfo.Mode` | `func (FileInfo) Mode() fs.FileMode` | `dappco.re/go/core/io` | Returns the stored file mode. | No | +| `FileInfo.Name` | `func (FileInfo) Name() string` | `dappco.re/go/core/io` | Returns the stored entry name. | Yes | +| `FileInfo.Size` | `func (FileInfo) Size() int64` | `dappco.re/go/core/io` | Returns the stored size in bytes. | Yes | +| `FileInfo.Sys` | `func (FileInfo) Sys() any` | `dappco.re/go/core/io` | Returns the underlying system-specific data. | No | +| `Medium.Append` | `Append(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io` | Append opens the named file for appending, creating it if it doesn't exist. | No | +| `Medium.Create` | `Create(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io` | Create creates or truncates the named file. | No | +| `Medium.Delete` | `Delete(path string) error` | `dappco.re/go/core/io` | Delete removes a file or empty directory. | Yes | +| `Medium.DeleteAll` | `DeleteAll(path string) error` | `dappco.re/go/core/io` | DeleteAll removes a file or directory and all its contents recursively. | Yes | +| `Medium.EnsureDir` | `EnsureDir(path string) error` | `dappco.re/go/core/io` | EnsureDir makes sure a directory exists, creating it if necessary. 
| Yes | +| `Medium.Exists` | `Exists(path string) bool` | `dappco.re/go/core/io` | Exists checks if a path exists (file or directory). | Yes | +| `Medium.FileGet` | `FileGet(path string) (string, error)` | `dappco.re/go/core/io` | FileGet is a convenience function that reads a file from the medium. | Yes | +| `Medium.FileSet` | `FileSet(path, content string) error` | `dappco.re/go/core/io` | FileSet is a convenience function that writes a file to the medium. | Yes | +| `Medium.IsDir` | `IsDir(path string) bool` | `dappco.re/go/core/io` | IsDir checks if a path exists and is a directory. | Yes | +| `Medium.IsFile` | `IsFile(path string) bool` | `dappco.re/go/core/io` | IsFile checks if a path exists and is a regular file. | Yes | +| `Medium.List` | `List(path string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io` | List returns the directory entries for the given path. | Yes | +| `Medium.Open` | `Open(path string) (fs.File, error)` | `dappco.re/go/core/io` | Open opens the named file for reading. | No | +| `Medium.Read` | `Read(path string) (string, error)` | `dappco.re/go/core/io` | Read retrieves the content of a file as a string. | Yes | +| `Medium.ReadStream` | `ReadStream(path string) (goio.ReadCloser, error)` | `dappco.re/go/core/io` | ReadStream returns a reader for the file content. | No | +| `Medium.Rename` | `Rename(oldPath, newPath string) error` | `dappco.re/go/core/io` | Rename moves a file or directory from oldPath to newPath. | Yes | +| `Medium.Stat` | `Stat(path string) (fs.FileInfo, error)` | `dappco.re/go/core/io` | Stat returns file information for the given path. | Yes | +| `Medium.Write` | `Write(path, content string) error` | `dappco.re/go/core/io` | Write saves the given content to a file, overwriting it if it exists. | Yes | +| `Medium.WriteMode` | `WriteMode(path, content string, mode os.FileMode) error` | `dappco.re/go/core/io` | WriteMode saves content with explicit file permissions. 
| No | +| `Medium.WriteStream` | `WriteStream(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io` | WriteStream returns a writer for the file content. | No | +| `MockFile.Close` | `func (*MockFile) Close() error` | `dappco.re/go/core/io` | Closes the current value. | No | +| `MockFile.Read` | `func (*MockFile) Read(b []byte) (int, error)` | `dappco.re/go/core/io` | Reads data from the current value. | No | +| `MockFile.Stat` | `func (*MockFile) Stat() (fs.FileInfo, error)` | `dappco.re/go/core/io` | Returns file metadata for the current value. | No | +| `MockMedium.Append` | `func (*MockMedium) Append(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io` | Append opens a file for appending in the mock filesystem. | No | +| `MockMedium.Create` | `func (*MockMedium) Create(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io` | Create creates a file in the mock filesystem. | No | +| `MockMedium.Delete` | `func (*MockMedium) Delete(path string) error` | `dappco.re/go/core/io` | Delete removes a file or empty directory from the mock filesystem. | Yes | +| `MockMedium.DeleteAll` | `func (*MockMedium) DeleteAll(path string) error` | `dappco.re/go/core/io` | DeleteAll removes a file or directory and all contents from the mock filesystem. | Yes | +| `MockMedium.EnsureDir` | `func (*MockMedium) EnsureDir(path string) error` | `dappco.re/go/core/io` | EnsureDir records that a directory exists in the mock filesystem. | Yes | +| `MockMedium.Exists` | `func (*MockMedium) Exists(path string) bool` | `dappco.re/go/core/io` | Exists checks if a path exists in the mock filesystem. | Yes | +| `MockMedium.FileGet` | `func (*MockMedium) FileGet(path string) (string, error)` | `dappco.re/go/core/io` | FileGet is a convenience function that reads a file from the mock filesystem. 
| Yes | +| `MockMedium.FileSet` | `func (*MockMedium) FileSet(path, content string) error` | `dappco.re/go/core/io` | FileSet is a convenience function that writes a file to the mock filesystem. | Yes | +| `MockMedium.IsDir` | `func (*MockMedium) IsDir(path string) bool` | `dappco.re/go/core/io` | IsDir checks if a path is a directory in the mock filesystem. | Yes | +| `MockMedium.IsFile` | `func (*MockMedium) IsFile(path string) bool` | `dappco.re/go/core/io` | IsFile checks if a path exists as a file in the mock filesystem. | Yes | +| `MockMedium.List` | `func (*MockMedium) List(path string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io` | List returns directory entries for the mock filesystem. | Yes | +| `MockMedium.Open` | `func (*MockMedium) Open(path string) (fs.File, error)` | `dappco.re/go/core/io` | Open opens a file from the mock filesystem. | No | +| `MockMedium.Read` | `func (*MockMedium) Read(path string) (string, error)` | `dappco.re/go/core/io` | Read retrieves the content of a file from the mock filesystem. | Yes | +| `MockMedium.ReadStream` | `func (*MockMedium) ReadStream(path string) (goio.ReadCloser, error)` | `dappco.re/go/core/io` | ReadStream returns a reader for the file content in the mock filesystem. | No | +| `MockMedium.Rename` | `func (*MockMedium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io` | Rename moves a file or directory in the mock filesystem. | Yes | +| `MockMedium.Stat` | `func (*MockMedium) Stat(path string) (fs.FileInfo, error)` | `dappco.re/go/core/io` | Stat returns file information for the mock filesystem. | Yes | +| `MockMedium.Write` | `func (*MockMedium) Write(path, content string) error` | `dappco.re/go/core/io` | Write saves the given content to a file in the mock filesystem. | Yes | +| `MockMedium.WriteMode` | `func (*MockMedium) WriteMode(path, content string, mode os.FileMode) error` | `dappco.re/go/core/io` | Writes content using an explicit file mode. 
| No | +| `MockMedium.WriteStream` | `func (*MockMedium) WriteStream(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io` | WriteStream returns a writer for the file content in the mock filesystem. | No | +| `MockWriteCloser.Close` | `func (*MockWriteCloser) Close() error` | `dappco.re/go/core/io` | Closes the current value. | No | +| `MockWriteCloser.Write` | `func (*MockWriteCloser) Write(p []byte) (int, error)` | `dappco.re/go/core/io` | Writes data to the current value. | No | +| `Medium` | `type Medium struct` | `dappco.re/go/core/io/datanode` | Medium is an in-memory storage backend backed by a Borg DataNode. | Yes | +| `FromTar` | `func FromTar(data []byte) (*Medium, error)` | `dappco.re/go/core/io/datanode` | FromTar creates a Medium from a tarball, restoring all files. | Yes | +| `New` | `func New() *Medium` | `dappco.re/go/core/io/datanode` | New creates a new empty DataNode Medium. | Yes | +| `Medium.Append` | `func (*Medium) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/datanode` | Opens the named file for appending, creating it if needed. | Yes | +| `Medium.Create` | `func (*Medium) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/datanode` | Creates or truncates the named file and returns a writer. | Yes | +| `Medium.DataNode` | `func (*Medium) DataNode() *datanode.DataNode` | `dappco.re/go/core/io/datanode` | DataNode returns the underlying Borg DataNode. | Yes | +| `Medium.Delete` | `func (*Medium) Delete(p string) error` | `dappco.re/go/core/io/datanode` | Removes a file, key, or empty directory. | Yes | +| `Medium.DeleteAll` | `func (*Medium) DeleteAll(p string) error` | `dappco.re/go/core/io/datanode` | Removes a file or directory tree recursively. | Yes | +| `Medium.EnsureDir` | `func (*Medium) EnsureDir(p string) error` | `dappco.re/go/core/io/datanode` | Ensures a directory path exists. 
| Yes | +| `Medium.Exists` | `func (*Medium) Exists(p string) bool` | `dappco.re/go/core/io/datanode` | Reports whether the path exists. | Yes | +| `Medium.FileGet` | `func (*Medium) FileGet(p string) (string, error)` | `dappco.re/go/core/io/datanode` | Reads a file or key through the convenience accessor. | Yes | +| `Medium.FileSet` | `func (*Medium) FileSet(p, content string) error` | `dappco.re/go/core/io/datanode` | Writes a file or key through the convenience accessor. | Yes | +| `Medium.IsDir` | `func (*Medium) IsDir(p string) bool` | `dappco.re/go/core/io/datanode` | Reports whether the entry represents a directory. | Yes | +| `Medium.IsFile` | `func (*Medium) IsFile(p string) bool` | `dappco.re/go/core/io/datanode` | Reports whether the path exists as a regular file. | Yes | +| `Medium.List` | `func (*Medium) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/datanode` | Lists directory entries beneath the given path. | Yes | +| `Medium.Open` | `func (*Medium) Open(p string) (fs.File, error)` | `dappco.re/go/core/io/datanode` | Opens the named file for reading. | Yes | +| `Medium.Read` | `func (*Medium) Read(p string) (string, error)` | `dappco.re/go/core/io/datanode` | Reads the content of a file or key as a string. | Yes | +| `Medium.ReadStream` | `func (*Medium) ReadStream(p string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/datanode` | Opens a streaming reader for the file content. | Yes | +| `Medium.Rename` | `func (*Medium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/datanode` | Moves a file or directory to a new path. | Yes | +| `Medium.Restore` | `func (*Medium) Restore(data []byte) error` | `dappco.re/go/core/io/datanode` | Restore replaces the filesystem contents from a tarball. | Yes | +| `Medium.Snapshot` | `func (*Medium) Snapshot() ([]byte, error)` | `dappco.re/go/core/io/datanode` | Snapshot serializes the entire filesystem to a tarball. 
| Yes | +| `Medium.Stat` | `func (*Medium) Stat(p string) (fs.FileInfo, error)` | `dappco.re/go/core/io/datanode` | Returns file information for the given path. | Yes | +| `Medium.Write` | `func (*Medium) Write(p, content string) error` | `dappco.re/go/core/io/datanode` | Writes the given content to a file or key, overwriting it if it exists. | Yes | +| `Medium.WriteMode` | `func (*Medium) WriteMode(p, content string, mode os.FileMode) error` | `dappco.re/go/core/io/datanode` | Writes content using an explicit file mode. | No | +| `Medium.WriteStream` | `func (*Medium) WriteStream(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/datanode` | Opens a streaming writer for the file content. | Yes | +| `Medium` | `type Medium struct` | `dappco.re/go/core/io/local` | Medium is a local filesystem storage backend. | Yes | +| `New` | `func New(root string) (*Medium, error)` | `dappco.re/go/core/io/local` | New creates a new local Medium rooted at the given directory. | Yes | +| `Medium.Append` | `func (*Medium) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/local` | Append opens the named file for appending, creating it if it doesn't exist. | No | +| `Medium.Create` | `func (*Medium) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/local` | Create creates or truncates the named file. | Yes | +| `Medium.Delete` | `func (*Medium) Delete(p string) error` | `dappco.re/go/core/io/local` | Delete removes a file or empty directory. | Yes | +| `Medium.DeleteAll` | `func (*Medium) DeleteAll(p string) error` | `dappco.re/go/core/io/local` | DeleteAll removes a file or directory recursively. | Yes | +| `Medium.EnsureDir` | `func (*Medium) EnsureDir(p string) error` | `dappco.re/go/core/io/local` | EnsureDir creates the directory if it doesn't exist. | Yes | +| `Medium.Exists` | `func (*Medium) Exists(p string) bool` | `dappco.re/go/core/io/local` | Exists returns true if path exists. 
| Yes | +| `Medium.FileGet` | `func (*Medium) FileGet(p string) (string, error)` | `dappco.re/go/core/io/local` | FileGet is an alias for Read. | Yes | +| `Medium.FileSet` | `func (*Medium) FileSet(p, content string) error` | `dappco.re/go/core/io/local` | FileSet is an alias for Write. | Yes | +| `Medium.IsDir` | `func (*Medium) IsDir(p string) bool` | `dappco.re/go/core/io/local` | IsDir returns true if path is a directory. | Yes | +| `Medium.IsFile` | `func (*Medium) IsFile(p string) bool` | `dappco.re/go/core/io/local` | IsFile returns true if path is a regular file. | Yes | +| `Medium.List` | `func (*Medium) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/local` | List returns directory entries. | Yes | +| `Medium.Open` | `func (*Medium) Open(p string) (fs.File, error)` | `dappco.re/go/core/io/local` | Open opens the named file for reading. | Yes | +| `Medium.Read` | `func (*Medium) Read(p string) (string, error)` | `dappco.re/go/core/io/local` | Read returns file contents as string. | Yes | +| `Medium.ReadStream` | `func (*Medium) ReadStream(path string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/local` | ReadStream returns a reader for the file content. | Yes | +| `Medium.Rename` | `func (*Medium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/local` | Rename moves a file or directory. | Yes | +| `Medium.Stat` | `func (*Medium) Stat(p string) (fs.FileInfo, error)` | `dappco.re/go/core/io/local` | Stat returns file info. | Yes | +| `Medium.Write` | `func (*Medium) Write(p, content string) error` | `dappco.re/go/core/io/local` | Write saves content to file, creating parent directories as needed. | Yes | +| `Medium.WriteMode` | `func (*Medium) WriteMode(p, content string, mode os.FileMode) error` | `dappco.re/go/core/io/local` | WriteMode saves content to file with explicit permissions. 
| Yes | +| `Medium.WriteStream` | `func (*Medium) WriteStream(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/local` | WriteStream returns a writer for the file content. | Yes | +| `Node` | `type Node struct` | `dappco.re/go/core/io/node` | Node is an in-memory filesystem that implements coreio.Node (and therefore coreio.Medium). | Yes | +| `WalkOptions` | `type WalkOptions struct` | `dappco.re/go/core/io/node` | WalkOptions configures the behaviour of Walk. | Yes | +| `FromTar` | `func FromTar(data []byte) (*Node, error)` | `dappco.re/go/core/io/node` | FromTar creates a new Node from a tar archive. | Yes | +| `New` | `func New() *Node` | `dappco.re/go/core/io/node` | New creates a new, empty Node. | Yes | +| `Node.AddData` | `func (*Node) AddData(name string, content []byte)` | `dappco.re/go/core/io/node` | AddData stages content in the in-memory filesystem. | Yes | +| `Node.Append` | `func (*Node) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/node` | Append opens the named file for appending, creating it if needed. | No | +| `Node.CopyFile` | `func (*Node) CopyFile(src, dst string, perm fs.FileMode) error` | `dappco.re/go/core/io/node` | CopyFile copies a file from the in-memory tree to the local filesystem. | Yes | +| `Node.CopyTo` | `func (*Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error` | `dappco.re/go/core/io/node` | CopyTo copies a file (or directory tree) from the node to any Medium. | No | +| `Node.Create` | `func (*Node) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/node` | Create creates or truncates the named file, returning a WriteCloser. | No | +| `Node.Delete` | `func (*Node) Delete(p string) error` | `dappco.re/go/core/io/node` | Delete removes a single file. | No | +| `Node.DeleteAll` | `func (*Node) DeleteAll(p string) error` | `dappco.re/go/core/io/node` | DeleteAll removes a file or directory and all children. 
| No | +| `Node.EnsureDir` | `func (*Node) EnsureDir(_ string) error` | `dappco.re/go/core/io/node` | EnsureDir is a no-op because directories are implicit in Node. | No | +| `Node.Exists` | `func (*Node) Exists(p string) bool` | `dappco.re/go/core/io/node` | Exists checks if a path exists (file or directory). | Yes | +| `Node.FileGet` | `func (*Node) FileGet(p string) (string, error)` | `dappco.re/go/core/io/node` | FileGet is an alias for Read. | No | +| `Node.FileSet` | `func (*Node) FileSet(p, content string) error` | `dappco.re/go/core/io/node` | FileSet is an alias for Write. | No | +| `Node.IsDir` | `func (*Node) IsDir(p string) bool` | `dappco.re/go/core/io/node` | IsDir checks if a path exists and is a directory. | No | +| `Node.IsFile` | `func (*Node) IsFile(p string) bool` | `dappco.re/go/core/io/node` | IsFile checks if a path exists and is a regular file. | No | +| `Node.List` | `func (*Node) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/node` | List returns directory entries for the given path. | No | +| `Node.LoadTar` | `func (*Node) LoadTar(data []byte) error` | `dappco.re/go/core/io/node` | LoadTar replaces the in-memory tree with the contents of a tar archive. | Yes | +| `Node.Open` | `func (*Node) Open(name string) (fs.File, error)` | `dappco.re/go/core/io/node` | Open opens a file from the Node. | Yes | +| `Node.Read` | `func (*Node) Read(p string) (string, error)` | `dappco.re/go/core/io/node` | Read retrieves the content of a file as a string. | No | +| `Node.ReadDir` | `func (*Node) ReadDir(name string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/node` | ReadDir reads and returns all directory entries for the named directory. | Yes | +| `Node.ReadFile` | `func (*Node) ReadFile(name string) ([]byte, error)` | `dappco.re/go/core/io/node` | ReadFile returns the content of the named file as a byte slice. 
| Yes | +| `Node.ReadStream` | `func (*Node) ReadStream(p string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/node` | ReadStream returns a ReadCloser for the file content. | No | +| `Node.Rename` | `func (*Node) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/node` | Rename moves a file from oldPath to newPath. | No | +| `Node.Stat` | `func (*Node) Stat(name string) (fs.FileInfo, error)` | `dappco.re/go/core/io/node` | Stat returns file information for the given path. | Yes | +| `Node.ToTar` | `func (*Node) ToTar() ([]byte, error)` | `dappco.re/go/core/io/node` | ToTar serialises the entire in-memory tree to a tar archive. | Yes | +| `Node.Walk` | `func (*Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error` | `dappco.re/go/core/io/node` | Walk walks the in-memory tree with optional WalkOptions. | Yes | +| `Node.WalkNode` | `func (*Node) WalkNode(root string, fn fs.WalkDirFunc) error` | `dappco.re/go/core/io/node` | WalkNode walks the in-memory tree, calling fn for each entry. | No | +| `Node.Write` | `func (*Node) Write(p, content string) error` | `dappco.re/go/core/io/node` | Write saves the given content to a file, overwriting it if it exists. | No | +| `Node.WriteMode` | `func (*Node) WriteMode(p, content string, mode os.FileMode) error` | `dappco.re/go/core/io/node` | WriteMode saves content with explicit permissions (no-op for in-memory node). | No | +| `Node.WriteStream` | `func (*Node) WriteStream(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/node` | WriteStream returns a WriteCloser for the file content. | No | +| `Medium` | `type Medium struct` | `dappco.re/go/core/io/s3` | Medium is an S3-backed storage backend implementing the io.Medium interface. | Yes | +| `Option` | `type Option func(*Medium)` | `dappco.re/go/core/io/s3` | Option configures a Medium. 
| Yes | +| `New` | `func New(bucket string, opts ...Option) (*Medium, error)` | `dappco.re/go/core/io/s3` | New creates a new S3 Medium for the given bucket. | Yes | +| `WithClient` | `func WithClient(client *s3.Client) Option` | `dappco.re/go/core/io/s3` | WithClient sets the S3 client for dependency injection. | No | +| `WithPrefix` | `func WithPrefix(prefix string) Option` | `dappco.re/go/core/io/s3` | WithPrefix sets an optional key prefix for all operations. | Yes | +| `Medium.Append` | `func (*Medium) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/s3` | Append opens the named file for appending. | Yes | +| `Medium.Create` | `func (*Medium) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/s3` | Create creates or truncates the named file. | Yes | +| `Medium.Delete` | `func (*Medium) Delete(p string) error` | `dappco.re/go/core/io/s3` | Delete removes a single object. | Yes | +| `Medium.DeleteAll` | `func (*Medium) DeleteAll(p string) error` | `dappco.re/go/core/io/s3` | DeleteAll removes all objects under the given prefix. | Yes | +| `Medium.EnsureDir` | `func (*Medium) EnsureDir(_ string) error` | `dappco.re/go/core/io/s3` | EnsureDir is a no-op for S3 (S3 has no real directories). | Yes | +| `Medium.Exists` | `func (*Medium) Exists(p string) bool` | `dappco.re/go/core/io/s3` | Exists checks if a path exists (file or directory prefix). | Yes | +| `Medium.FileGet` | `func (*Medium) FileGet(p string) (string, error)` | `dappco.re/go/core/io/s3` | FileGet is a convenience function that reads a file from the medium. | Yes | +| `Medium.FileSet` | `func (*Medium) FileSet(p, content string) error` | `dappco.re/go/core/io/s3` | FileSet is a convenience function that writes a file to the medium. | Yes | +| `Medium.IsDir` | `func (*Medium) IsDir(p string) bool` | `dappco.re/go/core/io/s3` | IsDir checks if a path exists and is a directory (has objects under it as a prefix). 
| Yes | +| `Medium.IsFile` | `func (*Medium) IsFile(p string) bool` | `dappco.re/go/core/io/s3` | IsFile checks if a path exists and is a regular file (not a "directory" prefix). | Yes | +| `Medium.List` | `func (*Medium) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/s3` | List returns directory entries for the given path using ListObjectsV2 with delimiter. | Yes | +| `Medium.Open` | `func (*Medium) Open(p string) (fs.File, error)` | `dappco.re/go/core/io/s3` | Open opens the named file for reading. | Yes | +| `Medium.Read` | `func (*Medium) Read(p string) (string, error)` | `dappco.re/go/core/io/s3` | Read retrieves the content of a file as a string. | Yes | +| `Medium.ReadStream` | `func (*Medium) ReadStream(p string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/s3` | ReadStream returns a reader for the file content. | Yes | +| `Medium.Rename` | `func (*Medium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/s3` | Rename moves an object by copying then deleting the original. | Yes | +| `Medium.Stat` | `func (*Medium) Stat(p string) (fs.FileInfo, error)` | `dappco.re/go/core/io/s3` | Stat returns file information for the given path using HeadObject. | Yes | +| `Medium.Write` | `func (*Medium) Write(p, content string) error` | `dappco.re/go/core/io/s3` | Write saves the given content to a file, overwriting it if it exists. | Yes | +| `Medium.WriteStream` | `func (*Medium) WriteStream(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/s3` | WriteStream returns a writer for the file content. | Yes | +| `Base64Sigil` | `type Base64Sigil struct` | `dappco.re/go/core/io/sigil` | Base64Sigil is a Sigil that encodes/decodes data to/from base64. | Yes | +| `ChaChaPolySigil` | `type ChaChaPolySigil struct` | `dappco.re/go/core/io/sigil` | ChaChaPolySigil is a Sigil that encrypts/decrypts data using ChaCha20-Poly1305. 
| Yes | +| `GzipSigil` | `type GzipSigil struct` | `dappco.re/go/core/io/sigil` | GzipSigil is a Sigil that compresses/decompresses data using gzip. | Yes | +| `HashSigil` | `type HashSigil struct` | `dappco.re/go/core/io/sigil` | HashSigil is a Sigil that hashes the data using a specified algorithm. | Yes | +| `HexSigil` | `type HexSigil struct` | `dappco.re/go/core/io/sigil` | HexSigil is a Sigil that encodes/decodes data to/from hexadecimal. | Yes | +| `JSONSigil` | `type JSONSigil struct` | `dappco.re/go/core/io/sigil` | JSONSigil is a Sigil that compacts or indents JSON data. | Yes | +| `PreObfuscator` | `type PreObfuscator interface` | `dappco.re/go/core/io/sigil` | PreObfuscator applies a reversible transformation to data before encryption. | Yes | +| `ReverseSigil` | `type ReverseSigil struct` | `dappco.re/go/core/io/sigil` | ReverseSigil is a Sigil that reverses the bytes of the payload. | Yes | +| `ShuffleMaskObfuscator` | `type ShuffleMaskObfuscator struct` | `dappco.re/go/core/io/sigil` | ShuffleMaskObfuscator provides stronger obfuscation through byte shuffling and masking. | Yes | +| `Sigil` | `type Sigil interface` | `dappco.re/go/core/io/sigil` | Sigil defines the interface for a data transformer. | Yes | +| `XORObfuscator` | `type XORObfuscator struct` | `dappco.re/go/core/io/sigil` | XORObfuscator performs XOR-based obfuscation using an entropy-derived key stream. | Yes | +| `GetNonceFromCiphertext` | `func GetNonceFromCiphertext(ciphertext []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | GetNonceFromCiphertext extracts the nonce from encrypted output. | Yes | +| `NewChaChaPolySigil` | `func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error)` | `dappco.re/go/core/io/sigil` | NewChaChaPolySigil creates a new encryption sigil with the given key. 
| Yes | +| `NewChaChaPolySigilWithObfuscator` | `func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error)` | `dappco.re/go/core/io/sigil` | NewChaChaPolySigilWithObfuscator creates a new encryption sigil with custom obfuscator. | Yes | +| `NewHashSigil` | `func NewHashSigil(h crypto.Hash) *HashSigil` | `dappco.re/go/core/io/sigil` | NewHashSigil creates a new HashSigil. | Yes | +| `NewSigil` | `func NewSigil(name string) (Sigil, error)` | `dappco.re/go/core/io/sigil` | NewSigil is a factory function that returns a Sigil based on a string name. | Yes | +| `Transmute` | `func Transmute(data []byte, sigils []Sigil) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Transmute applies a series of sigils to data in sequence. | Yes | +| `Untransmute` | `func Untransmute(data []byte, sigils []Sigil) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Untransmute reverses a transmutation by applying Out in reverse order. | Yes | +| `Base64Sigil.In` | `func (*Base64Sigil) In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In encodes the data to base64. | Yes | +| `Base64Sigil.Out` | `func (*Base64Sigil) Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out decodes the data from base64. | Yes | +| `ChaChaPolySigil.In` | `func (*ChaChaPolySigil) In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In encrypts the data with pre-obfuscation. | Yes | +| `ChaChaPolySigil.Out` | `func (*ChaChaPolySigil) Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out decrypts the data and reverses obfuscation. | Yes | +| `GzipSigil.In` | `func (*GzipSigil) In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In compresses the data using gzip. | Yes | +| `GzipSigil.Out` | `func (*GzipSigil) Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out decompresses the data using gzip. 
| Yes | +| `HashSigil.In` | `func (*HashSigil) In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In hashes the data. | Yes | +| `HashSigil.Out` | `func (*HashSigil) Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out is a no-op for HashSigil. | Yes | +| `HexSigil.In` | `func (*HexSigil) In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In encodes the data to hexadecimal. | Yes | +| `HexSigil.Out` | `func (*HexSigil) Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out decodes the data from hexadecimal. | Yes | +| `JSONSigil.In` | `func (*JSONSigil) In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In compacts or indents the JSON data. | Yes | +| `JSONSigil.Out` | `func (*JSONSigil) Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out is a no-op for JSONSigil. | Yes | +| `PreObfuscator.Deobfuscate` | `Deobfuscate(data []byte, entropy []byte) []byte` | `dappco.re/go/core/io/sigil` | Deobfuscate reverses the transformation after decryption. | Yes | +| `PreObfuscator.Obfuscate` | `Obfuscate(data []byte, entropy []byte) []byte` | `dappco.re/go/core/io/sigil` | Obfuscate transforms plaintext before encryption using the provided entropy. | Yes | +| `ReverseSigil.In` | `func (*ReverseSigil) In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In reverses the bytes of the data. | Yes | +| `ReverseSigil.Out` | `func (*ReverseSigil) Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out reverses the bytes of the data. | Yes | +| `ShuffleMaskObfuscator.Deobfuscate` | `func (*ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte) []byte` | `dappco.re/go/core/io/sigil` | Deobfuscate reverses the shuffle and mask operations. 
| Yes | +| `ShuffleMaskObfuscator.Obfuscate` | `func (*ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte` | `dappco.re/go/core/io/sigil` | Obfuscate shuffles bytes and applies a mask derived from entropy. | Yes | +| `Sigil.In` | `In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In applies the forward transformation to the data. | Yes | +| `Sigil.Out` | `Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out applies the reverse transformation to the data. | Yes | +| `XORObfuscator.Deobfuscate` | `func (*XORObfuscator) Deobfuscate(data []byte, entropy []byte) []byte` | `dappco.re/go/core/io/sigil` | Deobfuscate reverses the XOR transformation (XOR is symmetric). | Yes | +| `XORObfuscator.Obfuscate` | `func (*XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte` | `dappco.re/go/core/io/sigil` | Obfuscate XORs the data with a key stream derived from the entropy. | Yes | +| `Medium` | `type Medium struct` | `dappco.re/go/core/io/sqlite` | Medium is a SQLite-backed storage backend implementing the io.Medium interface. | Yes | +| `Option` | `type Option func(*Medium)` | `dappco.re/go/core/io/sqlite` | Option configures a Medium. | Yes | +| `New` | `func New(dbPath string, opts ...Option) (*Medium, error)` | `dappco.re/go/core/io/sqlite` | New creates a new SQLite Medium at the given database path. | Yes | +| `WithTable` | `func WithTable(table string) Option` | `dappco.re/go/core/io/sqlite` | WithTable sets the table name (default: "files"). | Yes | +| `Medium.Append` | `func (*Medium) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/sqlite` | Append opens the named file for appending, creating it if it doesn't exist. | Yes | +| `Medium.Close` | `func (*Medium) Close() error` | `dappco.re/go/core/io/sqlite` | Close closes the underlying database connection. 
| Yes | +| `Medium.Create` | `func (*Medium) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/sqlite` | Create creates or truncates the named file. | Yes | +| `Medium.Delete` | `func (*Medium) Delete(p string) error` | `dappco.re/go/core/io/sqlite` | Delete removes a file or empty directory. | Yes | +| `Medium.DeleteAll` | `func (*Medium) DeleteAll(p string) error` | `dappco.re/go/core/io/sqlite` | DeleteAll removes a file or directory and all its contents recursively. | Yes | +| `Medium.EnsureDir` | `func (*Medium) EnsureDir(p string) error` | `dappco.re/go/core/io/sqlite` | EnsureDir makes sure a directory exists, creating it if necessary. | Yes | +| `Medium.Exists` | `func (*Medium) Exists(p string) bool` | `dappco.re/go/core/io/sqlite` | Exists checks if a path exists (file or directory). | Yes | +| `Medium.FileGet` | `func (*Medium) FileGet(p string) (string, error)` | `dappco.re/go/core/io/sqlite` | FileGet is a convenience function that reads a file from the medium. | Yes | +| `Medium.FileSet` | `func (*Medium) FileSet(p, content string) error` | `dappco.re/go/core/io/sqlite` | FileSet is a convenience function that writes a file to the medium. | Yes | +| `Medium.IsDir` | `func (*Medium) IsDir(p string) bool` | `dappco.re/go/core/io/sqlite` | IsDir checks if a path exists and is a directory. | Yes | +| `Medium.IsFile` | `func (*Medium) IsFile(p string) bool` | `dappco.re/go/core/io/sqlite` | IsFile checks if a path exists and is a regular file. | Yes | +| `Medium.List` | `func (*Medium) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/sqlite` | List returns the directory entries for the given path. | Yes | +| `Medium.Open` | `func (*Medium) Open(p string) (fs.File, error)` | `dappco.re/go/core/io/sqlite` | Open opens the named file for reading. | Yes | +| `Medium.Read` | `func (*Medium) Read(p string) (string, error)` | `dappco.re/go/core/io/sqlite` | Read retrieves the content of a file as a string. 
| Yes | +| `Medium.ReadStream` | `func (*Medium) ReadStream(p string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/sqlite` | ReadStream returns a reader for the file content. | Yes | +| `Medium.Rename` | `func (*Medium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/sqlite` | Rename moves a file or directory from oldPath to newPath. | Yes | +| `Medium.Stat` | `func (*Medium) Stat(p string) (fs.FileInfo, error)` | `dappco.re/go/core/io/sqlite` | Stat returns file information for the given path. | Yes | +| `Medium.Write` | `func (*Medium) Write(p, content string) error` | `dappco.re/go/core/io/sqlite` | Write saves the given content to a file, overwriting it if it exists. | Yes | +| `Medium.WriteStream` | `func (*Medium) WriteStream(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/sqlite` | WriteStream returns a writer for the file content. | Yes | +| `Medium` | `type Medium struct` | `dappco.re/go/core/io/store` | Medium wraps a Store to satisfy the io.Medium interface. | Yes | +| `Store` | `type Store struct` | `dappco.re/go/core/io/store` | Store is a group-namespaced key-value store backed by SQLite. | Yes | +| `New` | `func New(dbPath string) (*Store, error)` | `dappco.re/go/core/io/store` | New creates a Store at the given SQLite path. | Yes | +| `NewMedium` | `func NewMedium(dbPath string) (*Medium, error)` | `dappco.re/go/core/io/store` | NewMedium creates an io.Medium backed by a KV store at the given SQLite path. | Yes | +| `Medium.Append` | `func (*Medium) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/store` | Append opens a key for appending. | Yes | +| `Medium.Close` | `func (*Medium) Close() error` | `dappco.re/go/core/io/store` | Close closes the underlying store. | Yes | +| `Medium.Create` | `func (*Medium) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/store` | Create creates or truncates a key. 
| Yes | +| `Medium.Delete` | `func (*Medium) Delete(p string) error` | `dappco.re/go/core/io/store` | Delete removes a key, or checks that a group is empty. | Yes | +| `Medium.DeleteAll` | `func (*Medium) DeleteAll(p string) error` | `dappco.re/go/core/io/store` | DeleteAll removes a key, or all keys in a group. | Yes | +| `Medium.EnsureDir` | `func (*Medium) EnsureDir(_ string) error` | `dappco.re/go/core/io/store` | EnsureDir is a no-op — groups are created implicitly on Set. | No | +| `Medium.Exists` | `func (*Medium) Exists(p string) bool` | `dappco.re/go/core/io/store` | Exists returns true if a group or key exists. | Yes | +| `Medium.FileGet` | `func (*Medium) FileGet(p string) (string, error)` | `dappco.re/go/core/io/store` | FileGet is an alias for Read. | No | +| `Medium.FileSet` | `func (*Medium) FileSet(p, content string) error` | `dappco.re/go/core/io/store` | FileSet is an alias for Write. | No | +| `Medium.IsDir` | `func (*Medium) IsDir(p string) bool` | `dappco.re/go/core/io/store` | IsDir returns true if the path is a group with entries. | Yes | +| `Medium.IsFile` | `func (*Medium) IsFile(p string) bool` | `dappco.re/go/core/io/store` | IsFile returns true if a group/key pair exists. | Yes | +| `Medium.List` | `func (*Medium) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/store` | List returns directory entries. | Yes | +| `Medium.Open` | `func (*Medium) Open(p string) (fs.File, error)` | `dappco.re/go/core/io/store` | Open opens a key for reading. | Yes | +| `Medium.Read` | `func (*Medium) Read(p string) (string, error)` | `dappco.re/go/core/io/store` | Read retrieves the value at group/key. | Yes | +| `Medium.ReadStream` | `func (*Medium) ReadStream(p string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/store` | ReadStream returns a reader for the value. | No | +| `Medium.Rename` | `func (*Medium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/store` | Rename moves a key from one path to another. 
| Yes | +| `Medium.Stat` | `func (*Medium) Stat(p string) (fs.FileInfo, error)` | `dappco.re/go/core/io/store` | Stat returns file info for a group (dir) or key (file). | Yes | +| `Medium.Store` | `func (*Medium) Store() *Store` | `dappco.re/go/core/io/store` | Store returns the underlying KV store for direct access. | No | +| `Medium.Write` | `func (*Medium) Write(p, content string) error` | `dappco.re/go/core/io/store` | Write stores a value at group/key. | Yes | +| `Medium.WriteStream` | `func (*Medium) WriteStream(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/store` | WriteStream returns a writer. | No | +| `Store.AsMedium` | `func (*Store) AsMedium() *Medium` | `dappco.re/go/core/io/store` | AsMedium returns a Medium adapter for an existing Store. | Yes | +| `Store.Close` | `func (*Store) Close() error` | `dappco.re/go/core/io/store` | Close closes the underlying database. | Yes | +| `Store.Count` | `func (*Store) Count(group string) (int, error)` | `dappco.re/go/core/io/store` | Count returns the number of keys in a group. | Yes | +| `Store.Delete` | `func (*Store) Delete(group, key string) error` | `dappco.re/go/core/io/store` | Delete removes a single key from a group. | Yes | +| `Store.DeleteGroup` | `func (*Store) DeleteGroup(group string) error` | `dappco.re/go/core/io/store` | DeleteGroup removes all keys in a group. | Yes | +| `Store.Get` | `func (*Store) Get(group, key string) (string, error)` | `dappco.re/go/core/io/store` | Get retrieves a value by group and key. | Yes | +| `Store.GetAll` | `func (*Store) GetAll(group string) (map[string]string, error)` | `dappco.re/go/core/io/store` | GetAll returns all key-value pairs in a group. | Yes | +| `Store.Render` | `func (*Store) Render(tmplStr, group string) (string, error)` | `dappco.re/go/core/io/store` | Render loads all key-value pairs from a group and renders a Go template. 
| Yes | +| `Store.Set` | `func (*Store) Set(group, key, value string) error` | `dappco.re/go/core/io/store` | Set stores a value by group and key, overwriting if exists. | Yes | +| `Service` | `type Service struct` | `dappco.re/go/core/io/workspace` | Service implements the Workspace interface. | Yes | +| `Workspace` | `type Workspace interface` | `dappco.re/go/core/io/workspace` | Workspace provides management for encrypted user workspaces. | No | +| `New` | `func New(c *core.Core, crypt ...cryptProvider) (any, error)` | `dappco.re/go/core/io/workspace` | New creates a new Workspace service instance. | Yes | +| `Service.CreateWorkspace` | `func (*Service) CreateWorkspace(identifier, password string) (string, error)` | `dappco.re/go/core/io/workspace` | CreateWorkspace creates a new encrypted workspace. | Yes | +| `Service.HandleIPCEvents` | `func (*Service) HandleIPCEvents(c *core.Core, msg core.Message) core.Result` | `dappco.re/go/core/io/workspace` | HandleIPCEvents handles workspace-related IPC messages. | No | +| `Service.SwitchWorkspace` | `func (*Service) SwitchWorkspace(name string) error` | `dappco.re/go/core/io/workspace` | SwitchWorkspace changes the active workspace. | Yes | +| `Service.WorkspaceFileGet` | `func (*Service) WorkspaceFileGet(filename string) (string, error)` | `dappco.re/go/core/io/workspace` | WorkspaceFileGet retrieves the content of a file from the active workspace. | Yes | +| `Service.WorkspaceFileSet` | `func (*Service) WorkspaceFileSet(filename, content string) error` | `dappco.re/go/core/io/workspace` | WorkspaceFileSet saves content to a file in the active workspace. | Yes | +| `Workspace.CreateWorkspace` | `CreateWorkspace(identifier, password string) (string, error)` | `dappco.re/go/core/io/workspace` | Creates a new encrypted workspace and returns its ID. | Yes | +| `Workspace.SwitchWorkspace` | `SwitchWorkspace(name string) error` | `dappco.re/go/core/io/workspace` | Switches the active workspace. 
| Yes | +| `Workspace.WorkspaceFileGet` | `WorkspaceFileGet(filename string) (string, error)` | `dappco.re/go/core/io/workspace` | Reads a file from the active workspace. | Yes | +| `Workspace.WorkspaceFileSet` | `WorkspaceFileSet(filename, content string) error` | `dappco.re/go/core/io/workspace` | Writes a file into the active workspace. | Yes | -- 2.45.3 From 9688402be51d779c1f2ac64a6493265a4aad7f10 Mon Sep 17 00:00:00 2001 From: Virgil Date: Sun, 29 Mar 2026 18:52:55 +0000 Subject: [PATCH 6/6] fix(ax): align path semantics and usage docs --- datanode/client.go | 77 ++++++++++++++++++---- datanode/client_test.go | 36 +++++++++++ go.sum | 62 ++++++++++++++++++ io.go | 24 ++++++- local/client.go | 10 +++ node/node.go | 138 ++++++++++++++++++++++++++++++---------- node/node_test.go | 38 +++++++++++ s3/s3.go | 9 +++ sigil/crypto_sigil.go | 15 ++++- sqlite/sqlite.go | 10 +++ store/medium.go | 37 +++++++++-- store/medium_test.go | 11 ++++ store/store.go | 17 +++++ workspace/service.go | 30 +++++++++ 14 files changed, 455 insertions(+), 59 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index 9dfff29..fd094f3 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -1,9 +1,16 @@ // Package datanode provides an in-memory io.Medium backed by Borg's DataNode. // -// DataNode is an in-memory fs.FS that serializes to tar. Wrapping it as a +// DataNode is an in-memory fs.FS that serialises to tar. Wrapping it as a // Medium lets any code that works with io.Medium transparently operate on // an in-memory filesystem that can be snapshotted, shipped as a crash report, // or wrapped in a TIM container for runc execution. +// +// Example usage: +// +// m := datanode.New() +// _ = m.Write("notes/todo.txt", "write docs") +// snap, _ := m.Snapshot() +// restored, _ := datanode.FromTar(snap) package datanode import ( @@ -42,6 +49,11 @@ type Medium struct { } // New creates a new empty DataNode Medium. 
+// +// Example: +// +// m := datanode.New() +// _ = m.Write("notes/todo.txt", "write docs") func New() *Medium { return &Medium{ dn: borgdatanode.New(), @@ -50,18 +62,21 @@ func New() *Medium { } // FromTar creates a Medium from a tarball, restoring all files. +// +// Example: +// +// m := datanode.New() +// snap, _ := m.Snapshot() +// restored, _ := datanode.FromTar(snap) func FromTar(data []byte) (*Medium, error) { - dn, err := borgdatanode.FromTar(data) - if err != nil { - return nil, coreerr.E("datanode.FromTar", "failed to restore", err) + m := New() + if err := m.Restore(data); err != nil { + return nil, err } - return &Medium{ - dn: dn, - dirs: make(map[string]bool), - }, nil + return m, nil } -// Snapshot serializes the entire filesystem to a tarball. +// Snapshot serialises the entire filesystem to a tarball. // Use this for crash reports, workspace packaging, or TIM creation. func (m *Medium) Snapshot() ([]byte, error) { m.mu.RLock() @@ -83,7 +98,7 @@ func (m *Medium) Restore(data []byte) error { defer m.mu.Unlock() m.dn = dn m.dirs = make(map[string]bool) - return nil + return m.rebuildCleanLocked() } // DataNode returns the underlying Borg DataNode. @@ -97,11 +112,14 @@ func (m *Medium) DataNode() *borgdatanode.DataNode { // clean normalises a path: strips leading slash, cleans traversal. func clean(p string) string { p = strings.TrimPrefix(p, "/") - p = path.Clean(p) - if p == "." 
{ + if p == "" { return "" } - return p + p = path.Clean("/" + p) + if p == "/" { + return "" + } + return strings.TrimPrefix(p, "/") } // --- io.Medium interface --- @@ -111,6 +129,9 @@ func (m *Medium) Read(p string) (string, error) { defer m.mu.RUnlock() p = clean(p) + if p == "" { + return "", coreerr.E("datanode.Read", "path is required", os.ErrInvalid) + } f, err := m.dn.Open(p) if err != nil { return "", coreerr.E("datanode.Read", "not found: "+p, os.ErrNotExist) @@ -294,6 +315,9 @@ func (m *Medium) Rename(oldPath, newPath string) error { oldPath = clean(oldPath) newPath = clean(newPath) + if oldPath == "" || newPath == "" { + return coreerr.E("datanode.Rename", "both old and new paths are required", os.ErrInvalid) + } // Check if source is a file info, err := m.dn.Stat(oldPath) @@ -463,11 +487,14 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { defer m.mu.RUnlock() p = clean(p) + if p == "" { + return nil, coreerr.E("datanode.ReadStream", "path is required", os.ErrInvalid) + } f, err := m.dn.Open(p) if err != nil { return nil, coreerr.E("datanode.ReadStream", "not found: "+p, os.ErrNotExist) } - return f.(goio.ReadCloser), nil + return f, nil } func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) { @@ -540,6 +567,28 @@ func (m *Medium) collectAllLocked() ([]string, error) { return names, err } +func (m *Medium) rebuildCleanLocked() error { + entries, err := m.collectAllLocked() + if err != nil { + return err + } + + newDN := borgdatanode.New() + for _, name := range entries { + cleanName := clean(name) + if cleanName == "" { + continue + } + data, err := m.readFileLocked(name) + if err != nil { + return err + } + newDN.AddData(cleanName, data) + } + m.dn = newDN + return nil +} + func (m *Medium) readFileLocked(name string) ([]byte, error) { f, err := dataNodeOpen(m.dn, name) if err != nil { diff --git a/datanode/client_test.go b/datanode/client_test.go index 8beb6cd..08db8a8 100644 --- a/datanode/client_test.go +++ 
b/datanode/client_test.go @@ -1,6 +1,8 @@ package datanode import ( + "archive/tar" + "bytes" "errors" "io" "io/fs" @@ -62,6 +64,17 @@ func TestLeadingSlash_Good(t *testing.T) { assert.Equal(t, "stripped", got) } +func TestTraversal_Normalised_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("../escape.txt", "safe")) + + got, err := m.Read("escape.txt") + require.NoError(t, err) + assert.Equal(t, "safe", got) + assert.True(t, m.Exists("../escape.txt")) +} + func TestIsFile_Good(t *testing.T) { m := New() @@ -369,6 +382,29 @@ func TestSnapshotRestore_Good(t *testing.T) { assert.Equal(t, "charlie", got) } +func TestFromTar_NormalisesPaths_Good(t *testing.T) { + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + + hdr := &tar.Header{ + Name: "../escape.txt", + Mode: 0600, + Size: int64(len("safe")), + Typeflag: tar.TypeReg, + } + require.NoError(t, tw.WriteHeader(hdr)) + _, err := tw.Write([]byte("safe")) + require.NoError(t, err) + require.NoError(t, tw.Close()) + + m, err := FromTar(buf.Bytes()) + require.NoError(t, err) + + got, err := m.Read("escape.txt") + require.NoError(t, err) + assert.Equal(t, "safe", got) +} + func TestRestore_Good(t *testing.T) { m := New() diff --git a/go.sum b/go.sum index 40a1cd0..931199d 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,12 @@ dappco.re/go/core v0.6.0 h1:0wmuO/UmCWXxJkxQ6XvVLnqkAuWitbd49PhxjCsplyk= dappco.re/go/core v0.6.0/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= dappco.re/go/core/log v0.1.0 h1:pa71Vq2TD2aoEUQWFKwNcaJ3GBY8HbaNGqtE688Unyc= dappco.re/go/core/log v0.1.0/go.mod h1:Nkqb8gsXhZAO8VLpx7B8i1iAmohhzqA20b9Zr8VUcJs= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= forge.lthn.ai/Snider/Borg v0.3.1 h1:gfC1ZTpLoZai07oOWJiVeQ8+qJYK8A795tgVGJHbVL8= forge.lthn.ai/Snider/Borg v0.3.1/go.mod h1:Z7DJD0yHXsxSyM7Mjl6/g4gH1NBsIz44Bf5AFlV76Wg= +forge.lthn.ai/Snider/Enchantrix v0.0.4/go.mod h1:OGCwuVeZPq3OPe2h6TX/ZbgEjHU6B7owpIBeXQGbSe0= +github.com/Microsoft/go-winio 
v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k= github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 h1:3kGOqnh1pPeddVa/E37XNTaWJ8W6vrbYV9lJEkCnhuY= @@ -26,47 +30,105 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 h1:csi9NLpFZXb9fxY7rS1xVzgPRGMt7 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1/go.mod h1:qXVal5H0ChqXP63t6jze5LmFalc7+ZE7wOdLtZ0LCP0= github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= +github.com/clipperhouse/uax29/v2 v2.4.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= +github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= 
+github.com/go-git/go-billy/v5 v5.7.0/go.mod h1:/1IUejTKH8xipsAcdfcSAlUlo2J7lkYV8GTKxAT/L3E= +github.com/go-git/go-git/v5 v5.16.4/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jchv/go-winloader v0.0.0-20250406163304-c1995be93bd1/go.mod h1:alcuEEnZsY1WQsagKhZDsoPCRoOijYqhZvPwLG0kzVs= +github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M= +github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod 
h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/labstack/echo/v4 v4.13.3/go.mod h1:o90YNEeQWjDozo584l7AwhJMHN0bOC4tAfg+Xox9q5g= +github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= +github.com/leaanthony/go-ansi-parser v1.6.1/go.mod h1:+vva/2y4alzVmmIEpk9QDhA7vLC5zKDTRwfZGOp3IWU= +github.com/leaanthony/gosod v1.0.4/go.mod h1:GKuIL0zzPj3O1SdWQOdgURSuhkF+Urizzxh26t9f1cw= +github.com/leaanthony/slicer v1.6.0/go.mod h1:o/Iz29g7LN0GqH3aMjWAe90381nyZlDNquK+mtH2Fj8= +github.com/leaanthony/u v1.1.1/go.mod h1:9+o6hejoRljvZ3BzdYlVL0JYCwtnAsVuN9pVTQcaRfI= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/pjbgf/sha1cd v0.5.0/go.mod h1:lhpGlyHLpQZoxMv8HcgXvZEhcGs0PG/vsZnEJ7H0iCM= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= +github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= +github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/skeema/knownhosts v1.3.2/go.mod h1:bEg3iQAuw+jyiw+484wwFJoKSLwcfd7fqRy+N0QTiow= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tkrajina/go-reflector v0.5.8/go.mod h1:ECbqLgccecY5kPmPmXg1MrHW585yMcDkVl6IvJe64T4= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/wailsapp/go-webview2 v1.0.23/go.mod h1:qJmWAmAmaniuKGZPWwne+uor3AHMB5PFhqiK0Bbj8kc= +github.com/wailsapp/mimetype v1.4.1/go.mod h1:9aV5k31bBOv5z6u+QP8TltzvNGJPmNJD4XlAL3U+j3o= 
+github.com/wailsapp/wails/v2 v2.11.0/go.mod h1:jrf0ZaM6+GBc1wRmXsM8cIvzlg0karYin3erahI4+0k= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= +golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= +golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c/go.mod h1:TpUTTEp9frx7rTdLpC9gFG9kdI7zVLFTFFlqaH2Cncw= +golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A= +golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s= golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis= diff --git a/io.go b/io.go index 8d18e0a..41a3d79 100644 --- a/io.go +++ b/io.go @@ -1,3 +1,11 @@ +// Package io defines the Medium abstraction used across go-io. +// +// Example usage: +// +// sandbox, _ := io.NewSandboxed("/home/user/project") +// _ = sandbox.Write("config/app.yaml", "key: value") +// mem := io.NewMockMedium() +// _ = io.Copy(sandbox, "config/app.yaml", mem, "config/app.yaml") package io import ( @@ -8,8 +16,8 @@ import ( "time" core "dappco.re/go/core" - coreerr "dappco.re/go/core/log" "dappco.re/go/core/io/local" + coreerr "dappco.re/go/core/log" ) // Medium defines the standard interface for a storage backend. @@ -123,6 +131,11 @@ func init() { // NewSandboxed creates a new Medium sandboxed to the given root directory. // All file operations are restricted to paths within the root. // The root directory will be created if it doesn't exist. +// +// Example: +// +// sandbox, _ := io.NewSandboxed("/home/user/project") +// _ = sandbox.Write("notes/todo.txt", "write docs") func NewSandboxed(root string) (Medium, error) { return local.New(root) } @@ -160,6 +173,10 @@ func IsFile(m Medium, path string) bool { } // Copy copies a file from one medium to another. +// +// Example: +// +// _ = io.Copy(src, "backup.tar", dst, "backup.tar") func Copy(src Medium, srcPath string, dst Medium, dstPath string) error { content, err := src.Read(srcPath) if err != nil { @@ -181,6 +198,11 @@ type MockMedium struct { } // NewMockMedium creates a new MockMedium instance. 
+// +// Example: +// +// m := io.NewMockMedium() +// _ = m.Write("config/theme", "dark") func NewMockMedium() *MockMedium { return &MockMedium{ Files: make(map[string]string), diff --git a/local/client.go b/local/client.go index 713c42e..2b57002 100644 --- a/local/client.go +++ b/local/client.go @@ -1,4 +1,9 @@ // Package local provides a local filesystem implementation of the io.Medium interface. +// +// Example usage: +// +// m, _ := local.New("/home/user/project") +// _ = m.Write("config/app.yaml", "key: value") package local import ( @@ -20,6 +25,11 @@ type Medium struct { // New creates a new local Medium rooted at the given directory. // Pass "/" for full filesystem access, or a specific path to sandbox. +// +// Example: +// +// m, _ := local.New(t.TempDir()) +// _ = m.Write("config/app.yaml", "key: value") func New(root string) (*Medium, error) { abs := absolutePath(root) // Resolve symlinks so sandbox checks compare like-for-like. diff --git a/node/node.go b/node/node.go index 418d590..175e341 100644 --- a/node/node.go +++ b/node/node.go @@ -1,6 +1,12 @@ // Package node provides an in-memory filesystem implementation of io.Medium // ported from Borg's DataNode. It stores files in memory with implicit // directory structure and supports tar serialisation. +// +// Example usage: +// +// n := node.New() +// n.AddData("notes/todo.txt", []byte("write docs")) +// tarball, _ := n.ToTar() package node import ( @@ -16,11 +22,11 @@ import ( "time" coreio "dappco.re/go/core/io" + coreerr "dappco.re/go/core/log" ) -// Node is an in-memory filesystem that implements coreio.Node (and therefore -// coreio.Medium). Directories are implicit -- they exist whenever a file path -// contains a "/". +// Node is an in-memory filesystem that implements coreio.Medium. +// Directories are implicit -- they exist whenever a file path contains a "/". 
type Node struct { files map[string]*dataFile } @@ -30,6 +36,11 @@ var _ coreio.Medium = (*Node)(nil) var _ fs.ReadFileFS = (*Node)(nil) // New creates a new, empty Node. +// +// Example: +// +// n := node.New() +// n.AddData("notes/todo.txt", []byte("write docs")) func New() *Node { return &Node{files: make(map[string]*dataFile)} } @@ -46,6 +57,10 @@ func (n *Node) AddData(name string, content []byte) { if strings.HasSuffix(name, "/") { return } + name = cleanPath(name) + if name == "" { + return + } n.files[name] = &dataFile{ name: name, content: content, @@ -112,6 +127,10 @@ func (n *Node) LoadTar(data []byte) error { if name == "" || strings.HasSuffix(name, "/") { continue } + name = cleanPath(name) + if name == "" { + continue + } newFiles[name] = &dataFile{ name: name, content: content, @@ -185,7 +204,10 @@ func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { // ReadFile returns the content of the named file as a byte slice. // Implements fs.ReadFileFS. func (n *Node) ReadFile(name string) ([]byte, error) { - name = strings.TrimPrefix(name, "/") + name = cleanPath(name) + if name == "" { + return nil, &fs.PathError{Op: "readfile", Path: name, Err: fs.ErrNotExist} + } f, ok := n.files[name] if !ok { return nil, &fs.PathError{Op: "read", Path: name, Err: fs.ErrNotExist} @@ -198,7 +220,10 @@ func (n *Node) ReadFile(name string) ([]byte, error) { // CopyFile copies a file from the in-memory tree to the local filesystem. func (n *Node) CopyFile(src, dst string, perm fs.FileMode) error { - src = strings.TrimPrefix(src, "/") + src = cleanPath(src) + if src == "" { + return &fs.PathError{Op: "copyfile", Path: src, Err: fs.ErrInvalid} + } f, ok := n.files[src] if !ok { // Check if it's a directory — can't copy directories this way. @@ -215,8 +240,13 @@ func (n *Node) CopyFile(src, dst string, perm fs.FileMode) error { } // CopyTo copies a file (or directory tree) from the node to any Medium. 
+// +// Example: +// +// dst, _ := io.NewSandboxed("/tmp/work") +// _ = n.CopyTo(dst, "notes", "archive/notes") func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { - sourcePath = strings.TrimPrefix(sourcePath, "/") + sourcePath = cleanPath(sourcePath) info, err := n.Stat(sourcePath) if err != nil { return err @@ -242,10 +272,7 @@ func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { continue } rel := strings.TrimPrefix(p, prefix) - dest := destPath - if rel != "" { - dest = destPath + "/" + rel - } + dest := path.Join(destPath, rel) if err := target.Write(dest, string(f.content)); err != nil { return err } @@ -257,15 +284,15 @@ func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { // Open opens a file from the Node. Implements fs.FS. func (n *Node) Open(name string) (fs.File, error) { - name = strings.TrimPrefix(name, "/") + name = cleanPath(name) + if name == "" { + return &dirFile{path: ".", modTime: time.Now()}, nil + } if file, ok := n.files[name]; ok { return &dataFileReader{file: file}, nil } // Check if it's a directory prefix := name + "/" - if name == "." || name == "" { - prefix = "" - } for p := range n.files { if strings.HasPrefix(p, prefix) { return &dirFile{path: name, modTime: time.Now()}, nil @@ -276,15 +303,15 @@ func (n *Node) Open(name string) (fs.File, error) { // Stat returns file information for the given path. func (n *Node) Stat(name string) (fs.FileInfo, error) { - name = strings.TrimPrefix(name, "/") + name = cleanPath(name) + if name == "" { + return &dirInfo{name: ".", modTime: time.Now()}, nil + } if file, ok := n.files[name]; ok { return file.Stat() } // Check if it's a directory prefix := name + "/" - if name == "." 
|| name == "" { - prefix = "" - } for p := range n.files { if strings.HasPrefix(p, prefix) { return &dirInfo{name: path.Base(name), modTime: time.Now()}, nil @@ -295,14 +322,15 @@ func (n *Node) Stat(name string) (fs.FileInfo, error) { // ReadDir reads and returns all directory entries for the named directory. func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { - name = strings.TrimPrefix(name, "/") - if name == "." { - name = "" - } + name = cleanPath(name) // Disallow reading a file as a directory. - if info, err := n.Stat(name); err == nil && !info.IsDir() { - return nil, &fs.PathError{Op: "readdir", Path: name, Err: fs.ErrInvalid} + if name != "" { + if info, err := n.Stat(name); err != nil { + return nil, err + } else if !info.IsDir() { + return nil, &fs.PathError{Op: "readdir", Path: name, Err: fs.ErrInvalid} + } } entries := []fs.DirEntry{} @@ -347,7 +375,10 @@ func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { // Read retrieves the content of a file as a string. func (n *Node) Read(p string) (string, error) { - p = strings.TrimPrefix(p, "/") + p = cleanPath(p) + if p == "" { + return "", fs.ErrNotExist + } f, ok := n.files[p] if !ok { return "", fs.ErrNotExist @@ -357,6 +388,10 @@ func (n *Node) Read(p string) (string, error) { // Write saves the given content to a file, overwriting it if it exists. func (n *Node) Write(p, content string) error { + p = cleanPath(p) + if p == "" { + return coreerr.E("node.Write", "path is required", os.ErrInvalid) + } n.AddData(p, []byte(content)) return nil } @@ -391,7 +426,10 @@ func (n *Node) Exists(p string) bool { // IsFile checks if a path exists and is a regular file. func (n *Node) IsFile(p string) bool { - p = strings.TrimPrefix(p, "/") + p = cleanPath(p) + if p == "" { + return false + } _, ok := n.files[p] return ok } @@ -409,7 +447,10 @@ func (n *Node) IsDir(p string) bool { // Delete removes a single file. 
func (n *Node) Delete(p string) error { - p = strings.TrimPrefix(p, "/") + p = cleanPath(p) + if p == "" { + return coreerr.E("node.Delete", "path is required", os.ErrInvalid) + } if _, ok := n.files[p]; ok { delete(n.files, p) return nil @@ -419,7 +460,10 @@ func (n *Node) Delete(p string) error { // DeleteAll removes a file or directory and all children. func (n *Node) DeleteAll(p string) error { - p = strings.TrimPrefix(p, "/") + p = cleanPath(p) + if p == "" { + return coreerr.E("node.DeleteAll", "path is required", os.ErrInvalid) + } found := false if _, ok := n.files[p]; ok { @@ -443,8 +487,11 @@ func (n *Node) DeleteAll(p string) error { // Rename moves a file from oldPath to newPath. func (n *Node) Rename(oldPath, newPath string) error { - oldPath = strings.TrimPrefix(oldPath, "/") - newPath = strings.TrimPrefix(newPath, "/") + oldPath = cleanPath(oldPath) + newPath = cleanPath(newPath) + if oldPath == "" || newPath == "" { + return coreerr.E("node.Rename", "both old and new paths are required", os.ErrInvalid) + } f, ok := n.files[oldPath] if !ok { @@ -459,8 +506,8 @@ func (n *Node) Rename(oldPath, newPath string) error { // List returns directory entries for the given path. func (n *Node) List(p string) ([]fs.DirEntry, error) { - p = strings.TrimPrefix(p, "/") - if p == "" || p == "." { + p = cleanPath(p) + if p == "" { return n.ReadDir(".") } return n.ReadDir(p) @@ -471,14 +518,20 @@ func (n *Node) List(p string) ([]fs.DirEntry, error) { // Create creates or truncates the named file, returning a WriteCloser. // Content is committed to the Node on Close. func (n *Node) Create(p string) (goio.WriteCloser, error) { - p = strings.TrimPrefix(p, "/") + p = cleanPath(p) + if p == "" { + return nil, coreerr.E("node.Create", "path is required", os.ErrInvalid) + } return &nodeWriter{node: n, path: p}, nil } // Append opens the named file for appending, creating it if needed. // Content is committed to the Node on Close. 
func (n *Node) Append(p string) (goio.WriteCloser, error) { - p = strings.TrimPrefix(p, "/") + p = cleanPath(p) + if p == "" { + return nil, coreerr.E("node.Append", "path is required", os.ErrInvalid) + } var existing []byte if f, ok := n.files[p]; ok { existing = make([]byte, len(f.content)) @@ -493,7 +546,10 @@ func (n *Node) ReadStream(p string) (goio.ReadCloser, error) { if err != nil { return nil, err } - return goio.NopCloser(f), nil + if info, err := f.Stat(); err == nil && info.IsDir() { + return nil, &fs.PathError{Op: "readstream", Path: p, Err: fs.ErrInvalid} + } + return f, nil } // WriteStream returns a WriteCloser for the file content. @@ -501,6 +557,18 @@ func (n *Node) WriteStream(p string) (goio.WriteCloser, error) { return n.Create(p) } +func cleanPath(p string) string { + p = strings.TrimPrefix(p, "/") + if p == "" { + return "" + } + clean := path.Clean("/" + p) + if clean == "/" { + return "" + } + return strings.TrimPrefix(clean, "/") +} + // ---------- Internal types ---------- // nodeWriter buffers writes and commits them to the Node on Close. 
diff --git a/node/node_test.go b/node/node_test.go index 1ecbe3f..6ef7060 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -71,6 +71,13 @@ func TestAddData_Ugly(t *testing.T) { _, ok := n.files["hello.txt"] assert.True(t, ok, "leading slash should be trimmed") }) + + t.Run("Traversal", func(t *testing.T) { + n := New() + n.AddData("../escape.txt", []byte("safe")) + _, ok := n.files["escape.txt"] + assert.True(t, ok, "traversal should collapse to a clean path") + }) } // --------------------------------------------------------------------------- @@ -191,6 +198,14 @@ func TestReadFile_Ugly(t *testing.T) { assert.Equal(t, []byte("original"), data2, "ReadFile must return an independent copy") } +func TestWrite_Bad_EmptyPath(t *testing.T) { + n := New() + + err := n.Write("", "content") + require.Error(t, err) + assert.ErrorIs(t, err, os.ErrInvalid) +} + // --------------------------------------------------------------------------- // ReadDir // --------------------------------------------------------------------------- @@ -458,6 +473,29 @@ func TestFromTar_Good(t *testing.T) { assert.True(t, n.Exists("bar/baz.txt"), "bar/baz.txt should exist") } +func TestFromTar_NormalisesPaths_Good(t *testing.T) { + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + + hdr := &tar.Header{ + Name: "../escape.txt", + Mode: 0600, + Size: int64(len("safe")), + Typeflag: tar.TypeReg, + } + require.NoError(t, tw.WriteHeader(hdr)) + _, err := tw.Write([]byte("safe")) + require.NoError(t, err) + require.NoError(t, tw.Close()) + + n, err := FromTar(buf.Bytes()) + require.NoError(t, err) + + got, err := n.Read("escape.txt") + require.NoError(t, err) + assert.Equal(t, "safe", got) +} + func TestFromTar_Bad(t *testing.T) { // Truncated data that cannot be a valid tar. truncated := make([]byte, 100) diff --git a/s3/s3.go b/s3/s3.go index 513e205..45a9bbd 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -1,4 +1,9 @@ // Package s3 provides an S3-backed implementation of the io.Medium interface. 
+// +// Example usage: +// +// m, _ := s3.New("my-bucket", s3.WithClient(client), s3.WithPrefix("uploads")) +// _ = m.Write("notes/todo.txt", "write docs") package s3 import ( @@ -89,6 +94,10 @@ func withAPI(api s3API) Option { } // New creates a new S3 Medium for the given bucket. +// +// Example: +// +// bucket, _ := s3.New("my-bucket", s3.WithClient(awsClient), s3.WithPrefix("uploads/")) func New(bucket string, opts ...Option) (*Medium, error) { if bucket == "" { return nil, coreerr.E("s3.New", "bucket name is required", nil) diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 8bacd44..76b36ab 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -1,6 +1,6 @@ // This file implements the Pre-Obfuscation Layer Protocol with // XChaCha20-Poly1305 encryption. The protocol applies a reversible transformation -// to plaintext BEFORE it reaches CPU encryption routines, providing defense-in-depth +// to plaintext BEFORE it reaches CPU encryption routines, providing defence-in-depth // against side-channel attacks. // // The encryption flow is: @@ -35,7 +35,7 @@ var ( // PreObfuscator applies a reversible transformation to data before encryption. // This ensures that raw plaintext patterns are never sent directly to CPU -// encryption routines, providing defense against side-channel attacks. +// encryption routines, providing defence against side-channel attacks. // // Implementations must be deterministic: given the same entropy, the transformation // must be perfectly reversible: Deobfuscate(Obfuscate(x, e), e) == x @@ -244,6 +244,12 @@ type ChaChaPolySigil struct { // NewChaChaPolySigil creates a new encryption sigil with the given key. // The key must be exactly 32 bytes. 
+// +// Example: +// +// key := make([]byte, 32) +// s, _ := sigil.NewChaChaPolySigil(key) +// ciphertext, _ := s.In([]byte("secret")) func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { if len(key) != 32 { return nil, ErrInvalidKey @@ -260,6 +266,11 @@ func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { } // NewChaChaPolySigilWithObfuscator creates a new encryption sigil with custom obfuscator. +// +// Example: +// +// key := make([]byte, 32) +// s, _ := sigil.NewChaChaPolySigilWithObfuscator(key, &sigil.ShuffleMaskObfuscator{}) func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) { sigil, err := NewChaChaPolySigil(key) if err != nil { diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index 93aa9a2..de0baf9 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -1,4 +1,9 @@ // Package sqlite provides a SQLite-backed implementation of the io.Medium interface. +// +// Example usage: +// +// m, _ := sqlite.New(":memory:") +// _ = m.Write("config/theme", "dark") package sqlite import ( @@ -34,6 +39,11 @@ func WithTable(table string) Option { // New creates a new SQLite Medium at the given database path. // Use ":memory:" for an in-memory database. +// +// Example: +// +// m, _ := sqlite.New(":memory:") +// _ = m.Write("config/theme", "dark") func New(dbPath string, opts ...Option) (*Medium, error) { if dbPath == "" { return nil, coreerr.E("sqlite.New", "database path is required", nil) diff --git a/store/medium.go b/store/medium.go index 31eeae7..8615cf9 100644 --- a/store/medium.go +++ b/store/medium.go @@ -15,11 +15,21 @@ import ( // Paths are mapped as group/key — first segment is the group, // the rest is the key. List("") returns groups as directories, // List("group") returns keys as files. 
+// +// Example usage: +// +// m, _ := store.NewMedium(":memory:") +// _ = m.Write("config/theme", "dark") type Medium struct { s *Store } // NewMedium creates an io.Medium backed by a KV store at the given SQLite path. +// +// Example: +// +// m, _ := store.NewMedium(":memory:") +// _ = m.Write("config/theme", "dark") func NewMedium(dbPath string) (*Medium, error) { s, err := New(dbPath) if err != nil { @@ -46,7 +56,10 @@ func (m *Medium) Close() error { // splitPath splits a medium-style path into group and key. // First segment = group, remainder = key. func splitPath(p string) (group, key string) { - clean := path.Clean(p) + clean := path.Clean("/" + p) + if clean == "/" { + return "", "" + } clean = strings.TrimPrefix(clean, "/") if clean == "" || clean == "." { return "", "" @@ -292,9 +305,14 @@ type kvFileInfo struct { isDir bool } -func (fi *kvFileInfo) Name() string { return fi.name } -func (fi *kvFileInfo) Size() int64 { return fi.size } -func (fi *kvFileInfo) Mode() fs.FileMode { if fi.isDir { return fs.ModeDir | 0755 }; return 0644 } +func (fi *kvFileInfo) Name() string { return fi.name } +func (fi *kvFileInfo) Size() int64 { return fi.size } +func (fi *kvFileInfo) Mode() fs.FileMode { + if fi.isDir { + return fs.ModeDir | 0755 + } + return 0644 +} func (fi *kvFileInfo) ModTime() time.Time { return time.Time{} } func (fi *kvFileInfo) IsDir() bool { return fi.isDir } func (fi *kvFileInfo) Sys() any { return nil } @@ -305,9 +323,14 @@ type kvDirEntry struct { size int64 } -func (de *kvDirEntry) Name() string { return de.name } -func (de *kvDirEntry) IsDir() bool { return de.isDir } -func (de *kvDirEntry) Type() fs.FileMode { if de.isDir { return fs.ModeDir }; return 0 } +func (de *kvDirEntry) Name() string { return de.name } +func (de *kvDirEntry) IsDir() bool { return de.isDir } +func (de *kvDirEntry) Type() fs.FileMode { + if de.isDir { + return fs.ModeDir + } + return 0 +} func (de *kvDirEntry) Info() (fs.FileInfo, error) { return &kvFileInfo{name: 
de.name, size: de.size, isDir: de.isDir}, nil } diff --git a/store/medium_test.go b/store/medium_test.go index 19722e7..c2ddccb 100644 --- a/store/medium_test.go +++ b/store/medium_test.go @@ -39,6 +39,17 @@ func TestMedium_Read_Bad_NotFound(t *testing.T) { assert.Error(t, err) } +func TestMedium_PathTraversal_Good(t *testing.T) { + m := newTestMedium(t) + + require.NoError(t, m.Write("../config/theme", "dark")) + + val, err := m.Read("config/theme") + require.NoError(t, err) + assert.Equal(t, "dark", val) + assert.True(t, m.Exists("../config")) +} + func TestMedium_IsFile_Good(t *testing.T) { m := newTestMedium(t) _ = m.Write("grp/key", "val") diff --git a/store/store.go b/store/store.go index 10bdce2..e8ea567 100644 --- a/store/store.go +++ b/store/store.go @@ -1,3 +1,10 @@ +// Package store provides a group-namespaced key-value store backed by SQLite. +// +// Example usage: +// +// s, _ := store.New(":memory:") +// _ = s.Set("user", "theme", "dark") +// out, _ := s.Render(`{"theme":"{{ .theme }}"}`, "user") package store import ( @@ -19,6 +26,11 @@ type Store struct { } // New creates a Store at the given SQLite path. Use ":memory:" for tests. +// +// Example: +// +// s, _ := store.New(":memory:") +// _ = s.Set("user", "wallet", "iz...") func New(dbPath string) (*Store, error) { db, err := sql.Open("sqlite", dbPath) if err != nil { @@ -122,6 +134,11 @@ func (s *Store) GetAll(group string) (map[string]string, error) { } // Render loads all key-value pairs from a group and renders a Go template. 
+// +// Example: +// +// s.Set("user", "pool", "pool.lthn.io:3333") +// out, _ := s.Render(`{"pool":"{{ .pool }}"}`, "user") func (s *Store) Render(tmplStr, group string) (string, error) { rows, err := s.db.Query("SELECT key, value FROM kv WHERE grp = ?", group) if err != nil { diff --git a/workspace/service.go b/workspace/service.go index 6c7c87a..61bd6b3 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -1,3 +1,10 @@ +// Package workspace provides encrypted workspace management built on io.Medium. +// +// Example usage: +// +// svc, _ := workspace.New(core.New(), myCryptProvider) +// _ = svc.SwitchWorkspace(id) +// _ = svc.WorkspaceFileSet("secret.txt", "top secret") package workspace import ( @@ -38,6 +45,10 @@ type Service struct { // New creates a new Workspace service instance. // An optional cryptProvider can be passed to supply PGP key generation. +// +// Example: +// +// svc, _ := workspace.New(core.New(), myCryptProvider) func New(c *core.Core, crypt ...cryptProvider) (any, error) { home := workspaceHome() if home == "" { @@ -65,6 +76,10 @@ func New(c *core.Core, crypt ...cryptProvider) (any, error) { // CreateWorkspace creates a new encrypted workspace. // Identifier is hashed (SHA-256) to create the directory name. // A PGP keypair is generated using the password. +// +// Example: +// +// id, _ := svc.CreateWorkspace("alice", "passphrase") func (s *Service) CreateWorkspace(identifier, password string) (string, error) { s.mu.Lock() defer s.mu.Unlock() @@ -103,6 +118,10 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) { } // SwitchWorkspace changes the active workspace. +// +// Example: +// +// _ = svc.SwitchWorkspace(id) func (s *Service) SwitchWorkspace(name string) error { s.mu.Lock() defer s.mu.Unlock() @@ -137,6 +156,10 @@ func (s *Service) activeFilePath(op, filename string) (string, error) { } // WorkspaceFileGet retrieves the content of a file from the active workspace. 
+// +// Example: +// +// got, _ := svc.WorkspaceFileGet("secret.txt") func (s *Service) WorkspaceFileGet(filename string) (string, error) { s.mu.RLock() defer s.mu.RUnlock() @@ -149,6 +172,10 @@ func (s *Service) WorkspaceFileGet(filename string) (string, error) { } // WorkspaceFileSet saves content to a file in the active workspace. +// +// Example: +// +// _ = svc.WorkspaceFileSet("secret.txt", "top secret") func (s *Service) WorkspaceFileSet(filename, content string) error { s.mu.Lock() defer s.mu.Unlock() @@ -198,6 +225,9 @@ func workspaceHome() string { func joinWithinRoot(root string, parts ...string) (string, error) { candidate := core.Path(append([]string{root}, parts...)...) sep := core.Env("DS") + if sep == "" { + sep = string(os.PathSeparator) + } if candidate == root || strings.HasPrefix(candidate, root+sep) { return candidate, nil } -- 2.45.3