From e9aebf757b25100f42890f042d5d17ad8b1e1726 Mon Sep 17 00:00:00 2001 From: Snider Date: Sun, 22 Mar 2026 01:28:41 +0000 Subject: [PATCH 1/6] chore(deps): migrate go-log import to dappco.re/go/core/log v0.1.0 Update go.mod require lines from forge.lthn.ai to dappco.re paths where vanity redirects exist. Bump core to v0.5.0 and log to v0.1.0. Borg and go-crypt remain at forge.lthn.ai until their vanity paths are published. Co-Authored-By: Virgil --- CLAUDE.md | 16 ++++++++-------- CONSUMERS.md | 34 ++++++++++++++++++++++++++++++++++ datanode/client.go | 2 +- go.mod | 5 +++-- go.sum | 10 ++++++---- io.go | 2 +- local/client.go | 2 +- s3/s3.go | 2 +- sigil/sigils.go | 2 +- sqlite/sqlite.go | 2 +- store/medium.go | 2 +- store/store.go | 2 +- workspace/service.go | 2 +- 13 files changed, 60 insertions(+), 23 deletions(-) create mode 100644 CONSUMERS.md diff --git a/CLAUDE.md b/CLAUDE.md index 9a27f7a..009e835 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -4,7 +4,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co ## Project Overview -`forge.lthn.ai/core/go-io` is the **mandatory I/O abstraction layer** for the CoreGO ecosystem. All data access — files, configs, journals, state — MUST go through the `io.Medium` interface. Never use raw `os`, `filepath`, or `ioutil` calls. +`dappco.re/go/core/io` is the **mandatory I/O abstraction layer** for the CoreGO ecosystem. All data access — files, configs, journals, state — MUST go through the `io.Medium` interface. Never use raw `os`, `filepath`, or `ioutil` calls. 
### The Premise @@ -103,13 +103,13 @@ Sigils can be created by name via `sigil.NewSigil("hex")`, `sigil.NewSigil("sha2 Standard `io` is always aliased to avoid collision with this package: ```go goio "io" -coreerr "forge.lthn.ai/core/go-log" -coreio "forge.lthn.ai/core/go-io" // when imported from subpackages +coreerr "dappco.re/go/core/log" +coreio "dappco.re/go/core/io" // when imported from subpackages ``` ### Error Handling -All errors use `coreerr.E("pkg.Method", "description", wrappedErr)` from `forge.lthn.ai/core/go-log`. Follow this pattern in new code. +All errors use `coreerr.E("pkg.Method", "description", wrappedErr)` from `dappco.re/go/core/log`. Follow this pattern in new code. ### Compile-Time Interface Checks @@ -117,10 +117,10 @@ Backend packages use `var _ io.Medium = (*Medium)(nil)` to verify interface comp ## Dependencies -- `forge.lthn.ai/Snider/Borg` — DataNode container -- `forge.lthn.ai/core/go-log` — error handling (`coreerr.E()`) -- `forge.lthn.ai/core/go` — Core DI (workspace service only) -- `forge.lthn.ai/core/go-crypt` — PGP key generation (workspace service only) +- `forge.lthn.ai/Snider/Borg` — DataNode container (pending dappco.re migration) +- `dappco.re/go/core/log` — error handling (`coreerr.E()`) +- `dappco.re/go/core` — Core DI (workspace service only) +- `forge.lthn.ai/core/go-crypt` — PGP key generation (workspace service only, pending dappco.re migration) - `aws-sdk-go-v2` — S3 backend - `golang.org/x/crypto` — XChaCha20-Poly1305, BLAKE2, SHA-3 (sigil package) - `modernc.org/sqlite` — SQLite backends (pure Go, no CGO) diff --git a/CONSUMERS.md b/CONSUMERS.md new file mode 100644 index 0000000..cd0c00f --- /dev/null +++ b/CONSUMERS.md @@ -0,0 +1,34 @@ +# Consumers of go-io + +These modules import `dappco.re/go/core/io`: + +- agent +- core +- config +- go-ai +- go-ansible +- go-blockchain +- go-build +- go-cache +- go-container +- go-crypt +- go-forge +- go-html +- go-infra +- go-ml +- go-mlx +- go-netops +- go-p2p +- 
go-process +- go-rag +- go-ratelimit +- go-scm +- gui +- ide +- lint +- mcp +- php +- ts +- LEM + +**Breaking change risk: 28 consumers.** diff --git a/datanode/client.go b/datanode/client.go index fcfe524..6a9a4a0 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -17,7 +17,7 @@ import ( "sync" "time" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" "forge.lthn.ai/Snider/Borg/pkg/datanode" ) diff --git a/go.mod b/go.mod index 6c77560..b204ef9 100644 --- a/go.mod +++ b/go.mod @@ -3,10 +3,10 @@ module dappco.re/go/core/io go 1.26.0 require ( - dappco.re/go/core v0.4.7 + dappco.re/go/core v0.5.0 + dappco.re/go/core/log v0.1.0 forge.lthn.ai/Snider/Borg v0.3.1 forge.lthn.ai/core/go-crypt v0.1.6 - forge.lthn.ai/core/go-log v0.0.4 github.com/aws/aws-sdk-go-v2 v1.41.4 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 github.com/stretchr/testify v1.11.1 @@ -16,6 +16,7 @@ require ( require ( forge.lthn.ai/core/go v0.3.0 // indirect + forge.lthn.ai/core/go-log v0.0.1 // indirect github.com/ProtonMail/go-crypto v1.4.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 // indirect diff --git a/go.sum b/go.sum index d25c96d..5ebee45 100644 --- a/go.sum +++ b/go.sum @@ -1,13 +1,15 @@ -dappco.re/go/core v0.4.7 h1:KmIA/2lo6rl1NMtLrKqCWfMlUqpDZYH3q0/d10dTtGA= -dappco.re/go/core v0.4.7/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= +dappco.re/go/core v0.5.0 h1:P5DJoaCiK5Q+af5UiTdWqUIW4W4qYKzpgGK50thm21U= +dappco.re/go/core v0.5.0/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= +dappco.re/go/core/log v0.1.0 h1:pa71Vq2TD2aoEUQWFKwNcaJ3GBY8HbaNGqtE688Unyc= +dappco.re/go/core/log v0.1.0/go.mod h1:Nkqb8gsXhZAO8VLpx7B8i1iAmohhzqA20b9Zr8VUcJs= forge.lthn.ai/Snider/Borg v0.3.1 h1:gfC1ZTpLoZai07oOWJiVeQ8+qJYK8A795tgVGJHbVL8= forge.lthn.ai/Snider/Borg v0.3.1/go.mod h1:Z7DJD0yHXsxSyM7Mjl6/g4gH1NBsIz44Bf5AFlV76Wg= forge.lthn.ai/core/go v0.3.0 
h1:mOG97ApMprwx9Ked62FdWVwXTGSF6JO6m0DrVpoH2Q4= forge.lthn.ai/core/go v0.3.0/go.mod h1:gE6c8h+PJ2287qNhVUJ5SOe1kopEwHEquvinstpuyJc= forge.lthn.ai/core/go-crypt v0.1.6 h1:jB7L/28S1NR+91u3GcOYuKfBLzPhhBUY1fRe6WkGVns= forge.lthn.ai/core/go-crypt v0.1.6/go.mod h1:4VZAGqxlbadhSB66sJkdj54/HSJ+bSxVgwWK5kMMYDo= -forge.lthn.ai/core/go-log v0.0.4 h1:KTuCEPgFmuM8KJfnyQ8vPOU1Jg654W74h8IJvfQMfv0= -forge.lthn.ai/core/go-log v0.0.4/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw= +forge.lthn.ai/core/go-log v0.0.1 h1:x/E6EfF9vixzqiLHQOl2KT25HyBcMc9qiBkomqVlpPg= +forge.lthn.ai/core/go-log v0.0.1/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw= github.com/ProtonMail/go-crypto v1.4.0 h1:Zq/pbM3F5DFgJiMouxEdSVY44MVoQNEKp5d5QxIQceQ= github.com/ProtonMail/go-crypto v1.4.0/go.mod h1:e1OaTyu5SYVrO9gKOEhTc+5UcXtTUa+P3uLudwcgPqo= github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k= diff --git a/io.go b/io.go index c31592f..21d95a0 100644 --- a/io.go +++ b/io.go @@ -8,7 +8,7 @@ import ( "strings" "time" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" "dappco.re/go/core/io/local" ) diff --git a/local/client.go b/local/client.go index 22fd769..61b89eb 100644 --- a/local/client.go +++ b/local/client.go @@ -11,7 +11,7 @@ import ( "strings" "time" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" ) // Medium is a local filesystem storage backend. diff --git a/s3/s3.go b/s3/s3.go index 86443fe..cd121de 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -15,7 +15,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" ) // s3API is the subset of the S3 client API used by this package. 
diff --git a/sigil/sigils.go b/sigil/sigils.go index 2baffff..54bfb74 100644 --- a/sigil/sigils.go +++ b/sigil/sigils.go @@ -13,7 +13,7 @@ import ( "encoding/json" "io" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" "golang.org/x/crypto/blake2b" "golang.org/x/crypto/blake2s" "golang.org/x/crypto/md4" diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index fe1642d..93aa9a2 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -11,7 +11,7 @@ import ( "strings" "time" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" _ "modernc.org/sqlite" // Pure Go SQLite driver ) diff --git a/store/medium.go b/store/medium.go index 4363ca4..31eeae7 100644 --- a/store/medium.go +++ b/store/medium.go @@ -8,7 +8,7 @@ import ( "strings" "time" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" ) // Medium wraps a Store to satisfy the io.Medium interface. diff --git a/store/store.go b/store/store.go index 7c531fb..10bdce2 100644 --- a/store/store.go +++ b/store/store.go @@ -6,7 +6,7 @@ import ( "strings" "text/template" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" _ "modernc.org/sqlite" ) diff --git a/workspace/service.go b/workspace/service.go index c1978a1..570e6d0 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -8,7 +8,7 @@ import ( "sync" core "dappco.re/go/core" - coreerr "forge.lthn.ai/core/go-log" + coreerr "dappco.re/go/core/log" "dappco.re/go/core/io" ) -- 2.45.3 From 2acfc3d54803e657f0ddd0f12508431c2d22a632 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 23 Mar 2026 07:26:09 +0000 Subject: [PATCH 2/6] fix(io): address audit issue 4 findings Co-Authored-By: Virgil --- datanode/client.go | 144 ++++++++++++++++++--------- datanode/client_test.go | 88 +++++++++++++++++ go.mod | 6 +- go.sum | 12 +-- io.go | 10 +- local/client.go | 203 +++++++++++++++++++++++++++++++------- local/client_test.go | 28 ++++++ s3/s3.go | 33 ++++++- s3/s3_test.go | 53 ++++++++-- 
workspace/service.go | 69 ++++++++++--- workspace/service_test.go | 90 ++++++++++++----- 11 files changed, 590 insertions(+), 146 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index fcfe524..c4f09ad 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -17,14 +17,26 @@ import ( "sync" "time" + borgdatanode "forge.lthn.ai/Snider/Borg/pkg/datanode" coreerr "forge.lthn.ai/core/go-log" - "forge.lthn.ai/Snider/Borg/pkg/datanode" +) + +var ( + dataNodeWalkDir = func(fsys fs.FS, root string, fn fs.WalkDirFunc) error { + return fs.WalkDir(fsys, root, fn) + } + dataNodeOpen = func(dn *borgdatanode.DataNode, name string) (fs.File, error) { + return dn.Open(name) + } + dataNodeReadAll = func(r goio.Reader) ([]byte, error) { + return goio.ReadAll(r) + } ) // Medium is an in-memory storage backend backed by a Borg DataNode. // All paths are relative (no leading slash). Thread-safe via RWMutex. type Medium struct { - dn *datanode.DataNode + dn *borgdatanode.DataNode dirs map[string]bool // explicit directory tracking mu sync.RWMutex } @@ -32,14 +44,14 @@ type Medium struct { // New creates a new empty DataNode Medium. func New() *Medium { return &Medium{ - dn: datanode.New(), + dn: borgdatanode.New(), dirs: make(map[string]bool), } } // FromTar creates a Medium from a tarball, restoring all files. func FromTar(data []byte) (*Medium, error) { - dn, err := datanode.FromTar(data) + dn, err := borgdatanode.FromTar(data) if err != nil { return nil, coreerr.E("datanode.FromTar", "failed to restore", err) } @@ -63,7 +75,7 @@ func (m *Medium) Snapshot() ([]byte, error) { // Restore replaces the filesystem contents from a tarball. func (m *Medium) Restore(data []byte) error { - dn, err := datanode.FromTar(data) + dn, err := borgdatanode.FromTar(data) if err != nil { return coreerr.E("datanode.Restore", "tar failed", err) } @@ -76,7 +88,7 @@ func (m *Medium) Restore(data []byte) error { // DataNode returns the underlying Borg DataNode. 
// Use this to wrap the filesystem in a TIM container. -func (m *Medium) DataNode() *datanode.DataNode { +func (m *Medium) DataNode() *borgdatanode.DataNode { m.mu.RLock() defer m.mu.RUnlock() return m.dn @@ -195,7 +207,11 @@ func (m *Medium) Delete(p string) error { // Check explicit dirs if m.dirs[p] { // Check if dir is empty - if m.hasPrefixLocked(p + "/") { + hasChildren, err := m.hasPrefixLocked(p + "/") + if err != nil { + return coreerr.E("datanode.Delete", "failed to inspect directory: "+p, err) + } + if hasChildren { return coreerr.E("datanode.Delete", "directory not empty: "+p, os.ErrExist) } delete(m.dirs, p) @@ -205,7 +221,11 @@ func (m *Medium) Delete(p string) error { } if info.IsDir() { - if m.hasPrefixLocked(p + "/") { + hasChildren, err := m.hasPrefixLocked(p + "/") + if err != nil { + return coreerr.E("datanode.Delete", "failed to inspect directory: "+p, err) + } + if hasChildren { return coreerr.E("datanode.Delete", "directory not empty: "+p, os.ErrExist) } delete(m.dirs, p) @@ -213,7 +233,9 @@ func (m *Medium) Delete(p string) error { } // Remove the file by creating a new DataNode without it - m.removeFileLocked(p) + if err := m.removeFileLocked(p); err != nil { + return coreerr.E("datanode.Delete", "failed to delete file: "+p, err) + } return nil } @@ -232,15 +254,22 @@ func (m *Medium) DeleteAll(p string) error { // Check if p itself is a file info, err := m.dn.Stat(p) if err == nil && !info.IsDir() { - m.removeFileLocked(p) + if err := m.removeFileLocked(p); err != nil { + return coreerr.E("datanode.DeleteAll", "failed to delete file: "+p, err) + } found = true } // Remove all files under prefix - entries, _ := m.collectAllLocked() + entries, err := m.collectAllLocked() + if err != nil { + return coreerr.E("datanode.DeleteAll", "failed to inspect tree: "+p, err) + } for _, name := range entries { if name == p || strings.HasPrefix(name, prefix) { - m.removeFileLocked(name) + if err := m.removeFileLocked(name); err != nil { + return 
coreerr.E("datanode.DeleteAll", "failed to delete file: "+name, err) + } found = true } } @@ -274,18 +303,15 @@ func (m *Medium) Rename(oldPath, newPath string) error { if !info.IsDir() { // Read old, write new, delete old - f, err := m.dn.Open(oldPath) + data, err := m.readFileLocked(oldPath) if err != nil { - return coreerr.E("datanode.Rename", "open failed: "+oldPath, err) - } - data, err := goio.ReadAll(f) - f.Close() - if err != nil { - return coreerr.E("datanode.Rename", "read failed: "+oldPath, err) + return coreerr.E("datanode.Rename", "failed to read source file: "+oldPath, err) } m.dn.AddData(newPath, data) m.ensureDirsLocked(path.Dir(newPath)) - m.removeFileLocked(oldPath) + if err := m.removeFileLocked(oldPath); err != nil { + return coreerr.E("datanode.Rename", "failed to remove source file: "+oldPath, err) + } return nil } @@ -293,18 +319,21 @@ func (m *Medium) Rename(oldPath, newPath string) error { oldPrefix := oldPath + "/" newPrefix := newPath + "/" - entries, _ := m.collectAllLocked() + entries, err := m.collectAllLocked() + if err != nil { + return coreerr.E("datanode.Rename", "failed to inspect tree: "+oldPath, err) + } for _, name := range entries { if strings.HasPrefix(name, oldPrefix) { newName := newPrefix + strings.TrimPrefix(name, oldPrefix) - f, err := m.dn.Open(name) + data, err := m.readFileLocked(name) if err != nil { - continue + return coreerr.E("datanode.Rename", "failed to read source file: "+name, err) } - data, _ := goio.ReadAll(f) - f.Close() m.dn.AddData(newName, data) - m.removeFileLocked(name) + if err := m.removeFileLocked(name); err != nil { + return coreerr.E("datanode.Rename", "failed to remove source file: "+name, err) + } } } @@ -416,10 +445,13 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { // Read existing content var existing []byte m.mu.RLock() - f, err := m.dn.Open(p) - if err == nil { - existing, _ = goio.ReadAll(f) - f.Close() + if m.IsFile(p) { + data, err := m.readFileLocked(p) + if err != nil 
{ + m.mu.RUnlock() + return nil, coreerr.E("datanode.Append", "failed to read existing content: "+p, err) + } + existing = data } m.mu.RUnlock() @@ -475,27 +507,30 @@ func (m *Medium) IsDir(p string) bool { // --- internal helpers --- // hasPrefixLocked checks if any file path starts with prefix. Caller holds lock. -func (m *Medium) hasPrefixLocked(prefix string) bool { - entries, _ := m.collectAllLocked() +func (m *Medium) hasPrefixLocked(prefix string) (bool, error) { + entries, err := m.collectAllLocked() + if err != nil { + return false, err + } for _, name := range entries { if strings.HasPrefix(name, prefix) { - return true + return true, nil } } for d := range m.dirs { if strings.HasPrefix(d, prefix) { - return true + return true, nil } } - return false + return false, nil } // collectAllLocked returns all file paths in the DataNode. Caller holds lock. func (m *Medium) collectAllLocked() ([]string, error) { var names []string - err := fs.WalkDir(m.dn, ".", func(p string, d fs.DirEntry, err error) error { + err := dataNodeWalkDir(m.dn, ".", func(p string, d fs.DirEntry, err error) error { if err != nil { - return nil + return err } if !d.IsDir() { names = append(names, p) @@ -505,28 +540,43 @@ func (m *Medium) collectAllLocked() ([]string, error) { return names, err } +func (m *Medium) readFileLocked(name string) ([]byte, error) { + f, err := dataNodeOpen(m.dn, name) + if err != nil { + return nil, err + } + data, readErr := dataNodeReadAll(f) + closeErr := f.Close() + if readErr != nil { + return nil, readErr + } + if closeErr != nil { + return nil, closeErr + } + return data, nil +} + // removeFileLocked removes a single file by rebuilding the DataNode. // This is necessary because Borg's DataNode doesn't expose a Remove method. // Caller must hold m.mu write lock. 
-func (m *Medium) removeFileLocked(target string) { - entries, _ := m.collectAllLocked() - newDN := datanode.New() +func (m *Medium) removeFileLocked(target string) error { + entries, err := m.collectAllLocked() + if err != nil { + return err + } + newDN := borgdatanode.New() for _, name := range entries { if name == target { continue } - f, err := m.dn.Open(name) + data, err := m.readFileLocked(name) if err != nil { - continue - } - data, err := goio.ReadAll(f) - f.Close() - if err != nil { - continue + return err } newDN.AddData(name, data) } m.dn = newDN + return nil } // --- writeCloser buffers writes and flushes to DataNode on Close --- diff --git a/datanode/client_test.go b/datanode/client_test.go index 651d322..8beb6cd 100644 --- a/datanode/client_test.go +++ b/datanode/client_test.go @@ -1,7 +1,9 @@ package datanode import ( + "errors" "io" + "io/fs" "testing" coreio "dappco.re/go/core/io" @@ -102,6 +104,23 @@ func TestDelete_Bad(t *testing.T) { assert.Error(t, m.Delete("dir")) } +func TestDelete_Bad_DirectoryInspectionFailure(t *testing.T) { + m := New() + require.NoError(t, m.Write("dir/file.txt", "content")) + + original := dataNodeWalkDir + dataNodeWalkDir = func(_ fs.FS, _ string, _ fs.WalkDirFunc) error { + return errors.New("walk failed") + } + t.Cleanup(func() { + dataNodeWalkDir = original + }) + + err := m.Delete("dir") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to inspect directory") +} + func TestDeleteAll_Good(t *testing.T) { m := New() @@ -116,6 +135,41 @@ func TestDeleteAll_Good(t *testing.T) { assert.True(t, m.Exists("keep.txt")) } +func TestDeleteAll_Bad_WalkFailure(t *testing.T) { + m := New() + require.NoError(t, m.Write("tree/a.txt", "a")) + + original := dataNodeWalkDir + dataNodeWalkDir = func(_ fs.FS, _ string, _ fs.WalkDirFunc) error { + return errors.New("walk failed") + } + t.Cleanup(func() { + dataNodeWalkDir = original + }) + + err := m.DeleteAll("tree") + require.Error(t, err) + assert.Contains(t, 
err.Error(), "failed to inspect tree") +} + +func TestDelete_Bad_RemoveFailure(t *testing.T) { + m := New() + require.NoError(t, m.Write("keep.txt", "keep")) + require.NoError(t, m.Write("bad.txt", "bad")) + + original := dataNodeReadAll + dataNodeReadAll = func(_ io.Reader) ([]byte, error) { + return nil, errors.New("read failed") + } + t.Cleanup(func() { + dataNodeReadAll = original + }) + + err := m.Delete("bad.txt") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to delete file") +} + func TestRename_Good(t *testing.T) { m := New() @@ -147,6 +201,23 @@ func TestRenameDir_Good(t *testing.T) { assert.Equal(t, "package b", got) } +func TestRenameDir_Bad_ReadFailure(t *testing.T) { + m := New() + require.NoError(t, m.Write("src/a.go", "package a")) + + original := dataNodeReadAll + dataNodeReadAll = func(_ io.Reader) ([]byte, error) { + return nil, errors.New("read failed") + } + t.Cleanup(func() { + dataNodeReadAll = original + }) + + err := m.Rename("src", "dst") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to read source file") +} + func TestList_Good(t *testing.T) { m := New() @@ -230,6 +301,23 @@ func TestCreateAppend_Good(t *testing.T) { assert.Equal(t, "hello world", got) } +func TestAppend_Bad_ReadFailure(t *testing.T) { + m := New() + require.NoError(t, m.Write("new.txt", "hello")) + + original := dataNodeReadAll + dataNodeReadAll = func(_ io.Reader) ([]byte, error) { + return nil, errors.New("read failed") + } + t.Cleanup(func() { + dataNodeReadAll = original + }) + + _, err := m.Append("new.txt") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to read existing content") +} + func TestStreams_Good(t *testing.T) { m := New() diff --git a/go.mod b/go.mod index 6c77560..9135ed8 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,8 @@ module dappco.re/go/core/io go 1.26.0 require ( - dappco.re/go/core v0.4.7 + dappco.re/go/core v0.6.0 forge.lthn.ai/Snider/Borg v0.3.1 - forge.lthn.ai/core/go-crypt v0.1.6 
forge.lthn.ai/core/go-log v0.0.4 github.com/aws/aws-sdk-go-v2 v1.41.4 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 @@ -15,8 +14,6 @@ require ( ) require ( - forge.lthn.ai/core/go v0.3.0 // indirect - github.com/ProtonMail/go-crypto v1.4.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 // indirect @@ -26,7 +23,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20 // indirect github.com/aws/smithy-go v1.24.2 // indirect - github.com/cloudflare/circl v1.6.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/google/uuid v1.6.0 // indirect diff --git a/go.sum b/go.sum index d25c96d..87d11bc 100644 --- a/go.sum +++ b/go.sum @@ -1,15 +1,9 @@ -dappco.re/go/core v0.4.7 h1:KmIA/2lo6rl1NMtLrKqCWfMlUqpDZYH3q0/d10dTtGA= -dappco.re/go/core v0.4.7/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= +dappco.re/go/core v0.6.0 h1:0wmuO/UmCWXxJkxQ6XvVLnqkAuWitbd49PhxjCsplyk= +dappco.re/go/core v0.6.0/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= forge.lthn.ai/Snider/Borg v0.3.1 h1:gfC1ZTpLoZai07oOWJiVeQ8+qJYK8A795tgVGJHbVL8= forge.lthn.ai/Snider/Borg v0.3.1/go.mod h1:Z7DJD0yHXsxSyM7Mjl6/g4gH1NBsIz44Bf5AFlV76Wg= -forge.lthn.ai/core/go v0.3.0 h1:mOG97ApMprwx9Ked62FdWVwXTGSF6JO6m0DrVpoH2Q4= -forge.lthn.ai/core/go v0.3.0/go.mod h1:gE6c8h+PJ2287qNhVUJ5SOe1kopEwHEquvinstpuyJc= -forge.lthn.ai/core/go-crypt v0.1.6 h1:jB7L/28S1NR+91u3GcOYuKfBLzPhhBUY1fRe6WkGVns= -forge.lthn.ai/core/go-crypt v0.1.6/go.mod h1:4VZAGqxlbadhSB66sJkdj54/HSJ+bSxVgwWK5kMMYDo= forge.lthn.ai/core/go-log v0.0.4 h1:KTuCEPgFmuM8KJfnyQ8vPOU1Jg654W74h8IJvfQMfv0= forge.lthn.ai/core/go-log v0.0.4/go.mod 
h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw= -github.com/ProtonMail/go-crypto v1.4.0 h1:Zq/pbM3F5DFgJiMouxEdSVY44MVoQNEKp5d5QxIQceQ= -github.com/ProtonMail/go-crypto v1.4.0/go.mod h1:e1OaTyu5SYVrO9gKOEhTc+5UcXtTUa+P3uLudwcgPqo= github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k= github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 h1:3kGOqnh1pPeddVa/E37XNTaWJ8W6vrbYV9lJEkCnhuY= @@ -32,8 +26,6 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 h1:csi9NLpFZXb9fxY7rS1xVzgPRGMt7 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1/go.mod h1:qXVal5H0ChqXP63t6jze5LmFalc7+ZE7wOdLtZ0LCP0= github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= -github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= -github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/io.go b/io.go index c31592f..25fe801 100644 --- a/io.go +++ b/io.go @@ -4,12 +4,12 @@ import ( goio "io" "io/fs" "os" - "path/filepath" "strings" "time" - coreerr "forge.lthn.ai/core/go-log" + core "dappco.re/go/core" "dappco.re/go/core/io/local" + coreerr "forge.lthn.ai/core/go-log" ) // Medium defines the standard interface for a storage backend. 
@@ -361,7 +361,7 @@ func (m *MockMedium) Open(path string) (fs.File, error) { return nil, coreerr.E("io.MockMedium.Open", "file not found: "+path, os.ErrNotExist) } return &MockFile{ - name: filepath.Base(path), + name: core.PathBase(path), content: []byte(content), }, nil } @@ -556,7 +556,7 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { modTime = time.Now() } return FileInfo{ - name: filepath.Base(path), + name: core.PathBase(path), size: int64(len(content)), mode: 0644, modTime: modTime, @@ -564,7 +564,7 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { } if _, ok := m.Dirs[path]; ok { return FileInfo{ - name: filepath.Base(path), + name: core.PathBase(path), isDir: true, mode: fs.ModeDir | 0755, }, nil diff --git a/local/client.go b/local/client.go index 22fd769..d4aaafc 100644 --- a/local/client.go +++ b/local/client.go @@ -6,11 +6,10 @@ import ( goio "io" "io/fs" "os" - "os/user" - "path/filepath" "strings" "time" + core "dappco.re/go/core" coreerr "forge.lthn.ai/core/go-log" ) @@ -22,20 +21,163 @@ type Medium struct { // New creates a new local Medium rooted at the given directory. // Pass "/" for full filesystem access, or a specific path to sandbox. func New(root string) (*Medium, error) { - abs, err := filepath.Abs(root) - if err != nil { - return nil, err - } + abs := absolutePath(root) // Resolve symlinks so sandbox checks compare like-for-like. // On macOS, /var is a symlink to /private/var — without this, - // EvalSymlinks on child paths resolves to /private/var/... while + // resolving child paths resolves to /private/var/... while // root stays /var/..., causing false sandbox escape detections. 
- if resolved, err := filepath.EvalSymlinks(abs); err == nil { + if resolved, err := resolveSymlinksPath(abs); err == nil { abs = resolved } return &Medium{root: abs}, nil } +func dirSeparator() string { + if sep := core.Env("DS"); sep != "" { + return sep + } + return string(os.PathSeparator) +} + +func normalisePath(p string) string { + sep := dirSeparator() + if sep == "/" { + return strings.ReplaceAll(p, "\\", sep) + } + return strings.ReplaceAll(p, "/", sep) +} + +func currentWorkingDir() string { + if cwd, err := os.Getwd(); err == nil && cwd != "" { + return cwd + } + if cwd := core.Env("DIR_CWD"); cwd != "" { + return cwd + } + return "." +} + +func absolutePath(p string) string { + p = normalisePath(p) + if core.PathIsAbs(p) { + return core.Path(p) + } + return core.Path(currentWorkingDir(), p) +} + +func cleanSandboxPath(p string) string { + return core.Path(dirSeparator() + normalisePath(p)) +} + +func splitPathParts(p string) []string { + trimmed := strings.TrimPrefix(p, dirSeparator()) + if trimmed == "" { + return nil + } + var parts []string + for _, part := range strings.Split(trimmed, dirSeparator()) { + if part == "" { + continue + } + parts = append(parts, part) + } + return parts +} + +func resolveSymlinksPath(p string) (string, error) { + return resolveSymlinksRecursive(absolutePath(p), map[string]struct{}{}) +} + +func resolveSymlinksRecursive(p string, seen map[string]struct{}) (string, error) { + p = core.Path(p) + if p == dirSeparator() { + return p, nil + } + + current := dirSeparator() + for _, part := range splitPathParts(p) { + next := core.Path(current, part) + info, err := os.Lstat(next) + if err != nil { + if os.IsNotExist(err) { + current = next + continue + } + return "", err + } + if info.Mode()&os.ModeSymlink == 0 { + current = next + continue + } + + target, err := os.Readlink(next) + if err != nil { + return "", err + } + target = normalisePath(target) + if !core.PathIsAbs(target) { + target = core.Path(current, target) + } 
else { + target = core.Path(target) + } + if _, ok := seen[target]; ok { + return "", coreerr.E("local.resolveSymlinksPath", "symlink cycle: "+target, os.ErrInvalid) + } + seen[target] = struct{}{} + resolved, err := resolveSymlinksRecursive(target, seen) + delete(seen, target) + if err != nil { + return "", err + } + current = resolved + } + + return current, nil +} + +func isWithinRoot(root, target string) bool { + root = core.Path(root) + target = core.Path(target) + if root == dirSeparator() { + return true + } + return target == root || strings.HasPrefix(target, root+dirSeparator()) +} + +func canonicalPath(p string) string { + if p == "" { + return "" + } + if resolved, err := resolveSymlinksPath(p); err == nil { + return resolved + } + return absolutePath(p) +} + +func isProtectedPath(full string) bool { + full = canonicalPath(full) + protected := map[string]struct{}{ + canonicalPath(dirSeparator()): {}, + } + for _, home := range []string{core.Env("HOME"), core.Env("DIR_HOME")} { + if home == "" { + continue + } + protected[canonicalPath(home)] = struct{}{} + } + _, ok := protected[full] + return ok +} + +func logSandboxEscape(root, path, attempted string) { + username := core.Env("USER") + if username == "" { + username = "unknown" + } + fmt.Fprintf(os.Stderr, "[%s] SECURITY sandbox escape detected root=%s path=%s attempted=%s user=%s\n", + time.Now().Format(time.RFC3339), root, path, attempted, username) +} + // path sanitises and returns the full path. // Absolute paths are sandboxed under root (unless root is "/"). func (m *Medium) path(p string) string { @@ -46,41 +188,36 @@ func (m *Medium) path(p string) string { // If the path is relative and the medium is rooted at "/", // treat it as relative to the current working directory. // This makes io.Local behave more like the standard 'os' package. 
- if m.root == "/" && !filepath.IsAbs(p) { - cwd, _ := os.Getwd() - return filepath.Join(cwd, p) + if m.root == dirSeparator() && !core.PathIsAbs(normalisePath(p)) { + return core.Path(currentWorkingDir(), normalisePath(p)) } - // Use filepath.Clean with a leading slash to resolve all .. and . internally + // Use a cleaned absolute path to resolve all .. and . internally // before joining with the root. This is a standard way to sandbox paths. - clean := filepath.Clean("/" + p) + clean := cleanSandboxPath(p) // If root is "/", allow absolute paths through - if m.root == "/" { + if m.root == dirSeparator() { return clean } // Join cleaned relative path with root - return filepath.Join(m.root, clean) + return core.Path(m.root, strings.TrimPrefix(clean, dirSeparator())) } // validatePath ensures the path is within the sandbox, following symlinks if they exist. func (m *Medium) validatePath(p string) (string, error) { - if m.root == "/" { + if m.root == dirSeparator() { return m.path(p), nil } // Split the cleaned path into components - parts := strings.Split(filepath.Clean("/"+p), string(os.PathSeparator)) + parts := splitPathParts(cleanSandboxPath(p)) current := m.root for _, part := range parts { - if part == "" { - continue - } - - next := filepath.Join(current, part) - realNext, err := filepath.EvalSymlinks(next) + next := core.Path(current, part) + realNext, err := resolveSymlinksPath(next) if err != nil { if os.IsNotExist(err) { // Part doesn't exist, we can't follow symlinks anymore. 
@@ -93,15 +230,9 @@ func (m *Medium) validatePath(p string) (string, error) { } // Verify the resolved part is still within the root - rel, err := filepath.Rel(m.root, realNext) - if err != nil || strings.HasPrefix(rel, "..") { + if !isWithinRoot(m.root, realNext) { // Security event: sandbox escape attempt - username := "unknown" - if u, err := user.Current(); err == nil { - username = u.Username - } - fmt.Fprintf(os.Stderr, "[%s] SECURITY sandbox escape detected root=%s path=%s attempted=%s user=%s\n", - time.Now().Format(time.RFC3339), m.root, p, realNext, username) + logSandboxEscape(m.root, p, realNext) return "", os.ErrPermission // Path escapes sandbox } current = realNext @@ -137,7 +268,7 @@ func (m *Medium) WriteMode(p, content string, mode os.FileMode) error { if err != nil { return err } - if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil { + if err := os.MkdirAll(core.PathDir(full), 0755); err != nil { return err } return os.WriteFile(full, []byte(content), mode) @@ -221,7 +352,7 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) { if err != nil { return nil, err } - if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil { + if err := os.MkdirAll(core.PathDir(full), 0755); err != nil { return nil, err } return os.Create(full) @@ -233,7 +364,7 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { if err != nil { return nil, err } - if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil { + if err := os.MkdirAll(core.PathDir(full), 0755); err != nil { return nil, err } return os.OpenFile(full, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) @@ -265,7 +396,7 @@ func (m *Medium) Delete(p string) error { if err != nil { return err } - if full == "/" || full == os.Getenv("HOME") { + if isProtectedPath(full) { return coreerr.E("local.Delete", "refusing to delete protected path: "+full, nil) } return os.Remove(full) @@ -277,7 +408,7 @@ func (m *Medium) DeleteAll(p string) error { if err != nil { return err } - if full == 
"/" || full == os.Getenv("HOME") { + if isProtectedPath(full) { return coreerr.E("local.DeleteAll", "refusing to delete protected path: "+full, nil) } return os.RemoveAll(full) diff --git a/local/client_test.go b/local/client_test.go index f3deb15..120ee0e 100644 --- a/local/client_test.go +++ b/local/client_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNew(t *testing.T) { @@ -170,6 +171,33 @@ func TestDeleteAll(t *testing.T) { assert.False(t, m.Exists("dir")) } +func TestDelete_ProtectedHomeViaSymlinkEnv(t *testing.T) { + realHome := t.TempDir() + linkParent := t.TempDir() + homeLink := filepath.Join(linkParent, "home-link") + require.NoError(t, os.Symlink(realHome, homeLink)) + t.Setenv("HOME", homeLink) + + m, err := New("/") + require.NoError(t, err) + + err = m.Delete(realHome) + require.Error(t, err) + assert.DirExists(t, realHome) +} + +func TestDeleteAll_ProtectedHomeViaEnv(t *testing.T) { + tempHome := t.TempDir() + t.Setenv("HOME", tempHome) + + m, err := New("/") + require.NoError(t, err) + + err = m.DeleteAll(tempHome) + require.Error(t, err) + assert.DirExists(t, tempHome) +} + func TestRename(t *testing.T) { root := t.TempDir() m, _ := New(root) diff --git a/s3/s3.go b/s3/s3.go index 86443fe..3ca4ab9 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -37,6 +37,29 @@ type Medium struct { prefix string } +func deleteObjectsError(prefix string, errs []types.Error) error { + if len(errs) == 0 { + return nil + } + details := make([]string, 0, len(errs)) + for _, item := range errs { + key := aws.ToString(item.Key) + code := aws.ToString(item.Code) + msg := aws.ToString(item.Message) + switch { + case code != "" && msg != "": + details = append(details, key+": "+code+" "+msg) + case code != "": + details = append(details, key+": "+code) + case msg != "": + details = append(details, key+": "+msg) + default: + details = append(details, key) + } + } + return coreerr.E("s3.DeleteAll", 
"partial delete failed under "+prefix+": "+strings.Join(details, "; "), nil) +} + // Option configures a Medium. type Option func(*Medium) @@ -197,10 +220,13 @@ func (m *Medium) DeleteAll(p string) error { } // First, try deleting the exact key - _, _ = m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ + _, err := m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(key), }) + if err != nil { + return coreerr.E("s3.DeleteAll", "failed to delete object: "+key, err) + } // Then delete all objects under the prefix prefix := key @@ -230,13 +256,16 @@ func (m *Medium) DeleteAll(p string) error { objects[i] = types.ObjectIdentifier{Key: obj.Key} } - _, err = m.client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{ + deleteOut, err := m.client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{ Bucket: aws.String(m.bucket), Delete: &types.Delete{Objects: objects, Quiet: aws.Bool(true)}, }) if err != nil { return coreerr.E("s3.DeleteAll", "failed to delete objects", err) } + if err := deleteObjectsError(prefix, deleteOut.Errors); err != nil { + return err + } if listOut.IsTruncated != nil && *listOut.IsTruncated { continuationToken = listOut.NextContinuationToken diff --git a/s3/s3_test.go b/s3/s3_test.go index 1f226e7..a81efff 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -3,6 +3,7 @@ package s3 import ( "bytes" "context" + "errors" "fmt" goio "io" "io/fs" @@ -21,15 +22,19 @@ import ( // mockS3 is an in-memory mock implementing the s3API interface. 
type mockS3 struct { - mu sync.RWMutex - objects map[string][]byte - mtimes map[string]time.Time + mu sync.RWMutex + objects map[string][]byte + mtimes map[string]time.Time + deleteObjectErrors map[string]error + deleteObjectsErrs map[string]types.Error } func newMockS3() *mockS3 { return &mockS3{ - objects: make(map[string][]byte), - mtimes: make(map[string]time.Time), + objects: make(map[string][]byte), + mtimes: make(map[string]time.Time), + deleteObjectErrors: make(map[string]error), + deleteObjectsErrs: make(map[string]types.Error), } } @@ -69,6 +74,9 @@ func (m *mockS3) DeleteObject(_ context.Context, params *s3.DeleteObjectInput, _ defer m.mu.Unlock() key := aws.ToString(params.Key) + if err, ok := m.deleteObjectErrors[key]; ok { + return nil, err + } delete(m.objects, key) delete(m.mtimes, key) return &s3.DeleteObjectOutput{}, nil @@ -78,12 +86,17 @@ func (m *mockS3) DeleteObjects(_ context.Context, params *s3.DeleteObjectsInput, m.mu.Lock() defer m.mu.Unlock() + var outErrs []types.Error for _, obj := range params.Delete.Objects { key := aws.ToString(obj.Key) + if errInfo, ok := m.deleteObjectsErrs[key]; ok { + outErrs = append(outErrs, errInfo) + continue + } delete(m.objects, key) delete(m.mtimes, key) } - return &s3.DeleteObjectsOutput{}, nil + return &s3.DeleteObjectsOutput{Errors: outErrs}, nil } func (m *mockS3) HeadObject(_ context.Context, params *s3.HeadObjectInput, _ ...func(*s3.Options)) (*s3.HeadObjectOutput, error) { @@ -350,6 +363,34 @@ func TestDeleteAll_Bad_EmptyPath(t *testing.T) { assert.Error(t, err) } +func TestDeleteAll_Bad_DeleteObjectError(t *testing.T) { + m, mock := newTestMedium(t) + mock.deleteObjectErrors["dir"] = errors.New("boom") + + err := m.DeleteAll("dir") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to delete object: dir") +} + +func TestDeleteAll_Bad_PartialDelete(t *testing.T) { + m, mock := newTestMedium(t) + + require.NoError(t, m.Write("dir/file1.txt", "a")) + require.NoError(t, 
m.Write("dir/file2.txt", "b")) + mock.deleteObjectsErrs["dir/file2.txt"] = types.Error{ + Key: aws.String("dir/file2.txt"), + Code: aws.String("AccessDenied"), + Message: aws.String("blocked"), + } + + err := m.DeleteAll("dir") + require.Error(t, err) + assert.Contains(t, err.Error(), "partial delete failed") + assert.Contains(t, err.Error(), "dir/file2.txt") + assert.True(t, m.IsFile("dir/file2.txt")) + assert.False(t, m.IsFile("dir/file1.txt")) +} + func TestRename_Good(t *testing.T) { m, _ := newTestMedium(t) diff --git a/workspace/service.go b/workspace/service.go index c1978a1..9e81764 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -4,7 +4,7 @@ import ( "crypto/sha256" "encoding/hex" "os" - "path/filepath" + "strings" "sync" core "dappco.re/go/core" @@ -39,11 +39,11 @@ type Service struct { // New creates a new Workspace service instance. // An optional cryptProvider can be passed to supply PGP key generation. func New(c *core.Core, crypt ...cryptProvider) (any, error) { - home, err := os.UserHomeDir() - if err != nil { - return nil, coreerr.E("workspace.New", "failed to determine home directory", err) + home := workspaceHome() + if home == "" { + return nil, coreerr.E("workspace.New", "failed to determine home directory", os.ErrNotExist) } - rootPath := filepath.Join(home, ".core", "workspaces") + rootPath := core.Path(home, ".core", "workspaces") s := &Service{ core: c, @@ -75,14 +75,17 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) { hash := sha256.Sum256([]byte(identifier)) wsID := hex.EncodeToString(hash[:]) - wsPath := filepath.Join(s.rootPath, wsID) + wsPath, err := s.workspacePath("workspace.CreateWorkspace", wsID) + if err != nil { + return "", err + } if s.medium.Exists(wsPath) { return "", coreerr.E("workspace.CreateWorkspace", "workspace already exists", nil) } for _, d := range []string{"config", "log", "data", "files", "keys"} { - if err := s.medium.EnsureDir(filepath.Join(wsPath, d)); err != 
nil { + if err := s.medium.EnsureDir(core.Path(wsPath, d)); err != nil { return "", coreerr.E("workspace.CreateWorkspace", "failed to create directory: "+d, err) } } @@ -92,7 +95,7 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) { return "", coreerr.E("workspace.CreateWorkspace", "failed to generate keys", err) } - if err := s.medium.WriteMode(filepath.Join(wsPath, "keys", "private.key"), privKey, 0600); err != nil { + if err := s.medium.WriteMode(core.Path(wsPath, "keys", "private.key"), privKey, 0600); err != nil { return "", coreerr.E("workspace.CreateWorkspace", "failed to save private key", err) } @@ -104,12 +107,15 @@ func (s *Service) SwitchWorkspace(name string) error { s.mu.Lock() defer s.mu.Unlock() - wsPath := filepath.Join(s.rootPath, name) + wsPath, err := s.workspacePath("workspace.SwitchWorkspace", name) + if err != nil { + return err + } if !s.medium.IsDir(wsPath) { return coreerr.E("workspace.SwitchWorkspace", "workspace not found: "+name, nil) } - s.activeWorkspace = name + s.activeWorkspace = core.PathBase(wsPath) return nil } @@ -119,7 +125,15 @@ func (s *Service) activeFilePath(op, filename string) (string, error) { if s.activeWorkspace == "" { return "", coreerr.E(op, "no active workspace", nil) } - return filepath.Join(s.rootPath, s.activeWorkspace, "files", filename), nil + filesRoot := core.Path(s.rootPath, s.activeWorkspace, "files") + path, err := joinWithinRoot(filesRoot, filename) + if err != nil { + return "", coreerr.E(op, "file path escapes workspace files", os.ErrPermission) + } + if path == filesRoot { + return "", coreerr.E(op, "filename is required", os.ErrInvalid) + } + return path, nil } // WorkspaceFileGet retrieves the content of a file from the active workspace. 
@@ -171,5 +185,38 @@ func (s *Service) HandleIPCEvents(c *core.Core, msg core.Message) core.Result { return core.Result{OK: true} } +func workspaceHome() string { + if home := core.Env("CORE_HOME"); home != "" { + return home + } + if home := core.Env("HOME"); home != "" { + return home + } + return core.Env("DIR_HOME") +} + +func joinWithinRoot(root string, parts ...string) (string, error) { + candidate := core.Path(append([]string{root}, parts...)...) + sep := core.Env("DS") + if candidate == root || strings.HasPrefix(candidate, root+sep) { + return candidate, nil + } + return "", os.ErrPermission +} + +func (s *Service) workspacePath(op, name string) (string, error) { + if name == "" { + return "", coreerr.E(op, "workspace name is required", os.ErrInvalid) + } + path, err := joinWithinRoot(s.rootPath, name) + if err != nil { + return "", coreerr.E(op, "workspace path escapes root", err) + } + if core.PathDir(path) != s.rootPath { + return "", coreerr.E(op, "invalid workspace name: "+name, os.ErrPermission) + } + return path, nil +} + // Ensure Service implements Workspace. 
var _ Workspace = (*Service)(nil) diff --git a/workspace/service_test.go b/workspace/service_test.go index 1cab667..1fc7abe 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -1,48 +1,90 @@ package workspace import ( - "path/filepath" + "os" "testing" core "dappco.re/go/core" - "forge.lthn.ai/core/go-crypt/crypt/openpgp" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestWorkspace(t *testing.T) { - c := core.New() - pgpSvc, err := openpgp.New(nil) - assert.NoError(t, err) +type stubCrypt struct { + key string + err error +} + +func (s stubCrypt) CreateKeyPair(_, _ string) (string, error) { + if s.err != nil { + return "", s.err + } + return s.key, nil +} + +func newTestService(t *testing.T) (*Service, string) { + t.Helper() tempHome := t.TempDir() t.Setenv("HOME", tempHome) - svc, err := New(c, pgpSvc.(cryptProvider)) - assert.NoError(t, err) - s := svc.(*Service) + svc, err := New(core.New(), stubCrypt{key: "private-key"}) + require.NoError(t, err) + return svc.(*Service), tempHome +} + +func TestWorkspace(t *testing.T) { + s, tempHome := newTestService(t) - // Test CreateWorkspace id, err := s.CreateWorkspace("test-user", "pass123") - assert.NoError(t, err) + require.NoError(t, err) assert.NotEmpty(t, id) - wsPath := filepath.Join(tempHome, ".core", "workspaces", id) + wsPath := core.Path(tempHome, ".core", "workspaces", id) assert.DirExists(t, wsPath) - assert.DirExists(t, filepath.Join(wsPath, "keys")) - assert.FileExists(t, filepath.Join(wsPath, "keys", "private.key")) + assert.DirExists(t, core.Path(wsPath, "keys")) + assert.FileExists(t, core.Path(wsPath, "keys", "private.key")) - // Test SwitchWorkspace err = s.SwitchWorkspace(id) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, id, s.activeWorkspace) - // Test File operations - filename := "secret.txt" - content := "top secret info" - err = s.WorkspaceFileSet(filename, content) - assert.NoError(t, err) + err = 
s.WorkspaceFileSet("secret.txt", "top secret info") + require.NoError(t, err) - got, err := s.WorkspaceFileGet(filename) - assert.NoError(t, err) - assert.Equal(t, content, got) + got, err := s.WorkspaceFileGet("secret.txt") + require.NoError(t, err) + assert.Equal(t, "top secret info", got) +} + +func TestSwitchWorkspace_TraversalBlocked(t *testing.T) { + s, tempHome := newTestService(t) + + outside := core.Path(tempHome, ".core", "escaped") + require.NoError(t, os.MkdirAll(outside, 0755)) + + err := s.SwitchWorkspace("../escaped") + require.Error(t, err) + assert.Empty(t, s.activeWorkspace) +} + +func TestWorkspaceFileSet_TraversalBlocked(t *testing.T) { + s, tempHome := newTestService(t) + + id, err := s.CreateWorkspace("test-user", "pass123") + require.NoError(t, err) + require.NoError(t, s.SwitchWorkspace(id)) + + keyPath := core.Path(tempHome, ".core", "workspaces", id, "keys", "private.key") + before, err := os.ReadFile(keyPath) + require.NoError(t, err) + + err = s.WorkspaceFileSet("../keys/private.key", "hijack") + require.Error(t, err) + + after, err := os.ReadFile(keyPath) + require.NoError(t, err) + assert.Equal(t, string(before), string(after)) + + _, err = s.WorkspaceFileGet("../keys/private.key") + require.Error(t, err) } -- 2.45.3 From 19c43392291effbf9bee7f0b7c9004bc01255b62 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 23 Mar 2026 13:20:41 +0000 Subject: [PATCH 3/6] docs: add security attack vector mapping --- docs/security-attack-vector-mapping.md | 168 +++++++++++++++++++++++++ 1 file changed, 168 insertions(+) create mode 100644 docs/security-attack-vector-mapping.md diff --git a/docs/security-attack-vector-mapping.md b/docs/security-attack-vector-mapping.md new file mode 100644 index 0000000..4db0e57 --- /dev/null +++ b/docs/security-attack-vector-mapping.md @@ -0,0 +1,168 @@ +# Security Attack Vector Mapping + +`CODEX.md` was not present under `/workspace`, so this mapping follows [`CLAUDE.md`](/workspace/CLAUDE.md) and the current source 
tree. + +Scope: +- Included: exported functions and methods that accept caller-controlled data or parse external payloads, plus public writer types returned from those APIs. +- Omitted: zero-argument accessors and teardown helpers such as `Close`, `Snapshot`, `Store`, `AsMedium`, `DataNode`, and `fs.FileInfo` getters because they are not ingress points. + +Notes: +- `local` is the in-repo filesystem containment layer. Its protection depends on `validatePath`, but most mutating operations still have a post-validation TOCTOU window before the final `os.*` call. +- `workspace.Service` uses `io.Local` rooted at `/`, so its path joins are not sandboxed by this repository. +- `datanode.FromTar` and `datanode.Restore` inherit Borg `datanode.FromTar` behavior from `forge.lthn.ai/Snider/Borg` v0.3.1: it trims leading `/`, preserves symlink tar entries, and does not reject `..` segments or large archives. + +## `io` Facade And `MockMedium` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `io.NewSandboxed` | `io.go:126` | Caller-supplied sandbox root | Delegates to `local.New(root)` and stores the resolved root in a `local.Medium` | `local.New` absolutizes and best-effort resolves root symlinks; no policy check on `/` or broad roots | Misconfiguration can disable containment entirely by choosing `/` or an overly broad root | +| `io.Read` | `io.go:133` | Caller path plus chosen backend | Direct `m.Read(path)` dispatch | No facade-level validation | Inherits backend read, enumeration, and path-handling attack surface | +| `io.Write` | `io.go:138` | Caller path/content plus chosen backend | Direct `m.Write(path, content)` dispatch | No facade-level validation | Inherits backend overwrite, creation, and storage-exhaustion attack surface | +| `io.ReadStream` | `io.go:143` | Caller path plus chosen backend | Direct `m.ReadStream(path)` dispatch | No facade-level validation | 
Inherits backend streaming-read surface and any unbounded downstream consumption risk | +| `io.WriteStream` | `io.go:148` | Caller path plus chosen backend; later streamed bytes from returned writer | Direct `m.WriteStream(path)` dispatch | No facade-level validation | Inherits backend streaming-write surface, including arbitrary object/file creation and unbounded buffering/disk growth | +| `io.EnsureDir` | `io.go:153` | Caller path plus chosen backend | Direct `m.EnsureDir(path)` dispatch | No facade-level validation | Inherits backend directory-creation semantics; on no-op backends this can create false assumptions about isolation | +| `io.IsFile` | `io.go:158` | Caller path plus chosen backend | Direct `m.IsFile(path)` dispatch | No facade-level validation | Inherits backend existence-oracle and metadata-disclosure surface | +| `io.Copy` | `io.go:163` | Caller-selected source/destination mediums and paths | `src.Read(srcPath)` loads full content into memory, then `dst.Write(dstPath, content)` | Validation delegated to both backends | Large source content can exhaust memory; can bridge trust zones and copy attacker-controlled names/content across backends | +| `(*io.MockMedium).Read`, `FileGet`, `Open`, `ReadStream`, `List`, `Stat`, `Exists`, `IsFile`, `IsDir` | `io.go:193`, `225`, `358`, `388`, `443`, `552`, `576`, `219`, `587` | Caller path | Direct map lookup and prefix scans in in-memory maps | No normalization, auth, or path restrictions | If reused outside tests, it becomes a trivial key/value disclosure and enumeration surface | +| `(*io.MockMedium).Write`, `WriteMode`, `FileSet`, `EnsureDir` | `io.go:202`, `208`, `230`, `213` | Caller path/content/mode | Direct map writes; `WriteMode` ignores `mode` | No validation; permissions are ignored | Arbitrary overwrite/creation of entries and silent permission-policy bypass | +| `(*io.MockMedium).Create`, `Append`, `WriteStream`, `(*io.MockWriteCloser).Write` | `io.go:370`, `378`, `393`, `431` | Caller path; 
streamed caller bytes | Buffers bytes in memory until `Close`, then commits to `Files[path]` | No validation or size limits | Memory exhaustion and arbitrary entry overwrite if used as anything other than a test double | +| `(*io.MockMedium).Delete`, `DeleteAll`, `Rename` | `io.go:235`, `263`, `299` | Caller path(s) | Direct map mutation and prefix scans | No normalization or authorization | Arbitrary delete/rename of entries; prefix-based operations can remove more than a caller expects | + +## `local` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `local.New` | `local/client.go:24` | Caller-supplied root path | `filepath.Abs`, optional `filepath.EvalSymlinks`, stored as `Medium.root` | Absolutizes root and resolves root symlink when possible | Passing `/` creates unsandboxed host filesystem access; broad roots widen blast radius | +| `(*local.Medium).Read`, `FileGet` | `local/client.go:114`, `300` | Caller path | `validatePath` then `os.ReadFile` | `validatePath` cleans path, walks symlinks component-by-component, and blocks resolved escapes from `root` | Arbitrary read of anything reachable inside the sandbox; TOCTOU symlink swap remains possible after validation and before the final read | +| `(*local.Medium).Open`, `ReadStream` | `local/client.go:210`, `248` | Caller path | `validatePath` then `os.Open`; `ReadStream` delegates to `Open` | Same `validatePath` containment check | Same read/disclosure surface as `Read`, plus a validated path can still be swapped before `os.Open` | +| `(*local.Medium).List`, `Stat`, `Exists`, `IsFile`, `IsDir` | `local/client.go:192`, `201`, `182`, `169`, `156` | Caller path | `validatePath` then `os.ReadDir` or `os.Stat` | Same `validatePath` containment check | Metadata enumeration for any path inside the sandbox; TOCTOU can still skew the checked object before the final syscall | +| `(*local.Medium).Write`, `FileSet` | 
`local/client.go:129`, `305` | Caller path/content | Delegates to `WriteMode(..., 0644)` | Path containment only | Arbitrary overwrite inside the sandbox; default `0644` can expose secrets if higher layers use it for sensitive data | +| `(*local.Medium).WriteMode` | `local/client.go:135` | Caller path/content/mode | `validatePath`, `os.MkdirAll`, `os.WriteFile` | Path containment only; caller controls file mode | Arbitrary file write inside the sandbox; caller can choose overly broad modes; TOCTOU after validation can retarget the write | +| `(*local.Medium).Create`, `WriteStream`, `Append` | `local/client.go:219`, `258`, `231` | Caller path; later bytes written through the returned `*os.File` | `validatePath`, `os.MkdirAll`, `os.Create` or `os.OpenFile(..., O_APPEND)` | Path containment only | Arbitrary truncate/append within the sandbox, unbounded disk growth, and the same post-validation race window | +| `(*local.Medium).EnsureDir` | `local/client.go:147` | Caller path | `validatePath` then `os.MkdirAll` | Path containment only | Arbitrary directory creation inside the sandbox; TOCTOU race can still redirect the mkdir target | +| `(*local.Medium).Delete` | `local/client.go:263` | Caller path | `validatePath` then `os.Remove` | Path containment; explicit guard blocks `/` and `$HOME` | Arbitrary file or empty-dir deletion inside the sandbox; guard does not protect other critical paths if root is too broad; TOCTOU applies | +| `(*local.Medium).DeleteAll` | `local/client.go:275` | Caller path | `validatePath` then `os.RemoveAll` | Path containment; explicit guard blocks `/` and `$HOME` | Recursive delete of any sandboxed subtree; if the medium root is broad, the blast radius is broad too | +| `(*local.Medium).Rename` | `local/client.go:287` | Caller old/new paths | `validatePath` on both sides, then `os.Rename` | Path containment on both paths | Arbitrary move/overwrite inside the sandbox; attacker-controlled rename targets can be swapped after validation | + +## 
`sqlite` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `sqlite.WithTable` | `sqlite/sqlite.go:29` | Caller-supplied table name option | Stored on `Medium.table` and concatenated into every SQL statement | No quoting or identifier validation | SQL injection or malformed SQL if an attacker can choose the table name | +| `sqlite.New` | `sqlite/sqlite.go:37` | Caller DB path/URI and options | `sql.Open("sqlite", dbPath)`, `PRAGMA`, `CREATE TABLE` using concatenated table name | Rejects empty `dbPath`; no table-name validation | Arbitrary SQLite file/URI selection and inherited SQL injection risk from `WithTable` | +| `(*sqlite.Medium).Read`, `FileGet`, `Open`, `ReadStream` | `sqlite/sqlite.go:94`, `172`, `455`, `521` | Caller path | `cleanPath` then parameterized `SELECT`; `Open`/`ReadStream` materialize the whole BLOB in memory | Leading-slash `path.Clean` collapses traversal and rejects empty/root keys; path value is parameterized, table name is not | Arbitrary logical-key read, existence disclosure, canonicalization collisions such as `../x -> x`, and memory exhaustion on large BLOBs | +| `(*sqlite.Medium).Write`, `FileSet` | `sqlite/sqlite.go:118`, `177` | Caller path/content | `cleanPath` then parameterized upsert | Same path normalization; table name still concatenated | Arbitrary logical-key overwrite and unbounded DB growth; different raw paths can alias to the same normalized key | +| `(*sqlite.Medium).Create`, `WriteStream`, `Append`, `(*sqlite.sqliteWriteCloser).Write` | `sqlite/sqlite.go:487`, `546`, `499`, `654` | Caller path; streamed caller bytes | `cleanPath`, optional preload of existing BLOB, in-memory buffering, then upsert on `Close` | Non-empty normalized key only | Memory exhaustion from buffering and append preloads; arbitrary overwrite/append of normalized keys | +| `(*sqlite.Medium).EnsureDir` | `sqlite/sqlite.go:136` | Caller path | 
`cleanPath` then inserts a directory marker row | Root becomes a no-op; other paths are normalized only | Arbitrary logical directory creation and aliasing through normalized names | +| `(*sqlite.Medium).List`, `Stat`, `Exists`, `IsFile`, `IsDir` | `sqlite/sqlite.go:349`, `424`, `551`, `155`, `569` | Caller path | `cleanPath` then parameterized listing/count/stat queries | Same normalized key handling; table name still concatenated | Namespace enumeration and metadata disclosure; canonicalization collisions can hide the caller's original path spelling | +| `(*sqlite.Medium).Delete` | `sqlite/sqlite.go:182` | Caller path | `cleanPath`, directory-child count, then `DELETE` | Rejects empty/root path and non-empty dirs | Arbitrary logical-key deletion | +| `(*sqlite.Medium).DeleteAll` | `sqlite/sqlite.go:227` | Caller path | `cleanPath` then `DELETE WHERE path = ? OR path LIKE ?` | Rejects empty/root path | Bulk deletion of any logical subtree | +| `(*sqlite.Medium).Rename` | `sqlite/sqlite.go:251` | Caller old/new paths | `cleanPath` both paths, then transactional copy/delete of entry and children | Requires non-empty normalized source and destination | Arbitrary move/overwrite of logical subtrees; normalized-path aliasing can redirect or collapse entries | + +## `s3` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `s3.WithPrefix` | `s3/s3.go:44` | Caller-supplied prefix | Stored on `Medium.prefix` and prepended to every key | Only ensures a trailing `/` when non-empty | Cross-tenant namespace expansion or contraction if untrusted callers can choose the prefix; empty prefix exposes the whole bucket | +| `s3.WithClient` | `s3/s3.go:55` | Caller-supplied S3 client | Stored as `Medium.client` and trusted for all I/O | No validation | Malicious or wrapped clients can exfiltrate data, fake results, or bypass expected transport controls | +| `s3.New` | `s3/s3.go:69` | 
Caller bucket name and options | Stores bucket/prefix/client on `Medium` | Rejects empty bucket and missing client only | Redirecting operations to attacker-chosen buckets or prefixes if config is not trusted | +| `(*s3.Medium).EnsureDir` | `s3/s3.go:144` | Caller path (ignored) | No-op | Input is ignored entirely | Semantic mismatch: callers may believe a directory boundary now exists when S3 still has only object keys | +| `(*s3.Medium).Read`, `FileGet`, `Open` | `s3/s3.go:103`, `166`, `388` | Caller path | `key(p)` then `GetObject`; `Read`/`Open` read the whole body into memory | Leading-slash `path.Clean` keeps the key under `prefix`; rejects empty key | Arbitrary read inside the configured bucket/prefix, canonicalization collisions, and memory exhaustion on large objects | +| `(*s3.Medium).ReadStream` | `s3/s3.go:464` | Caller path | `key(p)` then `GetObject`, returning the raw response body | Same normalized key handling; no size/content checks | Delivers arbitrary remote object bodies to downstream consumers without integrity, type, or size enforcement | +| `(*s3.Medium).Write`, `FileSet` | `s3/s3.go:126`, `171` | Caller path/content | `key(p)` then `PutObject` | Same normalized key handling | Arbitrary object overwrite or creation within the configured prefix | +| `(*s3.Medium).Create`, `WriteStream`, `Append`, `(*s3.s3WriteCloser).Write` | `s3/s3.go:427`, `481`, `440`, `609` | Caller path; streamed caller bytes | `key(p)`, optional preload of existing object for append, in-memory buffer, then `PutObject` on `Close` | Non-empty normalized key only | Memory exhaustion from buffering and append preloads; arbitrary overwrite/append of objects under the prefix | +| `(*s3.Medium).List`, `Stat`, `Exists`, `IsFile`, `IsDir` | `s3/s3.go:282`, `355`, `486`, `149`, `518` | Caller path | `key(p)` then `ListObjectsV2` or `HeadObject` | Normalized key stays under `prefix`; no authz or tenancy checks beyond config | Namespace enumeration and metadata disclosure across 
any objects reachable by the configured prefix | +| `(*s3.Medium).Delete` | `s3/s3.go:176` | Caller path | `key(p)` then `DeleteObject` | Non-empty normalized key only | Arbitrary object deletion inside the configured prefix | +| `(*s3.Medium).DeleteAll` | `s3/s3.go:193` | Caller path | `key(p)`, then exact delete plus prefix-based `ListObjectsV2` and batched `DeleteObjects` | Non-empty normalized key only | Bulk deletion of every object under a caller-chosen logical subtree | +| `(*s3.Medium).Rename` | `s3/s3.go:252` | Caller old/new paths | `key(p)` on both paths, then `CopyObject` followed by `DeleteObject` | Non-empty normalized keys only | Arbitrary move/overwrite of objects within the configured prefix; special characters in `oldPath` can also make `CopySource` handling fragile | + +## `store` + +### `store.Store` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `store.New` | `store/store.go:22` | Caller DB path/URI | `sql.Open("sqlite", dbPath)`, `PRAGMA`, schema creation | No validation beyond driver errors | Arbitrary SQLite file/URI selection if configuration is attacker-controlled | +| `(*store.Store).Get` | `store/store.go:49` | Caller group/key | Parameterized `SELECT value FROM kv WHERE grp = ? 
AND key = ?` | Uses placeholders; no group/key policy | Arbitrary secret/config disclosure for any reachable group/key | +| `(*store.Store).Set` | `store/store.go:62` | Caller group/key/value | Parameterized upsert into `kv` | Uses placeholders; no group/key policy | Arbitrary overwrite or creation of stored values | +| `(*store.Store).Delete`, `DeleteGroup` | `store/store.go:75`, `94` | Caller group and optional key | Parameterized `DELETE` statements | Uses placeholders; no authorization or namespace policy | Single-key or whole-group deletion | +| `(*store.Store).Count`, `GetAll` | `store/store.go:84`, `103` | Caller group | Parameterized count or full scan of the group | Uses placeholders; no access control | Group enumeration and bulk disclosure of every key/value in a group | +| `(*store.Store).Render` | `store/store.go:125` | Caller template string and group name | Loads all `group` values into a map, then `template.Parse` and `template.Execute` | No template allowlist or output escaping; template funcs are default-only | Template-driven exfiltration of all values in the chosen group; downstream output injection if rendered text is later used in HTML, shell, or config sinks | + +### `store.Medium` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `store.NewMedium` | `store/medium.go:23` | Caller DB path/URI | Delegates to `store.New(dbPath)` | No extra validation | Same arbitrary-DB selection risk as `store.New` | +| `(*store.Medium).EnsureDir` | `store/medium.go:80` | Caller path (ignored) | No-op | Input is ignored | Semantic mismatch: callers may assume they created a boundary when the store still treats group creation as implicit | +| `(*store.Medium).Read`, `FileGet`, `Open`, `ReadStream` | `store/medium.go:62`, `95`, `214`, `246` | Caller medium path | `splitPath` then `Store.Get`; `Open`/`ReadStream` materialize value bytes or a string reader | 
`path.Clean`, strip leading `/`, require `group/key`; does not forbid odd group names like `..` | Arbitrary logical-key disclosure and group/key aliasing if higher layers treat raw paths as identity | +| `(*store.Medium).Write`, `FileSet` | `store/medium.go:71`, `100` | Caller path/content | `splitPath` then `Store.Set` | Same `group/key` check only | Arbitrary overwrite of any reachable group/key | +| `(*store.Medium).Create`, `WriteStream`, `Append`, `(*store.kvWriteCloser).Write` | `store/medium.go:227`, `259`, `236`, `343` | Caller path; streamed caller bytes | `splitPath`, optional preload of existing value for append, in-memory buffer, then `Store.Set` on `Close` | Requires `group/key`; no size limit | Memory exhaustion and arbitrary value overwrite/append | +| `(*store.Medium).Delete` | `store/medium.go:105` | Caller path | `splitPath`; group-only paths call `Count`, group/key paths call `Store.Delete` | Rejects empty path; refuses non-empty group deletes | Arbitrary single-key deletion and group-existence probing | +| `(*store.Medium).DeleteAll` | `store/medium.go:124` | Caller path | `splitPath`; group-only paths call `DeleteGroup`, group/key paths call `Delete` | Rejects empty path | Whole-group deletion or single-key deletion | +| `(*store.Medium).Rename` | `store/medium.go:136` | Caller old/new paths | `splitPath`, `Store.Get`, `Store.Set`, `Store.Delete` | Requires both paths to include `group/key` | Arbitrary cross-group data movement and destination overwrite | +| `(*store.Medium).List` | `store/medium.go:154` | Caller path | Empty path lists groups; group path loads all keys via `GetAll` | `splitPath` only; no auth | Group and key enumeration; value lengths leak through returned file info sizes | +| `(*store.Medium).Stat`, `Exists`, `IsFile`, `IsDir` | `store/medium.go:191`, `264`, `85`, `278` | Caller path | `splitPath`, then `Count` or `Get` | Same `splitPath` behavior | Existence oracle and metadata disclosure for groups and keys | + +## `node` + 
+| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `node.AddData` | `node/node.go:40` | Caller file name and content | Stores `name` as a map key and `content` as in-memory bytes | Strips a leading `/`; ignores empty names and trailing `/`; does not clean `.` or `..` | Path-confusion payloads such as `../x` or `./x` persist verbatim and can later become traversal gadgets when copied out or tarred | +| `node.FromTar`, `(*node.Node).LoadTar` | `node/node.go:84`, `93` | Caller-supplied tar archive bytes | `archive/tar` reader, `io.ReadAll` per regular file, then `newFiles[name] = ...` | Trims a leading `/`; ignores empty names and directory entries; no `path.Clean`, no `..` rejection, no size limits | Tar-slip-style names survive in memory and can be exported later; huge or duplicate entries can exhaust memory or overwrite earlier entries | +| `(*node.Node).Read`, `FileGet`, `ReadFile`, `Open`, `ReadStream` | `node/node.go:349`, `370`, `187`, `259`, `491` | Caller path/name | Direct map lookup or directory inference; `Read` and `ReadFile` copy/convert content to memory | Only strips a leading `/` | Arbitrary access to weird literal names and confusion if callers assume canonical path handling | +| `(*node.Node).Write`, `WriteMode`, `FileSet` | `node/node.go:359`, `365`, `375` | Caller path/content/mode | Delegates to `AddData`; `WriteMode` ignores `mode` | Same minimal trimming as `AddData` | Arbitrary overwrite of any key, including attacker-planted `../` names; false sense of permission control | +| `(*node.Node).Create`, `WriteStream`, `Append`, `(*node.nodeWriter).Write` | `node/node.go:473`, `500`, `480`, `513` | Caller path; streamed caller bytes | Buffer bytes in memory and commit them as a map entry on `Close` | Only strips a leading `/`; no size limit | Memory exhaustion and creation of path-confusion payloads that can escape on later export | +| 
`(*node.Node).Delete`, `DeleteAll`, `Rename` | `node/node.go:411`, `421`, `445` | Caller path(s) | Direct map mutation keyed by caller-supplied names | Only strips a leading `/` | Arbitrary delete/rename of any key, including `../`-style names; no directory-safe rename logic | +| `(*node.Node).Stat`, `List`, `ReadDir`, `Exists`, `IsFile`, `IsDir` | `node/node.go:278`, `461`, `297`, `387`, `393`, `400` | Caller path/name | Directory inference from map keys and `fs` adapter methods | Only strips a leading `/` | Namespace enumeration and ambiguity around equivalent-looking path spellings | +| `(*node.Node).WalkNode`, `Walk` | `node/node.go:128`, `145` | Caller root path, callback, filters | `fs.WalkDir` over the in-memory tree | No root normalization beyond whatever `Node` already does | Attackers who can plant names can force callback traversal over weird paths; `SkipErrors` can suppress unexpected failures | +| `(*node.Node).CopyFile` | `node/node.go:200` | Caller source key, destination host path, permissions | Reads node content and calls `os.WriteFile(dst, ...)` directly | Only checks that `src` exists and is not a directory | Arbitrary host filesystem write to a caller-chosen `dst` path | +| `(*node.Node).CopyTo` | `node/node.go:218` | Caller target medium, source path, destination path | Reads node entries and calls `target.Write(destPath or destPath/rel, content)` | Only checks that the source exists | Stored `../`-style node keys can propagate into destination paths, enabling traversal or overwrite depending on the target backend | +| `(*node.Node).EnsureDir` | `node/node.go:380` | Caller path (ignored) | No-op | Input is ignored | Semantic mismatch: callers may assume a directory boundary was created when directories remain implicit | + +## `datanode` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `datanode.FromTar`, `(*datanode.Medium).Restore` | 
`datanode/client.go:41`, `65` | Caller-supplied tar archive bytes | Delegates to Borg `datanode.FromTar(data)` and replaces the in-memory filesystem | Wrapper adds no checks; inherited Borg behaviour trims leading `/` only and accepts symlink tar entries | Archive bombs, preserved symlink entries, and `../`-style names can be restored into the in-memory tree | +| `(*datanode.Medium).Read`, `FileGet`, `Open`, `ReadStream` | `datanode/client.go:97`, `175`, `394`, `429` | Caller path | `clean(p)` then `dn.Open`/`dn.Stat`; `Read` loads the full file into memory | `clean` strips a leading `/` and runs `path.Clean`, but it does not sandbox `..` at the start of the path | Arbitrary logical-key reads, including odd names such as `../x`; full reads can exhaust memory on large files | +| `(*datanode.Medium).Write`, `WriteMode`, `FileSet` | `datanode/client.go:123`, `138`, `179` | Caller path/content/mode | `clean(p)`, then `dn.AddData` and explicit parent-dir tracking | Rejects empty path only; `WriteMode` ignores `mode` | Arbitrary overwrite/creation of logical entries, including `../`-style names; canonicalisation can also collapse some raw paths onto the same key | +| `(*datanode.Medium).Create`, `WriteStream`, `Append`, `(*datanode.writeCloser).Write` | `datanode/client.go:402`, `441`, `410`, `540` | Caller path; streamed caller bytes | `clean(p)`, optional preload of existing data for append, in-memory buffer, then `dn.AddData` on `Close` | Rejects empty path; no size limit | Memory exhaustion and arbitrary overwrite/append of logical entries | +| `(*datanode.Medium).EnsureDir` | `datanode/client.go:142` | Caller path | `clean(p)` then marks explicit directories in `m.dirs` | Empty path becomes a no-op; no policy on `..`-style names | Arbitrary logical directory creation and enumeration under attacker-chosen names | +| `(*datanode.Medium).Delete` | `datanode/client.go:183` | Caller path | `clean(p)`, then file removal or explicit-dir removal | Blocks deleting the 
empty/root path; otherwise no path policy | Arbitrary logical deletion of files or empty directories | +| `(*datanode.Medium).DeleteAll` | `datanode/client.go:220` | Caller path | `clean(p)`, then subtree walk and removal | Blocks deleting the empty/root path | Recursive deletion of any logical subtree | +| `(*datanode.Medium).Rename` | `datanode/client.go:262` | Caller old/new paths | `clean` both paths, then read-add-delete for files or subtree move for dirs | Existence checks only; no destination restrictions | Arbitrary subtree move/overwrite, including `../`-style names that later escape on export or copy-out | +| `(*datanode.Medium).List`, `Stat`, `Exists`, `IsFile`, `IsDir` | `datanode/client.go:327`, `374`, `445`, `166`, `460` | Caller path | `clean(p)`, then `dn.ReadDir`/`dn.Stat`/explicit-dir map lookups | Same non-sandboxing `clean` behavior | Namespace enumeration and metadata disclosure for weird or traversal-looking logical names | + +## `workspace` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `workspace.New` | `workspace/service.go:41` | Caller `core.Core` and optional `cryptProvider` | Resolves `$HOME`, sets `rootPath = ~/.core/workspaces`, and binds `medium = io.Local` | Ensures the root directory exists; no sandboxing because `io.Local` is rooted at `/` | All later workspace path joins operate on the real host filesystem, not a project sandbox | +| `(*workspace.Service).CreateWorkspace` | `workspace/service.go:68` | Caller identifier and password | SHA-256 hashes `identifier` into `wsID`, creates directories under `rootPath`, calls `crypt.CreateKeyPair`, writes `keys/private.key` | Requires `crypt` to exist, checks for workspace existence, writes key with mode `0600`; no password policy or identifier validation | Predictable unsalted workspace IDs can leak identifier privacy through offline guessing; creates real host directories/files if 
exposed remotely | +| `(*workspace.Service).SwitchWorkspace` | `workspace/service.go:103` | Caller workspace name | `filepath.Join(rootPath, name)` then `medium.IsDir`, stores `activeWorkspace = name` | Only checks that the joined path currently exists as a directory | Path traversal via `name` can escape `rootPath` and bind the service to arbitrary host directories | +| `(*workspace.Service).WorkspaceFileGet` | `workspace/service.go:126` | Caller filename | `activeFilePath` uses `filepath.Join(rootPath, activeWorkspace, "files", filename)`, then `medium.Read` | Only checks that an active workspace is set; no filename containment check | `filename` can escape the `files/` directory, and a malicious `activeWorkspace` can turn reads into arbitrary host-file access | +| `(*workspace.Service).WorkspaceFileSet` | `workspace/service.go:138` | Caller filename and content | Same `activeFilePath` join, then `medium.Write` | Only checks that an active workspace is set; no filename containment check | Arbitrary host-file write if `activeWorkspace` or `filename` contains traversal segments | +| `(*workspace.Service).HandleIPCEvents` | `workspace/service.go:150` | Untrusted `core.Message` payload, typically `map[string]any` from IPC | Extracts `"action"` and dispatches to `CreateWorkspace` or `SwitchWorkspace` | Only loose type assertions; no schema, authz, or audit response on failure | Remote IPC callers can trigger workspace creation or retarget the service to arbitrary directories because downstream helpers do not enforce containment | + +## `sigil` + +| Function | File:Line | Input source | What it flows into | Current validation | Potential attack vector | +| --- | --- | --- | --- | --- | --- | +| `sigil.Transmute` | `sigil/sigil.go:46` | Caller data bytes and sigil chain | Sequential `Sigil.In` calls | No chain policy; relies on each sigil | Attacker-chosen chains can trigger expensive transforms or weaken policy if callers let the attacker choose the sigils | +| 
`sigil.Untransmute` | `sigil/sigil.go:62` | Caller data bytes and sigil chain | Reverse-order `Sigil.Out` calls | No chain policy; relies on each sigil | Expensive or mismatched reverse chains can become a CPU/memory DoS surface | +| `(*sigil.ReverseSigil).In`, `Out` | `sigil/sigils.go:29`, `41` | Caller data bytes | Allocates a new buffer and reverses it | Nil-safe only | Large inputs allocate a second full-sized buffer; otherwise low risk | +| `(*sigil.HexSigil).In`, `Out` | `sigil/sigils.go:50`, `60` | Caller data bytes | Hex encode/decode into fresh buffers | Nil-safe only; decode returns errors from `hex.Decode` | Large or malformed input can still drive allocation and CPU usage | +| `(*sigil.Base64Sigil).In`, `Out` | `sigil/sigils.go:74`, `84` | Caller data bytes | Base64 encode/decode into fresh buffers | Nil-safe only; decode returns errors from `StdEncoding.Decode` | Large or malformed input can still drive allocation and CPU usage | +| `(*sigil.GzipSigil).In` | `sigil/sigils.go:100` | Caller data bytes | `gzip.NewWriter`, compression into a `bytes.Buffer` | Nil-safe only | Large input can consume significant CPU and memory while compressing | +| `(*sigil.GzipSigil).Out` | `sigil/sigils.go:120` | Caller compressed bytes | `gzip.NewReader` then `io.ReadAll` | Nil-safe only; malformed gzip errors out | Zip-bomb style payloads can decompress to unbounded memory | +| `(*sigil.JSONSigil).In`, `Out` | `sigil/sigils.go:137`, `149` | Caller JSON bytes | `json.Compact`/`json.Indent`; `Out` is a pass-through | No schema validation; `Out` does nothing | Large inputs can consume CPU/memory; callers may wrongly assume `Out` validates or normalizes JSON | +| `sigil.NewHashSigil`, `(*sigil.HashSigil).In`, `Out` | `sigil/sigils.go:161`, `166`, `215` | Caller hash enum and data bytes | Selects a hash implementation, hashes input, and leaves `Out` as pass-through | Unsupported hashes error out; weak algorithms are still allowed | If algorithm choice is attacker-controlled, 
callers can be downgraded to weak digests such as MD4/MD5/SHA1; large inputs can still be CPU-heavy | +| `sigil.NewSigil` | `sigil/sigils.go:221` | Caller sigil name | Factory switch returning encoding, compression, formatting, hashing, or weak hash sigils | Fixed allowlist only | If exposed as user config, attackers can select weak or semantically wrong transforms and bypass higher-level crypto expectations | +| `(*sigil.XORObfuscator).Obfuscate`, `Deobfuscate` | `sigil/crypto_sigil.go:65`, `73` | Caller data and entropy bytes | SHA-256-derived keystream then XOR over a full-size output buffer | No validation | Safe only as a subroutine; if misused as standalone protection, it is merely obfuscation and still a CPU/memory surface on large input | +| `(*sigil.ShuffleMaskObfuscator).Obfuscate`, `Deobfuscate` | `sigil/crypto_sigil.go:127`, `154` | Caller data and entropy bytes | Deterministic permutation and XOR-mask over full-size buffers | No validation | Large inputs drive multiple full-size allocations and CPU work; still only obfuscation if used outside authenticated encryption | +| `sigil.NewChaChaPolySigil` | `sigil/crypto_sigil.go:247` | Caller key bytes | Copies key into `ChaChaPolySigil` state | Validates only that the key is exactly 32 bytes | Weak but correctly-sized keys are accepted; long-lived key material stays resident in process memory | +| `sigil.NewChaChaPolySigilWithObfuscator` | `sigil/crypto_sigil.go:263` | Caller key bytes and custom obfuscator | Builds a `ChaChaPolySigil` and optionally swaps the obfuscator | Key length is validated; obfuscator is trusted if non-nil | Malicious or buggy obfuscators can break the intended defence-in-depth model or leak patterns | +| `(*sigil.ChaChaPolySigil).In` | `sigil/crypto_sigil.go:276` | Caller plaintext bytes | `rand.Reader` nonce, optional obfuscation, then `chacha20poly1305.Seal` | Requires a configured key; nil input is allowed | Large plaintexts allocate full ciphertexts; if `randReader` is replaced 
in tests or DI, nonce quality becomes attacker-influenced | +| `(*sigil.ChaChaPolySigil).Out` | `sigil/crypto_sigil.go:315` | Caller ciphertext bytes | Nonce extraction, `aead.Open`, optional deobfuscation | Requires a configured key, checks minimum length, and relies on AEAD authentication | Primarily a CPU DoS surface on repeated bogus ciphertext; integrity is otherwise strong | +| `sigil.GetNonceFromCiphertext` | `sigil/crypto_sigil.go:359` | Caller ciphertext bytes | Copies the first 24 bytes as a nonce | Length check only | Low-risk parser surface; malformed short inputs just error | -- 2.45.3 From 39d5ca848037562b4e315ac2fa9185679743ff17 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 23 Mar 2026 14:48:12 +0000 Subject: [PATCH 4/6] docs: add convention drift audit --- docs/convention-drift-2026-03-23.md | 125 ++++++++++++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 docs/convention-drift-2026-03-23.md diff --git a/docs/convention-drift-2026-03-23.md b/docs/convention-drift-2026-03-23.md new file mode 100644 index 0000000..8a76d14 --- /dev/null +++ b/docs/convention-drift-2026-03-23.md @@ -0,0 +1,125 @@ + + +# Convention Drift Audit + +Date: 2026-03-23 + +Scope: tracked module files in the main repo surface (`*.go`, `*.md`), excluding `.core/`, `.github/`, `.idea/`, `go.mod`, `go.sum`, and generated coverage output. + +Conventions used: `CLAUDE.md`, `docs/development.md`, `docs/index.md`, and `docs/architecture.md`. + +Limitation: `CODEX.md` is not present in this repository. The `stdlib -> core.*` and usage-example findings below are therefore inferred from the documented guidance already in-tree. 
+ +## Missing SPDX Headers + +- `CLAUDE.md:1` +- `bench_test.go:1` +- `client_test.go:1` +- `datanode/client.go:1` +- `datanode/client_test.go:1` +- `docs/architecture.md:1` +- `docs/development.md:1` +- `docs/index.md:1` +- `io.go:1` +- `local/client.go:1` +- `local/client_test.go:1` +- `node/node.go:1` +- `node/node_test.go:1` +- `s3/s3.go:1` +- `s3/s3_test.go:1` +- `sigil/crypto_sigil.go:1` +- `sigil/crypto_sigil_test.go:1` +- `sigil/sigil.go:1` +- `sigil/sigil_test.go:1` +- `sigil/sigils.go:1` +- `sqlite/sqlite.go:1` +- `sqlite/sqlite_test.go:1` +- `store/medium.go:1` +- `store/medium_test.go:1` +- `store/store.go:1` +- `store/store_test.go:1` +- `workspace/service.go:1` +- `workspace/service_test.go:1` + +## `stdlib -> core.*` Drift + +Interpretation note: `CLAUDE.md` only makes one direct stdlib replacement rule explicit: do not use raw `os` / `filepath` outside the backend boundary. The concrete drift in this repo therefore falls into two buckets: stale pre-`forge.lthn.ai` core import paths, and direct host-filesystem/path handling in non-backend production code. + +- `go.mod:1` still declares `module dappco.re/go/core/io` while the repo documentation identifies the module as `forge.lthn.ai/core/go-io`. +- `go.mod:6` still depends on `dappco.re/go/core` while the repo docs list `forge.lthn.ai/core/go` as the current Core dependency. +- `io.go:12` imports `dappco.re/go/core/io/local` instead of the documented `forge.lthn.ai/core/go-io/local`. +- `node/node.go:18` imports `dappco.re/go/core/io` instead of the documented `forge.lthn.ai/core/go-io`. +- `workspace/service.go:10` imports `dappco.re/go/core` instead of the documented Core package path. +- `workspace/service.go:13` imports `dappco.re/go/core/io` instead of the documented `forge.lthn.ai/core/go-io`. +- `workspace/service_test.go:7` still imports `dappco.re/go/core`. +- `datanode/client_test.go:7` still imports `dappco.re/go/core/io`. 
+- `workspace/service.go:6` uses raw `os.UserHomeDir()` in non-backend production code, despite the repo guidance that filesystem access must go through the `io.Medium` abstraction. +- `workspace/service.go:7` builds runtime filesystem paths with `filepath.Join()` in non-backend production code, again bypassing the documented abstraction boundary. + +## UK English Drift + +- `datanode/client.go:3` uses `serializes`; `docs/development.md` calls for UK English (`serialises`). +- `datanode/client.go:52` uses `serializes`; `docs/development.md` calls for UK English (`serialises`). +- `sigil/crypto_sigil.go:3` uses `defense-in-depth`; `docs/development.md` calls for UK English (`defence-in-depth`). +- `sigil/crypto_sigil.go:38` uses `defense`; `docs/development.md` calls for UK English (`defence`). + +## Missing Tests + +Basis: `GOWORK=off go test -coverprofile=coverage.out ./...` and `go tool cover -func=coverage.out` on 2026-03-23. This list focuses on public or semantically meaningful API entrypoints at `0.0%` coverage and omits trivial one-line accessor helpers. 
+ +- `io.go:126` `NewSandboxed` +- `io.go:143` `ReadStream` +- `io.go:148` `WriteStream` +- `io.go:208` `(*MockMedium).WriteMode` +- `io.go:358` `(*MockMedium).Open` +- `io.go:370` `(*MockMedium).Create` +- `io.go:378` `(*MockMedium).Append` +- `io.go:388` `(*MockMedium).ReadStream` +- `io.go:393` `(*MockMedium).WriteStream` +- `datanode/client.go:138` `(*Medium).WriteMode` +- `local/client.go:231` `(*Medium).Append` +- `node/node.go:128` `(*Node).WalkNode` +- `node/node.go:218` `(*Node).CopyTo` +- `node/node.go:349` `(*Node).Read` +- `node/node.go:359` `(*Node).Write` +- `node/node.go:365` `(*Node).WriteMode` +- `node/node.go:370` `(*Node).FileGet` +- `node/node.go:375` `(*Node).FileSet` +- `node/node.go:380` `(*Node).EnsureDir` +- `node/node.go:393` `(*Node).IsFile` +- `node/node.go:400` `(*Node).IsDir` +- `node/node.go:411` `(*Node).Delete` +- `node/node.go:421` `(*Node).DeleteAll` +- `node/node.go:445` `(*Node).Rename` +- `node/node.go:461` `(*Node).List` +- `node/node.go:473` `(*Node).Create` +- `node/node.go:480` `(*Node).Append` +- `node/node.go:491` `(*Node).ReadStream` +- `node/node.go:500` `(*Node).WriteStream` +- `s3/s3.go:55` `WithClient` +- `store/medium.go:37` `(*Medium).Store` +- `store/medium.go:80` `(*Medium).EnsureDir` +- `store/medium.go:95` `(*Medium).FileGet` +- `store/medium.go:100` `(*Medium).FileSet` +- `store/medium.go:246` `(*Medium).ReadStream` +- `store/medium.go:259` `(*Medium).WriteStream` +- `workspace/service.go:150` `(*Service).HandleIPCEvents` + +## Missing Usage-Example Comments + +Interpretation note: because `CODEX.md` is absent, this section flags public entrypoints that expose the package's main behaviour but do not have a nearby comment block showing concrete usage. `sigil/sigil.go` is the only production file in the repo that currently includes an explicit `Example usage:` comment block. 
+ +- `io.go:123` `NewSandboxed` +- `local/client.go:22` `New` +- `s3/s3.go:68` `New` +- `sqlite/sqlite.go:35` `New` +- `node/node.go:32` `New` +- `node/node.go:217` `CopyTo` +- `datanode/client.go:32` `New` +- `datanode/client.go:40` `FromTar` +- `store/store.go:21` `New` +- `store/store.go:124` `Render` +- `store/medium.go:22` `NewMedium` +- `workspace/service.go:39` `New` +- `sigil/crypto_sigil.go:247` `NewChaChaPolySigil` +- `sigil/crypto_sigil.go:263` `NewChaChaPolySigilWithObfuscator` -- 2.45.3 From e208589493b138e17748564ed47f47688e642aae Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 23 Mar 2026 14:53:51 +0000 Subject: [PATCH 5/6] docs: add API contract report --- docs/api-contract.md | 285 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 285 insertions(+) create mode 100644 docs/api-contract.md diff --git a/docs/api-contract.md b/docs/api-contract.md new file mode 100644 index 0000000..05b1b4f --- /dev/null +++ b/docs/api-contract.md @@ -0,0 +1,285 @@ +# API Contract + +Descriptions use doc comments when present; otherwise they are short code-based summaries. +Test coverage is `Yes` when same-package tests directly execute or reference the exported symbol; otherwise `No`. +`CODEX.md` was not present in the repository at generation time. + +| Name | Signature | Package Path | Description | Test Coverage | +| --- | --- | --- | --- | --- | +| `DirEntry` | `type DirEntry struct` | `dappco.re/go/core/io` | DirEntry provides a simple implementation of fs.DirEntry for mock testing. | Yes | +| `FileInfo` | `type FileInfo struct` | `dappco.re/go/core/io` | FileInfo provides a simple implementation of fs.FileInfo for mock testing. | Yes | +| `Medium` | `type Medium interface` | `dappco.re/go/core/io` | Medium defines the standard interface for a storage backend. | Yes | +| `MockFile` | `type MockFile struct` | `dappco.re/go/core/io` | MockFile implements fs.File for MockMedium. 
| No | +| `MockMedium` | `type MockMedium struct` | `dappco.re/go/core/io` | MockMedium is an in-memory implementation of Medium for testing. | Yes | +| `MockWriteCloser` | `type MockWriteCloser struct` | `dappco.re/go/core/io` | MockWriteCloser implements WriteCloser for MockMedium. | No | +| `Copy` | `func Copy(src Medium, srcPath string, dst Medium, dstPath string) error` | `dappco.re/go/core/io` | Copy copies a file from one medium to another. | Yes | +| `EnsureDir` | `func EnsureDir(m Medium, path string) error` | `dappco.re/go/core/io` | EnsureDir makes sure a directory exists in the given medium. | Yes | +| `IsFile` | `func IsFile(m Medium, path string) bool` | `dappco.re/go/core/io` | IsFile checks if a path exists and is a regular file in the given medium. | Yes | +| `NewMockMedium` | `func NewMockMedium() *MockMedium` | `dappco.re/go/core/io` | NewMockMedium creates a new MockMedium instance. | Yes | +| `NewSandboxed` | `func NewSandboxed(root string) (Medium, error)` | `dappco.re/go/core/io` | NewSandboxed creates a new Medium sandboxed to the given root directory. | No | +| `Read` | `func Read(m Medium, path string) (string, error)` | `dappco.re/go/core/io` | Read retrieves the content of a file from the given medium. | Yes | +| `ReadStream` | `func ReadStream(m Medium, path string) (goio.ReadCloser, error)` | `dappco.re/go/core/io` | ReadStream returns a reader for the file content from the given medium. | No | +| `Write` | `func Write(m Medium, path, content string) error` | `dappco.re/go/core/io` | Write saves the given content to a file in the given medium. | Yes | +| `WriteStream` | `func WriteStream(m Medium, path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io` | WriteStream returns a writer for the file content in the given medium. | No | +| `DirEntry.Info` | `func (DirEntry) Info() (fs.FileInfo, error)` | `dappco.re/go/core/io` | Returns file info for the entry. 
| No | +| `DirEntry.IsDir` | `func (DirEntry) IsDir() bool` | `dappco.re/go/core/io` | Reports whether the entry represents a directory. | No | +| `DirEntry.Name` | `func (DirEntry) Name() string` | `dappco.re/go/core/io` | Returns the stored entry name. | Yes | +| `DirEntry.Type` | `func (DirEntry) Type() fs.FileMode` | `dappco.re/go/core/io` | Returns the entry type bits. | No | +| `FileInfo.IsDir` | `func (FileInfo) IsDir() bool` | `dappco.re/go/core/io` | Reports whether the entry represents a directory. | Yes | +| `FileInfo.ModTime` | `func (FileInfo) ModTime() time.Time` | `dappco.re/go/core/io` | Returns the stored modification time. | No | +| `FileInfo.Mode` | `func (FileInfo) Mode() fs.FileMode` | `dappco.re/go/core/io` | Returns the stored file mode. | No | +| `FileInfo.Name` | `func (FileInfo) Name() string` | `dappco.re/go/core/io` | Returns the stored entry name. | Yes | +| `FileInfo.Size` | `func (FileInfo) Size() int64` | `dappco.re/go/core/io` | Returns the stored size in bytes. | Yes | +| `FileInfo.Sys` | `func (FileInfo) Sys() any` | `dappco.re/go/core/io` | Returns the underlying system-specific data. | No | +| `Medium.Append` | `Append(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io` | Append opens the named file for appending, creating it if it doesn't exist. | No | +| `Medium.Create` | `Create(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io` | Create creates or truncates the named file. | No | +| `Medium.Delete` | `Delete(path string) error` | `dappco.re/go/core/io` | Delete removes a file or empty directory. | Yes | +| `Medium.DeleteAll` | `DeleteAll(path string) error` | `dappco.re/go/core/io` | DeleteAll removes a file or directory and all its contents recursively. | Yes | +| `Medium.EnsureDir` | `EnsureDir(path string) error` | `dappco.re/go/core/io` | EnsureDir makes sure a directory exists, creating it if necessary. 
| Yes | +| `Medium.Exists` | `Exists(path string) bool` | `dappco.re/go/core/io` | Exists checks if a path exists (file or directory). | Yes | +| `Medium.FileGet` | `FileGet(path string) (string, error)` | `dappco.re/go/core/io` | FileGet is a convenience function that reads a file from the medium. | Yes | +| `Medium.FileSet` | `FileSet(path, content string) error` | `dappco.re/go/core/io` | FileSet is a convenience function that writes a file to the medium. | Yes | +| `Medium.IsDir` | `IsDir(path string) bool` | `dappco.re/go/core/io` | IsDir checks if a path exists and is a directory. | Yes | +| `Medium.IsFile` | `IsFile(path string) bool` | `dappco.re/go/core/io` | IsFile checks if a path exists and is a regular file. | Yes | +| `Medium.List` | `List(path string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io` | List returns the directory entries for the given path. | Yes | +| `Medium.Open` | `Open(path string) (fs.File, error)` | `dappco.re/go/core/io` | Open opens the named file for reading. | No | +| `Medium.Read` | `Read(path string) (string, error)` | `dappco.re/go/core/io` | Read retrieves the content of a file as a string. | Yes | +| `Medium.ReadStream` | `ReadStream(path string) (goio.ReadCloser, error)` | `dappco.re/go/core/io` | ReadStream returns a reader for the file content. | No | +| `Medium.Rename` | `Rename(oldPath, newPath string) error` | `dappco.re/go/core/io` | Rename moves a file or directory from oldPath to newPath. | Yes | +| `Medium.Stat` | `Stat(path string) (fs.FileInfo, error)` | `dappco.re/go/core/io` | Stat returns file information for the given path. | Yes | +| `Medium.Write` | `Write(path, content string) error` | `dappco.re/go/core/io` | Write saves the given content to a file, overwriting it if it exists. | Yes | +| `Medium.WriteMode` | `WriteMode(path, content string, mode os.FileMode) error` | `dappco.re/go/core/io` | WriteMode saves content with explicit file permissions. 
| No | +| `Medium.WriteStream` | `WriteStream(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io` | WriteStream returns a writer for the file content. | No | +| `MockFile.Close` | `func (*MockFile) Close() error` | `dappco.re/go/core/io` | Closes the current value. | No | +| `MockFile.Read` | `func (*MockFile) Read(b []byte) (int, error)` | `dappco.re/go/core/io` | Reads data from the current value. | No | +| `MockFile.Stat` | `func (*MockFile) Stat() (fs.FileInfo, error)` | `dappco.re/go/core/io` | Returns file metadata for the current value. | No | +| `MockMedium.Append` | `func (*MockMedium) Append(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io` | Append opens a file for appending in the mock filesystem. | No | +| `MockMedium.Create` | `func (*MockMedium) Create(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io` | Create creates a file in the mock filesystem. | No | +| `MockMedium.Delete` | `func (*MockMedium) Delete(path string) error` | `dappco.re/go/core/io` | Delete removes a file or empty directory from the mock filesystem. | Yes | +| `MockMedium.DeleteAll` | `func (*MockMedium) DeleteAll(path string) error` | `dappco.re/go/core/io` | DeleteAll removes a file or directory and all contents from the mock filesystem. | Yes | +| `MockMedium.EnsureDir` | `func (*MockMedium) EnsureDir(path string) error` | `dappco.re/go/core/io` | EnsureDir records that a directory exists in the mock filesystem. | Yes | +| `MockMedium.Exists` | `func (*MockMedium) Exists(path string) bool` | `dappco.re/go/core/io` | Exists checks if a path exists in the mock filesystem. | Yes | +| `MockMedium.FileGet` | `func (*MockMedium) FileGet(path string) (string, error)` | `dappco.re/go/core/io` | FileGet is a convenience function that reads a file from the mock filesystem. 
| Yes | +| `MockMedium.FileSet` | `func (*MockMedium) FileSet(path, content string) error` | `dappco.re/go/core/io` | FileSet is a convenience function that writes a file to the mock filesystem. | Yes | +| `MockMedium.IsDir` | `func (*MockMedium) IsDir(path string) bool` | `dappco.re/go/core/io` | IsDir checks if a path is a directory in the mock filesystem. | Yes | +| `MockMedium.IsFile` | `func (*MockMedium) IsFile(path string) bool` | `dappco.re/go/core/io` | IsFile checks if a path exists as a file in the mock filesystem. | Yes | +| `MockMedium.List` | `func (*MockMedium) List(path string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io` | List returns directory entries for the mock filesystem. | Yes | +| `MockMedium.Open` | `func (*MockMedium) Open(path string) (fs.File, error)` | `dappco.re/go/core/io` | Open opens a file from the mock filesystem. | No | +| `MockMedium.Read` | `func (*MockMedium) Read(path string) (string, error)` | `dappco.re/go/core/io` | Read retrieves the content of a file from the mock filesystem. | Yes | +| `MockMedium.ReadStream` | `func (*MockMedium) ReadStream(path string) (goio.ReadCloser, error)` | `dappco.re/go/core/io` | ReadStream returns a reader for the file content in the mock filesystem. | No | +| `MockMedium.Rename` | `func (*MockMedium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io` | Rename moves a file or directory in the mock filesystem. | Yes | +| `MockMedium.Stat` | `func (*MockMedium) Stat(path string) (fs.FileInfo, error)` | `dappco.re/go/core/io` | Stat returns file information for the mock filesystem. | Yes | +| `MockMedium.Write` | `func (*MockMedium) Write(path, content string) error` | `dappco.re/go/core/io` | Write saves the given content to a file in the mock filesystem. | Yes | +| `MockMedium.WriteMode` | `func (*MockMedium) WriteMode(path, content string, mode os.FileMode) error` | `dappco.re/go/core/io` | Writes content using an explicit file mode. 
| No | +| `MockMedium.WriteStream` | `func (*MockMedium) WriteStream(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io` | WriteStream returns a writer for the file content in the mock filesystem. | No | +| `MockWriteCloser.Close` | `func (*MockWriteCloser) Close() error` | `dappco.re/go/core/io` | Closes the current value. | No | +| `MockWriteCloser.Write` | `func (*MockWriteCloser) Write(p []byte) (int, error)` | `dappco.re/go/core/io` | Writes data to the current value. | No | +| `Medium` | `type Medium struct` | `dappco.re/go/core/io/datanode` | Medium is an in-memory storage backend backed by a Borg DataNode. | Yes | +| `FromTar` | `func FromTar(data []byte) (*Medium, error)` | `dappco.re/go/core/io/datanode` | FromTar creates a Medium from a tarball, restoring all files. | Yes | +| `New` | `func New() *Medium` | `dappco.re/go/core/io/datanode` | New creates a new empty DataNode Medium. | Yes | +| `Medium.Append` | `func (*Medium) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/datanode` | Opens the named file for appending, creating it if needed. | Yes | +| `Medium.Create` | `func (*Medium) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/datanode` | Creates or truncates the named file and returns a writer. | Yes | +| `Medium.DataNode` | `func (*Medium) DataNode() *datanode.DataNode` | `dappco.re/go/core/io/datanode` | DataNode returns the underlying Borg DataNode. | Yes | +| `Medium.Delete` | `func (*Medium) Delete(p string) error` | `dappco.re/go/core/io/datanode` | Removes a file, key, or empty directory. | Yes | +| `Medium.DeleteAll` | `func (*Medium) DeleteAll(p string) error` | `dappco.re/go/core/io/datanode` | Removes a file or directory tree recursively. | Yes | +| `Medium.EnsureDir` | `func (*Medium) EnsureDir(p string) error` | `dappco.re/go/core/io/datanode` | Ensures a directory path exists. 
| Yes | +| `Medium.Exists` | `func (*Medium) Exists(p string) bool` | `dappco.re/go/core/io/datanode` | Reports whether the path exists. | Yes | +| `Medium.FileGet` | `func (*Medium) FileGet(p string) (string, error)` | `dappco.re/go/core/io/datanode` | Reads a file or key through the convenience accessor. | Yes | +| `Medium.FileSet` | `func (*Medium) FileSet(p, content string) error` | `dappco.re/go/core/io/datanode` | Writes a file or key through the convenience accessor. | Yes | +| `Medium.IsDir` | `func (*Medium) IsDir(p string) bool` | `dappco.re/go/core/io/datanode` | Reports whether the entry represents a directory. | Yes | +| `Medium.IsFile` | `func (*Medium) IsFile(p string) bool` | `dappco.re/go/core/io/datanode` | Reports whether the path exists as a regular file. | Yes | +| `Medium.List` | `func (*Medium) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/datanode` | Lists directory entries beneath the given path. | Yes | +| `Medium.Open` | `func (*Medium) Open(p string) (fs.File, error)` | `dappco.re/go/core/io/datanode` | Opens the named file for reading. | Yes | +| `Medium.Read` | `func (*Medium) Read(p string) (string, error)` | `dappco.re/go/core/io/datanode` | Reads the content of a file or key as a string. | Yes | +| `Medium.ReadStream` | `func (*Medium) ReadStream(p string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/datanode` | Opens a streaming reader for the file content. | Yes | +| `Medium.Rename` | `func (*Medium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/datanode` | Moves a file or directory to a new path. | Yes | +| `Medium.Restore` | `func (*Medium) Restore(data []byte) error` | `dappco.re/go/core/io/datanode` | Restore replaces the filesystem contents from a tarball. | Yes | +| `Medium.Snapshot` | `func (*Medium) Snapshot() ([]byte, error)` | `dappco.re/go/core/io/datanode` | Snapshot serializes the entire filesystem to a tarball. 
| Yes | +| `Medium.Stat` | `func (*Medium) Stat(p string) (fs.FileInfo, error)` | `dappco.re/go/core/io/datanode` | Returns file metadata for the given path. | Yes | +| `Medium.Write` | `func (*Medium) Write(p, content string) error` | `dappco.re/go/core/io/datanode` | Writes the given content to a file or key, overwriting it if it exists. | Yes | +| `Medium.WriteMode` | `func (*Medium) WriteMode(p, content string, mode os.FileMode) error` | `dappco.re/go/core/io/datanode` | Writes content using an explicit file mode. | No | +| `Medium.WriteStream` | `func (*Medium) WriteStream(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/datanode` | Opens a streaming writer for the file content. | Yes | +| `Medium` | `type Medium struct` | `dappco.re/go/core/io/local` | Medium is a local filesystem storage backend. | Yes | +| `New` | `func New(root string) (*Medium, error)` | `dappco.re/go/core/io/local` | New creates a new local Medium rooted at the given directory. | Yes | +| `Medium.Append` | `func (*Medium) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/local` | Append opens the named file for appending, creating it if it doesn't exist. | No | +| `Medium.Create` | `func (*Medium) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/local` | Create creates or truncates the named file. | Yes | +| `Medium.Delete` | `func (*Medium) Delete(p string) error` | `dappco.re/go/core/io/local` | Delete removes a file or empty directory. | Yes | +| `Medium.DeleteAll` | `func (*Medium) DeleteAll(p string) error` | `dappco.re/go/core/io/local` | DeleteAll removes a file or directory recursively. | Yes | +| `Medium.EnsureDir` | `func (*Medium) EnsureDir(p string) error` | `dappco.re/go/core/io/local` | EnsureDir creates the directory if it doesn't exist. | Yes | +| `Medium.Exists` | `func (*Medium) Exists(p string) bool` | `dappco.re/go/core/io/local` | Exists returns true if path exists. 
| Yes | +| `Medium.FileGet` | `func (*Medium) FileGet(p string) (string, error)` | `dappco.re/go/core/io/local` | FileGet is an alias for Read. | Yes | +| `Medium.FileSet` | `func (*Medium) FileSet(p, content string) error` | `dappco.re/go/core/io/local` | FileSet is an alias for Write. | Yes | +| `Medium.IsDir` | `func (*Medium) IsDir(p string) bool` | `dappco.re/go/core/io/local` | IsDir returns true if path is a directory. | Yes | +| `Medium.IsFile` | `func (*Medium) IsFile(p string) bool` | `dappco.re/go/core/io/local` | IsFile returns true if path is a regular file. | Yes | +| `Medium.List` | `func (*Medium) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/local` | List returns directory entries. | Yes | +| `Medium.Open` | `func (*Medium) Open(p string) (fs.File, error)` | `dappco.re/go/core/io/local` | Open opens the named file for reading. | Yes | +| `Medium.Read` | `func (*Medium) Read(p string) (string, error)` | `dappco.re/go/core/io/local` | Read returns file contents as string. | Yes | +| `Medium.ReadStream` | `func (*Medium) ReadStream(path string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/local` | ReadStream returns a reader for the file content. | Yes | +| `Medium.Rename` | `func (*Medium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/local` | Rename moves a file or directory. | Yes | +| `Medium.Stat` | `func (*Medium) Stat(p string) (fs.FileInfo, error)` | `dappco.re/go/core/io/local` | Stat returns file info. | Yes | +| `Medium.Write` | `func (*Medium) Write(p, content string) error` | `dappco.re/go/core/io/local` | Write saves content to file, creating parent directories as needed. | Yes | +| `Medium.WriteMode` | `func (*Medium) WriteMode(p, content string, mode os.FileMode) error` | `dappco.re/go/core/io/local` | WriteMode saves content to file with explicit permissions. 
| Yes | +| `Medium.WriteStream` | `func (*Medium) WriteStream(path string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/local` | WriteStream returns a writer for the file content. | Yes | +| `Node` | `type Node struct` | `dappco.re/go/core/io/node` | Node is an in-memory filesystem that implements coreio.Node (and therefore coreio.Medium). | Yes | +| `WalkOptions` | `type WalkOptions struct` | `dappco.re/go/core/io/node` | WalkOptions configures the behaviour of Walk. | Yes | +| `FromTar` | `func FromTar(data []byte) (*Node, error)` | `dappco.re/go/core/io/node` | FromTar creates a new Node from a tar archive. | Yes | +| `New` | `func New() *Node` | `dappco.re/go/core/io/node` | New creates a new, empty Node. | Yes | +| `Node.AddData` | `func (*Node) AddData(name string, content []byte)` | `dappco.re/go/core/io/node` | AddData stages content in the in-memory filesystem. | Yes | +| `Node.Append` | `func (*Node) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/node` | Append opens the named file for appending, creating it if needed. | No | +| `Node.CopyFile` | `func (*Node) CopyFile(src, dst string, perm fs.FileMode) error` | `dappco.re/go/core/io/node` | CopyFile copies a file from the in-memory tree to the local filesystem. | Yes | +| `Node.CopyTo` | `func (*Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error` | `dappco.re/go/core/io/node` | CopyTo copies a file (or directory tree) from the node to any Medium. | No | +| `Node.Create` | `func (*Node) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/node` | Create creates or truncates the named file, returning a WriteCloser. | No | +| `Node.Delete` | `func (*Node) Delete(p string) error` | `dappco.re/go/core/io/node` | Delete removes a single file. | No | +| `Node.DeleteAll` | `func (*Node) DeleteAll(p string) error` | `dappco.re/go/core/io/node` | DeleteAll removes a file or directory and all children. 
| No | +| `Node.EnsureDir` | `func (*Node) EnsureDir(_ string) error` | `dappco.re/go/core/io/node` | EnsureDir is a no-op because directories are implicit in Node. | No | +| `Node.Exists` | `func (*Node) Exists(p string) bool` | `dappco.re/go/core/io/node` | Exists checks if a path exists (file or directory). | Yes | +| `Node.FileGet` | `func (*Node) FileGet(p string) (string, error)` | `dappco.re/go/core/io/node` | FileGet is an alias for Read. | No | +| `Node.FileSet` | `func (*Node) FileSet(p, content string) error` | `dappco.re/go/core/io/node` | FileSet is an alias for Write. | No | +| `Node.IsDir` | `func (*Node) IsDir(p string) bool` | `dappco.re/go/core/io/node` | IsDir checks if a path exists and is a directory. | No | +| `Node.IsFile` | `func (*Node) IsFile(p string) bool` | `dappco.re/go/core/io/node` | IsFile checks if a path exists and is a regular file. | No | +| `Node.List` | `func (*Node) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/node` | List returns directory entries for the given path. | No | +| `Node.LoadTar` | `func (*Node) LoadTar(data []byte) error` | `dappco.re/go/core/io/node` | LoadTar replaces the in-memory tree with the contents of a tar archive. | Yes | +| `Node.Open` | `func (*Node) Open(name string) (fs.File, error)` | `dappco.re/go/core/io/node` | Open opens a file from the Node. | Yes | +| `Node.Read` | `func (*Node) Read(p string) (string, error)` | `dappco.re/go/core/io/node` | Read retrieves the content of a file as a string. | No | +| `Node.ReadDir` | `func (*Node) ReadDir(name string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/node` | ReadDir reads and returns all directory entries for the named directory. | Yes | +| `Node.ReadFile` | `func (*Node) ReadFile(name string) ([]byte, error)` | `dappco.re/go/core/io/node` | ReadFile returns the content of the named file as a byte slice. 
| Yes | +| `Node.ReadStream` | `func (*Node) ReadStream(p string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/node` | ReadStream returns a ReadCloser for the file content. | No | +| `Node.Rename` | `func (*Node) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/node` | Rename moves a file from oldPath to newPath. | No | +| `Node.Stat` | `func (*Node) Stat(name string) (fs.FileInfo, error)` | `dappco.re/go/core/io/node` | Stat returns file information for the given path. | Yes | +| `Node.ToTar` | `func (*Node) ToTar() ([]byte, error)` | `dappco.re/go/core/io/node` | ToTar serialises the entire in-memory tree to a tar archive. | Yes | +| `Node.Walk` | `func (*Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error` | `dappco.re/go/core/io/node` | Walk walks the in-memory tree with optional WalkOptions. | Yes | +| `Node.WalkNode` | `func (*Node) WalkNode(root string, fn fs.WalkDirFunc) error` | `dappco.re/go/core/io/node` | WalkNode walks the in-memory tree, calling fn for each entry. | No | +| `Node.Write` | `func (*Node) Write(p, content string) error` | `dappco.re/go/core/io/node` | Write saves the given content to a file, overwriting it if it exists. | No | +| `Node.WriteMode` | `func (*Node) WriteMode(p, content string, mode os.FileMode) error` | `dappco.re/go/core/io/node` | WriteMode saves content with explicit permissions (no-op for in-memory node). | No | +| `Node.WriteStream` | `func (*Node) WriteStream(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/node` | WriteStream returns a WriteCloser for the file content. | No | +| `Medium` | `type Medium struct` | `dappco.re/go/core/io/s3` | Medium is an S3-backed storage backend implementing the io.Medium interface. | Yes | +| `Option` | `type Option func(*Medium)` | `dappco.re/go/core/io/s3` | Option configures a Medium. 
| Yes | +| `New` | `func New(bucket string, opts ...Option) (*Medium, error)` | `dappco.re/go/core/io/s3` | New creates a new S3 Medium for the given bucket. | Yes | +| `WithClient` | `func WithClient(client *s3.Client) Option` | `dappco.re/go/core/io/s3` | WithClient sets the S3 client for dependency injection. | No | +| `WithPrefix` | `func WithPrefix(prefix string) Option` | `dappco.re/go/core/io/s3` | WithPrefix sets an optional key prefix for all operations. | Yes | +| `Medium.Append` | `func (*Medium) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/s3` | Append opens the named file for appending. | Yes | +| `Medium.Create` | `func (*Medium) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/s3` | Create creates or truncates the named file. | Yes | +| `Medium.Delete` | `func (*Medium) Delete(p string) error` | `dappco.re/go/core/io/s3` | Delete removes a single object. | Yes | +| `Medium.DeleteAll` | `func (*Medium) DeleteAll(p string) error` | `dappco.re/go/core/io/s3` | DeleteAll removes all objects under the given prefix. | Yes | +| `Medium.EnsureDir` | `func (*Medium) EnsureDir(_ string) error` | `dappco.re/go/core/io/s3` | EnsureDir is a no-op for S3 (S3 has no real directories). | Yes | +| `Medium.Exists` | `func (*Medium) Exists(p string) bool` | `dappco.re/go/core/io/s3` | Exists checks if a path exists (file or directory prefix). | Yes | +| `Medium.FileGet` | `func (*Medium) FileGet(p string) (string, error)` | `dappco.re/go/core/io/s3` | FileGet is a convenience function that reads a file from the medium. | Yes | +| `Medium.FileSet` | `func (*Medium) FileSet(p, content string) error` | `dappco.re/go/core/io/s3` | FileSet is a convenience function that writes a file to the medium. | Yes | +| `Medium.IsDir` | `func (*Medium) IsDir(p string) bool` | `dappco.re/go/core/io/s3` | IsDir checks if a path exists and is a directory (has objects under it as a prefix). 
| Yes | +| `Medium.IsFile` | `func (*Medium) IsFile(p string) bool` | `dappco.re/go/core/io/s3` | IsFile checks if a path exists and is a regular file (not a "directory" prefix). | Yes | +| `Medium.List` | `func (*Medium) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/s3` | List returns directory entries for the given path using ListObjectsV2 with delimiter. | Yes | +| `Medium.Open` | `func (*Medium) Open(p string) (fs.File, error)` | `dappco.re/go/core/io/s3` | Open opens the named file for reading. | Yes | +| `Medium.Read` | `func (*Medium) Read(p string) (string, error)` | `dappco.re/go/core/io/s3` | Read retrieves the content of a file as a string. | Yes | +| `Medium.ReadStream` | `func (*Medium) ReadStream(p string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/s3` | ReadStream returns a reader for the file content. | Yes | +| `Medium.Rename` | `func (*Medium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/s3` | Rename moves an object by copying then deleting the original. | Yes | +| `Medium.Stat` | `func (*Medium) Stat(p string) (fs.FileInfo, error)` | `dappco.re/go/core/io/s3` | Stat returns file information for the given path using HeadObject. | Yes | +| `Medium.Write` | `func (*Medium) Write(p, content string) error` | `dappco.re/go/core/io/s3` | Write saves the given content to a file, overwriting it if it exists. | Yes | +| `Medium.WriteStream` | `func (*Medium) WriteStream(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/s3` | WriteStream returns a writer for the file content. | Yes | +| `Base64Sigil` | `type Base64Sigil struct` | `dappco.re/go/core/io/sigil` | Base64Sigil is a Sigil that encodes/decodes data to/from base64. | Yes | +| `ChaChaPolySigil` | `type ChaChaPolySigil struct` | `dappco.re/go/core/io/sigil` | ChaChaPolySigil is a Sigil that encrypts/decrypts data using ChaCha20-Poly1305. 
| Yes | +| `GzipSigil` | `type GzipSigil struct` | `dappco.re/go/core/io/sigil` | GzipSigil is a Sigil that compresses/decompresses data using gzip. | Yes | +| `HashSigil` | `type HashSigil struct` | `dappco.re/go/core/io/sigil` | HashSigil is a Sigil that hashes the data using a specified algorithm. | Yes | +| `HexSigil` | `type HexSigil struct` | `dappco.re/go/core/io/sigil` | HexSigil is a Sigil that encodes/decodes data to/from hexadecimal. | Yes | +| `JSONSigil` | `type JSONSigil struct` | `dappco.re/go/core/io/sigil` | JSONSigil is a Sigil that compacts or indents JSON data. | Yes | +| `PreObfuscator` | `type PreObfuscator interface` | `dappco.re/go/core/io/sigil` | PreObfuscator applies a reversible transformation to data before encryption. | Yes | +| `ReverseSigil` | `type ReverseSigil struct` | `dappco.re/go/core/io/sigil` | ReverseSigil is a Sigil that reverses the bytes of the payload. | Yes | +| `ShuffleMaskObfuscator` | `type ShuffleMaskObfuscator struct` | `dappco.re/go/core/io/sigil` | ShuffleMaskObfuscator provides stronger obfuscation through byte shuffling and masking. | Yes | +| `Sigil` | `type Sigil interface` | `dappco.re/go/core/io/sigil` | Sigil defines the interface for a data transformer. | Yes | +| `XORObfuscator` | `type XORObfuscator struct` | `dappco.re/go/core/io/sigil` | XORObfuscator performs XOR-based obfuscation using an entropy-derived key stream. | Yes | +| `GetNonceFromCiphertext` | `func GetNonceFromCiphertext(ciphertext []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | GetNonceFromCiphertext extracts the nonce from encrypted output. | Yes | +| `NewChaChaPolySigil` | `func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error)` | `dappco.re/go/core/io/sigil` | NewChaChaPolySigil creates a new encryption sigil with the given key. 
| Yes | +| `NewChaChaPolySigilWithObfuscator` | `func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error)` | `dappco.re/go/core/io/sigil` | NewChaChaPolySigilWithObfuscator creates a new encryption sigil with custom obfuscator. | Yes | +| `NewHashSigil` | `func NewHashSigil(h crypto.Hash) *HashSigil` | `dappco.re/go/core/io/sigil` | NewHashSigil creates a new HashSigil. | Yes | +| `NewSigil` | `func NewSigil(name string) (Sigil, error)` | `dappco.re/go/core/io/sigil` | NewSigil is a factory function that returns a Sigil based on a string name. | Yes | +| `Transmute` | `func Transmute(data []byte, sigils []Sigil) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Transmute applies a series of sigils to data in sequence. | Yes | +| `Untransmute` | `func Untransmute(data []byte, sigils []Sigil) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Untransmute reverses a transmutation by applying Out in reverse order. | Yes | +| `Base64Sigil.In` | `func (*Base64Sigil) In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In encodes the data to base64. | Yes | +| `Base64Sigil.Out` | `func (*Base64Sigil) Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out decodes the data from base64. | Yes | +| `ChaChaPolySigil.In` | `func (*ChaChaPolySigil) In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In encrypts the data with pre-obfuscation. | Yes | +| `ChaChaPolySigil.Out` | `func (*ChaChaPolySigil) Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out decrypts the data and reverses obfuscation. | Yes | +| `GzipSigil.In` | `func (*GzipSigil) In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In compresses the data using gzip. | Yes | +| `GzipSigil.Out` | `func (*GzipSigil) Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out decompresses the data using gzip. 
| Yes | +| `HashSigil.In` | `func (*HashSigil) In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In hashes the data. | Yes | +| `HashSigil.Out` | `func (*HashSigil) Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out is a no-op for HashSigil. | Yes | +| `HexSigil.In` | `func (*HexSigil) In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In encodes the data to hexadecimal. | Yes | +| `HexSigil.Out` | `func (*HexSigil) Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out decodes the data from hexadecimal. | Yes | +| `JSONSigil.In` | `func (*JSONSigil) In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In compacts or indents the JSON data. | Yes | +| `JSONSigil.Out` | `func (*JSONSigil) Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out is a no-op for JSONSigil. | Yes | +| `PreObfuscator.Deobfuscate` | `Deobfuscate(data []byte, entropy []byte) []byte` | `dappco.re/go/core/io/sigil` | Deobfuscate reverses the transformation after decryption. | Yes | +| `PreObfuscator.Obfuscate` | `Obfuscate(data []byte, entropy []byte) []byte` | `dappco.re/go/core/io/sigil` | Obfuscate transforms plaintext before encryption using the provided entropy. | Yes | +| `ReverseSigil.In` | `func (*ReverseSigil) In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In reverses the bytes of the data. | Yes | +| `ReverseSigil.Out` | `func (*ReverseSigil) Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out reverses the bytes of the data. | Yes | +| `ShuffleMaskObfuscator.Deobfuscate` | `func (*ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte) []byte` | `dappco.re/go/core/io/sigil` | Deobfuscate reverses the shuffle and mask operations. 
| Yes | +| `ShuffleMaskObfuscator.Obfuscate` | `func (*ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte` | `dappco.re/go/core/io/sigil` | Obfuscate shuffles bytes and applies a mask derived from entropy. | Yes | +| `Sigil.In` | `In(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | In applies the forward transformation to the data. | Yes | +| `Sigil.Out` | `Out(data []byte) ([]byte, error)` | `dappco.re/go/core/io/sigil` | Out applies the reverse transformation to the data. | Yes | +| `XORObfuscator.Deobfuscate` | `func (*XORObfuscator) Deobfuscate(data []byte, entropy []byte) []byte` | `dappco.re/go/core/io/sigil` | Deobfuscate reverses the XOR transformation (XOR is symmetric). | Yes | +| `XORObfuscator.Obfuscate` | `func (*XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte` | `dappco.re/go/core/io/sigil` | Obfuscate XORs the data with a key stream derived from the entropy. | Yes | +| `Medium` | `type Medium struct` | `dappco.re/go/core/io/sqlite` | Medium is a SQLite-backed storage backend implementing the io.Medium interface. | Yes | +| `Option` | `type Option func(*Medium)` | `dappco.re/go/core/io/sqlite` | Option configures a Medium. | Yes | +| `New` | `func New(dbPath string, opts ...Option) (*Medium, error)` | `dappco.re/go/core/io/sqlite` | New creates a new SQLite Medium at the given database path. | Yes | +| `WithTable` | `func WithTable(table string) Option` | `dappco.re/go/core/io/sqlite` | WithTable sets the table name (default: "files"). | Yes | +| `Medium.Append` | `func (*Medium) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/sqlite` | Append opens the named file for appending, creating it if it doesn't exist. | Yes | +| `Medium.Close` | `func (*Medium) Close() error` | `dappco.re/go/core/io/sqlite` | Close closes the underlying database connection. 
| Yes | +| `Medium.Create` | `func (*Medium) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/sqlite` | Create creates or truncates the named file. | Yes | +| `Medium.Delete` | `func (*Medium) Delete(p string) error` | `dappco.re/go/core/io/sqlite` | Delete removes a file or empty directory. | Yes | +| `Medium.DeleteAll` | `func (*Medium) DeleteAll(p string) error` | `dappco.re/go/core/io/sqlite` | DeleteAll removes a file or directory and all its contents recursively. | Yes | +| `Medium.EnsureDir` | `func (*Medium) EnsureDir(p string) error` | `dappco.re/go/core/io/sqlite` | EnsureDir makes sure a directory exists, creating it if necessary. | Yes | +| `Medium.Exists` | `func (*Medium) Exists(p string) bool` | `dappco.re/go/core/io/sqlite` | Exists checks if a path exists (file or directory). | Yes | +| `Medium.FileGet` | `func (*Medium) FileGet(p string) (string, error)` | `dappco.re/go/core/io/sqlite` | FileGet is a convenience function that reads a file from the medium. | Yes | +| `Medium.FileSet` | `func (*Medium) FileSet(p, content string) error` | `dappco.re/go/core/io/sqlite` | FileSet is a convenience function that writes a file to the medium. | Yes | +| `Medium.IsDir` | `func (*Medium) IsDir(p string) bool` | `dappco.re/go/core/io/sqlite` | IsDir checks if a path exists and is a directory. | Yes | +| `Medium.IsFile` | `func (*Medium) IsFile(p string) bool` | `dappco.re/go/core/io/sqlite` | IsFile checks if a path exists and is a regular file. | Yes | +| `Medium.List` | `func (*Medium) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/sqlite` | List returns the directory entries for the given path. | Yes | +| `Medium.Open` | `func (*Medium) Open(p string) (fs.File, error)` | `dappco.re/go/core/io/sqlite` | Open opens the named file for reading. | Yes | +| `Medium.Read` | `func (*Medium) Read(p string) (string, error)` | `dappco.re/go/core/io/sqlite` | Read retrieves the content of a file as a string. 
| Yes | +| `Medium.ReadStream` | `func (*Medium) ReadStream(p string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/sqlite` | ReadStream returns a reader for the file content. | Yes | +| `Medium.Rename` | `func (*Medium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/sqlite` | Rename moves a file or directory from oldPath to newPath. | Yes | +| `Medium.Stat` | `func (*Medium) Stat(p string) (fs.FileInfo, error)` | `dappco.re/go/core/io/sqlite` | Stat returns file information for the given path. | Yes | +| `Medium.Write` | `func (*Medium) Write(p, content string) error` | `dappco.re/go/core/io/sqlite` | Write saves the given content to a file, overwriting it if it exists. | Yes | +| `Medium.WriteStream` | `func (*Medium) WriteStream(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/sqlite` | WriteStream returns a writer for the file content. | Yes | +| `Medium` | `type Medium struct` | `dappco.re/go/core/io/store` | Medium wraps a Store to satisfy the io.Medium interface. | Yes | +| `Store` | `type Store struct` | `dappco.re/go/core/io/store` | Store is a group-namespaced key-value store backed by SQLite. | Yes | +| `New` | `func New(dbPath string) (*Store, error)` | `dappco.re/go/core/io/store` | New creates a Store at the given SQLite path. | Yes | +| `NewMedium` | `func NewMedium(dbPath string) (*Medium, error)` | `dappco.re/go/core/io/store` | NewMedium creates an io.Medium backed by a KV store at the given SQLite path. | Yes | +| `Medium.Append` | `func (*Medium) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/store` | Append opens a key for appending. | Yes | +| `Medium.Close` | `func (*Medium) Close() error` | `dappco.re/go/core/io/store` | Close closes the underlying store. | Yes | +| `Medium.Create` | `func (*Medium) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/store` | Create creates or truncates a key. 
| Yes | +| `Medium.Delete` | `func (*Medium) Delete(p string) error` | `dappco.re/go/core/io/store` | Delete removes a key, or checks that a group is empty. | Yes | +| `Medium.DeleteAll` | `func (*Medium) DeleteAll(p string) error` | `dappco.re/go/core/io/store` | DeleteAll removes a key, or all keys in a group. | Yes | +| `Medium.EnsureDir` | `func (*Medium) EnsureDir(_ string) error` | `dappco.re/go/core/io/store` | EnsureDir is a no-op — groups are created implicitly on Set. | No | +| `Medium.Exists` | `func (*Medium) Exists(p string) bool` | `dappco.re/go/core/io/store` | Exists returns true if a group or key exists. | Yes | +| `Medium.FileGet` | `func (*Medium) FileGet(p string) (string, error)` | `dappco.re/go/core/io/store` | FileGet is an alias for Read. | No | +| `Medium.FileSet` | `func (*Medium) FileSet(p, content string) error` | `dappco.re/go/core/io/store` | FileSet is an alias for Write. | No | +| `Medium.IsDir` | `func (*Medium) IsDir(p string) bool` | `dappco.re/go/core/io/store` | IsDir returns true if the path is a group with entries. | Yes | +| `Medium.IsFile` | `func (*Medium) IsFile(p string) bool` | `dappco.re/go/core/io/store` | IsFile returns true if a group/key pair exists. | Yes | +| `Medium.List` | `func (*Medium) List(p string) ([]fs.DirEntry, error)` | `dappco.re/go/core/io/store` | List returns directory entries. | Yes | +| `Medium.Open` | `func (*Medium) Open(p string) (fs.File, error)` | `dappco.re/go/core/io/store` | Open opens a key for reading. | Yes | +| `Medium.Read` | `func (*Medium) Read(p string) (string, error)` | `dappco.re/go/core/io/store` | Read retrieves the value at group/key. | Yes | +| `Medium.ReadStream` | `func (*Medium) ReadStream(p string) (goio.ReadCloser, error)` | `dappco.re/go/core/io/store` | ReadStream returns a reader for the value. | No | +| `Medium.Rename` | `func (*Medium) Rename(oldPath, newPath string) error` | `dappco.re/go/core/io/store` | Rename moves a key from one path to another. 
| Yes | +| `Medium.Stat` | `func (*Medium) Stat(p string) (fs.FileInfo, error)` | `dappco.re/go/core/io/store` | Stat returns file info for a group (dir) or key (file). | Yes | +| `Medium.Store` | `func (*Medium) Store() *Store` | `dappco.re/go/core/io/store` | Store returns the underlying KV store for direct access. | No | +| `Medium.Write` | `func (*Medium) Write(p, content string) error` | `dappco.re/go/core/io/store` | Write stores a value at group/key. | Yes | +| `Medium.WriteStream` | `func (*Medium) WriteStream(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/store` | WriteStream returns a writer. | No | +| `Store.AsMedium` | `func (*Store) AsMedium() *Medium` | `dappco.re/go/core/io/store` | AsMedium returns a Medium adapter for an existing Store. | Yes | +| `Store.Close` | `func (*Store) Close() error` | `dappco.re/go/core/io/store` | Close closes the underlying database. | Yes | +| `Store.Count` | `func (*Store) Count(group string) (int, error)` | `dappco.re/go/core/io/store` | Count returns the number of keys in a group. | Yes | +| `Store.Delete` | `func (*Store) Delete(group, key string) error` | `dappco.re/go/core/io/store` | Delete removes a single key from a group. | Yes | +| `Store.DeleteGroup` | `func (*Store) DeleteGroup(group string) error` | `dappco.re/go/core/io/store` | DeleteGroup removes all keys in a group. | Yes | +| `Store.Get` | `func (*Store) Get(group, key string) (string, error)` | `dappco.re/go/core/io/store` | Get retrieves a value by group and key. | Yes | +| `Store.GetAll` | `func (*Store) GetAll(group string) (map[string]string, error)` | `dappco.re/go/core/io/store` | GetAll returns all key-value pairs in a group. | Yes | +| `Store.Render` | `func (*Store) Render(tmplStr, group string) (string, error)` | `dappco.re/go/core/io/store` | Render loads all key-value pairs from a group and renders a Go template. 
| Yes | +| `Store.Set` | `func (*Store) Set(group, key, value string) error` | `dappco.re/go/core/io/store` | Set stores a value by group and key, overwriting if exists. | Yes | +| `Service` | `type Service struct` | `dappco.re/go/core/io/workspace` | Service implements the Workspace interface. | Yes | +| `Workspace` | `type Workspace interface` | `dappco.re/go/core/io/workspace` | Workspace provides management for encrypted user workspaces. | No | +| `New` | `func New(c *core.Core, crypt ...cryptProvider) (any, error)` | `dappco.re/go/core/io/workspace` | New creates a new Workspace service instance. | Yes | +| `Service.CreateWorkspace` | `func (*Service) CreateWorkspace(identifier, password string) (string, error)` | `dappco.re/go/core/io/workspace` | CreateWorkspace creates a new encrypted workspace. | Yes | +| `Service.HandleIPCEvents` | `func (*Service) HandleIPCEvents(c *core.Core, msg core.Message) core.Result` | `dappco.re/go/core/io/workspace` | HandleIPCEvents handles workspace-related IPC messages. | No | +| `Service.SwitchWorkspace` | `func (*Service) SwitchWorkspace(name string) error` | `dappco.re/go/core/io/workspace` | SwitchWorkspace changes the active workspace. | Yes | +| `Service.WorkspaceFileGet` | `func (*Service) WorkspaceFileGet(filename string) (string, error)` | `dappco.re/go/core/io/workspace` | WorkspaceFileGet retrieves the content of a file from the active workspace. | Yes | +| `Service.WorkspaceFileSet` | `func (*Service) WorkspaceFileSet(filename, content string) error` | `dappco.re/go/core/io/workspace` | WorkspaceFileSet saves content to a file in the active workspace. | Yes | +| `Workspace.CreateWorkspace` | `CreateWorkspace(identifier, password string) (string, error)` | `dappco.re/go/core/io/workspace` | Creates a new encrypted workspace and returns its ID. | Yes | +| `Workspace.SwitchWorkspace` | `SwitchWorkspace(name string) error` | `dappco.re/go/core/io/workspace` | Switches the active workspace. 
| Yes | +| `Workspace.WorkspaceFileGet` | `WorkspaceFileGet(filename string) (string, error)` | `dappco.re/go/core/io/workspace` | Reads a file from the active workspace. | Yes | +| `Workspace.WorkspaceFileSet` | `WorkspaceFileSet(filename, content string) error` | `dappco.re/go/core/io/workspace` | Writes a file into the active workspace. | Yes | -- 2.45.3 From 987507cae8f4f644177868fc9dc361ed9d9018e4 Mon Sep 17 00:00:00 2001 From: Virgil Date: Fri, 27 Mar 2026 19:01:20 +0000 Subject: [PATCH 6/6] docs(specs): populate package RFCs Co-Authored-By: Virgil --- specs/RFC.md | 167 ++++++++++++++++++++++++++++++++++++++++ specs/datanode/RFC.md | 106 +++++++++++++++++++++++++ specs/local/RFC.md | 104 +++++++++++++++++++++++++ specs/node/RFC.md | 138 +++++++++++++++++++++++++++++++++ specs/s3/RFC.md | 137 +++++++++++++++++++++++++++++++++ specs/sigil/RFC.md | 171 +++++++++++++++++++++++++++++++++++++++++ specs/sqlite/RFC.md | 114 +++++++++++++++++++++++++++ specs/store/RFC.md | 120 +++++++++++++++++++++++++++++ specs/workspace/RFC.md | 60 +++++++++++++++ 9 files changed, 1117 insertions(+) create mode 100644 specs/RFC.md create mode 100644 specs/datanode/RFC.md create mode 100644 specs/local/RFC.md create mode 100644 specs/node/RFC.md create mode 100644 specs/s3/RFC.md create mode 100644 specs/sigil/RFC.md create mode 100644 specs/sqlite/RFC.md create mode 100644 specs/store/RFC.md create mode 100644 specs/workspace/RFC.md diff --git a/specs/RFC.md b/specs/RFC.md new file mode 100644 index 0000000..68521fd --- /dev/null +++ b/specs/RFC.md @@ -0,0 +1,167 @@ +# io + +**Import:** `dappco.re/go/core/io` +**Files:** 1 + +No package doc comment in source. + +## Types + +### Medium +- **File:** io.go +- **Purpose:** Medium defines the standard interface for a storage backend. This allows for different implementations (e.g., local disk, S3, SFTP) to be used interchangeably. 
+- **Methods:** + - `Read func(path string) (string, error)` — Read retrieves the content of a file as a string. + - `Write func(path, content string) error` — Write saves the given content to a file, overwriting it if it exists. Default permissions: 0644. For sensitive files, use WriteMode. + - `WriteMode func(path, content string, mode os.FileMode) error` — WriteMode saves content with explicit file permissions. Use 0600 for sensitive files (keys, secrets, encrypted output). + - `EnsureDir func(path string) error` — EnsureDir makes sure a directory exists, creating it if necessary. + - `IsFile func(path string) bool` — IsFile checks if a path exists and is a regular file. + - `FileGet func(path string) (string, error)` — FileGet is a convenience function that reads a file from the medium. + - `FileSet func(path, content string) error` — FileSet is a convenience function that writes a file to the medium. + - `Delete func(path string) error` — Delete removes a file or empty directory. + - `DeleteAll func(path string) error` — DeleteAll removes a file or directory and all its contents recursively. + - `Rename func(oldPath, newPath string) error` — Rename moves a file or directory from oldPath to newPath. + - `List func(path string) ([]fs.DirEntry, error)` — List returns the directory entries for the given path. + - `Stat func(path string) (fs.FileInfo, error)` — Stat returns file information for the given path. + - `Open func(path string) (fs.File, error)` — Open opens the named file for reading. + - `Create func(path string) (goio.WriteCloser, error)` — Create creates or truncates the named file. + - `Append func(path string) (goio.WriteCloser, error)` — Append opens the named file for appending, creating it if it doesn't exist. + - `ReadStream func(path string) (goio.ReadCloser, error)` — ReadStream returns a reader for the file content. Use this for large files to avoid loading the entire content into memory. 
+ - `WriteStream func(path string) (goio.WriteCloser, error)` — WriteStream returns a writer for the file content. Use this for large files to avoid loading the entire content into memory. + - `Exists func(path string) bool` — Exists checks if a path exists (file or directory). + - `IsDir func(path string) bool` — IsDir checks if a path exists and is a directory. + +### FileInfo +- **File:** io.go +- **Purpose:** FileInfo provides a simple implementation of fs.FileInfo for mock testing. +- **Fields:** + - `name string` — No doc comment in source. + - `size int64` — No doc comment in source. + - `mode fs.FileMode` — No doc comment in source. + - `modTime time.Time` — No doc comment in source. + - `isDir bool` — No doc comment in source. +- **Associated Methods:** + - `func (fi FileInfo) Name() string` — No doc comment in source. + - `func (fi FileInfo) Size() int64` — No doc comment in source. + - `func (fi FileInfo) Mode() fs.FileMode` — No doc comment in source. + - `func (fi FileInfo) ModTime() time.Time` — No doc comment in source. + - `func (fi FileInfo) IsDir() bool` — No doc comment in source. + - `func (fi FileInfo) Sys() any` — No doc comment in source. + +### DirEntry +- **File:** io.go +- **Purpose:** DirEntry provides a simple implementation of fs.DirEntry for mock testing. +- **Fields:** + - `name string` — No doc comment in source. + - `isDir bool` — No doc comment in source. + - `mode fs.FileMode` — No doc comment in source. + - `info fs.FileInfo` — No doc comment in source. +- **Associated Methods:** + - `func (de DirEntry) Name() string` — No doc comment in source. + - `func (de DirEntry) IsDir() bool` — No doc comment in source. + - `func (de DirEntry) Type() fs.FileMode` — No doc comment in source. + - `func (de DirEntry) Info() (fs.FileInfo, error)` — No doc comment in source. + +### MockMedium +- **File:** io.go +- **Purpose:** MockMedium is an in-memory implementation of Medium for testing. 
+- **Fields:** + - `Files map[string]string` — No doc comment in source. + - `Dirs map[string]bool` — No doc comment in source. + - `ModTimes map[string]time.Time` — No doc comment in source. +- **Associated Methods:** + - `func (m *MockMedium) Read(path string) (string, error)` — Read retrieves the content of a file from the mock filesystem. + - `func (m *MockMedium) Write(path, content string) error` — Write saves the given content to a file in the mock filesystem. + - `func (m *MockMedium) WriteMode(path, content string, mode os.FileMode) error` — No doc comment in source. + - `func (m *MockMedium) EnsureDir(path string) error` — EnsureDir records that a directory exists in the mock filesystem. + - `func (m *MockMedium) IsFile(path string) bool` — IsFile checks if a path exists as a file in the mock filesystem. + - `func (m *MockMedium) FileGet(path string) (string, error)` — FileGet is a convenience function that reads a file from the mock filesystem. + - `func (m *MockMedium) FileSet(path, content string) error` — FileSet is a convenience function that writes a file to the mock filesystem. + - `func (m *MockMedium) Delete(path string) error` — Delete removes a file or empty directory from the mock filesystem. + - `func (m *MockMedium) DeleteAll(path string) error` — DeleteAll removes a file or directory and all contents from the mock filesystem. + - `func (m *MockMedium) Rename(oldPath, newPath string) error` — Rename moves a file or directory in the mock filesystem. + - `func (m *MockMedium) Open(path string) (fs.File, error)` — Open opens a file from the mock filesystem. + - `func (m *MockMedium) Create(path string) (goio.WriteCloser, error)` — Create creates a file in the mock filesystem. + - `func (m *MockMedium) Append(path string) (goio.WriteCloser, error)` — Append opens a file for appending in the mock filesystem. 
+ - `func (m *MockMedium) ReadStream(path string) (goio.ReadCloser, error)` — ReadStream returns a reader for the file content in the mock filesystem. + - `func (m *MockMedium) WriteStream(path string) (goio.WriteCloser, error)` — WriteStream returns a writer for the file content in the mock filesystem. + - `func (m *MockMedium) List(path string) ([]fs.DirEntry, error)` — List returns directory entries for the mock filesystem. + - `func (m *MockMedium) Stat(path string) (fs.FileInfo, error)` — Stat returns file information for the mock filesystem. + - `func (m *MockMedium) Exists(path string) bool` — Exists checks if a path exists in the mock filesystem. + - `func (m *MockMedium) IsDir(path string) bool` — IsDir checks if a path is a directory in the mock filesystem. + +### MockFile +- **File:** io.go +- **Purpose:** MockFile implements fs.File for MockMedium. +- **Fields:** + - `name string` — No doc comment in source. + - `content []byte` — No doc comment in source. + - `offset int64` — No doc comment in source. +- **Associated Methods:** + - `func (f *MockFile) Stat() (fs.FileInfo, error)` — No doc comment in source. + - `func (f *MockFile) Read(b []byte) (int, error)` — No doc comment in source. + - `func (f *MockFile) Close() error` — No doc comment in source. + +### MockWriteCloser +- **File:** io.go +- **Purpose:** MockWriteCloser implements WriteCloser for MockMedium. +- **Fields:** + - `medium *MockMedium` — No doc comment in source. + - `path string` — No doc comment in source. + - `data []byte` — No doc comment in source. +- **Associated Methods:** + - `func (w *MockWriteCloser) Write(p []byte) (int, error)` — No doc comment in source. + - `func (w *MockWriteCloser) Close() error` — No doc comment in source. + +## Functions + +### init +- **File:** io.go +- **Signature:** `func init()` +- **Purpose:** No doc comment in source. 
+ +### NewSandboxed +- **File:** io.go +- **Signature:** `func NewSandboxed(root string) (Medium, error)` +- **Purpose:** NewSandboxed creates a new Medium sandboxed to the given root directory. All file operations are restricted to paths within the root. The root directory will be created if it doesn't exist. + +### Read +- **File:** io.go +- **Signature:** `func Read(m Medium, path string) (string, error)` +- **Purpose:** Read retrieves the content of a file from the given medium. + +### Write +- **File:** io.go +- **Signature:** `func Write(m Medium, path, content string) error` +- **Purpose:** Write saves the given content to a file in the given medium. + +### ReadStream +- **File:** io.go +- **Signature:** `func ReadStream(m Medium, path string) (goio.ReadCloser, error)` +- **Purpose:** ReadStream returns a reader for the file content from the given medium. + +### WriteStream +- **File:** io.go +- **Signature:** `func WriteStream(m Medium, path string) (goio.WriteCloser, error)` +- **Purpose:** WriteStream returns a writer for the file content in the given medium. + +### EnsureDir +- **File:** io.go +- **Signature:** `func EnsureDir(m Medium, path string) error` +- **Purpose:** EnsureDir makes sure a directory exists in the given medium. + +### IsFile +- **File:** io.go +- **Signature:** `func IsFile(m Medium, path string) bool` +- **Purpose:** IsFile checks if a path exists and is a regular file in the given medium. + +### Copy +- **File:** io.go +- **Signature:** `func Copy(src Medium, srcPath string, dst Medium, dstPath string) error` +- **Purpose:** Copy copies a file from one medium to another. + +### NewMockMedium +- **File:** io.go +- **Signature:** `func NewMockMedium() *MockMedium` +- **Purpose:** NewMockMedium creates a new MockMedium instance. 
+ diff --git a/specs/datanode/RFC.md b/specs/datanode/RFC.md new file mode 100644 index 0000000..ea79d64 --- /dev/null +++ b/specs/datanode/RFC.md @@ -0,0 +1,106 @@ +# datanode + +**Import:** `dappco.re/go/core/io/datanode` +**Files:** 1 + +Package datanode provides an in-memory io.Medium backed by Borg's DataNode. + +DataNode is an in-memory fs.FS that serializes to tar. Wrapping it as a +Medium lets any code that works with io.Medium transparently operate on +an in-memory filesystem that can be snapshotted, shipped as a crash report, +or wrapped in a TIM container for runc execution. + +## Types + +### Medium +- **File:** client.go +- **Purpose:** Medium is an in-memory storage backend backed by a Borg DataNode. All paths are relative (no leading slash). Thread-safe via RWMutex. +- **Fields:** + - `dn *borgdatanode.DataNode` — No doc comment in source. + - `dirs map[string]bool` — explicit directory tracking + - `mu sync.RWMutex` — No doc comment in source. +- **Associated Methods:** + - `func (m *Medium) Snapshot() ([]byte, error)` — Snapshot serializes the entire filesystem to a tarball. Use this for crash reports, workspace packaging, or TIM creation. + - `func (m *Medium) Restore(data []byte) error` — Restore replaces the filesystem contents from a tarball. + - `func (m *Medium) DataNode() *borgdatanode.DataNode` — DataNode returns the underlying Borg DataNode. Use this to wrap the filesystem in a TIM container. + - `func (m *Medium) Read(p string) (string, error)` — No doc comment in source. + - `func (m *Medium) Write(p, content string) error` — No doc comment in source. + - `func (m *Medium) WriteMode(p, content string, mode os.FileMode) error` — No doc comment in source. + - `func (m *Medium) EnsureDir(p string) error` — No doc comment in source. + - `func (m *Medium) ensureDirsLocked(p string)` — ensureDirsLocked marks a directory and all ancestors as existing. Caller must hold m.mu. 
+ - `func (m *Medium) IsFile(p string) bool` — No doc comment in source. + - `func (m *Medium) FileGet(p string) (string, error)` — No doc comment in source. + - `func (m *Medium) FileSet(p, content string) error` — No doc comment in source. + - `func (m *Medium) Delete(p string) error` — No doc comment in source. + - `func (m *Medium) DeleteAll(p string) error` — No doc comment in source. + - `func (m *Medium) Rename(oldPath, newPath string) error` — No doc comment in source. + - `func (m *Medium) List(p string) ([]fs.DirEntry, error)` — No doc comment in source. + - `func (m *Medium) Stat(p string) (fs.FileInfo, error)` — No doc comment in source. + - `func (m *Medium) Open(p string) (fs.File, error)` — No doc comment in source. + - `func (m *Medium) Create(p string) (goio.WriteCloser, error)` — No doc comment in source. + - `func (m *Medium) Append(p string) (goio.WriteCloser, error)` — No doc comment in source. + - `func (m *Medium) ReadStream(p string) (goio.ReadCloser, error)` — No doc comment in source. + - `func (m *Medium) WriteStream(p string) (goio.WriteCloser, error)` — No doc comment in source. + - `func (m *Medium) Exists(p string) bool` — No doc comment in source. + - `func (m *Medium) IsDir(p string) bool` — No doc comment in source. + - `func (m *Medium) hasPrefixLocked(prefix string) (bool, error)` — hasPrefixLocked checks if any file path starts with prefix. Caller holds lock. + - `func (m *Medium) collectAllLocked() ([]string, error)` — collectAllLocked returns all file paths in the DataNode. Caller holds lock. + - `func (m *Medium) readFileLocked(name string) ([]byte, error)` — No doc comment in source. + - `func (m *Medium) removeFileLocked(target string) error` — removeFileLocked removes a single file by rebuilding the DataNode. This is necessary because Borg's DataNode doesn't expose a Remove method. Caller must hold m.mu write lock. + +### writeCloser +- **File:** client.go +- **Purpose:** No doc comment in source. 
+- **Fields:** + - `m *Medium` — No doc comment in source. + - `path string` — No doc comment in source. + - `buf []byte` — No doc comment in source. +- **Associated Methods:** + - `func (w *writeCloser) Write(p []byte) (int, error)` — No doc comment in source. + - `func (w *writeCloser) Close() error` — No doc comment in source. + +### dirEntry +- **File:** client.go +- **Purpose:** No doc comment in source. +- **Fields:** + - `name string` — No doc comment in source. +- **Associated Methods:** + - `func (d *dirEntry) Name() string` — No doc comment in source. + - `func (d *dirEntry) IsDir() bool` — No doc comment in source. + - `func (d *dirEntry) Type() fs.FileMode` — No doc comment in source. + - `func (d *dirEntry) Info() (fs.FileInfo, error)` — No doc comment in source. + +### fileInfo +- **File:** client.go +- **Purpose:** No doc comment in source. +- **Fields:** + - `name string` — No doc comment in source. + - `size int64` — No doc comment in source. + - `mode fs.FileMode` — No doc comment in source. + - `modTime time.Time` — No doc comment in source. + - `isDir bool` — No doc comment in source. +- **Associated Methods:** + - `func (fi *fileInfo) Name() string` — No doc comment in source. + - `func (fi *fileInfo) Size() int64` — No doc comment in source. + - `func (fi *fileInfo) Mode() fs.FileMode` — No doc comment in source. + - `func (fi *fileInfo) ModTime() time.Time` — No doc comment in source. + - `func (fi *fileInfo) IsDir() bool` — No doc comment in source. + - `func (fi *fileInfo) Sys() any` — No doc comment in source. + +## Functions + +### New +- **File:** client.go +- **Signature:** `func New() *Medium` +- **Purpose:** New creates a new empty DataNode Medium. + +### FromTar +- **File:** client.go +- **Signature:** `func FromTar(data []byte) (*Medium, error)` +- **Purpose:** FromTar creates a Medium from a tarball, restoring all files. 
+ +### clean +- **File:** client.go +- **Signature:** `func clean(p string) string` +- **Purpose:** clean normalises a path: strips leading slash, cleans traversal. + diff --git a/specs/local/RFC.md b/specs/local/RFC.md new file mode 100644 index 0000000..180f867 --- /dev/null +++ b/specs/local/RFC.md @@ -0,0 +1,104 @@ +# local + +**Import:** `dappco.re/go/core/io/local` +**Files:** 1 + +Package local provides a local filesystem implementation of the io.Medium interface. + +## Types + +### Medium +- **File:** client.go +- **Purpose:** Medium is a local filesystem storage backend. +- **Fields:** + - `root string` — No doc comment in source. +- **Associated Methods:** + - `func (m *Medium) path(p string) string` — path sanitises and returns the full path. Absolute paths are sandboxed under root (unless root is "/"). + - `func (m *Medium) validatePath(p string) (string, error)` — validatePath ensures the path is within the sandbox, following symlinks if they exist. + - `func (m *Medium) Read(p string) (string, error)` — Read returns file contents as string. + - `func (m *Medium) Write(p, content string) error` — Write saves content to file, creating parent directories as needed. Files are created with mode 0644. For sensitive files (keys, secrets), use WriteMode with 0600. + - `func (m *Medium) WriteMode(p, content string, mode os.FileMode) error` — WriteMode saves content to file with explicit permissions. Use 0600 for sensitive files (encryption output, private keys, auth hashes). + - `func (m *Medium) EnsureDir(p string) error` — EnsureDir creates directory if it doesn't exist. + - `func (m *Medium) IsDir(p string) bool` — IsDir returns true if path is a directory. + - `func (m *Medium) IsFile(p string) bool` — IsFile returns true if path is a regular file. + - `func (m *Medium) Exists(p string) bool` — Exists returns true if path exists. + - `func (m *Medium) List(p string) ([]fs.DirEntry, error)` — List returns directory entries. 
+ - `func (m *Medium) Stat(p string) (fs.FileInfo, error)` — Stat returns file info. + - `func (m *Medium) Open(p string) (fs.File, error)` — Open opens the named file for reading. + - `func (m *Medium) Create(p string) (goio.WriteCloser, error)` — Create creates or truncates the named file. + - `func (m *Medium) Append(p string) (goio.WriteCloser, error)` — Append opens the named file for appending, creating it if it doesn't exist. + - `func (m *Medium) ReadStream(path string) (goio.ReadCloser, error)` — ReadStream returns a reader for the file content. This is a convenience wrapper around Open that exposes a streaming-oriented API, as required by the io.Medium interface, while Open provides the more general filesystem-level operation. Both methods are kept for semantic clarity and backward compatibility. + - `func (m *Medium) WriteStream(path string) (goio.WriteCloser, error)` — WriteStream returns a writer for the file content. This is a convenience wrapper around Create that exposes a streaming-oriented API, as required by the io.Medium interface, while Create provides the more general filesystem-level operation. Both methods are kept for semantic clarity and backward compatibility. + - `func (m *Medium) Delete(p string) error` — Delete removes a file or empty directory. + - `func (m *Medium) DeleteAll(p string) error` — DeleteAll removes a file or directory recursively. + - `func (m *Medium) Rename(oldPath, newPath string) error` — Rename moves a file or directory. + - `func (m *Medium) FileGet(p string) (string, error)` — FileGet is an alias for Read. + - `func (m *Medium) FileSet(p, content string) error` — FileSet is an alias for Write. + +## Functions + +### New +- **File:** client.go +- **Signature:** `func New(root string) (*Medium, error)` +- **Purpose:** New creates a new local Medium rooted at the given directory. Pass "/" for full filesystem access, or a specific path to sandbox. 
+ +### dirSeparator +- **File:** client.go +- **Signature:** `func dirSeparator() string` +- **Purpose:** No doc comment in source. + +### normalisePath +- **File:** client.go +- **Signature:** `func normalisePath(p string) string` +- **Purpose:** No doc comment in source. + +### currentWorkingDir +- **File:** client.go +- **Signature:** `func currentWorkingDir() string` +- **Purpose:** No doc comment in source. + +### absolutePath +- **File:** client.go +- **Signature:** `func absolutePath(p string) string` +- **Purpose:** No doc comment in source. + +### cleanSandboxPath +- **File:** client.go +- **Signature:** `func cleanSandboxPath(p string) string` +- **Purpose:** No doc comment in source. + +### splitPathParts +- **File:** client.go +- **Signature:** `func splitPathParts(p string) []string` +- **Purpose:** No doc comment in source. + +### resolveSymlinksPath +- **File:** client.go +- **Signature:** `func resolveSymlinksPath(p string) (string, error)` +- **Purpose:** No doc comment in source. + +### resolveSymlinksRecursive +- **File:** client.go +- **Signature:** `func resolveSymlinksRecursive(p string, seen map[string]struct{}) (string, error)` +- **Purpose:** No doc comment in source. + +### isWithinRoot +- **File:** client.go +- **Signature:** `func isWithinRoot(root, target string) bool` +- **Purpose:** No doc comment in source. + +### canonicalPath +- **File:** client.go +- **Signature:** `func canonicalPath(p string) string` +- **Purpose:** No doc comment in source. + +### isProtectedPath +- **File:** client.go +- **Signature:** `func isProtectedPath(full string) bool` +- **Purpose:** No doc comment in source. + +### logSandboxEscape +- **File:** client.go +- **Signature:** `func logSandboxEscape(root, path, attempted string)` +- **Purpose:** No doc comment in source. 
+ diff --git a/specs/node/RFC.md b/specs/node/RFC.md new file mode 100644 index 0000000..3c16698 --- /dev/null +++ b/specs/node/RFC.md @@ -0,0 +1,138 @@ +# node + +**Import:** `dappco.re/go/core/io/node` +**Files:** 1 + +Package node provides an in-memory filesystem implementation of io.Medium +ported from Borg's DataNode. It stores files in memory with implicit +directory structure and supports tar serialisation. + +## Types + +### Node +- **File:** node.go +- **Purpose:** Node is an in-memory filesystem that implements coreio.Node (and therefore coreio.Medium). Directories are implicit -- they exist whenever a file path contains a "/". +- **Fields:** + - `files map[string]*dataFile` — No doc comment in source. +- **Associated Methods:** + - `func (n *Node) AddData(name string, content []byte)` — AddData stages content in the in-memory filesystem. + - `func (n *Node) ToTar() ([]byte, error)` — ToTar serialises the entire in-memory tree to a tar archive. + - `func (n *Node) LoadTar(data []byte) error` — LoadTar replaces the in-memory tree with the contents of a tar archive. + - `func (n *Node) WalkNode(root string, fn fs.WalkDirFunc) error` — WalkNode walks the in-memory tree, calling fn for each entry. + - `func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error` — Walk walks the in-memory tree with optional WalkOptions. + - `func (n *Node) ReadFile(name string) ([]byte, error)` — ReadFile returns the content of the named file as a byte slice. Implements fs.ReadFileFS. + - `func (n *Node) CopyFile(src, dst string, perm fs.FileMode) error` — CopyFile copies a file from the in-memory tree to the local filesystem. + - `func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error` — CopyTo copies a file (or directory tree) from the node to any Medium. + - `func (n *Node) Open(name string) (fs.File, error)` — Open opens a file from the Node. Implements fs.FS. 
+ - `func (n *Node) Stat(name string) (fs.FileInfo, error)` — Stat returns file information for the given path. + - `func (n *Node) ReadDir(name string) ([]fs.DirEntry, error)` — ReadDir reads and returns all directory entries for the named directory. + - `func (n *Node) Read(p string) (string, error)` — Read retrieves the content of a file as a string. + - `func (n *Node) Write(p, content string) error` — Write saves the given content to a file, overwriting it if it exists. + - `func (n *Node) WriteMode(p, content string, mode os.FileMode) error` — WriteMode saves content with explicit permissions (no-op for in-memory node). + - `func (n *Node) FileGet(p string) (string, error)` — FileGet is an alias for Read. + - `func (n *Node) FileSet(p, content string) error` — FileSet is an alias for Write. + - `func (n *Node) EnsureDir(_ string) error` — EnsureDir is a no-op because directories are implicit in Node. + - `func (n *Node) Exists(p string) bool` — Exists checks if a path exists (file or directory). + - `func (n *Node) IsFile(p string) bool` — IsFile checks if a path exists and is a regular file. + - `func (n *Node) IsDir(p string) bool` — IsDir checks if a path exists and is a directory. + - `func (n *Node) Delete(p string) error` — Delete removes a single file. + - `func (n *Node) DeleteAll(p string) error` — DeleteAll removes a file or directory and all children. + - `func (n *Node) Rename(oldPath, newPath string) error` — Rename moves a file from oldPath to newPath. + - `func (n *Node) List(p string) ([]fs.DirEntry, error)` — List returns directory entries for the given path. + - `func (n *Node) Create(p string) (goio.WriteCloser, error)` — Create creates or truncates the named file, returning a WriteCloser. Content is committed to the Node on Close. + - `func (n *Node) Append(p string) (goio.WriteCloser, error)` — Append opens the named file for appending, creating it if needed. Content is committed to the Node on Close. 
+ - `func (n *Node) ReadStream(p string) (goio.ReadCloser, error)` — ReadStream returns a ReadCloser for the file content. + - `func (n *Node) WriteStream(p string) (goio.WriteCloser, error)` — WriteStream returns a WriteCloser for the file content. + +### WalkOptions +- **File:** node.go +- **Purpose:** WalkOptions configures the behaviour of Walk. +- **Fields:** + - `MaxDepth int` — MaxDepth limits how many directory levels to descend. 0 means unlimited. + - `Filter func(path string, d fs.DirEntry) bool` — Filter, if set, is called for each entry. Return true to include the entry (and descend into it if it is a directory). + - `SkipErrors bool` — SkipErrors suppresses errors (e.g. nonexistent root) instead of propagating them through the callback. + +### nodeWriter +- **File:** node.go +- **Purpose:** nodeWriter buffers writes and commits them to the Node on Close. +- **Fields:** + - `node *Node` — No doc comment in source. + - `path string` — No doc comment in source. + - `buf []byte` — No doc comment in source. +- **Associated Methods:** + - `func (w *nodeWriter) Write(p []byte) (int, error)` — No doc comment in source. + - `func (w *nodeWriter) Close() error` — No doc comment in source. + +### dataFile +- **File:** node.go +- **Purpose:** dataFile represents a file in the Node. +- **Fields:** + - `name string` — No doc comment in source. + - `content []byte` — No doc comment in source. + - `modTime time.Time` — No doc comment in source. +- **Associated Methods:** + - `func (d *dataFile) Stat() (fs.FileInfo, error)` — No doc comment in source. + - `func (d *dataFile) Read(_ []byte) (int, error)` — No doc comment in source. + - `func (d *dataFile) Close() error` — No doc comment in source. + +### dataFileInfo +- **File:** node.go +- **Purpose:** dataFileInfo implements fs.FileInfo for a dataFile. +- **Fields:** + - `file *dataFile` — No doc comment in source. +- **Associated Methods:** + - `func (d *dataFileInfo) Name() string` — No doc comment in source. 
+ - `func (d *dataFileInfo) Size() int64` — No doc comment in source. + - `func (d *dataFileInfo) Mode() fs.FileMode` — No doc comment in source. + - `func (d *dataFileInfo) ModTime() time.Time` — No doc comment in source. + - `func (d *dataFileInfo) IsDir() bool` — No doc comment in source. + - `func (d *dataFileInfo) Sys() any` — No doc comment in source. + +### dataFileReader +- **File:** node.go +- **Purpose:** dataFileReader implements fs.File for reading a dataFile. +- **Fields:** + - `file *dataFile` — No doc comment in source. + - `reader *bytes.Reader` — No doc comment in source. +- **Associated Methods:** + - `func (d *dataFileReader) Stat() (fs.FileInfo, error)` — No doc comment in source. + - `func (d *dataFileReader) Read(p []byte) (int, error)` — No doc comment in source. + - `func (d *dataFileReader) Close() error` — No doc comment in source. + +### dirInfo +- **File:** node.go +- **Purpose:** dirInfo implements fs.FileInfo for an implicit directory. +- **Fields:** + - `name string` — No doc comment in source. + - `modTime time.Time` — No doc comment in source. +- **Associated Methods:** + - `func (d *dirInfo) Name() string` — No doc comment in source. + - `func (d *dirInfo) Size() int64` — No doc comment in source. + - `func (d *dirInfo) Mode() fs.FileMode` — No doc comment in source. + - `func (d *dirInfo) ModTime() time.Time` — No doc comment in source. + - `func (d *dirInfo) IsDir() bool` — No doc comment in source. + - `func (d *dirInfo) Sys() any` — No doc comment in source. + +### dirFile +- **File:** node.go +- **Purpose:** dirFile implements fs.File for a directory. +- **Fields:** + - `path string` — No doc comment in source. + - `modTime time.Time` — No doc comment in source. +- **Associated Methods:** + - `func (d *dirFile) Stat() (fs.FileInfo, error)` — No doc comment in source. + - `func (d *dirFile) Read([]byte) (int, error)` — No doc comment in source. + - `func (d *dirFile) Close() error` — No doc comment in source. 
+ +## Functions + +### New +- **File:** node.go +- **Signature:** `func New() *Node` +- **Purpose:** New creates a new, empty Node. + +### FromTar +- **File:** node.go +- **Signature:** `func FromTar(data []byte) (*Node, error)` +- **Purpose:** FromTar creates a new Node from a tar archive. + diff --git a/specs/s3/RFC.md b/specs/s3/RFC.md new file mode 100644 index 0000000..2a917e3 --- /dev/null +++ b/specs/s3/RFC.md @@ -0,0 +1,137 @@ +# s3 + +**Import:** `dappco.re/go/core/io/s3` +**Files:** 1 + +Package s3 provides an S3-backed implementation of the io.Medium interface. + +## Types + +### s3API +- **File:** s3.go +- **Purpose:** s3API is the subset of the S3 client API used by this package. This allows for interface-based mocking in tests. +- **Methods:** + - `GetObject func(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error)` — No doc comment in source. + - `PutObject func(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error)` — No doc comment in source. + - `DeleteObject func(ctx context.Context, params *s3.DeleteObjectInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectOutput, error)` — No doc comment in source. + - `DeleteObjects func(ctx context.Context, params *s3.DeleteObjectsInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error)` — No doc comment in source. + - `HeadObject func(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error)` — No doc comment in source. + - `ListObjectsV2 func(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error)` — No doc comment in source. + - `CopyObject func(ctx context.Context, params *s3.CopyObjectInput, optFns ...func(*s3.Options)) (*s3.CopyObjectOutput, error)` — No doc comment in source. 
+ +### Medium +- **File:** s3.go +- **Purpose:** Medium is an S3-backed storage backend implementing the io.Medium interface. +- **Fields:** + - `client s3API` — No doc comment in source. + - `bucket string` — No doc comment in source. + - `prefix string` — No doc comment in source. +- **Associated Methods:** + - `func (m *Medium) key(p string) string` — key returns the full S3 object key for a given path. + - `func (m *Medium) Read(p string) (string, error)` — Read retrieves the content of a file as a string. + - `func (m *Medium) Write(p, content string) error` — Write saves the given content to a file, overwriting it if it exists. + - `func (m *Medium) EnsureDir(_ string) error` — EnsureDir is a no-op for S3 (S3 has no real directories). + - `func (m *Medium) IsFile(p string) bool` — IsFile checks if a path exists and is a regular file (not a "directory" prefix). + - `func (m *Medium) FileGet(p string) (string, error)` — FileGet is a convenience function that reads a file from the medium. + - `func (m *Medium) FileSet(p, content string) error` — FileSet is a convenience function that writes a file to the medium. + - `func (m *Medium) Delete(p string) error` — Delete removes a single object. + - `func (m *Medium) DeleteAll(p string) error` — DeleteAll removes all objects under the given prefix. + - `func (m *Medium) Rename(oldPath, newPath string) error` — Rename moves an object by copying then deleting the original. + - `func (m *Medium) List(p string) ([]fs.DirEntry, error)` — List returns directory entries for the given path using ListObjectsV2 with delimiter. + - `func (m *Medium) Stat(p string) (fs.FileInfo, error)` — Stat returns file information for the given path using HeadObject. + - `func (m *Medium) Open(p string) (fs.File, error)` — Open opens the named file for reading. + - `func (m *Medium) Create(p string) (goio.WriteCloser, error)` — Create creates or truncates the named file. Returns a writer that uploads the content on Close. 
+ - `func (m *Medium) Append(p string) (goio.WriteCloser, error)` — Append opens the named file for appending. It downloads the existing content (if any) and re-uploads the combined content on Close. + - `func (m *Medium) ReadStream(p string) (goio.ReadCloser, error)` — ReadStream returns a reader for the file content. + - `func (m *Medium) WriteStream(p string) (goio.WriteCloser, error)` — WriteStream returns a writer for the file content. Content is uploaded on Close. + - `func (m *Medium) Exists(p string) bool` — Exists checks if a path exists (file or directory prefix). + - `func (m *Medium) IsDir(p string) bool` — IsDir checks if a path exists and is a directory (has objects under it as a prefix). + +### Option +- **File:** s3.go +- **Purpose:** Option configures a Medium. +- **Underlying:** `func(*Medium)` + +### fileInfo +- **File:** s3.go +- **Purpose:** fileInfo implements fs.FileInfo for S3 objects. +- **Fields:** + - `name string` — No doc comment in source. + - `size int64` — No doc comment in source. + - `mode fs.FileMode` — No doc comment in source. + - `modTime time.Time` — No doc comment in source. + - `isDir bool` — No doc comment in source. +- **Associated Methods:** + - `func (fi *fileInfo) Name() string` — No doc comment in source. + - `func (fi *fileInfo) Size() int64` — No doc comment in source. + - `func (fi *fileInfo) Mode() fs.FileMode` — No doc comment in source. + - `func (fi *fileInfo) ModTime() time.Time` — No doc comment in source. + - `func (fi *fileInfo) IsDir() bool` — No doc comment in source. + - `func (fi *fileInfo) Sys() any` — No doc comment in source. + +### dirEntry +- **File:** s3.go +- **Purpose:** dirEntry implements fs.DirEntry for S3 listings. +- **Fields:** + - `name string` — No doc comment in source. + - `isDir bool` — No doc comment in source. + - `mode fs.FileMode` — No doc comment in source. + - `info fs.FileInfo` — No doc comment in source. 
+- **Associated Methods:** + - `func (de *dirEntry) Name() string` — No doc comment in source. + - `func (de *dirEntry) IsDir() bool` — No doc comment in source. + - `func (de *dirEntry) Type() fs.FileMode` — No doc comment in source. + - `func (de *dirEntry) Info() (fs.FileInfo, error)` — No doc comment in source. + +### s3File +- **File:** s3.go +- **Purpose:** s3File implements fs.File for S3 objects. +- **Fields:** + - `name string` — No doc comment in source. + - `content []byte` — No doc comment in source. + - `offset int64` — No doc comment in source. + - `size int64` — No doc comment in source. + - `modTime time.Time` — No doc comment in source. +- **Associated Methods:** + - `func (f *s3File) Stat() (fs.FileInfo, error)` — No doc comment in source. + - `func (f *s3File) Read(b []byte) (int, error)` — No doc comment in source. + - `func (f *s3File) Close() error` — No doc comment in source. + +### s3WriteCloser +- **File:** s3.go +- **Purpose:** s3WriteCloser buffers writes and uploads to S3 on Close. +- **Fields:** + - `medium *Medium` — No doc comment in source. + - `key string` — No doc comment in source. + - `data []byte` — No doc comment in source. +- **Associated Methods:** + - `func (w *s3WriteCloser) Write(p []byte) (int, error)` — No doc comment in source. + - `func (w *s3WriteCloser) Close() error` — No doc comment in source. + +## Functions + +### deleteObjectsError +- **File:** s3.go +- **Signature:** `func deleteObjectsError(prefix string, errs []types.Error) error` +- **Purpose:** No doc comment in source. + +### WithPrefix +- **File:** s3.go +- **Signature:** `func WithPrefix(prefix string) Option` +- **Purpose:** WithPrefix sets an optional key prefix for all operations. + +### WithClient +- **File:** s3.go +- **Signature:** `func WithClient(client *s3.Client) Option` +- **Purpose:** WithClient sets the S3 client for dependency injection. 
+ +### withAPI +- **File:** s3.go +- **Signature:** `func withAPI(api s3API) Option` +- **Purpose:** withAPI sets the s3API interface directly (for testing with mocks). + +### New +- **File:** s3.go +- **Signature:** `func New(bucket string, opts ...Option) (*Medium, error)` +- **Purpose:** New creates a new S3 Medium for the given bucket. + diff --git a/specs/sigil/RFC.md b/specs/sigil/RFC.md new file mode 100644 index 0000000..4e6d8a8 --- /dev/null +++ b/specs/sigil/RFC.md @@ -0,0 +1,171 @@ +# sigil + +**Import:** `dappco.re/go/core/io/sigil` +**Files:** 3 + +This file implements the Pre-Obfuscation Layer Protocol with +XChaCha20-Poly1305 encryption. The protocol applies a reversible transformation +to plaintext BEFORE it reaches CPU encryption routines, providing defense-in-depth +against side-channel attacks. + +The encryption flow is: + + plaintext -> obfuscate(nonce) -> encrypt -> [nonce || ciphertext || tag] + +The decryption flow is: + + [nonce || ciphertext || tag] -> decrypt -> deobfuscate(nonce) -> plaintext + +Package sigil provides the Sigil transformation framework for composable, +reversible data transformations. + +Sigils are the core abstraction - each sigil implements a specific transformation +(encoding, compression, hashing, encryption) with a uniform interface. Sigils can +be chained together to create transformation pipelines. + +Example usage: + + hexSigil, _ := sigil.NewSigil("hex") + base64Sigil, _ := sigil.NewSigil("base64") + result, _ := sigil.Transmute(data, []sigil.Sigil{hexSigil, base64Sigil}) + +## Types + +### PreObfuscator +- **File:** crypto_sigil.go +- **Purpose:** PreObfuscator applies a reversible transformation to data before encryption. This ensures that raw plaintext patterns are never sent directly to CPU encryption routines, providing defense against side-channel attacks. 
Implementations must be deterministic: given the same entropy, the transformation must be perfectly reversible, i.e. `Deobfuscate(Obfuscate(x, e), e) == x`.
+- **Methods:**
+  - `Obfuscate func(data []byte, entropy []byte) []byte` — Obfuscate transforms plaintext before encryption using the provided entropy. The entropy is typically the encryption nonce, ensuring the transformation is unique per-encryption without additional random generation.
+  - `Deobfuscate func(data []byte, entropy []byte) []byte` — Deobfuscate reverses the transformation after decryption. Must be called with the same entropy used during Obfuscate.
+
+### XORObfuscator
+- **File:** crypto_sigil.go
+- **Purpose:** XORObfuscator performs XOR-based obfuscation using an entropy-derived key stream. The key stream is generated using SHA-256 in counter mode: `keyStream[i*32:(i+1)*32] = SHA256(entropy || BigEndian64(i))`. This provides a cryptographically uniform key stream that decorrelates plaintext patterns from the data seen by the encryption routine. XOR is symmetric, so obfuscation and deobfuscation use the same operation.
+- **Fields:**
+  - None.
+- **Associated Methods:**
+  - `func (x *XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte` — Obfuscate XORs the data with a key stream derived from the entropy.
+  - `func (x *XORObfuscator) Deobfuscate(data []byte, entropy []byte) []byte` — Deobfuscate reverses the XOR transformation (XOR is symmetric).
+  - `func (x *XORObfuscator) transform(data []byte, entropy []byte) []byte` — transform applies XOR with an entropy-derived key stream.
+  - `func (x *XORObfuscator) deriveKeyStream(entropy []byte, length int) []byte` — deriveKeyStream creates a deterministic key stream from entropy.
+
+### ShuffleMaskObfuscator
+- **File:** crypto_sigil.go
+- **Purpose:** ShuffleMaskObfuscator provides stronger obfuscation through byte shuffling and masking. The obfuscation process is: (1) generate a mask from entropy using SHA-256 in counter mode; (2) XOR the data with the mask; (3) generate a deterministic permutation using a Fisher-Yates shuffle; (4) reorder bytes according to the permutation. This provides both value transformation (XOR mask) and position transformation (shuffle), making pattern analysis more difficult than XOR alone.
+- **Fields:**
+  - None.
+- **Associated Methods:**
+  - `func (s *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte` — Obfuscate shuffles bytes and applies a mask derived from entropy.
+  - `func (s *ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte) []byte` — Deobfuscate reverses the shuffle and mask operations.
+  - `func (s *ShuffleMaskObfuscator) generatePermutation(entropy []byte, length int) []int` — generatePermutation creates a deterministic permutation from entropy.
+  - `func (s *ShuffleMaskObfuscator) deriveMask(entropy []byte, length int) []byte` — deriveMask creates a mask byte array from entropy.
+
+### ChaChaPolySigil
+- **File:** crypto_sigil.go
+- **Purpose:** ChaChaPolySigil is a Sigil that encrypts/decrypts data using ChaCha20-Poly1305. It applies pre-obfuscation before encryption to ensure raw plaintext never goes directly to CPU encryption routines. The output format is: `[24-byte nonce][encrypted(obfuscated(plaintext))]`. Unlike demo implementations, the nonce is ONLY embedded in the ciphertext, not exposed separately in headers.
+- **Fields:**
+  - `Key []byte` — No doc comment in source.
+  - `Obfuscator PreObfuscator` — No doc comment in source.
+  - `randReader io.Reader` — for testing injection
+- **Associated Methods:**
+  - `func (s *ChaChaPolySigil) In(data []byte) ([]byte, error)` — In encrypts the data with pre-obfuscation. The flow is: plaintext -> obfuscate -> encrypt
+  - `func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error)` — Out decrypts the data and reverses obfuscation. The flow is: decrypt -> deobfuscate -> plaintext
+
+### Sigil
+- **File:** sigil.go
+- **Purpose:** Sigil defines the interface for a data transformer. 
A Sigil represents a single transformation unit that can be applied to byte data. Sigils may be reversible (encoding, compression, encryption) or irreversible (hashing). For reversible sigils, `Out(In(x)) == x` for all valid x. For irreversible sigils, Out returns the input unchanged. For symmetric sigils, `In(x) == Out(x)`. Implementations must handle nil input by returning nil without error, and empty input by returning an empty slice without error.
+- **Methods:**
+  - `In func(data []byte) ([]byte, error)` — In applies the forward transformation to the data. For encoding sigils, this encodes the data. For compression sigils, this compresses the data. For hash sigils, this computes the digest.
+  - `Out func(data []byte) ([]byte, error)` — Out applies the reverse transformation to the data. For reversible sigils, this recovers the original data. For irreversible sigils (e.g., hashing), this returns the input unchanged.
+
+### ReverseSigil
+- **File:** sigils.go
+- **Purpose:** ReverseSigil is a Sigil that reverses the bytes of the payload. It is a symmetrical Sigil, meaning that the In and Out methods perform the same operation.
+- **Fields:**
+  - None.
+- **Associated Methods:**
+  - `func (s *ReverseSigil) In(data []byte) ([]byte, error)` — In reverses the bytes of the data.
+  - `func (s *ReverseSigil) Out(data []byte) ([]byte, error)` — Out reverses the bytes of the data.
+
+### HexSigil
+- **File:** sigils.go
+- **Purpose:** HexSigil is a Sigil that encodes/decodes data to/from hexadecimal. The In method encodes the data, and the Out method decodes it.
+- **Fields:**
+  - None.
+- **Associated Methods:**
+  - `func (s *HexSigil) In(data []byte) ([]byte, error)` — In encodes the data to hexadecimal.
+  - `func (s *HexSigil) Out(data []byte) ([]byte, error)` — Out decodes the data from hexadecimal.
+
+### Base64Sigil
+- **File:** sigils.go
+- **Purpose:** Base64Sigil is a Sigil that encodes/decodes data to/from base64. 
The In method encodes the data, and the Out method decodes it. +- **Fields:** + - None. +- **Associated Methods:** + - `func (s *Base64Sigil) In(data []byte) ([]byte, error)` — In encodes the data to base64. + - `func (s *Base64Sigil) Out(data []byte) ([]byte, error)` — Out decodes the data from base64. + +### GzipSigil +- **File:** sigils.go +- **Purpose:** GzipSigil is a Sigil that compresses/decompresses data using gzip. The In method compresses the data, and the Out method decompresses it. +- **Fields:** + - `writer io.Writer` — No doc comment in source. +- **Associated Methods:** + - `func (s *GzipSigil) In(data []byte) ([]byte, error)` — In compresses the data using gzip. + - `func (s *GzipSigil) Out(data []byte) ([]byte, error)` — Out decompresses the data using gzip. + +### JSONSigil +- **File:** sigils.go +- **Purpose:** JSONSigil is a Sigil that compacts or indents JSON data. The Out method is a no-op. +- **Fields:** + - `Indent bool` — No doc comment in source. +- **Associated Methods:** + - `func (s *JSONSigil) In(data []byte) ([]byte, error)` — In compacts or indents the JSON data. + - `func (s *JSONSigil) Out(data []byte) ([]byte, error)` — Out is a no-op for JSONSigil. + +### HashSigil +- **File:** sigils.go +- **Purpose:** HashSigil is a Sigil that hashes the data using a specified algorithm. The In method hashes the data, and the Out method is a no-op. +- **Fields:** + - `Hash crypto.Hash` — No doc comment in source. +- **Associated Methods:** + - `func (s *HashSigil) In(data []byte) ([]byte, error)` — In hashes the data. + - `func (s *HashSigil) Out(data []byte) ([]byte, error)` — Out is a no-op for HashSigil. + +## Functions + +### NewChaChaPolySigil +- **File:** crypto_sigil.go +- **Signature:** `func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error)` +- **Purpose:** NewChaChaPolySigil creates a new encryption sigil with the given key. The key must be exactly 32 bytes. 
+ +### NewChaChaPolySigilWithObfuscator +- **File:** crypto_sigil.go +- **Signature:** `func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error)` +- **Purpose:** NewChaChaPolySigilWithObfuscator creates a new encryption sigil with custom obfuscator. + +### GetNonceFromCiphertext +- **File:** crypto_sigil.go +- **Signature:** `func GetNonceFromCiphertext(ciphertext []byte) ([]byte, error)` +- **Purpose:** GetNonceFromCiphertext extracts the nonce from encrypted output. This is provided for debugging/logging purposes only. The nonce should NOT be stored separately in headers. + +### Transmute +- **File:** sigil.go +- **Signature:** `func Transmute(data []byte, sigils []Sigil) ([]byte, error)` +- **Purpose:** Transmute applies a series of sigils to data in sequence. Each sigil's In method is called in order, with the output of one sigil becoming the input of the next. If any sigil returns an error, Transmute stops immediately and returns nil with that error. To reverse a transmutation, call each sigil's Out method in reverse order. + +### Untransmute +- **File:** sigil.go +- **Signature:** `func Untransmute(data []byte, sigils []Sigil) ([]byte, error)` +- **Purpose:** Untransmute reverses a transmutation by applying Out in reverse order. Each sigil's Out method is called in reverse order, with the output of one sigil becoming the input of the next. If any sigil returns an error, Untransmute stops immediately and returns nil with that error. + +### NewHashSigil +- **File:** sigils.go +- **Signature:** `func NewHashSigil(h crypto.Hash) *HashSigil` +- **Purpose:** NewHashSigil creates a new HashSigil. + +### NewSigil +- **File:** sigils.go +- **Signature:** `func NewSigil(name string) (Sigil, error)` +- **Purpose:** NewSigil is a factory function that returns a Sigil based on a string name. It is the primary way to create Sigil instances. 
+ diff --git a/specs/sqlite/RFC.md b/specs/sqlite/RFC.md new file mode 100644 index 0000000..a2b6cae --- /dev/null +++ b/specs/sqlite/RFC.md @@ -0,0 +1,114 @@ +# sqlite + +**Import:** `dappco.re/go/core/io/sqlite` +**Files:** 1 + +Package sqlite provides a SQLite-backed implementation of the io.Medium interface. + +## Types + +### Medium +- **File:** sqlite.go +- **Purpose:** Medium is a SQLite-backed storage backend implementing the io.Medium interface. +- **Fields:** + - `db *sql.DB` — No doc comment in source. + - `table string` — No doc comment in source. +- **Associated Methods:** + - `func (m *Medium) Close() error` — Close closes the underlying database connection. + - `func (m *Medium) Read(p string) (string, error)` — Read retrieves the content of a file as a string. + - `func (m *Medium) Write(p, content string) error` — Write saves the given content to a file, overwriting it if it exists. + - `func (m *Medium) EnsureDir(p string) error` — EnsureDir makes sure a directory exists, creating it if necessary. + - `func (m *Medium) IsFile(p string) bool` — IsFile checks if a path exists and is a regular file. + - `func (m *Medium) FileGet(p string) (string, error)` — FileGet is a convenience function that reads a file from the medium. + - `func (m *Medium) FileSet(p, content string) error` — FileSet is a convenience function that writes a file to the medium. + - `func (m *Medium) Delete(p string) error` — Delete removes a file or empty directory. + - `func (m *Medium) DeleteAll(p string) error` — DeleteAll removes a file or directory and all its contents recursively. + - `func (m *Medium) Rename(oldPath, newPath string) error` — Rename moves a file or directory from oldPath to newPath. + - `func (m *Medium) List(p string) ([]fs.DirEntry, error)` — List returns the directory entries for the given path. + - `func (m *Medium) Stat(p string) (fs.FileInfo, error)` — Stat returns file information for the given path. 
+ - `func (m *Medium) Open(p string) (fs.File, error)` — Open opens the named file for reading. + - `func (m *Medium) Create(p string) (goio.WriteCloser, error)` — Create creates or truncates the named file. + - `func (m *Medium) Append(p string) (goio.WriteCloser, error)` — Append opens the named file for appending, creating it if it doesn't exist. + - `func (m *Medium) ReadStream(p string) (goio.ReadCloser, error)` — ReadStream returns a reader for the file content. + - `func (m *Medium) WriteStream(p string) (goio.WriteCloser, error)` — WriteStream returns a writer for the file content. Content is stored on Close. + - `func (m *Medium) Exists(p string) bool` — Exists checks if a path exists (file or directory). + - `func (m *Medium) IsDir(p string) bool` — IsDir checks if a path exists and is a directory. + +### Option +- **File:** sqlite.go +- **Purpose:** Option configures a Medium. +- **Underlying:** `func(*Medium)` + +### fileInfo +- **File:** sqlite.go +- **Purpose:** fileInfo implements fs.FileInfo for SQLite entries. +- **Fields:** + - `name string` — No doc comment in source. + - `size int64` — No doc comment in source. + - `mode fs.FileMode` — No doc comment in source. + - `modTime time.Time` — No doc comment in source. + - `isDir bool` — No doc comment in source. +- **Associated Methods:** + - `func (fi *fileInfo) Name() string` — No doc comment in source. + - `func (fi *fileInfo) Size() int64` — No doc comment in source. + - `func (fi *fileInfo) Mode() fs.FileMode` — No doc comment in source. + - `func (fi *fileInfo) ModTime() time.Time` — No doc comment in source. + - `func (fi *fileInfo) IsDir() bool` — No doc comment in source. + - `func (fi *fileInfo) Sys() any` — No doc comment in source. + +### dirEntry +- **File:** sqlite.go +- **Purpose:** dirEntry implements fs.DirEntry for SQLite listings. +- **Fields:** + - `name string` — No doc comment in source. + - `isDir bool` — No doc comment in source. 
+ - `mode fs.FileMode` — No doc comment in source. + - `info fs.FileInfo` — No doc comment in source. +- **Associated Methods:** + - `func (de *dirEntry) Name() string` — No doc comment in source. + - `func (de *dirEntry) IsDir() bool` — No doc comment in source. + - `func (de *dirEntry) Type() fs.FileMode` — No doc comment in source. + - `func (de *dirEntry) Info() (fs.FileInfo, error)` — No doc comment in source. + +### sqliteFile +- **File:** sqlite.go +- **Purpose:** sqliteFile implements fs.File for SQLite entries. +- **Fields:** + - `name string` — No doc comment in source. + - `content []byte` — No doc comment in source. + - `offset int64` — No doc comment in source. + - `mode fs.FileMode` — No doc comment in source. + - `modTime time.Time` — No doc comment in source. +- **Associated Methods:** + - `func (f *sqliteFile) Stat() (fs.FileInfo, error)` — No doc comment in source. + - `func (f *sqliteFile) Read(b []byte) (int, error)` — No doc comment in source. + - `func (f *sqliteFile) Close() error` — No doc comment in source. + +### sqliteWriteCloser +- **File:** sqlite.go +- **Purpose:** sqliteWriteCloser buffers writes and stores to SQLite on Close. +- **Fields:** + - `medium *Medium` — No doc comment in source. + - `path string` — No doc comment in source. + - `data []byte` — No doc comment in source. +- **Associated Methods:** + - `func (w *sqliteWriteCloser) Write(p []byte) (int, error)` — No doc comment in source. + - `func (w *sqliteWriteCloser) Close() error` — No doc comment in source. + +## Functions + +### WithTable +- **File:** sqlite.go +- **Signature:** `func WithTable(table string) Option` +- **Purpose:** WithTable sets the table name (default: "files"). + +### New +- **File:** sqlite.go +- **Signature:** `func New(dbPath string, opts ...Option) (*Medium, error)` +- **Purpose:** New creates a new SQLite Medium at the given database path. Use ":memory:" for an in-memory database. 
+ +### cleanPath +- **File:** sqlite.go +- **Signature:** `func cleanPath(p string) string` +- **Purpose:** cleanPath normalises a path for consistent storage. Uses a leading "/" before Clean to sandbox traversal attempts. + diff --git a/specs/store/RFC.md b/specs/store/RFC.md new file mode 100644 index 0000000..c5837cd --- /dev/null +++ b/specs/store/RFC.md @@ -0,0 +1,120 @@ +# store + +**Import:** `dappco.re/go/core/io/store` +**Files:** 2 + +No package doc comment in source. + +## Types + +### Medium +- **File:** medium.go +- **Purpose:** Medium wraps a Store to satisfy the io.Medium interface. Paths are mapped as group/key — first segment is the group, the rest is the key. List("") returns groups as directories, List("group") returns keys as files. +- **Fields:** + - `s *Store` — No doc comment in source. +- **Associated Methods:** + - `func (m *Medium) Store() *Store` — Store returns the underlying KV store for direct access. + - `func (m *Medium) Close() error` — Close closes the underlying store. + - `func (m *Medium) Read(p string) (string, error)` — Read retrieves the value at group/key. + - `func (m *Medium) Write(p, content string) error` — Write stores a value at group/key. + - `func (m *Medium) EnsureDir(_ string) error` — EnsureDir is a no-op — groups are created implicitly on Set. + - `func (m *Medium) IsFile(p string) bool` — IsFile returns true if a group/key pair exists. + - `func (m *Medium) FileGet(p string) (string, error)` — FileGet is an alias for Read. + - `func (m *Medium) FileSet(p, content string) error` — FileSet is an alias for Write. + - `func (m *Medium) Delete(p string) error` — Delete removes a key, or checks that a group is empty. + - `func (m *Medium) DeleteAll(p string) error` — DeleteAll removes a key, or all keys in a group. + - `func (m *Medium) Rename(oldPath, newPath string) error` — Rename moves a key from one path to another. + - `func (m *Medium) List(p string) ([]fs.DirEntry, error)` — List returns directory entries. 
Empty path returns groups. A group path returns keys in that group. + - `func (m *Medium) Stat(p string) (fs.FileInfo, error)` — Stat returns file info for a group (dir) or key (file). + - `func (m *Medium) Open(p string) (fs.File, error)` — Open opens a key for reading. + - `func (m *Medium) Create(p string) (goio.WriteCloser, error)` — Create creates or truncates a key. Content is stored on Close. + - `func (m *Medium) Append(p string) (goio.WriteCloser, error)` — Append opens a key for appending. Content is stored on Close. + - `func (m *Medium) ReadStream(p string) (goio.ReadCloser, error)` — ReadStream returns a reader for the value. + - `func (m *Medium) WriteStream(p string) (goio.WriteCloser, error)` — WriteStream returns a writer. Content is stored on Close. + - `func (m *Medium) Exists(p string) bool` — Exists returns true if a group or key exists. + - `func (m *Medium) IsDir(p string) bool` — IsDir returns true if the path is a group with entries. + +### kvFileInfo +- **File:** medium.go +- **Purpose:** No doc comment in source. +- **Fields:** + - `name string` — No doc comment in source. + - `size int64` — No doc comment in source. + - `isDir bool` — No doc comment in source. +- **Associated Methods:** + - `func (fi *kvFileInfo) Name() string` — No doc comment in source. + - `func (fi *kvFileInfo) Size() int64` — No doc comment in source. + - `func (fi *kvFileInfo) Mode() fs.FileMode` — No doc comment in source. + - `func (fi *kvFileInfo) ModTime() time.Time` — No doc comment in source. + - `func (fi *kvFileInfo) IsDir() bool` — No doc comment in source. + - `func (fi *kvFileInfo) Sys() any` — No doc comment in source. + +### kvDirEntry +- **File:** medium.go +- **Purpose:** No doc comment in source. +- **Fields:** + - `name string` — No doc comment in source. + - `isDir bool` — No doc comment in source. + - `size int64` — No doc comment in source. +- **Associated Methods:** + - `func (de *kvDirEntry) Name() string` — No doc comment in source. 
+ - `func (de *kvDirEntry) IsDir() bool` — No doc comment in source. + - `func (de *kvDirEntry) Type() fs.FileMode` — No doc comment in source. + - `func (de *kvDirEntry) Info() (fs.FileInfo, error)` — No doc comment in source. + +### kvFile +- **File:** medium.go +- **Purpose:** No doc comment in source. +- **Fields:** + - `name string` — No doc comment in source. + - `content []byte` — No doc comment in source. + - `offset int64` — No doc comment in source. +- **Associated Methods:** + - `func (f *kvFile) Stat() (fs.FileInfo, error)` — No doc comment in source. + - `func (f *kvFile) Read(b []byte) (int, error)` — No doc comment in source. + - `func (f *kvFile) Close() error` — No doc comment in source. + +### kvWriteCloser +- **File:** medium.go +- **Purpose:** No doc comment in source. +- **Fields:** + - `s *Store` — No doc comment in source. + - `group string` — No doc comment in source. + - `key string` — No doc comment in source. + - `data []byte` — No doc comment in source. +- **Associated Methods:** + - `func (w *kvWriteCloser) Write(p []byte) (int, error)` — No doc comment in source. + - `func (w *kvWriteCloser) Close() error` — No doc comment in source. + +### Store +- **File:** store.go +- **Purpose:** Store is a group-namespaced key-value store backed by SQLite. +- **Fields:** + - `db *sql.DB` — No doc comment in source. +- **Associated Methods:** + - `func (s *Store) Close() error` — Close closes the underlying database. + - `func (s *Store) Get(group, key string) (string, error)` — Get retrieves a value by group and key. + - `func (s *Store) Set(group, key, value string) error` — Set stores a value by group and key, overwriting if exists. + - `func (s *Store) Delete(group, key string) error` — Delete removes a single key from a group. + - `func (s *Store) Count(group string) (int, error)` — Count returns the number of keys in a group. + - `func (s *Store) DeleteGroup(group string) error` — DeleteGroup removes all keys in a group. 
+ - `func (s *Store) GetAll(group string) (map[string]string, error)` — GetAll returns all key-value pairs in a group. + - `func (s *Store) Render(tmplStr, group string) (string, error)` — Render loads all key-value pairs from a group and renders a Go template. + +## Functions + +### NewMedium +- **File:** medium.go +- **Signature:** `func NewMedium(dbPath string) (*Medium, error)` +- **Purpose:** NewMedium creates an io.Medium backed by a KV store at the given SQLite path. + +### splitPath +- **File:** medium.go +- **Signature:** `func splitPath(p string) (group, key string)` +- **Purpose:** splitPath splits a medium-style path into group and key. First segment = group, remainder = key. + +### New +- **File:** store.go +- **Signature:** `func New(dbPath string) (*Store, error)` +- **Purpose:** New creates a Store at the given SQLite path. Use ":memory:" for tests. + diff --git a/specs/workspace/RFC.md b/specs/workspace/RFC.md new file mode 100644 index 0000000..a8d2e7e --- /dev/null +++ b/specs/workspace/RFC.md @@ -0,0 +1,60 @@ +# workspace + +**Import:** `dappco.re/go/core/io/workspace` +**Files:** 1 + +No package doc comment in source. + +## Types + +### Workspace +- **File:** service.go +- **Purpose:** Workspace provides management for encrypted user workspaces. +- **Methods:** + - `CreateWorkspace func(identifier, password string) (string, error)` — No doc comment in source. + - `SwitchWorkspace func(name string) error` — No doc comment in source. + - `WorkspaceFileGet func(filename string) (string, error)` — No doc comment in source. + - `WorkspaceFileSet func(filename, content string) error` — No doc comment in source. + +### cryptProvider +- **File:** service.go +- **Purpose:** cryptProvider is the interface for PGP key generation. +- **Methods:** + - `CreateKeyPair func(name, passphrase string) (string, error)` — No doc comment in source. + +### Service +- **File:** service.go +- **Purpose:** Service implements the Workspace interface. 
+- **Fields:** + - `core *core.Core` — No doc comment in source. + - `crypt cryptProvider` — No doc comment in source. + - `activeWorkspace string` — No doc comment in source. + - `rootPath string` — No doc comment in source. + - `medium io.Medium` — No doc comment in source. + - `mu sync.RWMutex` — No doc comment in source. +- **Associated Methods:** + - `func (s *Service) CreateWorkspace(identifier, password string) (string, error)` — CreateWorkspace creates a new encrypted workspace. Identifier is hashed (SHA-256) to create the directory name. A PGP keypair is generated using the password. + - `func (s *Service) SwitchWorkspace(name string) error` — SwitchWorkspace changes the active workspace. + - `func (s *Service) activeFilePath(op, filename string) (string, error)` — activeFilePath returns the full path to a file in the active workspace, or an error if no workspace is active. + - `func (s *Service) WorkspaceFileGet(filename string) (string, error)` — WorkspaceFileGet retrieves the content of a file from the active workspace. + - `func (s *Service) WorkspaceFileSet(filename, content string) error` — WorkspaceFileSet saves content to a file in the active workspace. + - `func (s *Service) HandleIPCEvents(c *core.Core, msg core.Message) core.Result` — HandleIPCEvents handles workspace-related IPC messages. + - `func (s *Service) workspacePath(op, name string) (string, error)` — No doc comment in source. + +## Functions + +### New +- **File:** service.go +- **Signature:** `func New(c *core.Core, crypt ...cryptProvider) (any, error)` +- **Purpose:** New creates a new Workspace service instance. An optional cryptProvider can be passed to supply PGP key generation. + +### workspaceHome +- **File:** service.go +- **Signature:** `func workspaceHome() string` +- **Purpose:** No doc comment in source. 
+ +### joinWithinRoot +- **File:** service.go +- **Signature:** `func joinWithinRoot(root string, parts ...string) (string, error)` +- **Purpose:** No doc comment in source. + -- 2.45.3