fix(io): enforce ax v0.8.0 polish spec

Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
Virgil 2026-03-26 16:23:45 +00:00
parent 238d6c6b91
commit 514ecd7e7a
25 changed files with 1605 additions and 767 deletions

View file

@ -8,7 +8,7 @@ import (
// --- MockMedium Tests ---
func TestNewMockMedium_Good(t *testing.T) {
func TestClient_NewMockMedium_Good(t *testing.T) {
m := NewMockMedium()
assert.NotNil(t, m)
assert.NotNil(t, m.Files)
@ -17,7 +17,7 @@ func TestNewMockMedium_Good(t *testing.T) {
assert.Empty(t, m.Dirs)
}
func TestMockMedium_Read_Good(t *testing.T) {
func TestClient_MockMedium_Read_Good(t *testing.T) {
m := NewMockMedium()
m.Files["test.txt"] = "hello world"
content, err := m.Read("test.txt")
@ -25,13 +25,13 @@ func TestMockMedium_Read_Good(t *testing.T) {
assert.Equal(t, "hello world", content)
}
func TestMockMedium_Read_Bad(t *testing.T) {
func TestClient_MockMedium_Read_Bad(t *testing.T) {
m := NewMockMedium()
_, err := m.Read("nonexistent.txt")
assert.Error(t, err)
}
func TestMockMedium_Write_Good(t *testing.T) {
func TestClient_MockMedium_Write_Good(t *testing.T) {
m := NewMockMedium()
err := m.Write("test.txt", "content")
assert.NoError(t, err)
@ -43,14 +43,14 @@ func TestMockMedium_Write_Good(t *testing.T) {
assert.Equal(t, "new content", m.Files["test.txt"])
}
func TestMockMedium_EnsureDir_Good(t *testing.T) {
func TestClient_MockMedium_EnsureDir_Good(t *testing.T) {
m := NewMockMedium()
err := m.EnsureDir("/path/to/dir")
assert.NoError(t, err)
assert.True(t, m.Dirs["/path/to/dir"])
}
func TestMockMedium_IsFile_Good(t *testing.T) {
func TestClient_MockMedium_IsFile_Good(t *testing.T) {
m := NewMockMedium()
m.Files["exists.txt"] = "content"
@ -58,7 +58,7 @@ func TestMockMedium_IsFile_Good(t *testing.T) {
assert.False(t, m.IsFile("nonexistent.txt"))
}
func TestMockMedium_FileGet_Good(t *testing.T) {
func TestClient_MockMedium_FileGet_Good(t *testing.T) {
m := NewMockMedium()
m.Files["test.txt"] = "content"
content, err := m.FileGet("test.txt")
@ -66,14 +66,14 @@ func TestMockMedium_FileGet_Good(t *testing.T) {
assert.Equal(t, "content", content)
}
func TestMockMedium_FileSet_Good(t *testing.T) {
func TestClient_MockMedium_FileSet_Good(t *testing.T) {
m := NewMockMedium()
err := m.FileSet("test.txt", "content")
assert.NoError(t, err)
assert.Equal(t, "content", m.Files["test.txt"])
}
func TestMockMedium_Delete_Good(t *testing.T) {
func TestClient_MockMedium_Delete_Good(t *testing.T) {
m := NewMockMedium()
m.Files["test.txt"] = "content"
@ -82,13 +82,13 @@ func TestMockMedium_Delete_Good(t *testing.T) {
assert.False(t, m.IsFile("test.txt"))
}
func TestMockMedium_Delete_Bad_NotFound(t *testing.T) {
func TestClient_MockMedium_Delete_NotFound_Bad(t *testing.T) {
m := NewMockMedium()
err := m.Delete("nonexistent.txt")
assert.Error(t, err)
}
func TestMockMedium_Delete_Bad_DirNotEmpty(t *testing.T) {
func TestClient_MockMedium_Delete_DirNotEmpty_Bad(t *testing.T) {
m := NewMockMedium()
m.Dirs["mydir"] = true
m.Files["mydir/file.txt"] = "content"
@ -97,7 +97,7 @@ func TestMockMedium_Delete_Bad_DirNotEmpty(t *testing.T) {
assert.Error(t, err)
}
func TestMockMedium_DeleteAll_Good(t *testing.T) {
func TestClient_MockMedium_DeleteAll_Good(t *testing.T) {
m := NewMockMedium()
m.Dirs["mydir"] = true
m.Dirs["mydir/subdir"] = true
@ -110,7 +110,7 @@ func TestMockMedium_DeleteAll_Good(t *testing.T) {
assert.Empty(t, m.Files)
}
func TestMockMedium_Rename_Good(t *testing.T) {
func TestClient_MockMedium_Rename_Good(t *testing.T) {
m := NewMockMedium()
m.Files["old.txt"] = "content"
@ -121,7 +121,7 @@ func TestMockMedium_Rename_Good(t *testing.T) {
assert.Equal(t, "content", m.Files["new.txt"])
}
func TestMockMedium_Rename_Good_Dir(t *testing.T) {
func TestClient_MockMedium_Rename_Dir_Good(t *testing.T) {
m := NewMockMedium()
m.Dirs["olddir"] = true
m.Files["olddir/file.txt"] = "content"
@ -133,7 +133,7 @@ func TestMockMedium_Rename_Good_Dir(t *testing.T) {
assert.Equal(t, "content", m.Files["newdir/file.txt"])
}
func TestMockMedium_List_Good(t *testing.T) {
func TestClient_MockMedium_List_Good(t *testing.T) {
m := NewMockMedium()
m.Dirs["mydir"] = true
m.Files["mydir/file1.txt"] = "content1"
@ -153,7 +153,7 @@ func TestMockMedium_List_Good(t *testing.T) {
assert.True(t, names["subdir"])
}
func TestMockMedium_Stat_Good(t *testing.T) {
func TestClient_MockMedium_Stat_Good(t *testing.T) {
m := NewMockMedium()
m.Files["test.txt"] = "hello world"
@ -164,7 +164,7 @@ func TestMockMedium_Stat_Good(t *testing.T) {
assert.False(t, info.IsDir())
}
func TestMockMedium_Stat_Good_Dir(t *testing.T) {
func TestClient_MockMedium_Stat_Dir_Good(t *testing.T) {
m := NewMockMedium()
m.Dirs["mydir"] = true
@ -174,7 +174,7 @@ func TestMockMedium_Stat_Good_Dir(t *testing.T) {
assert.True(t, info.IsDir())
}
func TestMockMedium_Exists_Good(t *testing.T) {
func TestClient_MockMedium_Exists_Good(t *testing.T) {
m := NewMockMedium()
m.Files["file.txt"] = "content"
m.Dirs["mydir"] = true
@ -184,7 +184,7 @@ func TestMockMedium_Exists_Good(t *testing.T) {
assert.False(t, m.Exists("nonexistent"))
}
func TestMockMedium_IsDir_Good(t *testing.T) {
func TestClient_MockMedium_IsDir_Good(t *testing.T) {
m := NewMockMedium()
m.Files["file.txt"] = "content"
m.Dirs["mydir"] = true
@ -196,7 +196,7 @@ func TestMockMedium_IsDir_Good(t *testing.T) {
// --- Wrapper Function Tests ---
func TestRead_Good(t *testing.T) {
func TestClient_Read_Good(t *testing.T) {
m := NewMockMedium()
m.Files["test.txt"] = "hello"
content, err := Read(m, "test.txt")
@ -204,21 +204,21 @@ func TestRead_Good(t *testing.T) {
assert.Equal(t, "hello", content)
}
func TestWrite_Good(t *testing.T) {
func TestClient_Write_Good(t *testing.T) {
m := NewMockMedium()
err := Write(m, "test.txt", "hello")
assert.NoError(t, err)
assert.Equal(t, "hello", m.Files["test.txt"])
}
func TestEnsureDir_Good(t *testing.T) {
func TestClient_EnsureDir_Good(t *testing.T) {
m := NewMockMedium()
err := EnsureDir(m, "/my/dir")
assert.NoError(t, err)
assert.True(t, m.Dirs["/my/dir"])
}
func TestIsFile_Good(t *testing.T) {
func TestClient_IsFile_Good(t *testing.T) {
m := NewMockMedium()
m.Files["exists.txt"] = "content"
@ -226,7 +226,7 @@ func TestIsFile_Good(t *testing.T) {
assert.False(t, IsFile(m, "nonexistent.txt"))
}
func TestCopy_Good(t *testing.T) {
func TestClient_Copy_Good(t *testing.T) {
source := NewMockMedium()
dest := NewMockMedium()
source.Files["test.txt"] = "hello"
@ -241,7 +241,7 @@ func TestCopy_Good(t *testing.T) {
assert.Equal(t, "content", dest.Files["copied.txt"])
}
func TestCopy_Bad(t *testing.T) {
func TestClient_Copy_Bad(t *testing.T) {
source := NewMockMedium()
dest := NewMockMedium()
err := Copy(source, "nonexistent.txt", dest, "dest.txt")
@ -250,7 +250,7 @@ func TestCopy_Bad(t *testing.T) {
// --- Local Global Tests ---
func TestLocalGlobal_Good(t *testing.T) {
func TestClient_LocalGlobal_Good(t *testing.T) {
// io.Local should be initialised by init()
assert.NotNil(t, Local, "io.Local should be initialised")

View file

@ -12,12 +12,11 @@ import (
"io/fs"
"path"
"slices"
"strings"
"sync"
"time"
core "dappco.re/go/core"
borgdatanode "forge.lthn.ai/Snider/Borg/pkg/datanode"
coreerr "forge.lthn.ai/core/go-log"
)
var (
@ -62,7 +61,7 @@ func New() *Medium {
func FromTar(data []byte) (*Medium, error) {
dn, err := borgdatanode.FromTar(data)
if err != nil {
return nil, coreerr.E("datanode.FromTar", "failed to restore", err)
return nil, core.E("datanode.FromTar", "failed to restore", err)
}
return &Medium{
dn: dn,
@ -72,21 +71,25 @@ func FromTar(data []byte) (*Medium, error) {
// Snapshot serialises the entire filesystem to a tarball.
// Use this for crash reports, workspace packaging, or TIM creation.
//
// result := m.Snapshot(...)
func (m *Medium) Snapshot() ([]byte, error) {
m.mu.RLock()
defer m.mu.RUnlock()
data, err := m.dn.ToTar()
if err != nil {
return nil, coreerr.E("datanode.Snapshot", "tar failed", err)
return nil, core.E("datanode.Snapshot", "tar failed", err)
}
return data, nil
}
// Restore replaces the filesystem contents from a tarball.
//
// result := m.Restore(...)
func (m *Medium) Restore(data []byte) error {
dn, err := borgdatanode.FromTar(data)
if err != nil {
return coreerr.E("datanode.Restore", "tar failed", err)
return core.E("datanode.Restore", "tar failed", err)
}
m.mu.Lock()
defer m.mu.Unlock()
@ -97,6 +100,8 @@ func (m *Medium) Restore(data []byte) error {
// DataNode returns the underlying Borg DataNode.
// Use this to wrap the filesystem in a TIM container.
//
// result := m.DataNode(...)
func (m *Medium) DataNode() *borgdatanode.DataNode {
m.mu.RLock()
defer m.mu.RUnlock()
@ -105,7 +110,7 @@ func (m *Medium) DataNode() *borgdatanode.DataNode {
// clean normalises a path: strips leading slash, cleans traversal.
func clean(p string) string {
p = strings.TrimPrefix(p, "/")
p = core.TrimPrefix(p, "/")
p = path.Clean(p)
if p == "." {
return ""
@ -115,6 +120,9 @@ func clean(p string) string {
// --- io.Medium interface ---
// Read documents the Read operation.
//
// result := m.Read(...)
func (m *Medium) Read(p string) (string, error) {
m.mu.RLock()
defer m.mu.RUnlock()
@ -122,32 +130,35 @@ func (m *Medium) Read(p string) (string, error) {
p = clean(p)
f, err := m.dn.Open(p)
if err != nil {
return "", coreerr.E("datanode.Read", "not found: "+p, fs.ErrNotExist)
return "", core.E("datanode.Read", core.Concat("not found: ", p), fs.ErrNotExist)
}
defer f.Close()
info, err := f.Stat()
if err != nil {
return "", coreerr.E("datanode.Read", "stat failed: "+p, err)
return "", core.E("datanode.Read", core.Concat("stat failed: ", p), err)
}
if info.IsDir() {
return "", coreerr.E("datanode.Read", "is a directory: "+p, fs.ErrInvalid)
return "", core.E("datanode.Read", core.Concat("is a directory: ", p), fs.ErrInvalid)
}
data, err := goio.ReadAll(f)
if err != nil {
return "", coreerr.E("datanode.Read", "read failed: "+p, err)
return "", core.E("datanode.Read", core.Concat("read failed: ", p), err)
}
return string(data), nil
}
// Write documents the Write operation.
//
// result := m.Write(...)
func (m *Medium) Write(p, content string) error {
m.mu.Lock()
defer m.mu.Unlock()
p = clean(p)
if p == "" {
return coreerr.E("datanode.Write", "empty path", fs.ErrInvalid)
return core.E("datanode.Write", "empty path", fs.ErrInvalid)
}
m.dn.AddData(p, []byte(content))
@ -156,10 +167,16 @@ func (m *Medium) Write(p, content string) error {
return nil
}
// WriteMode documents the WriteMode operation.
//
// result := m.WriteMode(...)
func (m *Medium) WriteMode(p, content string, mode fs.FileMode) error {
return m.Write(p, content)
}
// EnsureDir documents the EnsureDir operation.
//
// result := m.EnsureDir(...)
func (m *Medium) EnsureDir(p string) error {
m.mu.Lock()
defer m.mu.Unlock()
@ -184,6 +201,9 @@ func (m *Medium) ensureDirsLocked(p string) {
}
}
// IsFile documents the IsFile operation.
//
// result := m.IsFile(...)
func (m *Medium) IsFile(p string) bool {
m.mu.RLock()
defer m.mu.RUnlock()
@ -193,21 +213,30 @@ func (m *Medium) IsFile(p string) bool {
return err == nil && !info.IsDir()
}
// FileGet documents the FileGet operation.
//
// result := m.FileGet(...)
func (m *Medium) FileGet(p string) (string, error) {
return m.Read(p)
}
// FileSet documents the FileSet operation.
//
// result := m.FileSet(...)
func (m *Medium) FileSet(p, content string) error {
return m.Write(p, content)
}
// Delete documents the Delete operation.
//
// result := m.Delete(...)
func (m *Medium) Delete(p string) error {
m.mu.Lock()
defer m.mu.Unlock()
p = clean(p)
if p == "" {
return coreerr.E("datanode.Delete", "cannot delete root", fs.ErrPermission)
return core.E("datanode.Delete", "cannot delete root", fs.ErrPermission)
}
// Check if it's a file in the DataNode
@ -218,24 +247,24 @@ func (m *Medium) Delete(p string) error {
// Check if dir is empty
hasChildren, err := m.hasPrefixLocked(p + "/")
if err != nil {
return coreerr.E("datanode.Delete", "failed to inspect directory: "+p, err)
return core.E("datanode.Delete", core.Concat("failed to inspect directory: ", p), err)
}
if hasChildren {
return coreerr.E("datanode.Delete", "directory not empty: "+p, fs.ErrExist)
return core.E("datanode.Delete", core.Concat("directory not empty: ", p), fs.ErrExist)
}
delete(m.dirs, p)
return nil
}
return coreerr.E("datanode.Delete", "not found: "+p, fs.ErrNotExist)
return core.E("datanode.Delete", core.Concat("not found: ", p), fs.ErrNotExist)
}
if info.IsDir() {
hasChildren, err := m.hasPrefixLocked(p + "/")
if err != nil {
return coreerr.E("datanode.Delete", "failed to inspect directory: "+p, err)
return core.E("datanode.Delete", core.Concat("failed to inspect directory: ", p), err)
}
if hasChildren {
return coreerr.E("datanode.Delete", "directory not empty: "+p, fs.ErrExist)
return core.E("datanode.Delete", core.Concat("directory not empty: ", p), fs.ErrExist)
}
delete(m.dirs, p)
return nil
@ -243,18 +272,21 @@ func (m *Medium) Delete(p string) error {
// Remove the file by creating a new DataNode without it
if err := m.removeFileLocked(p); err != nil {
return coreerr.E("datanode.Delete", "failed to delete file: "+p, err)
return core.E("datanode.Delete", core.Concat("failed to delete file: ", p), err)
}
return nil
}
// DeleteAll documents the DeleteAll operation.
//
// result := m.DeleteAll(...)
func (m *Medium) DeleteAll(p string) error {
m.mu.Lock()
defer m.mu.Unlock()
p = clean(p)
if p == "" {
return coreerr.E("datanode.DeleteAll", "cannot delete root", fs.ErrPermission)
return core.E("datanode.DeleteAll", "cannot delete root", fs.ErrPermission)
}
prefix := p + "/"
@ -264,7 +296,7 @@ func (m *Medium) DeleteAll(p string) error {
info, err := m.dn.Stat(p)
if err == nil && !info.IsDir() {
if err := m.removeFileLocked(p); err != nil {
return coreerr.E("datanode.DeleteAll", "failed to delete file: "+p, err)
return core.E("datanode.DeleteAll", core.Concat("failed to delete file: ", p), err)
}
found = true
}
@ -272,12 +304,12 @@ func (m *Medium) DeleteAll(p string) error {
// Remove all files under prefix
entries, err := m.collectAllLocked()
if err != nil {
return coreerr.E("datanode.DeleteAll", "failed to inspect tree: "+p, err)
return core.E("datanode.DeleteAll", core.Concat("failed to inspect tree: ", p), err)
}
for _, name := range entries {
if name == p || strings.HasPrefix(name, prefix) {
if name == p || core.HasPrefix(name, prefix) {
if err := m.removeFileLocked(name); err != nil {
return coreerr.E("datanode.DeleteAll", "failed to delete file: "+name, err)
return core.E("datanode.DeleteAll", core.Concat("failed to delete file: ", name), err)
}
found = true
}
@ -285,18 +317,21 @@ func (m *Medium) DeleteAll(p string) error {
// Remove explicit dirs under prefix
for d := range m.dirs {
if d == p || strings.HasPrefix(d, prefix) {
if d == p || core.HasPrefix(d, prefix) {
delete(m.dirs, d)
found = true
}
}
if !found {
return coreerr.E("datanode.DeleteAll", "not found: "+p, fs.ErrNotExist)
return core.E("datanode.DeleteAll", core.Concat("not found: ", p), fs.ErrNotExist)
}
return nil
}
// Rename documents the Rename operation.
//
// result := m.Rename(...)
func (m *Medium) Rename(oldPath, newPath string) error {
m.mu.Lock()
defer m.mu.Unlock()
@ -307,19 +342,19 @@ func (m *Medium) Rename(oldPath, newPath string) error {
// Check if source is a file
info, err := m.dn.Stat(oldPath)
if err != nil {
return coreerr.E("datanode.Rename", "not found: "+oldPath, fs.ErrNotExist)
return core.E("datanode.Rename", core.Concat("not found: ", oldPath), fs.ErrNotExist)
}
if !info.IsDir() {
// Read old, write new, delete old
data, err := m.readFileLocked(oldPath)
if err != nil {
return coreerr.E("datanode.Rename", "failed to read source file: "+oldPath, err)
return core.E("datanode.Rename", core.Concat("failed to read source file: ", oldPath), err)
}
m.dn.AddData(newPath, data)
m.ensureDirsLocked(path.Dir(newPath))
if err := m.removeFileLocked(oldPath); err != nil {
return coreerr.E("datanode.Rename", "failed to remove source file: "+oldPath, err)
return core.E("datanode.Rename", core.Concat("failed to remove source file: ", oldPath), err)
}
return nil
}
@ -330,18 +365,18 @@ func (m *Medium) Rename(oldPath, newPath string) error {
entries, err := m.collectAllLocked()
if err != nil {
return coreerr.E("datanode.Rename", "failed to inspect tree: "+oldPath, err)
return core.E("datanode.Rename", core.Concat("failed to inspect tree: ", oldPath), err)
}
for _, name := range entries {
if strings.HasPrefix(name, oldPrefix) {
newName := newPrefix + strings.TrimPrefix(name, oldPrefix)
if core.HasPrefix(name, oldPrefix) {
newName := core.Concat(newPrefix, core.TrimPrefix(name, oldPrefix))
data, err := m.readFileLocked(name)
if err != nil {
return coreerr.E("datanode.Rename", "failed to read source file: "+name, err)
return core.E("datanode.Rename", core.Concat("failed to read source file: ", name), err)
}
m.dn.AddData(newName, data)
if err := m.removeFileLocked(name); err != nil {
return coreerr.E("datanode.Rename", "failed to remove source file: "+name, err)
return core.E("datanode.Rename", core.Concat("failed to remove source file: ", name), err)
}
}
}
@ -349,8 +384,8 @@ func (m *Medium) Rename(oldPath, newPath string) error {
// Move explicit dirs
dirsToMove := make(map[string]string)
for d := range m.dirs {
if d == oldPath || strings.HasPrefix(d, oldPrefix) {
newD := newPath + strings.TrimPrefix(d, oldPath)
if d == oldPath || core.HasPrefix(d, oldPrefix) {
newD := core.Concat(newPath, core.TrimPrefix(d, oldPath))
dirsToMove[d] = newD
}
}
@ -362,6 +397,9 @@ func (m *Medium) Rename(oldPath, newPath string) error {
return nil
}
// List documents the List operation.
//
// result := m.List(...)
func (m *Medium) List(p string) ([]fs.DirEntry, error) {
m.mu.RLock()
defer m.mu.RUnlock()
@ -374,7 +412,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) {
if p == "" || m.dirs[p] {
return []fs.DirEntry{}, nil
}
return nil, coreerr.E("datanode.List", "not found: "+p, fs.ErrNotExist)
return nil, core.E("datanode.List", core.Concat("not found: ", p), fs.ErrNotExist)
}
// Also include explicit subdirectories not discovered via files
@ -388,14 +426,14 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) {
}
for d := range m.dirs {
if !strings.HasPrefix(d, prefix) {
if !core.HasPrefix(d, prefix) {
continue
}
rest := strings.TrimPrefix(d, prefix)
rest := core.TrimPrefix(d, prefix)
if rest == "" {
continue
}
first := strings.SplitN(rest, "/", 2)[0]
first := core.SplitN(rest, "/", 2)[0]
if !seen[first] {
seen[first] = true
entries = append(entries, &dirEntry{name: first})
@ -409,6 +447,9 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) {
return entries, nil
}
// Stat documents the Stat operation.
//
// result := m.Stat(...)
func (m *Medium) Stat(p string) (fs.FileInfo, error) {
m.mu.RLock()
defer m.mu.RUnlock()
@ -426,9 +467,12 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) {
if m.dirs[p] {
return &fileInfo{name: path.Base(p), isDir: true, mode: fs.ModeDir | 0755}, nil
}
return nil, coreerr.E("datanode.Stat", "not found: "+p, fs.ErrNotExist)
return nil, core.E("datanode.Stat", core.Concat("not found: ", p), fs.ErrNotExist)
}
// Open documents the Open operation.
//
// result := m.Open(...)
func (m *Medium) Open(p string) (fs.File, error) {
m.mu.RLock()
defer m.mu.RUnlock()
@ -437,18 +481,24 @@ func (m *Medium) Open(p string) (fs.File, error) {
return m.dn.Open(p)
}
// Create documents the Create operation.
//
// result := m.Create(...)
func (m *Medium) Create(p string) (goio.WriteCloser, error) {
p = clean(p)
if p == "" {
return nil, coreerr.E("datanode.Create", "empty path", fs.ErrInvalid)
return nil, core.E("datanode.Create", "empty path", fs.ErrInvalid)
}
return &writeCloser{m: m, path: p}, nil
}
// Append documents the Append operation.
//
// result := m.Append(...)
func (m *Medium) Append(p string) (goio.WriteCloser, error) {
p = clean(p)
if p == "" {
return nil, coreerr.E("datanode.Append", "empty path", fs.ErrInvalid)
return nil, core.E("datanode.Append", "empty path", fs.ErrInvalid)
}
// Read existing content
@ -458,7 +508,7 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) {
data, err := m.readFileLocked(p)
if err != nil {
m.mu.RUnlock()
return nil, coreerr.E("datanode.Append", "failed to read existing content: "+p, err)
return nil, core.E("datanode.Append", core.Concat("failed to read existing content: ", p), err)
}
existing = data
}
@ -467,6 +517,9 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) {
return &writeCloser{m: m, path: p, buf: existing}, nil
}
// ReadStream documents the ReadStream operation.
//
// result := m.ReadStream(...)
func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) {
m.mu.RLock()
defer m.mu.RUnlock()
@ -474,15 +527,21 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) {
p = clean(p)
f, err := m.dn.Open(p)
if err != nil {
return nil, coreerr.E("datanode.ReadStream", "not found: "+p, fs.ErrNotExist)
return nil, core.E("datanode.ReadStream", core.Concat("not found: ", p), fs.ErrNotExist)
}
return f.(goio.ReadCloser), nil
}
// WriteStream documents the WriteStream operation.
//
// result := m.WriteStream(...)
func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) {
return m.Create(p)
}
// Exists documents the Exists operation.
//
// result := m.Exists(...)
func (m *Medium) Exists(p string) bool {
m.mu.RLock()
defer m.mu.RUnlock()
@ -498,6 +557,9 @@ func (m *Medium) Exists(p string) bool {
return m.dirs[p]
}
// IsDir documents the IsDir operation.
//
// result := m.IsDir(...)
func (m *Medium) IsDir(p string) bool {
m.mu.RLock()
defer m.mu.RUnlock()
@ -522,12 +584,12 @@ func (m *Medium) hasPrefixLocked(prefix string) (bool, error) {
return false, err
}
for _, name := range entries {
if strings.HasPrefix(name, prefix) {
if core.HasPrefix(name, prefix) {
return true, nil
}
}
for d := range m.dirs {
if strings.HasPrefix(d, prefix) {
if core.HasPrefix(d, prefix) {
return true, nil
}
}
@ -596,11 +658,17 @@ type writeCloser struct {
buf []byte
}
// Write documents the Write operation.
//
// result := w.Write(...)
func (w *writeCloser) Write(p []byte) (int, error) {
w.buf = append(w.buf, p...)
return len(p), nil
}
// Close documents the Close operation.
//
// result := w.Close(...)
func (w *writeCloser) Close() error {
w.m.mu.Lock()
defer w.m.mu.Unlock()
@ -616,9 +684,24 @@ type dirEntry struct {
name string
}
// Name documents the Name operation.
//
// result := d.Name(...)
func (d *dirEntry) Name() string { return d.name }
// IsDir documents the IsDir operation.
//
// result := d.IsDir(...)
func (d *dirEntry) IsDir() bool { return true }
// Type documents the Type operation.
//
// result := d.Type(...)
func (d *dirEntry) Type() fs.FileMode { return fs.ModeDir }
// Info documents the Info operation.
//
// result := d.Info(...)
func (d *dirEntry) Info() (fs.FileInfo, error) {
return &fileInfo{name: d.name, isDir: true, mode: fs.ModeDir | 0755}, nil
}
@ -631,9 +714,32 @@ type fileInfo struct {
isDir bool
}
// Name documents the Name operation.
//
// result := fi.Name(...)
func (fi *fileInfo) Name() string { return fi.name }
// Size documents the Size operation.
//
// result := fi.Size(...)
func (fi *fileInfo) Size() int64 { return fi.size }
// Mode documents the Mode operation.
//
// result := fi.Mode(...)
func (fi *fileInfo) Mode() fs.FileMode { return fi.mode }
// ModTime documents the ModTime operation.
//
// result := fi.ModTime(...)
func (fi *fileInfo) ModTime() time.Time { return fi.modTime }
// IsDir documents the IsDir operation.
//
// result := fi.IsDir(...)
func (fi *fileInfo) IsDir() bool { return fi.isDir }
// Sys documents the Sys operation.
//
// result := fi.Sys(...)
func (fi *fileInfo) Sys() any { return nil }

View file

@ -1,11 +1,11 @@
package datanode
import (
"errors"
"io"
"io/fs"
"testing"
core "dappco.re/go/core"
coreio "dappco.re/go/core/io"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -14,7 +14,7 @@ import (
// Compile-time check: Medium implements io.Medium.
var _ coreio.Medium = (*Medium)(nil)
func TestReadWrite_Good(t *testing.T) {
func TestClient_ReadWrite_Good(t *testing.T) {
m := New()
err := m.Write("hello.txt", "world")
@ -25,7 +25,7 @@ func TestReadWrite_Good(t *testing.T) {
assert.Equal(t, "world", got)
}
func TestReadWrite_Bad(t *testing.T) {
func TestClient_ReadWrite_Bad(t *testing.T) {
m := New()
_, err := m.Read("missing.txt")
@ -35,7 +35,7 @@ func TestReadWrite_Bad(t *testing.T) {
assert.Error(t, err)
}
func TestNestedPaths_Good(t *testing.T) {
func TestClient_NestedPaths_Good(t *testing.T) {
m := New()
require.NoError(t, m.Write("a/b/c/deep.txt", "deep"))
@ -49,7 +49,7 @@ func TestNestedPaths_Good(t *testing.T) {
assert.True(t, m.IsDir("a/b/c"))
}
func TestLeadingSlash_Good(t *testing.T) {
func TestClient_LeadingSlash_Good(t *testing.T) {
m := New()
require.NoError(t, m.Write("/leading/file.txt", "stripped"))
@ -62,7 +62,7 @@ func TestLeadingSlash_Good(t *testing.T) {
assert.Equal(t, "stripped", got)
}
func TestIsFile_Good(t *testing.T) {
func TestClient_IsFile_Good(t *testing.T) {
m := New()
require.NoError(t, m.Write("file.go", "package main"))
@ -72,7 +72,7 @@ func TestIsFile_Good(t *testing.T) {
assert.False(t, m.IsFile("")) // empty path
}
func TestEnsureDir_Good(t *testing.T) {
func TestClient_EnsureDir_Good(t *testing.T) {
m := New()
require.NoError(t, m.EnsureDir("foo/bar/baz"))
@ -83,7 +83,7 @@ func TestEnsureDir_Good(t *testing.T) {
assert.True(t, m.Exists("foo/bar/baz"))
}
func TestDelete_Good(t *testing.T) {
func TestClient_Delete_Good(t *testing.T) {
m := New()
require.NoError(t, m.Write("delete-me.txt", "bye"))
@ -93,7 +93,7 @@ func TestDelete_Good(t *testing.T) {
assert.False(t, m.Exists("delete-me.txt"))
}
func TestDelete_Bad(t *testing.T) {
func TestClient_Delete_Bad(t *testing.T) {
m := New()
// Delete non-existent
@ -104,13 +104,13 @@ func TestDelete_Bad(t *testing.T) {
assert.Error(t, m.Delete("dir"))
}
func TestDelete_Bad_DirectoryInspectionFailure(t *testing.T) {
func TestClient_Delete_DirectoryInspectionFailure_Bad(t *testing.T) {
m := New()
require.NoError(t, m.Write("dir/file.txt", "content"))
original := dataNodeWalkDir
dataNodeWalkDir = func(_ fs.FS, _ string, _ fs.WalkDirFunc) error {
return errors.New("walk failed")
return core.NewError("walk failed")
}
t.Cleanup(func() {
dataNodeWalkDir = original
@ -121,7 +121,7 @@ func TestDelete_Bad_DirectoryInspectionFailure(t *testing.T) {
assert.Contains(t, err.Error(), "failed to inspect directory")
}
func TestDeleteAll_Good(t *testing.T) {
func TestClient_DeleteAll_Good(t *testing.T) {
m := New()
require.NoError(t, m.Write("tree/a.txt", "a"))
@ -135,13 +135,13 @@ func TestDeleteAll_Good(t *testing.T) {
assert.True(t, m.Exists("keep.txt"))
}
func TestDeleteAll_Bad_WalkFailure(t *testing.T) {
func TestClient_DeleteAll_WalkFailure_Bad(t *testing.T) {
m := New()
require.NoError(t, m.Write("tree/a.txt", "a"))
original := dataNodeWalkDir
dataNodeWalkDir = func(_ fs.FS, _ string, _ fs.WalkDirFunc) error {
return errors.New("walk failed")
return core.NewError("walk failed")
}
t.Cleanup(func() {
dataNodeWalkDir = original
@ -152,14 +152,14 @@ func TestDeleteAll_Bad_WalkFailure(t *testing.T) {
assert.Contains(t, err.Error(), "failed to inspect tree")
}
func TestDelete_Bad_RemoveFailure(t *testing.T) {
func TestClient_Delete_RemoveFailure_Bad(t *testing.T) {
m := New()
require.NoError(t, m.Write("keep.txt", "keep"))
require.NoError(t, m.Write("bad.txt", "bad"))
original := dataNodeReadAll
dataNodeReadAll = func(_ io.Reader) ([]byte, error) {
return nil, errors.New("read failed")
return nil, core.NewError("read failed")
}
t.Cleanup(func() {
dataNodeReadAll = original
@ -170,7 +170,7 @@ func TestDelete_Bad_RemoveFailure(t *testing.T) {
assert.Contains(t, err.Error(), "failed to delete file")
}
func TestRename_Good(t *testing.T) {
func TestClient_Rename_Good(t *testing.T) {
m := New()
require.NoError(t, m.Write("old.txt", "content"))
@ -182,7 +182,7 @@ func TestRename_Good(t *testing.T) {
assert.Equal(t, "content", got)
}
func TestRenameDir_Good(t *testing.T) {
func TestClient_RenameDir_Good(t *testing.T) {
m := New()
require.NoError(t, m.Write("src/a.go", "package a"))
@ -201,13 +201,13 @@ func TestRenameDir_Good(t *testing.T) {
assert.Equal(t, "package b", got)
}
func TestRenameDir_Bad_ReadFailure(t *testing.T) {
func TestClient_RenameDir_ReadFailure_Bad(t *testing.T) {
m := New()
require.NoError(t, m.Write("src/a.go", "package a"))
original := dataNodeReadAll
dataNodeReadAll = func(_ io.Reader) ([]byte, error) {
return nil, errors.New("read failed")
return nil, core.NewError("read failed")
}
t.Cleanup(func() {
dataNodeReadAll = original
@ -218,7 +218,7 @@ func TestRenameDir_Bad_ReadFailure(t *testing.T) {
assert.Contains(t, err.Error(), "failed to read source file")
}
func TestList_Good(t *testing.T) {
func TestClient_List_Good(t *testing.T) {
m := New()
require.NoError(t, m.Write("root.txt", "r"))
@ -247,7 +247,7 @@ func TestList_Good(t *testing.T) {
assert.Contains(t, names, "sub")
}
func TestStat_Good(t *testing.T) {
func TestClient_Stat_Good(t *testing.T) {
m := New()
require.NoError(t, m.Write("stat.txt", "hello"))
@ -263,7 +263,7 @@ func TestStat_Good(t *testing.T) {
assert.True(t, info.IsDir())
}
func TestOpen_Good(t *testing.T) {
func TestClient_Open_Good(t *testing.T) {
m := New()
require.NoError(t, m.Write("open.txt", "opened"))
@ -277,7 +277,7 @@ func TestOpen_Good(t *testing.T) {
assert.Equal(t, "opened", string(data))
}
func TestCreateAppend_Good(t *testing.T) {
func TestClient_CreateAppend_Good(t *testing.T) {
m := New()
// Create
@ -301,13 +301,13 @@ func TestCreateAppend_Good(t *testing.T) {
assert.Equal(t, "hello world", got)
}
func TestAppend_Bad_ReadFailure(t *testing.T) {
func TestClient_Append_ReadFailure_Bad(t *testing.T) {
m := New()
require.NoError(t, m.Write("new.txt", "hello"))
original := dataNodeReadAll
dataNodeReadAll = func(_ io.Reader) ([]byte, error) {
return nil, errors.New("read failed")
return nil, core.NewError("read failed")
}
t.Cleanup(func() {
dataNodeReadAll = original
@ -318,7 +318,7 @@ func TestAppend_Bad_ReadFailure(t *testing.T) {
assert.Contains(t, err.Error(), "failed to read existing content")
}
func TestStreams_Good(t *testing.T) {
func TestClient_Streams_Good(t *testing.T) {
m := New()
// WriteStream
@ -336,7 +336,7 @@ func TestStreams_Good(t *testing.T) {
rs.Close()
}
func TestFileGetFileSet_Good(t *testing.T) {
func TestClient_FileGetFileSet_Good(t *testing.T) {
m := New()
require.NoError(t, m.FileSet("alias.txt", "via set"))
@ -346,7 +346,7 @@ func TestFileGetFileSet_Good(t *testing.T) {
assert.Equal(t, "via set", got)
}
func TestSnapshotRestore_Good(t *testing.T) {
func TestClient_SnapshotRestore_Good(t *testing.T) {
m := New()
require.NoError(t, m.Write("a.txt", "alpha"))
@ -369,7 +369,7 @@ func TestSnapshotRestore_Good(t *testing.T) {
assert.Equal(t, "charlie", got)
}
func TestRestore_Good(t *testing.T) {
func TestClient_Restore_Good(t *testing.T) {
m := New()
require.NoError(t, m.Write("original.txt", "before"))
@ -391,7 +391,7 @@ func TestRestore_Good(t *testing.T) {
assert.False(t, m.Exists("extra.txt"))
}
func TestDataNode_Good(t *testing.T) {
func TestClient_DataNode_Good(t *testing.T) {
m := New()
require.NoError(t, m.Write("test.txt", "borg"))
@ -409,7 +409,7 @@ func TestDataNode_Good(t *testing.T) {
assert.Equal(t, "borg", string(data))
}
func TestOverwrite_Good(t *testing.T) {
func TestClient_Overwrite_Good(t *testing.T) {
m := New()
require.NoError(t, m.Write("file.txt", "v1"))
@ -420,7 +420,7 @@ func TestOverwrite_Good(t *testing.T) {
assert.Equal(t, "v2", got)
}
func TestExists_Good(t *testing.T) {
func TestClient_Exists_Good(t *testing.T) {
m := New()
assert.True(t, m.Exists("")) // root
@ -430,7 +430,7 @@ func TestExists_Good(t *testing.T) {
assert.True(t, m.Exists("x"))
}
func TestReadDir_Ugly(t *testing.T) {
func TestClient_ReadDir_Ugly(t *testing.T) {
m := New()
// Read from a file path (not a dir) should return empty or error

3
go.mod
View file

@ -3,9 +3,8 @@ module dappco.re/go/core/io
go 1.26.0
require (
dappco.re/go/core v0.6.0
dappco.re/go/core v0.8.0-alpha.1
forge.lthn.ai/Snider/Borg v0.3.1
forge.lthn.ai/core/go-log v0.0.4
github.com/aws/aws-sdk-go-v2 v1.41.4
github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1
github.com/stretchr/testify v1.11.1

6
go.sum
View file

@ -1,9 +1,7 @@
dappco.re/go/core v0.6.0 h1:0wmuO/UmCWXxJkxQ6XvVLnqkAuWitbd49PhxjCsplyk=
dappco.re/go/core v0.6.0/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A=
dappco.re/go/core v0.8.0-alpha.1 h1:gj7+Scv+L63Z7wMxbJYHhaRFkHJo2u4MMPuUSv/Dhtk=
dappco.re/go/core v0.8.0-alpha.1/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A=
forge.lthn.ai/Snider/Borg v0.3.1 h1:gfC1ZTpLoZai07oOWJiVeQ8+qJYK8A795tgVGJHbVL8=
forge.lthn.ai/Snider/Borg v0.3.1/go.mod h1:Z7DJD0yHXsxSyM7Mjl6/g4gH1NBsIz44Bf5AFlV76Wg=
forge.lthn.ai/core/go-log v0.0.4 h1:KTuCEPgFmuM8KJfnyQ8vPOU1Jg654W74h8IJvfQMfv0=
forge.lthn.ai/core/go-log v0.0.4/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw=
github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k=
github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 h1:3kGOqnh1pPeddVa/E37XNTaWJ8W6vrbYV9lJEkCnhuY=

181
io.go
View file

@ -1,14 +1,13 @@
package io
import (
"bytes"
goio "io"
"io/fs"
"strings"
"time"
core "dappco.re/go/core"
"dappco.re/go/core/io/local"
coreerr "forge.lthn.ai/core/go-log"
)
// Medium defines the standard interface for a storage backend.
@ -86,11 +85,34 @@ type FileInfo struct {
isDir bool
}
// Name returns the entry's file name.
func (fi FileInfo) Name() string { return fi.name }

// Size returns the length of the file in bytes.
func (fi FileInfo) Size() int64 { return fi.size }

// Mode returns the file mode bits.
func (fi FileInfo) Mode() fs.FileMode { return fi.mode }

// ModTime returns the recorded modification time.
func (fi FileInfo) ModTime() time.Time { return fi.modTime }

// IsDir reports whether the entry describes a directory.
func (fi FileInfo) IsDir() bool { return fi.isDir }

// Sys always returns nil: the mock has no underlying data source.
func (fi FileInfo) Sys() any { return nil }
// DirEntry provides a simple implementation of fs.DirEntry for mock testing.
@ -101,9 +123,24 @@ type DirEntry struct {
info fs.FileInfo
}
// Name returns the entry's file name.
func (de DirEntry) Name() string { return de.name }

// IsDir reports whether the entry is a directory.
func (de DirEntry) IsDir() bool { return de.isDir }

// Type returns the type bits of the entry's file mode.
func (de DirEntry) Type() fs.FileMode { return de.mode.Type() }

// Info returns the stored fs.FileInfo as-is; the error is always nil.
func (de DirEntry) Info() (fs.FileInfo, error) { return de.info, nil }
// Local is a pre-initialised medium for the local filesystem.
@ -115,7 +152,7 @@ func init() {
var err error
Local, err = local.New("/")
if err != nil {
coreerr.Warn("io: failed to initialise Local medium, io.Local will be nil", "error", err)
core.Warn("io: failed to initialise Local medium, io.Local will be nil", "error", err)
}
}
@ -134,43 +171,57 @@ func NewSandboxed(root string) (Medium, error) {
// --- Helper Functions ---
// Read retrieves the content of a file from the given medium by
// delegating to the medium's own Read.
func Read(m Medium, path string) (string, error) {
	return m.Read(path)
}
// Write saves the given content to a file in the given medium by
// delegating to the medium's own Write.
func Write(m Medium, path, content string) error {
	return m.Write(path, content)
}
// ReadStream returns a reader for the file content from the given medium
// by delegating to the medium's own ReadStream.
func ReadStream(m Medium, path string) (goio.ReadCloser, error) {
	return m.ReadStream(path)
}
// WriteStream returns a writer for the file content in the given medium
// by delegating to the medium's own WriteStream.
func WriteStream(m Medium, path string) (goio.WriteCloser, error) {
	return m.WriteStream(path)
}
// EnsureDir makes sure a directory exists in the given medium by
// delegating to the medium's own EnsureDir.
func EnsureDir(m Medium, path string) error {
	return m.EnsureDir(path)
}
// IsFile reports whether a path exists and is a regular file in the given
// medium by delegating to the medium's own IsFile.
func IsFile(m Medium, path string) bool {
	return m.IsFile(path)
}
// Copy copies a file from one medium to another.
//
// result := io.Copy(...)
func Copy(src Medium, srcPath string, dst Medium, dstPath string) error {
content, err := src.Read(srcPath)
if err != nil {
return coreerr.E("io.Copy", "read failed: "+srcPath, err)
return core.E("io.Copy", core.Concat("read failed: ", srcPath), err)
}
if err := dst.Write(dstPath, content); err != nil {
return coreerr.E("io.Copy", "write failed: "+dstPath, err)
return core.E("io.Copy", core.Concat("write failed: ", dstPath), err)
}
return nil
}
@ -185,6 +236,8 @@ type MockMedium struct {
}
// NewMockMedium creates a new MockMedium instance.
//
// result := io.NewMockMedium(...)
func NewMockMedium() *MockMedium {
return &MockMedium{
Files: make(map[string]string),
@ -194,48 +247,65 @@ func NewMockMedium() *MockMedium {
}
// Read retrieves the content of a file from the mock filesystem.
//
// result := m.Read(...)
func (m *MockMedium) Read(path string) (string, error) {
content, ok := m.Files[path]
if !ok {
return "", coreerr.E("io.MockMedium.Read", "file not found: "+path, fs.ErrNotExist)
return "", core.E("io.MockMedium.Read", core.Concat("file not found: ", path), fs.ErrNotExist)
}
return content, nil
}
// Write stores content under path in the mock filesystem and records the
// current time as the file's modification time.
func (m *MockMedium) Write(path, content string) error {
	m.Files[path] = content
	m.ModTimes[path] = time.Now()
	return nil
}
// WriteMode writes content exactly like Write. The mode argument is
// accepted for interface compatibility but ignored: the mock filesystem
// does not store permissions.
func (m *MockMedium) WriteMode(path, content string, mode fs.FileMode) error {
	return m.Write(path, content)
}
// EnsureDir records that a directory exists in the mock filesystem.
// It never fails.
func (m *MockMedium) EnsureDir(path string) error {
	m.Dirs[path] = true
	return nil
}
// IsFile reports whether path is present as a file in the mock filesystem.
// Directories recorded via EnsureDir do not count.
func (m *MockMedium) IsFile(path string) bool {
	_, ok := m.Files[path]
	return ok
}
// FileGet is a convenience alias for Read on the mock filesystem.
func (m *MockMedium) FileGet(path string) (string, error) {
	return m.Read(path)
}
// FileSet is a convenience alias for Write on the mock filesystem.
func (m *MockMedium) FileSet(path, content string) error {
	return m.Write(path, content)
}
// Delete removes a file or empty directory from the mock filesystem.
//
// result := m.Delete(...)
func (m *MockMedium) Delete(path string) error {
if _, ok := m.Files[path]; ok {
delete(m.Files, path)
@ -244,26 +314,28 @@ func (m *MockMedium) Delete(path string) error {
if _, ok := m.Dirs[path]; ok {
// Check if directory is empty (no files or subdirs with this prefix)
prefix := path
if !strings.HasSuffix(prefix, "/") {
if !core.HasSuffix(prefix, "/") {
prefix += "/"
}
for f := range m.Files {
if strings.HasPrefix(f, prefix) {
return coreerr.E("io.MockMedium.Delete", "directory not empty: "+path, fs.ErrExist)
if core.HasPrefix(f, prefix) {
return core.E("io.MockMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist)
}
}
for d := range m.Dirs {
if d != path && strings.HasPrefix(d, prefix) {
return coreerr.E("io.MockMedium.Delete", "directory not empty: "+path, fs.ErrExist)
if d != path && core.HasPrefix(d, prefix) {
return core.E("io.MockMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist)
}
}
delete(m.Dirs, path)
return nil
}
return coreerr.E("io.MockMedium.Delete", "path not found: "+path, fs.ErrNotExist)
return core.E("io.MockMedium.Delete", core.Concat("path not found: ", path), fs.ErrNotExist)
}
// DeleteAll removes a file or directory and all contents from the mock filesystem.
//
// result := m.DeleteAll(...)
func (m *MockMedium) DeleteAll(path string) error {
found := false
if _, ok := m.Files[path]; ok {
@ -277,29 +349,31 @@ func (m *MockMedium) DeleteAll(path string) error {
// Delete all entries under this path
prefix := path
if !strings.HasSuffix(prefix, "/") {
if !core.HasSuffix(prefix, "/") {
prefix += "/"
}
for f := range m.Files {
if strings.HasPrefix(f, prefix) {
if core.HasPrefix(f, prefix) {
delete(m.Files, f)
found = true
}
}
for d := range m.Dirs {
if strings.HasPrefix(d, prefix) {
if core.HasPrefix(d, prefix) {
delete(m.Dirs, d)
found = true
}
}
if !found {
return coreerr.E("io.MockMedium.DeleteAll", "path not found: "+path, fs.ErrNotExist)
return core.E("io.MockMedium.DeleteAll", core.Concat("path not found: ", path), fs.ErrNotExist)
}
return nil
}
// Rename moves a file or directory in the mock filesystem.
//
// result := m.Rename(...)
func (m *MockMedium) Rename(oldPath, newPath string) error {
if content, ok := m.Files[oldPath]; ok {
m.Files[newPath] = content
@ -316,19 +390,19 @@ func (m *MockMedium) Rename(oldPath, newPath string) error {
delete(m.Dirs, oldPath)
oldPrefix := oldPath
if !strings.HasSuffix(oldPrefix, "/") {
if !core.HasSuffix(oldPrefix, "/") {
oldPrefix += "/"
}
newPrefix := newPath
if !strings.HasSuffix(newPrefix, "/") {
if !core.HasSuffix(newPrefix, "/") {
newPrefix += "/"
}
// Collect files to move first (don't mutate during iteration)
filesToMove := make(map[string]string)
for f := range m.Files {
if strings.HasPrefix(f, oldPrefix) {
newF := newPrefix + strings.TrimPrefix(f, oldPrefix)
if core.HasPrefix(f, oldPrefix) {
newF := core.Concat(newPrefix, core.TrimPrefix(f, oldPrefix))
filesToMove[f] = newF
}
}
@ -344,8 +418,8 @@ func (m *MockMedium) Rename(oldPath, newPath string) error {
// Collect directories to move first
dirsToMove := make(map[string]string)
for d := range m.Dirs {
if strings.HasPrefix(d, oldPrefix) {
newD := newPrefix + strings.TrimPrefix(d, oldPrefix)
if core.HasPrefix(d, oldPrefix) {
newD := core.Concat(newPrefix, core.TrimPrefix(d, oldPrefix))
dirsToMove[d] = newD
}
}
@ -355,14 +429,16 @@ func (m *MockMedium) Rename(oldPath, newPath string) error {
}
return nil
}
return coreerr.E("io.MockMedium.Rename", "path not found: "+oldPath, fs.ErrNotExist)
return core.E("io.MockMedium.Rename", core.Concat("path not found: ", oldPath), fs.ErrNotExist)
}
// Open opens a file from the mock filesystem.
//
// result := m.Open(...)
func (m *MockMedium) Open(path string) (fs.File, error) {
content, ok := m.Files[path]
if !ok {
return nil, coreerr.E("io.MockMedium.Open", "file not found: "+path, fs.ErrNotExist)
return nil, core.E("io.MockMedium.Open", core.Concat("file not found: ", path), fs.ErrNotExist)
}
return &MockFile{
name: core.PathBase(path),
@ -371,6 +447,8 @@ func (m *MockMedium) Open(path string) (fs.File, error) {
}
// Create creates a file in the mock filesystem.
//
// result := m.Create(...)
func (m *MockMedium) Create(path string) (goio.WriteCloser, error) {
return &MockWriteCloser{
medium: m,
@ -379,6 +457,8 @@ func (m *MockMedium) Create(path string) (goio.WriteCloser, error) {
}
// Append opens a file for appending in the mock filesystem.
//
// result := m.Append(...)
func (m *MockMedium) Append(path string) (goio.WriteCloser, error) {
content := m.Files[path]
return &MockWriteCloser{
@ -389,11 +469,15 @@ func (m *MockMedium) Append(path string) (goio.WriteCloser, error) {
}
// ReadStream returns a reader over the file content in the mock
// filesystem by delegating to Open.
func (m *MockMedium) ReadStream(path string) (goio.ReadCloser, error) {
	return m.Open(path)
}
// WriteStream returns a writer for the file content in the mock
// filesystem by delegating to Create.
func (m *MockMedium) WriteStream(path string) (goio.WriteCloser, error) {
	return m.Create(path)
}
@ -405,6 +489,9 @@ type MockFile struct {
offset int64
}
// Stat documents the Stat operation.
//
// result := f.Stat(...)
func (f *MockFile) Stat() (fs.FileInfo, error) {
return FileInfo{
name: f.name,
@ -412,6 +499,9 @@ func (f *MockFile) Stat() (fs.FileInfo, error) {
}, nil
}
// Read documents the Read operation.
//
// result := f.Read(...)
func (f *MockFile) Read(b []byte) (int, error) {
if f.offset >= int64(len(f.content)) {
return 0, goio.EOF
@ -421,6 +511,9 @@ func (f *MockFile) Read(b []byte) (int, error) {
return n, nil
}
// Close is a no-op for the in-memory mock file; it always succeeds.
func (f *MockFile) Close() error {
	return nil
}
@ -432,11 +525,17 @@ type MockWriteCloser struct {
data []byte
}
// Write buffers p into the in-memory accumulator and reports the full
// length as written. The data is only committed to the medium when Close
// is called.
func (w *MockWriteCloser) Write(p []byte) (int, error) {
	n := len(p)
	w.data = append(w.data, p...)
	return n, nil
}
// Close documents the Close operation.
//
// result := w.Close(...)
func (w *MockWriteCloser) Close() error {
w.medium.Files[w.path] = string(w.data)
w.medium.ModTimes[w.path] = time.Now()
@ -444,35 +543,37 @@ func (w *MockWriteCloser) Close() error {
}
// List returns directory entries for the mock filesystem.
//
// result := m.List(...)
func (m *MockMedium) List(path string) ([]fs.DirEntry, error) {
if _, ok := m.Dirs[path]; !ok {
// Check if it's the root or has children
hasChildren := false
prefix := path
if path != "" && !strings.HasSuffix(prefix, "/") {
if path != "" && !core.HasSuffix(prefix, "/") {
prefix += "/"
}
for f := range m.Files {
if strings.HasPrefix(f, prefix) {
if core.HasPrefix(f, prefix) {
hasChildren = true
break
}
}
if !hasChildren {
for d := range m.Dirs {
if strings.HasPrefix(d, prefix) {
if core.HasPrefix(d, prefix) {
hasChildren = true
break
}
}
}
if !hasChildren && path != "" {
return nil, coreerr.E("io.MockMedium.List", "directory not found: "+path, fs.ErrNotExist)
return nil, core.E("io.MockMedium.List", core.Concat("directory not found: ", path), fs.ErrNotExist)
}
}
prefix := path
if path != "" && !strings.HasSuffix(prefix, "/") {
if path != "" && !core.HasSuffix(prefix, "/") {
prefix += "/"
}
@ -481,13 +582,13 @@ func (m *MockMedium) List(path string) ([]fs.DirEntry, error) {
// Find immediate children (files)
for f, content := range m.Files {
if !strings.HasPrefix(f, prefix) {
if !core.HasPrefix(f, prefix) {
continue
}
rest := strings.TrimPrefix(f, prefix)
if rest == "" || strings.Contains(rest, "/") {
rest := core.TrimPrefix(f, prefix)
if rest == "" || core.Contains(rest, "/") {
// Skip if it's not an immediate child
if idx := strings.Index(rest, "/"); idx != -1 {
if idx := bytes.IndexByte([]byte(rest), '/'); idx != -1 {
// This is a subdirectory
dirName := rest[:idx]
if !seen[dirName] {
@ -523,15 +624,15 @@ func (m *MockMedium) List(path string) ([]fs.DirEntry, error) {
// Find immediate subdirectories
for d := range m.Dirs {
if !strings.HasPrefix(d, prefix) {
if !core.HasPrefix(d, prefix) {
continue
}
rest := strings.TrimPrefix(d, prefix)
rest := core.TrimPrefix(d, prefix)
if rest == "" {
continue
}
// Get only immediate child
if idx := strings.Index(rest, "/"); idx != -1 {
if idx := bytes.IndexByte([]byte(rest), '/'); idx != -1 {
rest = rest[:idx]
}
if !seen[rest] {
@ -553,6 +654,8 @@ func (m *MockMedium) List(path string) ([]fs.DirEntry, error) {
}
// Stat returns file information for the mock filesystem.
//
// result := m.Stat(...)
func (m *MockMedium) Stat(path string) (fs.FileInfo, error) {
if content, ok := m.Files[path]; ok {
modTime, ok := m.ModTimes[path]
@ -573,10 +676,12 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) {
mode: fs.ModeDir | 0755,
}, nil
}
return nil, coreerr.E("io.MockMedium.Stat", "path not found: "+path, fs.ErrNotExist)
return nil, core.E("io.MockMedium.Stat", core.Concat("path not found: ", path), fs.ErrNotExist)
}
// Exists checks if a path exists in the mock filesystem.
//
// result := m.Exists(...)
func (m *MockMedium) Exists(path string) bool {
if _, ok := m.Files[path]; ok {
return true
@ -588,6 +693,8 @@ func (m *MockMedium) Exists(path string) bool {
}
// IsDir checks if a path is a directory in the mock filesystem.
//
// result := m.IsDir(...)
func (m *MockMedium) IsDir(path string) bool {
_, ok := m.Dirs[path]
return ok

View file

@ -2,15 +2,11 @@
package local
import (
"fmt"
goio "io"
"io/fs"
"os"
"strings"
"time"
"syscall"
core "dappco.re/go/core"
coreerr "forge.lthn.ai/core/go-log"
)
// Medium is a local filesystem storage backend.
@ -18,6 +14,8 @@ type Medium struct {
root string
}
var rawFS = (&core.Fs{}).NewUnrestricted()
// New creates a new local Medium rooted at the given directory.
// Pass "/" for full filesystem access, or a specific path to sandbox.
//
@ -41,21 +39,18 @@ func dirSeparator() string {
if sep := core.Env("DS"); sep != "" {
return sep
}
return string(os.PathSeparator)
return "/"
}
func normalisePath(p string) string {
sep := dirSeparator()
if sep == "/" {
return strings.ReplaceAll(p, "\\", sep)
return core.Replace(p, "\\", sep)
}
return strings.ReplaceAll(p, "/", sep)
return core.Replace(p, "/", sep)
}
func currentWorkingDir() string {
if cwd, err := os.Getwd(); err == nil && cwd != "" {
return cwd
}
if cwd := core.Env("DIR_CWD"); cwd != "" {
return cwd
}
@ -75,12 +70,12 @@ func cleanSandboxPath(p string) string {
}
func splitPathParts(p string) []string {
trimmed := strings.TrimPrefix(p, dirSeparator())
trimmed := core.TrimPrefix(p, dirSeparator())
if trimmed == "" {
return nil
}
var parts []string
for _, part := range strings.Split(trimmed, dirSeparator()) {
for _, part := range core.Split(trimmed, dirSeparator()) {
if part == "" {
continue
}
@ -102,20 +97,20 @@ func resolveSymlinksRecursive(p string, seen map[string]struct{}) (string, error
current := dirSeparator()
for _, part := range splitPathParts(p) {
next := core.Path(current, part)
info, err := os.Lstat(next)
info, err := lstat(next)
if err != nil {
if os.IsNotExist(err) {
if core.Is(err, syscall.ENOENT) {
current = next
continue
}
return "", err
}
if info.Mode()&os.ModeSymlink == 0 {
if !isSymlink(info.Mode) {
current = next
continue
}
target, err := os.Readlink(next)
target, err := readlink(next)
if err != nil {
return "", err
}
@ -126,7 +121,7 @@ func resolveSymlinksRecursive(p string, seen map[string]struct{}) (string, error
target = core.Path(target)
}
if _, ok := seen[target]; ok {
return "", coreerr.E("local.resolveSymlinksPath", "symlink cycle: "+target, os.ErrInvalid)
return "", core.E("local.resolveSymlinksPath", core.Concat("symlink cycle: ", target), fs.ErrInvalid)
}
seen[target] = struct{}{}
resolved, err := resolveSymlinksRecursive(target, seen)
@ -146,7 +141,7 @@ func isWithinRoot(root, target string) bool {
if root == dirSeparator() {
return true
}
return target == root || strings.HasPrefix(target, root+dirSeparator())
return target == root || core.HasPrefix(target, root+dirSeparator())
}
func canonicalPath(p string) string {
@ -179,8 +174,7 @@ func logSandboxEscape(root, path, attempted string) {
if username == "" {
username = "unknown"
}
fmt.Fprintf(os.Stderr, "[%s] SECURITY sandbox escape detected root=%s path=%s attempted=%s user=%s\n",
time.Now().Format(time.RFC3339), root, path, attempted, username)
core.Security("sandbox escape detected", "root", root, "path", path, "attempted", attempted, "user", username)
}
// path sanitises and returns the full path.
@ -207,7 +201,7 @@ func (m *Medium) path(p string) string {
}
// Join cleaned relative path with root
return core.Path(m.root, strings.TrimPrefix(clean, dirSeparator()))
return core.Path(m.root, core.TrimPrefix(clean, dirSeparator()))
}
// validatePath ensures the path is within the sandbox, following symlinks if they exist.
@ -224,7 +218,7 @@ func (m *Medium) validatePath(p string) (string, error) {
next := core.Path(current, part)
realNext, err := resolveSymlinksPath(next)
if err != nil {
if os.IsNotExist(err) {
if core.Is(err, syscall.ENOENT) {
// Part doesn't exist, we can't follow symlinks anymore.
// Since the path is already Cleaned and current is safe,
// appending a component to current will not escape.
@ -238,7 +232,7 @@ func (m *Medium) validatePath(p string) (string, error) {
if !isWithinRoot(m.root, realNext) {
// Security event: sandbox escape attempt
logSandboxEscape(m.root, p, realNext)
return "", os.ErrPermission // Path escapes sandbox
return "", fs.ErrPermission
}
current = realNext
}
@ -247,48 +241,51 @@ func (m *Medium) validatePath(p string) (string, error) {
}
// Read returns file contents as string.
//
// result := m.Read(...)
func (m *Medium) Read(p string) (string, error) {
full, err := m.validatePath(p)
if err != nil {
return "", err
}
data, err := os.ReadFile(full)
if err != nil {
return "", err
}
return string(data), nil
return resultValue[string]("local.Read", core.Concat("read failed: ", p), rawFS.Read(full))
}
// Write saves content to file, creating parent directories as needed.
// Files are created with mode 0644. For sensitive files (keys, secrets),
// use WriteMode with 0600 instead.
func (m *Medium) Write(p, content string) error {
	return m.WriteMode(p, content, 0644)
}
// WriteMode saves content to file with explicit permissions.
// Use 0600 for sensitive files (encryption output, private keys, auth hashes).
//
// result := m.WriteMode(...)
func (m *Medium) WriteMode(p, content string, mode fs.FileMode) error {
full, err := m.validatePath(p)
if err != nil {
return err
}
if err := os.MkdirAll(core.PathDir(full), 0755); err != nil {
return err
}
return os.WriteFile(full, []byte(content), mode)
return resultErr("local.WriteMode", core.Concat("write failed: ", p), rawFS.WriteMode(full, content, mode))
}
// EnsureDir creates directory if it doesn't exist.
//
// result := m.EnsureDir(...)
func (m *Medium) EnsureDir(p string) error {
full, err := m.validatePath(p)
if err != nil {
return err
}
return os.MkdirAll(full, 0755)
return resultErr("local.EnsureDir", core.Concat("ensure dir failed: ", p), rawFS.EnsureDir(full))
}
// IsDir returns true if path is a directory.
//
// result := m.IsDir(...)
func (m *Medium) IsDir(p string) bool {
if p == "" {
return false
@ -297,11 +294,12 @@ func (m *Medium) IsDir(p string) bool {
if err != nil {
return false
}
info, err := os.Stat(full)
return err == nil && info.IsDir()
return rawFS.IsDir(full)
}
// IsFile returns true if path is a regular file.
//
// result := m.IsFile(...)
func (m *Medium) IsFile(p string) bool {
if p == "" {
return false
@ -310,69 +308,73 @@ func (m *Medium) IsFile(p string) bool {
if err != nil {
return false
}
info, err := os.Stat(full)
return err == nil && info.Mode().IsRegular()
return rawFS.IsFile(full)
}
// Exists returns true if path exists.
//
// result := m.Exists(...)
func (m *Medium) Exists(p string) bool {
full, err := m.validatePath(p)
if err != nil {
return false
}
_, err = os.Stat(full)
return err == nil
return rawFS.Exists(full)
}
// List returns directory entries.
//
// result := m.List(...)
func (m *Medium) List(p string) ([]fs.DirEntry, error) {
full, err := m.validatePath(p)
if err != nil {
return nil, err
}
return os.ReadDir(full)
return resultValue[[]fs.DirEntry]("local.List", core.Concat("list failed: ", p), rawFS.List(full))
}
// Stat returns file info.
//
// result := m.Stat(...)
func (m *Medium) Stat(p string) (fs.FileInfo, error) {
full, err := m.validatePath(p)
if err != nil {
return nil, err
}
return os.Stat(full)
return resultValue[fs.FileInfo]("local.Stat", core.Concat("stat failed: ", p), rawFS.Stat(full))
}
// Open opens the named file for reading.
//
// result := m.Open(...)
func (m *Medium) Open(p string) (fs.File, error) {
full, err := m.validatePath(p)
if err != nil {
return nil, err
}
return os.Open(full)
return resultValue[fs.File]("local.Open", core.Concat("open failed: ", p), rawFS.Open(full))
}
// Create creates or truncates the named file.
//
// result := m.Create(...)
func (m *Medium) Create(p string) (goio.WriteCloser, error) {
full, err := m.validatePath(p)
if err != nil {
return nil, err
}
if err := os.MkdirAll(core.PathDir(full), 0755); err != nil {
return nil, err
}
return os.Create(full)
return resultValue[goio.WriteCloser]("local.Create", core.Concat("create failed: ", p), rawFS.Create(full))
}
// Append opens the named file for appending, creating it if it doesn't exist.
//
// result := m.Append(...)
func (m *Medium) Append(p string) (goio.WriteCloser, error) {
full, err := m.validatePath(p)
if err != nil {
return nil, err
}
if err := os.MkdirAll(core.PathDir(full), 0755); err != nil {
return nil, err
}
return os.OpenFile(full, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
return resultValue[goio.WriteCloser]("local.Append", core.Concat("append failed: ", p), rawFS.Append(full))
}
// ReadStream returns a reader for the file content.
@ -381,6 +383,8 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) {
// API, as required by the io.Medium interface, while Open provides the more
// general filesystem-level operation. Both methods are kept for semantic
// clarity and backward compatibility.
//
// result := m.ReadStream(...)
func (m *Medium) ReadStream(path string) (goio.ReadCloser, error) {
return m.Open(path)
}
@ -391,35 +395,43 @@ func (m *Medium) ReadStream(path string) (goio.ReadCloser, error) {
// API, as required by the io.Medium interface, while Create provides the more
// general filesystem-level operation. Both methods are kept for semantic
// clarity and backward compatibility.
//
// result := m.WriteStream(...)
func (m *Medium) WriteStream(path string) (goio.WriteCloser, error) {
return m.Create(path)
}
// Delete removes a file or empty directory.
//
// result := m.Delete(...)
func (m *Medium) Delete(p string) error {
full, err := m.validatePath(p)
if err != nil {
return err
}
if isProtectedPath(full) {
return coreerr.E("local.Delete", "refusing to delete protected path: "+full, nil)
return core.E("local.Delete", core.Concat("refusing to delete protected path: ", full), nil)
}
return os.Remove(full)
return resultErr("local.Delete", core.Concat("delete failed: ", p), rawFS.Delete(full))
}
// DeleteAll removes a file or directory recursively.
//
// result := m.DeleteAll(...)
func (m *Medium) DeleteAll(p string) error {
full, err := m.validatePath(p)
if err != nil {
return err
}
if isProtectedPath(full) {
return coreerr.E("local.DeleteAll", "refusing to delete protected path: "+full, nil)
return core.E("local.DeleteAll", core.Concat("refusing to delete protected path: ", full), nil)
}
return os.RemoveAll(full)
return resultErr("local.DeleteAll", core.Concat("delete all failed: ", p), rawFS.DeleteAll(full))
}
// Rename moves a file or directory.
//
// result := m.Rename(...)
func (m *Medium) Rename(oldPath, newPath string) error {
oldFull, err := m.validatePath(oldPath)
if err != nil {
@ -429,15 +441,68 @@ func (m *Medium) Rename(oldPath, newPath string) error {
if err != nil {
return err
}
return os.Rename(oldFull, newFull)
return resultErr("local.Rename", core.Concat("rename failed: ", oldPath), rawFS.Rename(oldFull, newFull))
}
// FileGet is an alias for Read, kept for interface compatibility.
func (m *Medium) FileGet(p string) (string, error) {
	return m.Read(p)
}
// FileSet is an alias for Write, kept for interface compatibility.
func (m *Medium) FileSet(p, content string) error {
	return m.Write(p, content)
}
// lstat wraps syscall.Lstat and returns the raw stat buffer for path
// without following a trailing symlink.
func lstat(path string) (*syscall.Stat_t, error) {
	var st syscall.Stat_t
	if err := syscall.Lstat(path, &st); err != nil {
		return nil, err
	}
	return &st, nil
}
// isSymlink reports whether the file-type bits of a raw stat mode mark a
// symbolic link.
func isSymlink(mode uint32) bool {
	switch mode & syscall.S_IFMT {
	case syscall.S_IFLNK:
		return true
	default:
		return false
	}
}
// readlink returns the target of the symbolic link at path. It retries
// with a doubled buffer until the target fits, since syscall.Readlink
// silently truncates when the buffer is too small.
func readlink(path string) (string, error) {
	for size := 256; ; size *= 2 {
		buf := make([]byte, size)
		n, err := syscall.Readlink(path, buf)
		if err != nil {
			return "", err
		}
		// A result shorter than the buffer means nothing was truncated.
		if n < size {
			return string(buf[:n]), nil
		}
	}
}
// resultErr converts a failed core.Result into a wrapped error; a
// successful result maps to nil. When the result carries an error value it
// is attached as the cause, otherwise the cause is nil.
func resultErr(op, msg string, result core.Result) error {
	if result.OK {
		return nil
	}
	// comma-ok leaves cause nil when Value is not an error,
	// matching the original's explicit nil branch.
	cause, _ := result.Value.(error)
	return core.E(op, msg, cause)
}
// resultValue unwraps a successful core.Result into a value of type T. A
// failed result is converted via resultErr; a success holding the wrong
// dynamic type yields an "unexpected result type" error.
func resultValue[T any](op, msg string, result core.Result) (T, error) {
	var zero T
	if !result.OK {
		return zero, resultErr(op, msg, result)
	}
	if v, ok := result.Value.(T); ok {
		return v, nil
	}
	return zero, core.E(op, "unexpected result type", nil)
}

View file

@ -3,8 +3,7 @@ package local
import (
"io"
"io/fs"
"os"
"strings"
"syscall"
"testing"
core "dappco.re/go/core"
@ -12,7 +11,7 @@ import (
"github.com/stretchr/testify/require"
)
func TestNew_Good_ResolvesRoot(t *testing.T) {
func TestClient_New_ResolvesRoot_Good(t *testing.T) {
root := t.TempDir()
m, err := New(root)
assert.NoError(t, err)
@ -22,7 +21,7 @@ func TestNew_Good_ResolvesRoot(t *testing.T) {
assert.Equal(t, resolved, m.root)
}
func TestPath_Good_Sandboxed(t *testing.T) {
func TestClient_Path_Sandboxed_Good(t *testing.T) {
m := &Medium{root: "/home/user"}
// Normal paths
@ -40,7 +39,7 @@ func TestPath_Good_Sandboxed(t *testing.T) {
assert.Equal(t, "/home/user/etc/passwd", m.path("/etc/passwd"))
}
func TestPath_Good_RootFilesystem(t *testing.T) {
func TestClient_Path_RootFilesystem_Good(t *testing.T) {
m := &Medium{root: "/"}
// When root is "/", absolute paths pass through
@ -52,7 +51,7 @@ func TestPath_Good_RootFilesystem(t *testing.T) {
assert.Equal(t, core.Path(cwd, "file.txt"), m.path("file.txt"))
}
func TestReadWrite_Good_Basic(t *testing.T) {
func TestClient_ReadWrite_Basic_Good(t *testing.T) {
root := t.TempDir()
m, _ := New(root)
@ -77,7 +76,7 @@ func TestReadWrite_Good_Basic(t *testing.T) {
assert.Error(t, err)
}
func TestEnsureDir_Good_Basic(t *testing.T) {
func TestClient_EnsureDir_Basic_Good(t *testing.T) {
root := t.TempDir()
m, _ := New(root)
@ -89,7 +88,7 @@ func TestEnsureDir_Good_Basic(t *testing.T) {
assert.True(t, info.IsDir())
}
func TestIsDir_Good_Basic(t *testing.T) {
func TestClient_IsDir_Basic_Good(t *testing.T) {
root := t.TempDir()
m, _ := New(root)
@ -102,7 +101,7 @@ func TestIsDir_Good_Basic(t *testing.T) {
assert.False(t, m.IsDir(""))
}
func TestIsFile_Good_Basic(t *testing.T) {
func TestClient_IsFile_Basic_Good(t *testing.T) {
root := t.TempDir()
m, _ := New(root)
@ -115,7 +114,7 @@ func TestIsFile_Good_Basic(t *testing.T) {
assert.False(t, m.IsFile(""))
}
func TestExists_Good_Basic(t *testing.T) {
func TestClient_Exists_Basic_Good(t *testing.T) {
root := t.TempDir()
m, _ := New(root)
@ -125,7 +124,7 @@ func TestExists_Good_Basic(t *testing.T) {
assert.False(t, m.Exists("nope"))
}
func TestList_Good_Basic(t *testing.T) {
func TestClient_List_Basic_Good(t *testing.T) {
root := t.TempDir()
m, _ := New(root)
@ -138,7 +137,7 @@ func TestList_Good_Basic(t *testing.T) {
assert.Len(t, entries, 3)
}
func TestStat_Good_Basic(t *testing.T) {
func TestClient_Stat_Basic_Good(t *testing.T) {
root := t.TempDir()
m, _ := New(root)
@ -149,7 +148,7 @@ func TestStat_Good_Basic(t *testing.T) {
assert.Equal(t, int64(7), info.Size())
}
func TestDelete_Good_Basic(t *testing.T) {
func TestClient_Delete_Basic_Good(t *testing.T) {
root := t.TempDir()
m, _ := New(root)
@ -161,7 +160,7 @@ func TestDelete_Good_Basic(t *testing.T) {
assert.False(t, m.Exists("todelete"))
}
func TestDeleteAll_Good_Basic(t *testing.T) {
func TestClient_DeleteAll_Basic_Good(t *testing.T) {
root := t.TempDir()
m, _ := New(root)
@ -172,11 +171,11 @@ func TestDeleteAll_Good_Basic(t *testing.T) {
assert.False(t, m.Exists("dir"))
}
func TestDelete_Bad_ProtectedHomeViaSymlinkEnv(t *testing.T) {
func TestClient_Delete_ProtectedHomeViaSymlinkEnv_Bad(t *testing.T) {
realHome := t.TempDir()
linkParent := t.TempDir()
homeLink := core.Path(linkParent, "home-link")
require.NoError(t, os.Symlink(realHome, homeLink))
require.NoError(t, syscall.Symlink(realHome, homeLink))
t.Setenv("HOME", homeLink)
m, err := New("/")
@ -187,7 +186,7 @@ func TestDelete_Bad_ProtectedHomeViaSymlinkEnv(t *testing.T) {
assert.DirExists(t, realHome)
}
func TestDeleteAll_Bad_ProtectedHomeViaEnv(t *testing.T) {
func TestClient_DeleteAll_ProtectedHomeViaEnv_Bad(t *testing.T) {
tempHome := t.TempDir()
t.Setenv("HOME", tempHome)
@ -199,7 +198,7 @@ func TestDeleteAll_Bad_ProtectedHomeViaEnv(t *testing.T) {
assert.DirExists(t, tempHome)
}
func TestRename_Good_Basic(t *testing.T) {
func TestClient_Rename_Basic_Good(t *testing.T) {
root := t.TempDir()
m, _ := New(root)
@ -211,7 +210,7 @@ func TestRename_Good_Basic(t *testing.T) {
assert.True(t, m.Exists("new"))
}
func TestFileGetFileSet_Good_Basic(t *testing.T) {
func TestClient_FileGetFileSet_Basic_Good(t *testing.T) {
root := t.TempDir()
m, _ := New(root)
@ -223,7 +222,7 @@ func TestFileGetFileSet_Good_Basic(t *testing.T) {
assert.Equal(t, "value", val)
}
func TestDelete_Good(t *testing.T) {
func TestClient_Delete_Good(t *testing.T) {
testRoot := t.TempDir()
medium, err := New(testRoot)
@ -246,7 +245,7 @@ func TestDelete_Good(t *testing.T) {
assert.False(t, medium.IsDir("emptydir"))
}
func TestDelete_Bad_NotEmpty(t *testing.T) {
func TestClient_Delete_NotEmpty_Bad(t *testing.T) {
testRoot := t.TempDir()
medium, err := New(testRoot)
@ -261,7 +260,7 @@ func TestDelete_Bad_NotEmpty(t *testing.T) {
assert.Error(t, err)
}
func TestDeleteAll_Good(t *testing.T) {
func TestClient_DeleteAll_Good(t *testing.T) {
testRoot := t.TempDir()
medium, err := New(testRoot)
@ -281,7 +280,7 @@ func TestDeleteAll_Good(t *testing.T) {
assert.False(t, medium.Exists("mydir/subdir/file2.txt"))
}
func TestRename_Good(t *testing.T) {
func TestClient_Rename_Good(t *testing.T) {
testRoot := t.TempDir()
medium, err := New(testRoot)
@ -300,7 +299,7 @@ func TestRename_Good(t *testing.T) {
assert.Equal(t, "content", content)
}
func TestRename_Good_TraversalSanitised(t *testing.T) {
func TestClient_Rename_TraversalSanitised_Good(t *testing.T) {
testRoot := t.TempDir()
medium, err := New(testRoot)
@ -317,7 +316,7 @@ func TestRename_Good_TraversalSanitised(t *testing.T) {
assert.True(t, medium.Exists("escaped.txt"))
}
func TestList_Good(t *testing.T) {
func TestClient_List_Good(t *testing.T) {
testRoot := t.TempDir()
medium, err := New(testRoot)
@ -345,7 +344,7 @@ func TestList_Good(t *testing.T) {
assert.True(t, names["subdir"])
}
func TestStat_Good(t *testing.T) {
func TestClient_Stat_Good(t *testing.T) {
testRoot := t.TempDir()
medium, err := New(testRoot)
@ -369,7 +368,7 @@ func TestStat_Good(t *testing.T) {
assert.True(t, info.IsDir())
}
func TestExists_Good(t *testing.T) {
func TestClient_Exists_Good(t *testing.T) {
testRoot := t.TempDir()
medium, err := New(testRoot)
@ -386,7 +385,7 @@ func TestExists_Good(t *testing.T) {
assert.True(t, medium.Exists("mydir"))
}
func TestIsDir_Good(t *testing.T) {
func TestClient_IsDir_Good(t *testing.T) {
testRoot := t.TempDir()
medium, err := New(testRoot)
@ -403,7 +402,7 @@ func TestIsDir_Good(t *testing.T) {
assert.False(t, medium.IsDir("nonexistent"))
}
func TestReadStream_Good_Basic(t *testing.T) {
func TestClient_ReadStream_Basic_Good(t *testing.T) {
root := t.TempDir()
m, _ := New(root)
@ -422,14 +421,14 @@ func TestReadStream_Good_Basic(t *testing.T) {
assert.Equal(t, "streaming", string(data))
}
func TestWriteStream_Good_Basic(t *testing.T) {
func TestClient_WriteStream_Basic_Good(t *testing.T) {
root := t.TempDir()
m, _ := New(root)
writer, err := m.WriteStream("output.txt")
assert.NoError(t, err)
_, err = io.Copy(writer, strings.NewReader("piped data"))
_, err = io.Copy(writer, core.NewReader("piped data"))
assert.NoError(t, err)
err = writer.Close()
assert.NoError(t, err)
@ -439,7 +438,7 @@ func TestWriteStream_Good_Basic(t *testing.T) {
assert.Equal(t, "piped data", content)
}
func TestPath_Ugly_TraversalAdvanced(t *testing.T) {
func TestClient_Path_TraversalAdvanced_Ugly(t *testing.T) {
m := &Medium{root: "/sandbox"}
// Multiple levels of traversal
@ -454,7 +453,7 @@ func TestPath_Ugly_TraversalAdvanced(t *testing.T) {
assert.Equal(t, "/sandbox/file\x00.txt", m.path("file\x00.txt"))
}
func TestValidatePath_Bad_SymlinkEscape(t *testing.T) {
func TestClient_ValidatePath_SymlinkEscape_Bad(t *testing.T) {
root := t.TempDir()
m, err := New(root)
assert.NoError(t, err)
@ -474,7 +473,7 @@ func TestValidatePath_Bad_SymlinkEscape(t *testing.T) {
// Test 2: Symlink escape
// Create a symlink inside the sandbox pointing outside
linkPath := core.Path(root, "evil_link")
err = os.Symlink(outside, linkPath)
err = syscall.Symlink(outside, linkPath)
assert.NoError(t, err)
// Try to access a file through the symlink
@ -487,7 +486,7 @@ func TestValidatePath_Bad_SymlinkEscape(t *testing.T) {
assert.NoError(t, err)
innerDir := core.Path(root, "inner")
nestedLink := core.Path(innerDir, "nested_evil")
err = os.Symlink(outside, nestedLink)
err = syscall.Symlink(outside, nestedLink)
assert.NoError(t, err)
_, err = m.validatePath("inner/nested_evil/secret.txt")
@ -495,7 +494,7 @@ func TestValidatePath_Bad_SymlinkEscape(t *testing.T) {
assert.ErrorIs(t, err, fs.ErrPermission)
}
func TestEmptyPaths_Ugly(t *testing.T) {
func TestClient_EmptyPaths_Ugly(t *testing.T) {
root := t.TempDir()
m, err := New(root)
assert.NoError(t, err)

View file

@ -11,7 +11,6 @@ import (
"io/fs"
"path"
"slices"
"strings"
"time"
core "dappco.re/go/core"
@ -42,13 +41,15 @@ func New() *Node {
// ---------- Node-specific methods ----------
// AddData stages content in the in-memory filesystem.
//
// result := n.AddData(...)
func (n *Node) AddData(name string, content []byte) {
name = strings.TrimPrefix(name, "/")
name = core.TrimPrefix(name, "/")
if name == "" {
return
}
// Directories are implicit, so we don't store them.
if strings.HasSuffix(name, "/") {
if core.HasSuffix(name, "/") {
return
}
n.files[name] = &dataFile{
@ -59,6 +60,8 @@ func (n *Node) AddData(name string, content []byte) {
}
// ToTar serialises the entire in-memory tree to a tar archive.
//
// result := n.ToTar(...)
func (n *Node) ToTar() ([]byte, error) {
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
@ -86,6 +89,8 @@ func (n *Node) ToTar() ([]byte, error) {
}
// FromTar creates a new Node from a tar archive.
//
// result := node.FromTar(...)
func FromTar(data []byte) (*Node, error) {
n := New()
if err := n.LoadTar(data); err != nil {
@ -95,6 +100,8 @@ func FromTar(data []byte) (*Node, error) {
}
// LoadTar replaces the in-memory tree with the contents of a tar archive.
//
// result := n.LoadTar(...)
func (n *Node) LoadTar(data []byte) error {
newFiles := make(map[string]*dataFile)
tr := tar.NewReader(bytes.NewReader(data))
@ -111,10 +118,10 @@ func (n *Node) LoadTar(data []byte) error {
if header.Typeflag == tar.TypeReg {
content, err := goio.ReadAll(tr)
if err != nil {
return err
return core.E("node.LoadTar", "read tar entry", err)
}
name := strings.TrimPrefix(header.Name, "/")
if name == "" || strings.HasSuffix(name, "/") {
name := core.TrimPrefix(header.Name, "/")
if name == "" || core.HasSuffix(name, "/") {
continue
}
newFiles[name] = &dataFile{
@ -130,6 +137,8 @@ func (n *Node) LoadTar(data []byte) error {
}
// WalkNode walks the in-memory tree, calling fn for each entry.
//
// result := n.WalkNode(...)
func (n *Node) WalkNode(root string, fn fs.WalkDirFunc) error {
return fs.WalkDir(n, root, fn)
}
@ -147,6 +156,8 @@ type WalkOptions struct {
}
// Walk walks the in-memory tree with optional WalkOptions.
//
// result := n.Walk(...)
func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error {
var opt WalkOptions
if len(opts) > 0 {
@ -175,9 +186,9 @@ func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error {
// After visiting a directory at MaxDepth, prevent descending further.
if result == nil && opt.MaxDepth > 0 && d != nil && d.IsDir() && p != root {
rel := strings.TrimPrefix(p, root)
rel = strings.TrimPrefix(rel, "/")
depth := strings.Count(rel, "/") + 1
rel := core.TrimPrefix(p, root)
rel = core.TrimPrefix(rel, "/")
depth := len(core.Split(rel, "/"))
if depth >= opt.MaxDepth {
return fs.SkipDir
}
@ -189,11 +200,13 @@ func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error {
// ReadFile returns the content of the named file as a byte slice.
// Implements fs.ReadFileFS.
//
// result := n.ReadFile(...)
func (n *Node) ReadFile(name string) ([]byte, error) {
name = strings.TrimPrefix(name, "/")
name = core.TrimPrefix(name, "/")
f, ok := n.files[name]
if !ok {
return nil, &fs.PathError{Op: "read", Path: name, Err: fs.ErrNotExist}
return nil, core.E("node.ReadFile", core.Concat("path not found: ", name), fs.ErrNotExist)
}
// Return a copy to prevent callers from mutating internal state.
result := make([]byte, len(f.content))
@ -202,19 +215,21 @@ func (n *Node) ReadFile(name string) ([]byte, error) {
}
// CopyFile copies a file from the in-memory tree to the local filesystem.
//
// result := n.CopyFile(...)
func (n *Node) CopyFile(src, dst string, perm fs.FileMode) error {
src = strings.TrimPrefix(src, "/")
src = core.TrimPrefix(src, "/")
f, ok := n.files[src]
if !ok {
// Check if it's a directory — can't copy directories this way.
info, err := n.Stat(src)
if err != nil {
return &fs.PathError{Op: "copyfile", Path: src, Err: fs.ErrNotExist}
return core.E("node.CopyFile", core.Concat("source not found: ", src), fs.ErrNotExist)
}
if info.IsDir() {
return &fs.PathError{Op: "copyfile", Path: src, Err: fs.ErrInvalid}
return core.E("node.CopyFile", core.Concat("source is a directory: ", src), fs.ErrInvalid)
}
return &fs.PathError{Op: "copyfile", Path: src, Err: fs.ErrNotExist}
return core.E("node.CopyFile", core.Concat("source not found: ", src), fs.ErrNotExist)
}
parent := core.PathDir(dst)
if parent != "." && parent != "" && parent != dst && !coreio.Local.IsDir(parent) {
@ -230,7 +245,7 @@ func (n *Node) CopyFile(src, dst string, perm fs.FileMode) error {
// dst := io.NewMockMedium()
// _ = n.CopyTo(dst, "config", "backup/config")
func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error {
sourcePath = strings.TrimPrefix(sourcePath, "/")
sourcePath = core.TrimPrefix(sourcePath, "/")
info, err := n.Stat(sourcePath)
if err != nil {
return err
@ -240,25 +255,25 @@ func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error {
// Single file copy
f, ok := n.files[sourcePath]
if !ok {
return fs.ErrNotExist
return core.E("node.CopyTo", core.Concat("path not found: ", sourcePath), fs.ErrNotExist)
}
return target.Write(destPath, string(f.content))
}
// Directory: walk and copy all files underneath
prefix := sourcePath
if prefix != "" && !strings.HasSuffix(prefix, "/") {
if prefix != "" && !core.HasSuffix(prefix, "/") {
prefix += "/"
}
for p, f := range n.files {
if !strings.HasPrefix(p, prefix) && p != sourcePath {
if !core.HasPrefix(p, prefix) && p != sourcePath {
continue
}
rel := strings.TrimPrefix(p, prefix)
rel := core.TrimPrefix(p, prefix)
dest := destPath
if rel != "" {
dest = destPath + "/" + rel
dest = core.Concat(destPath, "/", rel)
}
if err := target.Write(dest, string(f.content)); err != nil {
return err
@ -270,8 +285,10 @@ func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error {
// ---------- Medium interface: fs.FS methods ----------
// Open opens a file from the Node. Implements fs.FS.
//
// result := n.Open(...)
func (n *Node) Open(name string) (fs.File, error) {
name = strings.TrimPrefix(name, "/")
name = core.TrimPrefix(name, "/")
if file, ok := n.files[name]; ok {
return &dataFileReader{file: file}, nil
}
@ -281,16 +298,18 @@ func (n *Node) Open(name string) (fs.File, error) {
prefix = ""
}
for p := range n.files {
if strings.HasPrefix(p, prefix) {
if core.HasPrefix(p, prefix) {
return &dirFile{path: name, modTime: time.Now()}, nil
}
}
return nil, fs.ErrNotExist
return nil, core.E("node.Open", core.Concat("path not found: ", name), fs.ErrNotExist)
}
// Stat returns file information for the given path.
//
// result := n.Stat(...)
func (n *Node) Stat(name string) (fs.FileInfo, error) {
name = strings.TrimPrefix(name, "/")
name = core.TrimPrefix(name, "/")
if file, ok := n.files[name]; ok {
return file.Stat()
}
@ -300,16 +319,18 @@ func (n *Node) Stat(name string) (fs.FileInfo, error) {
prefix = ""
}
for p := range n.files {
if strings.HasPrefix(p, prefix) {
if core.HasPrefix(p, prefix) {
return &dirInfo{name: path.Base(name), modTime: time.Now()}, nil
}
}
return nil, fs.ErrNotExist
return nil, core.E("node.Stat", core.Concat("path not found: ", name), fs.ErrNotExist)
}
// ReadDir reads and returns all directory entries for the named directory.
//
// result := n.ReadDir(...)
func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) {
name = strings.TrimPrefix(name, "/")
name = core.TrimPrefix(name, "/")
if name == "." {
name = ""
}
@ -328,19 +349,19 @@ func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) {
}
for p := range n.files {
if !strings.HasPrefix(p, prefix) {
if !core.HasPrefix(p, prefix) {
continue
}
relPath := strings.TrimPrefix(p, prefix)
firstComponent := strings.Split(relPath, "/")[0]
relPath := core.TrimPrefix(p, prefix)
firstComponent := core.SplitN(relPath, "/", 2)[0]
if seen[firstComponent] {
continue
}
seen[firstComponent] = true
if strings.Contains(relPath, "/") {
if core.Contains(relPath, "/") {
dir := &dirInfo{name: firstComponent, modTime: time.Now()}
entries = append(entries, fs.FileInfoToDirEntry(dir))
} else {
@ -360,37 +381,49 @@ func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) {
// ---------- Medium interface: read/write ----------
// Read retrieves the content of a file as a string.
//
// result := n.Read(...)
func (n *Node) Read(p string) (string, error) {
p = strings.TrimPrefix(p, "/")
p = core.TrimPrefix(p, "/")
f, ok := n.files[p]
if !ok {
return "", fs.ErrNotExist
return "", core.E("node.Read", core.Concat("path not found: ", p), fs.ErrNotExist)
}
return string(f.content), nil
}
// Write saves the given content to a file, overwriting it if it exists.
//
// result := n.Write(...)
func (n *Node) Write(p, content string) error {
n.AddData(p, []byte(content))
return nil
}
// WriteMode saves content with explicit permissions (no-op for in-memory node).
//
// result := n.WriteMode(...)
func (n *Node) WriteMode(p, content string, mode fs.FileMode) error {
return n.Write(p, content)
}
// FileGet is an alias for Read.
//
// result := n.FileGet(...)
func (n *Node) FileGet(p string) (string, error) {
return n.Read(p)
}
// FileSet is an alias for Write.
//
// result := n.FileSet(...)
func (n *Node) FileSet(p, content string) error {
return n.Write(p, content)
}
// EnsureDir is a no-op because directories are implicit in Node.
//
// result := n.EnsureDir(...)
func (n *Node) EnsureDir(_ string) error {
return nil
}
@ -398,19 +431,25 @@ func (n *Node) EnsureDir(_ string) error {
// ---------- Medium interface: existence checks ----------
// Exists checks if a path exists (file or directory).
//
// result := n.Exists(...)
func (n *Node) Exists(p string) bool {
_, err := n.Stat(p)
return err == nil
}
// IsFile checks if a path exists and is a regular file.
//
// result := n.IsFile(...)
func (n *Node) IsFile(p string) bool {
p = strings.TrimPrefix(p, "/")
p = core.TrimPrefix(p, "/")
_, ok := n.files[p]
return ok
}
// IsDir checks if a path exists and is a directory.
//
// result := n.IsDir(...)
func (n *Node) IsDir(p string) bool {
info, err := n.Stat(p)
if err != nil {
@ -422,18 +461,22 @@ func (n *Node) IsDir(p string) bool {
// ---------- Medium interface: mutations ----------
// Delete removes a single file.
//
// result := n.Delete(...)
func (n *Node) Delete(p string) error {
p = strings.TrimPrefix(p, "/")
p = core.TrimPrefix(p, "/")
if _, ok := n.files[p]; ok {
delete(n.files, p)
return nil
}
return fs.ErrNotExist
return core.E("node.Delete", core.Concat("path not found: ", p), fs.ErrNotExist)
}
// DeleteAll removes a file or directory and all children.
//
// result := n.DeleteAll(...)
func (n *Node) DeleteAll(p string) error {
p = strings.TrimPrefix(p, "/")
p = core.TrimPrefix(p, "/")
found := false
if _, ok := n.files[p]; ok {
@ -443,26 +486,28 @@ func (n *Node) DeleteAll(p string) error {
prefix := p + "/"
for k := range n.files {
if strings.HasPrefix(k, prefix) {
if core.HasPrefix(k, prefix) {
delete(n.files, k)
found = true
}
}
if !found {
return fs.ErrNotExist
return core.E("node.DeleteAll", core.Concat("path not found: ", p), fs.ErrNotExist)
}
return nil
}
// Rename moves a file from oldPath to newPath.
//
// result := n.Rename(...)
func (n *Node) Rename(oldPath, newPath string) error {
oldPath = strings.TrimPrefix(oldPath, "/")
newPath = strings.TrimPrefix(newPath, "/")
oldPath = core.TrimPrefix(oldPath, "/")
newPath = core.TrimPrefix(newPath, "/")
f, ok := n.files[oldPath]
if !ok {
return fs.ErrNotExist
return core.E("node.Rename", core.Concat("path not found: ", oldPath), fs.ErrNotExist)
}
f.name = newPath
@ -472,8 +517,10 @@ func (n *Node) Rename(oldPath, newPath string) error {
}
// List returns directory entries for the given path.
//
// result := n.List(...)
func (n *Node) List(p string) ([]fs.DirEntry, error) {
p = strings.TrimPrefix(p, "/")
p = core.TrimPrefix(p, "/")
if p == "" || p == "." {
return n.ReadDir(".")
}
@ -484,15 +531,19 @@ func (n *Node) List(p string) ([]fs.DirEntry, error) {
// Create creates or truncates the named file, returning a WriteCloser.
// Content is committed to the Node on Close.
//
// result := n.Create(...)
func (n *Node) Create(p string) (goio.WriteCloser, error) {
p = strings.TrimPrefix(p, "/")
p = core.TrimPrefix(p, "/")
return &nodeWriter{node: n, path: p}, nil
}
// Append opens the named file for appending, creating it if needed.
// Content is committed to the Node on Close.
//
// result := n.Append(...)
func (n *Node) Append(p string) (goio.WriteCloser, error) {
p = strings.TrimPrefix(p, "/")
p = core.TrimPrefix(p, "/")
var existing []byte
if f, ok := n.files[p]; ok {
existing = make([]byte, len(f.content))
@ -502,6 +553,8 @@ func (n *Node) Append(p string) (goio.WriteCloser, error) {
}
// ReadStream returns a ReadCloser for the file content.
//
// result := n.ReadStream(...)
func (n *Node) ReadStream(p string) (goio.ReadCloser, error) {
f, err := n.Open(p)
if err != nil {
@ -511,6 +564,8 @@ func (n *Node) ReadStream(p string) (goio.ReadCloser, error) {
}
// WriteStream returns a WriteCloser for the file content.
//
// result := n.WriteStream(...)
func (n *Node) WriteStream(p string) (goio.WriteCloser, error) {
return n.Create(p)
}
@ -524,11 +579,17 @@ type nodeWriter struct {
buf []byte
}
// Write documents the Write operation.
//
// result := w.Write(...)
func (w *nodeWriter) Write(p []byte) (int, error) {
w.buf = append(w.buf, p...)
return len(p), nil
}
// Close documents the Close operation.
//
// result := w.Close(...)
func (w *nodeWriter) Close() error {
w.node.files[w.path] = &dataFile{
name: w.path,
@ -545,18 +606,52 @@ type dataFile struct {
modTime time.Time
}
// Stat documents the Stat operation.
//
// result := d.Stat(...)
func (d *dataFile) Stat() (fs.FileInfo, error) { return &dataFileInfo{file: d}, nil }
// Read documents the Read operation.
//
// result := d.Read(...)
func (d *dataFile) Read(_ []byte) (int, error) { return 0, goio.EOF }
// Close documents the Close operation.
//
// result := d.Close(...)
func (d *dataFile) Close() error { return nil }
// dataFileInfo implements fs.FileInfo for a dataFile.
type dataFileInfo struct{ file *dataFile }
// Name documents the Name operation.
//
// result := d.Name(...)
func (d *dataFileInfo) Name() string { return path.Base(d.file.name) }
// Size documents the Size operation.
//
// result := d.Size(...)
func (d *dataFileInfo) Size() int64 { return int64(len(d.file.content)) }
// Mode documents the Mode operation.
//
// result := d.Mode(...)
func (d *dataFileInfo) Mode() fs.FileMode { return 0444 }
// ModTime documents the ModTime operation.
//
// result := d.ModTime(...)
func (d *dataFileInfo) ModTime() time.Time { return d.file.modTime }
// IsDir documents the IsDir operation.
//
// result := d.IsDir(...)
func (d *dataFileInfo) IsDir() bool { return false }
// Sys documents the Sys operation.
//
// result := d.Sys(...)
func (d *dataFileInfo) Sys() any { return nil }
// dataFileReader implements fs.File for reading a dataFile.
@ -565,13 +660,24 @@ type dataFileReader struct {
reader *bytes.Reader
}
// Stat documents the Stat operation.
//
// result := d.Stat(...)
func (d *dataFileReader) Stat() (fs.FileInfo, error) { return d.file.Stat() }
// Read documents the Read operation.
//
// result := d.Read(...)
func (d *dataFileReader) Read(p []byte) (int, error) {
if d.reader == nil {
d.reader = bytes.NewReader(d.file.content)
}
return d.reader.Read(p)
}
// Close documents the Close operation.
//
// result := d.Close(...)
func (d *dataFileReader) Close() error { return nil }
// dirInfo implements fs.FileInfo for an implicit directory.
@ -580,11 +686,34 @@ type dirInfo struct {
modTime time.Time
}
// Name documents the Name operation.
//
// result := d.Name(...)
func (d *dirInfo) Name() string { return d.name }
// Size documents the Size operation.
//
// result := d.Size(...)
func (d *dirInfo) Size() int64 { return 0 }
// Mode documents the Mode operation.
//
// result := d.Mode(...)
func (d *dirInfo) Mode() fs.FileMode { return fs.ModeDir | 0555 }
// ModTime documents the ModTime operation.
//
// result := d.ModTime(...)
func (d *dirInfo) ModTime() time.Time { return d.modTime }
// IsDir documents the IsDir operation.
//
// result := d.IsDir(...)
func (d *dirInfo) IsDir() bool { return true }
// Sys documents the Sys operation.
//
// result := d.Sys(...)
func (d *dirInfo) Sys() any { return nil }
// dirFile implements fs.File for a directory.
@ -593,12 +722,23 @@ type dirFile struct {
modTime time.Time
}
// Stat documents the Stat operation.
//
// result := d.Stat(...)
func (d *dirFile) Stat() (fs.FileInfo, error) {
return &dirInfo{name: path.Base(d.path), modTime: d.modTime}, nil
}
// Read documents the Read operation.
//
// result := d.Read(...)
func (d *dirFile) Read([]byte) (int, error) {
return 0, &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid}
return 0, core.E("node.dirFile.Read", core.Concat("cannot read directory: ", d.path), &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid})
}
// Close documents the Close operation.
//
// result := d.Close(...)
func (d *dirFile) Close() error { return nil }
// Ensure Node implements fs.FS so WalkDir works.

View file

@ -3,11 +3,9 @@ package node
import (
"archive/tar"
"bytes"
"errors"
"io"
"io/fs"
"sort"
"strings"
"testing"
core "dappco.re/go/core"
@ -20,7 +18,7 @@ import (
// New
// ---------------------------------------------------------------------------
func TestNew_Good(t *testing.T) {
func TestNode_New_Good(t *testing.T) {
n := New()
require.NotNil(t, n, "New() must not return nil")
assert.NotNil(t, n.files, "New() must initialise the files map")
@ -30,7 +28,7 @@ func TestNew_Good(t *testing.T) {
// AddData
// ---------------------------------------------------------------------------
func TestAddData_Good(t *testing.T) {
func TestNode_AddData_Good(t *testing.T) {
n := New()
n.AddData("foo.txt", []byte("foo"))
@ -43,7 +41,7 @@ func TestAddData_Good(t *testing.T) {
assert.Equal(t, "foo.txt", info.Name())
}
func TestAddData_Bad(t *testing.T) {
func TestNode_AddData_Bad(t *testing.T) {
n := New()
// Empty name is silently ignored.
@ -55,7 +53,7 @@ func TestAddData_Bad(t *testing.T) {
assert.Empty(t, n.files, "directory entry must not be stored")
}
func TestAddData_Ugly(t *testing.T) {
func TestNode_AddData_Ugly(t *testing.T) {
t.Run("Overwrite", func(t *testing.T) {
n := New()
n.AddData("foo.txt", []byte("foo"))
@ -77,7 +75,7 @@ func TestAddData_Ugly(t *testing.T) {
// Open
// ---------------------------------------------------------------------------
func TestOpen_Good(t *testing.T) {
func TestNode_Open_Good(t *testing.T) {
n := New()
n.AddData("foo.txt", []byte("foo"))
@ -91,14 +89,14 @@ func TestOpen_Good(t *testing.T) {
assert.Equal(t, "foo", string(buf[:nr]))
}
func TestOpen_Bad(t *testing.T) {
func TestNode_Open_Bad(t *testing.T) {
n := New()
_, err := n.Open("nonexistent.txt")
require.Error(t, err)
assert.ErrorIs(t, err, fs.ErrNotExist)
}
func TestOpen_Ugly(t *testing.T) {
func TestNode_Open_Ugly(t *testing.T) {
n := New()
n.AddData("bar/baz.txt", []byte("baz"))
@ -112,7 +110,7 @@ func TestOpen_Ugly(t *testing.T) {
require.Error(t, err)
var pathErr *fs.PathError
require.True(t, errors.As(err, &pathErr))
require.True(t, core.As(err, &pathErr))
assert.Equal(t, fs.ErrInvalid, pathErr.Err)
}
@ -120,7 +118,7 @@ func TestOpen_Ugly(t *testing.T) {
// Stat
// ---------------------------------------------------------------------------
func TestStat_Good(t *testing.T) {
func TestNode_Stat_Good(t *testing.T) {
n := New()
n.AddData("foo.txt", []byte("foo"))
n.AddData("bar/baz.txt", []byte("baz"))
@ -139,14 +137,14 @@ func TestStat_Good(t *testing.T) {
assert.Equal(t, "bar", dirInfo.Name())
}
func TestStat_Bad(t *testing.T) {
func TestNode_Stat_Bad(t *testing.T) {
n := New()
_, err := n.Stat("nonexistent")
require.Error(t, err)
assert.ErrorIs(t, err, fs.ErrNotExist)
}
func TestStat_Ugly(t *testing.T) {
func TestNode_Stat_Ugly(t *testing.T) {
n := New()
n.AddData("foo.txt", []byte("foo"))
@ -161,7 +159,7 @@ func TestStat_Ugly(t *testing.T) {
// ReadFile
// ---------------------------------------------------------------------------
func TestReadFile_Good(t *testing.T) {
func TestNode_ReadFile_Good(t *testing.T) {
n := New()
n.AddData("hello.txt", []byte("hello world"))
@ -170,14 +168,14 @@ func TestReadFile_Good(t *testing.T) {
assert.Equal(t, []byte("hello world"), data)
}
func TestReadFile_Bad(t *testing.T) {
func TestNode_ReadFile_Bad(t *testing.T) {
n := New()
_, err := n.ReadFile("missing.txt")
require.Error(t, err)
assert.ErrorIs(t, err, fs.ErrNotExist)
}
func TestReadFile_Ugly(t *testing.T) {
func TestNode_ReadFile_Ugly(t *testing.T) {
n := New()
n.AddData("data.bin", []byte("original"))
@ -195,7 +193,7 @@ func TestReadFile_Ugly(t *testing.T) {
// ReadDir
// ---------------------------------------------------------------------------
func TestReadDir_Good(t *testing.T) {
func TestNode_ReadDir_Good(t *testing.T) {
n := New()
n.AddData("foo.txt", []byte("foo"))
n.AddData("bar/baz.txt", []byte("baz"))
@ -212,7 +210,7 @@ func TestReadDir_Good(t *testing.T) {
assert.Equal(t, []string{"baz.txt", "qux.txt"}, sortedNames(barEntries))
}
func TestReadDir_Bad(t *testing.T) {
func TestNode_ReadDir_Bad(t *testing.T) {
n := New()
n.AddData("foo.txt", []byte("foo"))
@ -220,11 +218,11 @@ func TestReadDir_Bad(t *testing.T) {
_, err := n.ReadDir("foo.txt")
require.Error(t, err)
var pathErr *fs.PathError
require.True(t, errors.As(err, &pathErr))
require.True(t, core.As(err, &pathErr))
assert.Equal(t, fs.ErrInvalid, pathErr.Err)
}
func TestReadDir_Ugly(t *testing.T) {
func TestNode_ReadDir_Ugly(t *testing.T) {
n := New()
n.AddData("bar/baz.txt", []byte("baz"))
n.AddData("empty_dir/", nil) // Ignored by AddData.
@ -238,7 +236,7 @@ func TestReadDir_Ugly(t *testing.T) {
// Exists
// ---------------------------------------------------------------------------
func TestExists_Good(t *testing.T) {
func TestNode_Exists_Good(t *testing.T) {
n := New()
n.AddData("foo.txt", []byte("foo"))
n.AddData("bar/baz.txt", []byte("baz"))
@ -247,12 +245,12 @@ func TestExists_Good(t *testing.T) {
assert.True(t, n.Exists("bar"))
}
func TestExists_Bad(t *testing.T) {
func TestNode_Exists_Bad(t *testing.T) {
n := New()
assert.False(t, n.Exists("nonexistent"))
}
func TestExists_Ugly(t *testing.T) {
func TestNode_Exists_Ugly(t *testing.T) {
n := New()
n.AddData("dummy.txt", []byte("dummy"))
@ -264,7 +262,7 @@ func TestExists_Ugly(t *testing.T) {
// Walk
// ---------------------------------------------------------------------------
func TestWalk_Good(t *testing.T) {
func TestNode_Walk_Good(t *testing.T) {
n := New()
n.AddData("foo.txt", []byte("foo"))
n.AddData("bar/baz.txt", []byte("baz"))
@ -281,7 +279,7 @@ func TestWalk_Good(t *testing.T) {
assert.Equal(t, []string{".", "bar", "bar/baz.txt", "bar/qux.txt", "foo.txt"}, paths)
}
func TestWalk_Bad(t *testing.T) {
func TestNode_Walk_Bad(t *testing.T) {
n := New()
var called bool
@ -295,13 +293,13 @@ func TestWalk_Bad(t *testing.T) {
assert.ErrorIs(t, err, fs.ErrNotExist)
}
func TestWalk_Ugly(t *testing.T) {
func TestNode_Walk_Ugly(t *testing.T) {
n := New()
n.AddData("a/b.txt", []byte("b"))
n.AddData("a/c.txt", []byte("c"))
// Stop walk early with a custom error.
walkErr := errors.New("stop walking")
walkErr := core.NewError("stop walking")
var paths []string
err := n.Walk(".", func(p string, d fs.DirEntry, err error) error {
if p == "a/b.txt" {
@ -314,7 +312,7 @@ func TestWalk_Ugly(t *testing.T) {
assert.Equal(t, walkErr, err, "Walk must propagate the callback error")
}
func TestWalk_Good_Options(t *testing.T) {
func TestNode_Walk_Options_Good(t *testing.T) {
n := New()
n.AddData("root.txt", []byte("root"))
n.AddData("a/a1.txt", []byte("a1"))
@ -339,7 +337,7 @@ func TestWalk_Good_Options(t *testing.T) {
paths = append(paths, p)
return nil
}, WalkOptions{Filter: func(p string, d fs.DirEntry) bool {
return !strings.HasPrefix(p, "a")
return !core.HasPrefix(p, "a")
}})
require.NoError(t, err)
@ -363,7 +361,7 @@ func TestWalk_Good_Options(t *testing.T) {
// CopyFile
// ---------------------------------------------------------------------------
func TestCopyFile_Good(t *testing.T) {
func TestNode_CopyFile_Good(t *testing.T) {
n := New()
n.AddData("foo.txt", []byte("foo"))
@ -376,7 +374,7 @@ func TestCopyFile_Good(t *testing.T) {
assert.Equal(t, "foo", content)
}
func TestCopyFile_Bad(t *testing.T) {
func TestNode_CopyFile_Bad(t *testing.T) {
n := New()
tmpfile := core.Path(t.TempDir(), "test.txt")
@ -390,7 +388,7 @@ func TestCopyFile_Bad(t *testing.T) {
assert.Error(t, err)
}
func TestCopyFile_Ugly(t *testing.T) {
func TestNode_CopyFile_Ugly(t *testing.T) {
n := New()
n.AddData("bar/baz.txt", []byte("baz"))
tmpfile := core.Path(t.TempDir(), "test.txt")
@ -404,7 +402,7 @@ func TestCopyFile_Ugly(t *testing.T) {
// ToTar / FromTar
// ---------------------------------------------------------------------------
func TestToTar_Good(t *testing.T) {
func TestNode_ToTar_Good(t *testing.T) {
n := New()
n.AddData("foo.txt", []byte("foo"))
n.AddData("bar/baz.txt", []byte("baz"))
@ -431,7 +429,7 @@ func TestToTar_Good(t *testing.T) {
assert.Equal(t, "baz", files["bar/baz.txt"])
}
func TestFromTar_Good(t *testing.T) {
func TestNode_FromTar_Good(t *testing.T) {
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
@ -458,14 +456,14 @@ func TestFromTar_Good(t *testing.T) {
assert.True(t, n.Exists("bar/baz.txt"), "bar/baz.txt should exist")
}
func TestFromTar_Bad(t *testing.T) {
func TestNode_FromTar_Bad(t *testing.T) {
// Truncated data that cannot be a valid tar.
truncated := make([]byte, 100)
_, err := FromTar(truncated)
assert.Error(t, err, "truncated data should produce an error")
}
func TestTarRoundTrip_Good(t *testing.T) {
func TestNode_TarRoundTrip_Good(t *testing.T) {
n1 := New()
n1.AddData("a.txt", []byte("alpha"))
n1.AddData("b/c.txt", []byte("charlie"))
@ -490,7 +488,7 @@ func TestTarRoundTrip_Good(t *testing.T) {
// fs.FS interface compliance
// ---------------------------------------------------------------------------
func TestFSInterface_Good(t *testing.T) {
func TestNode_FSInterface_Good(t *testing.T) {
n := New()
n.AddData("hello.txt", []byte("world"))

182
s3/s3.go
View file

@ -7,14 +7,13 @@ import (
goio "io"
"io/fs"
"path"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
coreerr "forge.lthn.ai/core/go-log"
core "dappco.re/go/core"
)
// s3API is the subset of the S3 client API used by this package.
@ -47,26 +46,28 @@ func deleteObjectsError(prefix string, errs []types.Error) error {
msg := aws.ToString(item.Message)
switch {
case code != "" && msg != "":
details = append(details, key+": "+code+" "+msg)
details = append(details, core.Concat(key, ": ", code, " ", msg))
case code != "":
details = append(details, key+": "+code)
details = append(details, core.Concat(key, ": ", code))
case msg != "":
details = append(details, key+": "+msg)
details = append(details, core.Concat(key, ": ", msg))
default:
details = append(details, key)
}
}
return coreerr.E("s3.DeleteAll", "partial delete failed under "+prefix+": "+strings.Join(details, "; "), nil)
return core.E("s3.DeleteAll", core.Concat("partial delete failed under ", prefix, ": ", core.Join("; ", details...)), nil)
}
// Option configures a Medium.
type Option func(*Medium)
// WithPrefix sets an optional key prefix for all operations.
//
// result := s3.WithPrefix(...)
func WithPrefix(prefix string) Option {
return func(m *Medium) {
// Ensure prefix ends with "/" if non-empty
if prefix != "" && !strings.HasSuffix(prefix, "/") {
if prefix != "" && !core.HasSuffix(prefix, "/") {
prefix += "/"
}
m.prefix = prefix
@ -74,6 +75,8 @@ func WithPrefix(prefix string) Option {
}
// WithClient sets the S3 client for dependency injection.
//
// result := s3.WithClient(...)
func WithClient(client *s3.Client) Option {
return func(m *Medium) {
m.client = client
@ -95,14 +98,14 @@ func withAPI(api s3API) Option {
// m, _ := s3.New("backups", s3.WithClient(awsClient), s3.WithPrefix("daily"))
func New(bucket string, opts ...Option) (*Medium, error) {
if bucket == "" {
return nil, coreerr.E("s3.New", "bucket name is required", nil)
return nil, core.E("s3.New", "bucket name is required", nil)
}
m := &Medium{bucket: bucket}
for _, opt := range opts {
opt(m)
}
if m.client == nil {
return nil, coreerr.E("s3.New", "S3 client is required (use WithClient option)", nil)
return nil, core.E("s3.New", "S3 client is required (use WithClient option)", nil)
}
return m, nil
}
@ -115,7 +118,7 @@ func (m *Medium) key(p string) string {
if clean == "/" {
clean = ""
}
clean = strings.TrimPrefix(clean, "/")
clean = core.TrimPrefix(clean, "/")
if m.prefix == "" {
return clean
@ -127,10 +130,12 @@ func (m *Medium) key(p string) string {
}
// Read retrieves the content of a file as a string.
//
// result := m.Read(...)
func (m *Medium) Read(p string) (string, error) {
key := m.key(p)
if key == "" {
return "", coreerr.E("s3.Read", "path is required", fs.ErrInvalid)
return "", core.E("s3.Read", "path is required", fs.ErrInvalid)
}
out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{
@ -138,48 +143,54 @@ func (m *Medium) Read(p string) (string, error) {
Key: aws.String(key),
})
if err != nil {
return "", coreerr.E("s3.Read", "failed to get object: "+key, err)
return "", core.E("s3.Read", core.Concat("failed to get object: ", key), err)
}
defer out.Body.Close()
data, err := goio.ReadAll(out.Body)
if err != nil {
return "", coreerr.E("s3.Read", "failed to read body: "+key, err)
return "", core.E("s3.Read", core.Concat("failed to read body: ", key), err)
}
return string(data), nil
}
// Write saves the given content to a file, overwriting it if it exists.
//
// result := m.Write(...)
func (m *Medium) Write(p, content string) error {
key := m.key(p)
if key == "" {
return coreerr.E("s3.Write", "path is required", fs.ErrInvalid)
return core.E("s3.Write", "path is required", fs.ErrInvalid)
}
_, err := m.client.PutObject(context.Background(), &s3.PutObjectInput{
Bucket: aws.String(m.bucket),
Key: aws.String(key),
Body: strings.NewReader(content),
Body: core.NewReader(content),
})
if err != nil {
return coreerr.E("s3.Write", "failed to put object: "+key, err)
return core.E("s3.Write", core.Concat("failed to put object: ", key), err)
}
return nil
}
// EnsureDir is a no-op for S3 (S3 has no real directories).
//
// result := m.EnsureDir(...)
func (m *Medium) EnsureDir(_ string) error {
return nil
}
// IsFile checks if a path exists and is a regular file (not a "directory" prefix).
//
// result := m.IsFile(...)
func (m *Medium) IsFile(p string) bool {
key := m.key(p)
if key == "" {
return false
}
// A "file" in S3 is an object whose key does not end with "/"
if strings.HasSuffix(key, "/") {
if core.HasSuffix(key, "/") {
return false
}
_, err := m.client.HeadObject(context.Background(), &s3.HeadObjectInput{
@ -190,20 +201,26 @@ func (m *Medium) IsFile(p string) bool {
}
// FileGet is a convenience function that reads a file from the medium.
//
// result := m.FileGet(...)
func (m *Medium) FileGet(p string) (string, error) {
return m.Read(p)
}
// FileSet is a convenience function that writes a file to the medium.
//
// result := m.FileSet(...)
func (m *Medium) FileSet(p, content string) error {
return m.Write(p, content)
}
// Delete removes a single object.
//
// result := m.Delete(...)
func (m *Medium) Delete(p string) error {
key := m.key(p)
if key == "" {
return coreerr.E("s3.Delete", "path is required", fs.ErrInvalid)
return core.E("s3.Delete", "path is required", fs.ErrInvalid)
}
_, err := m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
@ -211,16 +228,18 @@ func (m *Medium) Delete(p string) error {
Key: aws.String(key),
})
if err != nil {
return coreerr.E("s3.Delete", "failed to delete object: "+key, err)
return core.E("s3.Delete", core.Concat("failed to delete object: ", key), err)
}
return nil
}
// DeleteAll removes all objects under the given prefix.
//
// result := m.DeleteAll(...)
func (m *Medium) DeleteAll(p string) error {
key := m.key(p)
if key == "" {
return coreerr.E("s3.DeleteAll", "path is required", fs.ErrInvalid)
return core.E("s3.DeleteAll", "path is required", fs.ErrInvalid)
}
// First, try deleting the exact key
@ -229,12 +248,12 @@ func (m *Medium) DeleteAll(p string) error {
Key: aws.String(key),
})
if err != nil {
return coreerr.E("s3.DeleteAll", "failed to delete object: "+key, err)
return core.E("s3.DeleteAll", core.Concat("failed to delete object: ", key), err)
}
// Then delete all objects under the prefix
prefix := key
if !strings.HasSuffix(prefix, "/") {
if !core.HasSuffix(prefix, "/") {
prefix += "/"
}
@ -248,7 +267,7 @@ func (m *Medium) DeleteAll(p string) error {
ContinuationToken: continuationToken,
})
if err != nil {
return coreerr.E("s3.DeleteAll", "failed to list objects: "+prefix, err)
return core.E("s3.DeleteAll", core.Concat("failed to list objects: ", prefix), err)
}
if len(listOut.Contents) == 0 {
@ -265,7 +284,7 @@ func (m *Medium) DeleteAll(p string) error {
Delete: &types.Delete{Objects: objects, Quiet: aws.Bool(true)},
})
if err != nil {
return coreerr.E("s3.DeleteAll", "failed to delete objects", err)
return core.E("s3.DeleteAll", "failed to delete objects", err)
}
if err := deleteObjectsError(prefix, deleteOut.Errors); err != nil {
return err
@ -282,11 +301,13 @@ func (m *Medium) DeleteAll(p string) error {
}
// Rename moves an object by copying then deleting the original.
//
// result := m.Rename(...)
func (m *Medium) Rename(oldPath, newPath string) error {
oldKey := m.key(oldPath)
newKey := m.key(newPath)
if oldKey == "" || newKey == "" {
return coreerr.E("s3.Rename", "both old and new paths are required", fs.ErrInvalid)
return core.E("s3.Rename", "both old and new paths are required", fs.ErrInvalid)
}
copySource := m.bucket + "/" + oldKey
@ -297,7 +318,7 @@ func (m *Medium) Rename(oldPath, newPath string) error {
Key: aws.String(newKey),
})
if err != nil {
return coreerr.E("s3.Rename", "failed to copy object: "+oldKey+" -> "+newKey, err)
return core.E("s3.Rename", core.Concat("failed to copy object: ", oldKey, " -> ", newKey), err)
}
_, err = m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
@ -305,16 +326,18 @@ func (m *Medium) Rename(oldPath, newPath string) error {
Key: aws.String(oldKey),
})
if err != nil {
return coreerr.E("s3.Rename", "failed to delete source object: "+oldKey, err)
return core.E("s3.Rename", core.Concat("failed to delete source object: ", oldKey), err)
}
return nil
}
// List returns directory entries for the given path using ListObjectsV2 with delimiter.
//
// result := m.List(...)
func (m *Medium) List(p string) ([]fs.DirEntry, error) {
prefix := m.key(p)
if prefix != "" && !strings.HasSuffix(prefix, "/") {
if prefix != "" && !core.HasSuffix(prefix, "/") {
prefix += "/"
}
@ -326,7 +349,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) {
Delimiter: aws.String("/"),
})
if err != nil {
return nil, coreerr.E("s3.List", "failed to list objects: "+prefix, err)
return nil, core.E("s3.List", core.Concat("failed to list objects: ", prefix), err)
}
// Common prefixes are "directories"
@ -334,8 +357,8 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) {
if cp.Prefix == nil {
continue
}
name := strings.TrimPrefix(*cp.Prefix, prefix)
name = strings.TrimSuffix(name, "/")
name := core.TrimPrefix(*cp.Prefix, prefix)
name = core.TrimSuffix(name, "/")
if name == "" {
continue
}
@ -356,8 +379,8 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) {
if obj.Key == nil {
continue
}
name := strings.TrimPrefix(*obj.Key, prefix)
if name == "" || strings.Contains(name, "/") {
name := core.TrimPrefix(*obj.Key, prefix)
if name == "" || core.Contains(name, "/") {
continue
}
var size int64
@ -385,10 +408,12 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) {
}
// Stat returns file information for the given path using HeadObject.
//
// result := m.Stat(...)
func (m *Medium) Stat(p string) (fs.FileInfo, error) {
key := m.key(p)
if key == "" {
return nil, coreerr.E("s3.Stat", "path is required", fs.ErrInvalid)
return nil, core.E("s3.Stat", "path is required", fs.ErrInvalid)
}
out, err := m.client.HeadObject(context.Background(), &s3.HeadObjectInput{
@ -396,7 +421,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) {
Key: aws.String(key),
})
if err != nil {
return nil, coreerr.E("s3.Stat", "failed to head object: "+key, err)
return nil, core.E("s3.Stat", core.Concat("failed to head object: ", key), err)
}
var size int64
@ -418,10 +443,12 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) {
}
// Open opens the named file for reading.
//
// result := m.Open(...)
func (m *Medium) Open(p string) (fs.File, error) {
key := m.key(p)
if key == "" {
return nil, coreerr.E("s3.Open", "path is required", fs.ErrInvalid)
return nil, core.E("s3.Open", "path is required", fs.ErrInvalid)
}
out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{
@ -429,13 +456,13 @@ func (m *Medium) Open(p string) (fs.File, error) {
Key: aws.String(key),
})
if err != nil {
return nil, coreerr.E("s3.Open", "failed to get object: "+key, err)
return nil, core.E("s3.Open", core.Concat("failed to get object: ", key), err)
}
data, err := goio.ReadAll(out.Body)
out.Body.Close()
if err != nil {
return nil, coreerr.E("s3.Open", "failed to read body: "+key, err)
return nil, core.E("s3.Open", core.Concat("failed to read body: ", key), err)
}
var size int64
@ -457,10 +484,12 @@ func (m *Medium) Open(p string) (fs.File, error) {
// Create creates or truncates the named file. Returns a writer that
// uploads the content on Close.
//
// result := m.Create(...)
func (m *Medium) Create(p string) (goio.WriteCloser, error) {
key := m.key(p)
if key == "" {
return nil, coreerr.E("s3.Create", "path is required", fs.ErrInvalid)
return nil, core.E("s3.Create", "path is required", fs.ErrInvalid)
}
return &s3WriteCloser{
medium: m,
@ -470,10 +499,12 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) {
// Append opens the named file for appending. It downloads the existing
// content (if any) and re-uploads the combined content on Close.
//
// result := m.Append(...)
func (m *Medium) Append(p string) (goio.WriteCloser, error) {
key := m.key(p)
if key == "" {
return nil, coreerr.E("s3.Append", "path is required", fs.ErrInvalid)
return nil, core.E("s3.Append", "path is required", fs.ErrInvalid)
}
var existing []byte
@ -494,10 +525,12 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) {
}
// ReadStream returns a reader for the file content.
//
// result := m.ReadStream(...)
func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) {
key := m.key(p)
if key == "" {
return nil, coreerr.E("s3.ReadStream", "path is required", fs.ErrInvalid)
return nil, core.E("s3.ReadStream", "path is required", fs.ErrInvalid)
}
out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{
@ -505,17 +538,21 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) {
Key: aws.String(key),
})
if err != nil {
return nil, coreerr.E("s3.ReadStream", "failed to get object: "+key, err)
return nil, core.E("s3.ReadStream", core.Concat("failed to get object: ", key), err)
}
return out.Body, nil
}
// WriteStream returns a writer for the file content. Content is uploaded on Close.
//
// result := m.WriteStream(...)
func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) {
return m.Create(p)
}
// Exists checks if a path exists (file or directory prefix).
//
// result := m.Exists(...)
func (m *Medium) Exists(p string) bool {
key := m.key(p)
if key == "" {
@ -533,7 +570,7 @@ func (m *Medium) Exists(p string) bool {
// Check as a "directory" prefix
prefix := key
if !strings.HasSuffix(prefix, "/") {
if !core.HasSuffix(prefix, "/") {
prefix += "/"
}
listOut, err := m.client.ListObjectsV2(context.Background(), &s3.ListObjectsV2Input{
@ -548,6 +585,8 @@ func (m *Medium) Exists(p string) bool {
}
// IsDir checks if a path exists and is a directory (has objects under it as a prefix).
//
// result := m.IsDir(...)
func (m *Medium) IsDir(p string) bool {
key := m.key(p)
if key == "" {
@ -555,7 +594,7 @@ func (m *Medium) IsDir(p string) bool {
}
prefix := key
if !strings.HasSuffix(prefix, "/") {
if !core.HasSuffix(prefix, "/") {
prefix += "/"
}
@ -581,11 +620,34 @@ type fileInfo struct {
isDir bool
}
// Name documents the Name operation.
//
// result := fi.Name(...)
func (fi *fileInfo) Name() string { return fi.name }
// Size documents the Size operation.
//
// result := fi.Size(...)
func (fi *fileInfo) Size() int64 { return fi.size }
// Mode documents the Mode operation.
//
// result := fi.Mode(...)
func (fi *fileInfo) Mode() fs.FileMode { return fi.mode }
// ModTime documents the ModTime operation.
//
// result := fi.ModTime(...)
func (fi *fileInfo) ModTime() time.Time { return fi.modTime }
// IsDir documents the IsDir operation.
//
// result := fi.IsDir(...)
func (fi *fileInfo) IsDir() bool { return fi.isDir }
// Sys documents the Sys operation.
//
// result := fi.Sys(...)
func (fi *fileInfo) Sys() any { return nil }
// dirEntry implements fs.DirEntry for S3 listings.
@ -596,9 +658,24 @@ type dirEntry struct {
info fs.FileInfo
}
// Name documents the Name operation.
//
// result := de.Name(...)
func (de *dirEntry) Name() string { return de.name }
// IsDir documents the IsDir operation.
//
// result := de.IsDir(...)
func (de *dirEntry) IsDir() bool { return de.isDir }
// Type documents the Type operation.
//
// result := de.Type(...)
func (de *dirEntry) Type() fs.FileMode { return de.mode.Type() }
// Info documents the Info operation.
//
// result := de.Info(...)
func (de *dirEntry) Info() (fs.FileInfo, error) { return de.info, nil }
// s3File implements fs.File for S3 objects.
@ -610,6 +687,9 @@ type s3File struct {
modTime time.Time
}
// Stat documents the Stat operation.
//
// result := f.Stat(...)
func (f *s3File) Stat() (fs.FileInfo, error) {
return &fileInfo{
name: f.name,
@ -619,6 +699,9 @@ func (f *s3File) Stat() (fs.FileInfo, error) {
}, nil
}
// Read documents the Read operation.
//
// result := f.Read(...)
func (f *s3File) Read(b []byte) (int, error) {
if f.offset >= int64(len(f.content)) {
return 0, goio.EOF
@ -628,6 +711,9 @@ func (f *s3File) Read(b []byte) (int, error) {
return n, nil
}
// Close documents the Close operation.
//
// result := f.Close(...)
func (f *s3File) Close() error {
return nil
}
@ -639,11 +725,17 @@ type s3WriteCloser struct {
data []byte
}
// Write documents the Write operation.
//
// result := w.Write(...)
func (w *s3WriteCloser) Write(p []byte) (int, error) {
w.data = append(w.data, p...)
return len(p), nil
}
// Close documents the Close operation.
//
// result := w.Close(...)
func (w *s3WriteCloser) Close() error {
_, err := w.medium.client.PutObject(context.Background(), &s3.PutObjectInput{
Bucket: aws.String(w.medium.bucket),
@ -651,7 +743,7 @@ func (w *s3WriteCloser) Close() error {
Body: bytes.NewReader(w.data),
})
if err != nil {
return coreerr.E("s3.writeCloser.Close", "failed to upload on close", err)
return core.E("s3.writeCloser.Close", "failed to upload on close", err)
}
return nil
}

View file

@ -3,16 +3,14 @@ package s3
import (
"bytes"
"context"
"errors"
"fmt"
goio "io"
"io/fs"
"sort"
"strings"
"sync"
"testing"
"time"
core "dappco.re/go/core"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
@ -45,7 +43,7 @@ func (m *mockS3) GetObject(_ context.Context, params *s3.GetObjectInput, _ ...fu
key := aws.ToString(params.Key)
data, ok := m.objects[key]
if !ok {
return nil, fmt.Errorf("NoSuchKey: key %q not found", key)
return nil, core.E("s3test.mockS3.GetObject", core.Sprintf("NoSuchKey: key %q not found", key), fs.ErrNotExist)
}
mtime := m.mtimes[key]
return &s3.GetObjectOutput{
@ -106,7 +104,7 @@ func (m *mockS3) HeadObject(_ context.Context, params *s3.HeadObjectInput, _ ...
key := aws.ToString(params.Key)
data, ok := m.objects[key]
if !ok {
return nil, fmt.Errorf("NotFound: key %q not found", key)
return nil, core.E("s3test.mockS3.HeadObject", core.Sprintf("NotFound: key %q not found", key), fs.ErrNotExist)
}
mtime := m.mtimes[key]
return &s3.HeadObjectOutput{
@ -129,7 +127,7 @@ func (m *mockS3) ListObjectsV2(_ context.Context, params *s3.ListObjectsV2Input,
// Collect all matching keys sorted
var allKeys []string
for k := range m.objects {
if strings.HasPrefix(k, prefix) {
if core.HasPrefix(k, prefix) {
allKeys = append(allKeys, k)
}
}
@ -139,12 +137,13 @@ func (m *mockS3) ListObjectsV2(_ context.Context, params *s3.ListObjectsV2Input,
commonPrefixes := make(map[string]bool)
for _, k := range allKeys {
rest := strings.TrimPrefix(k, prefix)
rest := core.TrimPrefix(k, prefix)
if delimiter != "" {
if idx := strings.Index(rest, delimiter); idx >= 0 {
parts := core.SplitN(rest, delimiter, 2)
if len(parts) == 2 {
// This key has a delimiter after the prefix -> common prefix
cp := prefix + rest[:idx+len(delimiter)]
cp := core.Concat(prefix, parts[0], delimiter)
commonPrefixes[cp] = true
continue
}
@ -187,15 +186,15 @@ func (m *mockS3) CopyObject(_ context.Context, params *s3.CopyObjectInput, _ ...
// CopySource is "bucket/key"
source := aws.ToString(params.CopySource)
parts := strings.SplitN(source, "/", 2)
parts := core.SplitN(source, "/", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("invalid CopySource: %s", source)
return nil, core.E("s3test.mockS3.CopyObject", core.Sprintf("invalid CopySource: %s", source), fs.ErrInvalid)
}
srcKey := parts[1]
data, ok := m.objects[srcKey]
if !ok {
return nil, fmt.Errorf("NoSuchKey: source key %q not found", srcKey)
return nil, core.E("s3test.mockS3.CopyObject", core.Sprintf("NoSuchKey: source key %q not found", srcKey), fs.ErrNotExist)
}
destKey := aws.ToString(params.Key)
@ -217,7 +216,7 @@ func newTestMedium(t *testing.T) (*Medium, *mockS3) {
// --- Tests ---
func TestNew_Good(t *testing.T) {
func TestS3_New_Good(t *testing.T) {
mock := newMockS3()
m, err := New("my-bucket", withAPI(mock))
require.NoError(t, err)
@ -225,19 +224,19 @@ func TestNew_Good(t *testing.T) {
assert.Equal(t, "", m.prefix)
}
func TestNew_Bad_NoBucket(t *testing.T) {
func TestS3_New_NoBucket_Bad(t *testing.T) {
_, err := New("")
assert.Error(t, err)
assert.Contains(t, err.Error(), "bucket name is required")
}
func TestNew_Bad_NoClient(t *testing.T) {
func TestS3_New_NoClient_Bad(t *testing.T) {
_, err := New("bucket")
assert.Error(t, err)
assert.Contains(t, err.Error(), "S3 client is required")
}
func TestWithPrefix_Good(t *testing.T) {
func TestS3_WithPrefix_Good(t *testing.T) {
mock := newMockS3()
m, err := New("bucket", withAPI(mock), WithPrefix("data/"))
require.NoError(t, err)
@ -249,7 +248,7 @@ func TestWithPrefix_Good(t *testing.T) {
assert.Equal(t, "data/", m2.prefix)
}
func TestReadWrite_Good(t *testing.T) {
func TestS3_ReadWrite_Good(t *testing.T) {
m, _ := newTestMedium(t)
err := m.Write("hello.txt", "world")
@ -260,14 +259,14 @@ func TestReadWrite_Good(t *testing.T) {
assert.Equal(t, "world", content)
}
func TestReadWrite_Bad_NotFound(t *testing.T) {
func TestS3_ReadWrite_NotFound_Bad(t *testing.T) {
m, _ := newTestMedium(t)
_, err := m.Read("nonexistent.txt")
assert.Error(t, err)
}
func TestReadWrite_Bad_EmptyPath(t *testing.T) {
func TestS3_ReadWrite_EmptyPath_Bad(t *testing.T) {
m, _ := newTestMedium(t)
_, err := m.Read("")
@ -277,7 +276,7 @@ func TestReadWrite_Bad_EmptyPath(t *testing.T) {
assert.Error(t, err)
}
func TestReadWrite_Good_WithPrefix(t *testing.T) {
func TestS3_ReadWrite_WithPrefix_Good(t *testing.T) {
mock := newMockS3()
m, err := New("bucket", withAPI(mock), WithPrefix("pfx"))
require.NoError(t, err)
@ -294,14 +293,14 @@ func TestReadWrite_Good_WithPrefix(t *testing.T) {
assert.Equal(t, "data", content)
}
func TestEnsureDir_Good(t *testing.T) {
func TestS3_EnsureDir_Good(t *testing.T) {
m, _ := newTestMedium(t)
// EnsureDir is a no-op for S3
err := m.EnsureDir("any/path")
assert.NoError(t, err)
}
func TestIsFile_Good(t *testing.T) {
func TestS3_IsFile_Good(t *testing.T) {
m, _ := newTestMedium(t)
err := m.Write("file.txt", "content")
@ -312,7 +311,7 @@ func TestIsFile_Good(t *testing.T) {
assert.False(t, m.IsFile(""))
}
func TestFileGetFileSet_Good(t *testing.T) {
func TestS3_FileGetFileSet_Good(t *testing.T) {
m, _ := newTestMedium(t)
err := m.FileSet("key.txt", "value")
@ -323,7 +322,7 @@ func TestFileGetFileSet_Good(t *testing.T) {
assert.Equal(t, "value", val)
}
func TestDelete_Good(t *testing.T) {
func TestS3_Delete_Good(t *testing.T) {
m, _ := newTestMedium(t)
err := m.Write("to-delete.txt", "content")
@ -335,13 +334,13 @@ func TestDelete_Good(t *testing.T) {
assert.False(t, m.IsFile("to-delete.txt"))
}
func TestDelete_Bad_EmptyPath(t *testing.T) {
func TestS3_Delete_EmptyPath_Bad(t *testing.T) {
m, _ := newTestMedium(t)
err := m.Delete("")
assert.Error(t, err)
}
func TestDeleteAll_Good(t *testing.T) {
func TestS3_DeleteAll_Good(t *testing.T) {
m, _ := newTestMedium(t)
// Create nested structure
@ -357,22 +356,22 @@ func TestDeleteAll_Good(t *testing.T) {
assert.True(t, m.IsFile("other.txt"))
}
func TestDeleteAll_Bad_EmptyPath(t *testing.T) {
func TestS3_DeleteAll_EmptyPath_Bad(t *testing.T) {
m, _ := newTestMedium(t)
err := m.DeleteAll("")
assert.Error(t, err)
}
func TestDeleteAll_Bad_DeleteObjectError(t *testing.T) {
func TestS3_DeleteAll_DeleteObjectError_Bad(t *testing.T) {
m, mock := newTestMedium(t)
mock.deleteObjectErrors["dir"] = errors.New("boom")
mock.deleteObjectErrors["dir"] = core.NewError("boom")
err := m.DeleteAll("dir")
require.Error(t, err)
assert.Contains(t, err.Error(), "failed to delete object: dir")
}
func TestDeleteAll_Bad_PartialDelete(t *testing.T) {
func TestS3_DeleteAll_PartialDelete_Bad(t *testing.T) {
m, mock := newTestMedium(t)
require.NoError(t, m.Write("dir/file1.txt", "a"))
@ -391,7 +390,7 @@ func TestDeleteAll_Bad_PartialDelete(t *testing.T) {
assert.False(t, m.IsFile("dir/file1.txt"))
}
func TestRename_Good(t *testing.T) {
func TestS3_Rename_Good(t *testing.T) {
m, _ := newTestMedium(t)
require.NoError(t, m.Write("old.txt", "content"))
@ -408,7 +407,7 @@ func TestRename_Good(t *testing.T) {
assert.Equal(t, "content", content)
}
func TestRename_Bad_EmptyPath(t *testing.T) {
func TestS3_Rename_EmptyPath_Bad(t *testing.T) {
m, _ := newTestMedium(t)
err := m.Rename("", "new.txt")
assert.Error(t, err)
@ -417,13 +416,13 @@ func TestRename_Bad_EmptyPath(t *testing.T) {
assert.Error(t, err)
}
func TestRename_Bad_SourceNotFound(t *testing.T) {
func TestS3_Rename_SourceNotFound_Bad(t *testing.T) {
m, _ := newTestMedium(t)
err := m.Rename("nonexistent.txt", "new.txt")
assert.Error(t, err)
}
func TestList_Good(t *testing.T) {
func TestS3_List_Good(t *testing.T) {
m, _ := newTestMedium(t)
require.NoError(t, m.Write("dir/file1.txt", "a"))
@ -454,7 +453,7 @@ func TestList_Good(t *testing.T) {
}
}
func TestList_Good_Root(t *testing.T) {
func TestS3_List_Root_Good(t *testing.T) {
m, _ := newTestMedium(t)
require.NoError(t, m.Write("root.txt", "content"))
@ -472,7 +471,7 @@ func TestList_Good_Root(t *testing.T) {
assert.True(t, names["dir"])
}
func TestStat_Good(t *testing.T) {
func TestS3_Stat_Good(t *testing.T) {
m, _ := newTestMedium(t)
require.NoError(t, m.Write("file.txt", "hello world"))
@ -484,20 +483,20 @@ func TestStat_Good(t *testing.T) {
assert.False(t, info.IsDir())
}
func TestStat_Bad_NotFound(t *testing.T) {
func TestS3_Stat_NotFound_Bad(t *testing.T) {
m, _ := newTestMedium(t)
_, err := m.Stat("nonexistent.txt")
assert.Error(t, err)
}
func TestStat_Bad_EmptyPath(t *testing.T) {
func TestS3_Stat_EmptyPath_Bad(t *testing.T) {
m, _ := newTestMedium(t)
_, err := m.Stat("")
assert.Error(t, err)
}
func TestOpen_Good(t *testing.T) {
func TestS3_Open_Good(t *testing.T) {
m, _ := newTestMedium(t)
require.NoError(t, m.Write("file.txt", "open me"))
@ -515,14 +514,14 @@ func TestOpen_Good(t *testing.T) {
assert.Equal(t, "file.txt", stat.Name())
}
func TestOpen_Bad_NotFound(t *testing.T) {
func TestS3_Open_NotFound_Bad(t *testing.T) {
m, _ := newTestMedium(t)
_, err := m.Open("nonexistent.txt")
assert.Error(t, err)
}
func TestCreate_Good(t *testing.T) {
func TestS3_Create_Good(t *testing.T) {
m, _ := newTestMedium(t)
w, err := m.Create("new.txt")
@ -540,7 +539,7 @@ func TestCreate_Good(t *testing.T) {
assert.Equal(t, "created", content)
}
func TestAppend_Good(t *testing.T) {
func TestS3_Append_Good(t *testing.T) {
m, _ := newTestMedium(t)
require.NoError(t, m.Write("append.txt", "hello"))
@ -558,7 +557,7 @@ func TestAppend_Good(t *testing.T) {
assert.Equal(t, "hello world", content)
}
func TestAppend_Good_NewFile(t *testing.T) {
func TestS3_Append_NewFile_Good(t *testing.T) {
m, _ := newTestMedium(t)
w, err := m.Append("new.txt")
@ -574,7 +573,7 @@ func TestAppend_Good_NewFile(t *testing.T) {
assert.Equal(t, "fresh", content)
}
func TestReadStream_Good(t *testing.T) {
func TestS3_ReadStream_Good(t *testing.T) {
m, _ := newTestMedium(t)
require.NoError(t, m.Write("stream.txt", "streaming content"))
@ -588,19 +587,19 @@ func TestReadStream_Good(t *testing.T) {
assert.Equal(t, "streaming content", string(data))
}
func TestReadStream_Bad_NotFound(t *testing.T) {
func TestS3_ReadStream_NotFound_Bad(t *testing.T) {
m, _ := newTestMedium(t)
_, err := m.ReadStream("nonexistent.txt")
assert.Error(t, err)
}
func TestWriteStream_Good(t *testing.T) {
func TestS3_WriteStream_Good(t *testing.T) {
m, _ := newTestMedium(t)
writer, err := m.WriteStream("output.txt")
require.NoError(t, err)
_, err = goio.Copy(writer, strings.NewReader("piped data"))
_, err = goio.Copy(writer, core.NewReader("piped data"))
require.NoError(t, err)
err = writer.Close()
require.NoError(t, err)
@ -610,7 +609,7 @@ func TestWriteStream_Good(t *testing.T) {
assert.Equal(t, "piped data", content)
}
func TestExists_Good(t *testing.T) {
func TestS3_Exists_Good(t *testing.T) {
m, _ := newTestMedium(t)
assert.False(t, m.Exists("nonexistent.txt"))
@ -619,7 +618,7 @@ func TestExists_Good(t *testing.T) {
assert.True(t, m.Exists("file.txt"))
}
func TestExists_Good_DirectoryPrefix(t *testing.T) {
func TestS3_Exists_DirectoryPrefix_Good(t *testing.T) {
m, _ := newTestMedium(t)
require.NoError(t, m.Write("dir/file.txt", "content"))
@ -627,7 +626,7 @@ func TestExists_Good_DirectoryPrefix(t *testing.T) {
assert.True(t, m.Exists("dir"))
}
func TestIsDir_Good(t *testing.T) {
func TestS3_IsDir_Good(t *testing.T) {
m, _ := newTestMedium(t)
require.NoError(t, m.Write("dir/file.txt", "content"))
@ -638,7 +637,7 @@ func TestIsDir_Good(t *testing.T) {
assert.False(t, m.IsDir(""))
}
func TestKey_Good(t *testing.T) {
func TestS3_Key_Good(t *testing.T) {
mock := newMockS3()
// No prefix
@ -657,7 +656,7 @@ func TestKey_Good(t *testing.T) {
}
// Ugly: verify the Medium interface is satisfied at compile time.
func TestInterfaceCompliance_Ugly(t *testing.T) {
func TestS3_InterfaceCompliance_Ugly(t *testing.T) {
mock := newMockS3()
m, err := New("bucket", withAPI(mock))
require.NoError(t, err)

View file

@ -16,21 +16,21 @@ import (
"crypto/rand"
"crypto/sha256"
"encoding/binary"
"errors"
"io"
core "dappco.re/go/core"
"golang.org/x/crypto/chacha20poly1305"
)
var (
// ErrInvalidKey is returned when the encryption key is invalid.
ErrInvalidKey = errors.New("sigil: invalid key size, must be 32 bytes")
ErrInvalidKey = core.E("sigil.ErrInvalidKey", "invalid key size, must be 32 bytes", nil)
// ErrCiphertextTooShort is returned when the ciphertext is too short to decrypt.
ErrCiphertextTooShort = errors.New("sigil: ciphertext too short")
ErrCiphertextTooShort = core.E("sigil.ErrCiphertextTooShort", "ciphertext too short", nil)
// ErrDecryptionFailed is returned when decryption or authentication fails.
ErrDecryptionFailed = errors.New("sigil: decryption failed")
ErrDecryptionFailed = core.E("sigil.ErrDecryptionFailed", "decryption failed", nil)
// ErrNoKeyConfigured is returned when no encryption key has been set.
ErrNoKeyConfigured = errors.New("sigil: no encryption key configured")
ErrNoKeyConfigured = core.E("sigil.ErrNoKeyConfigured", "no encryption key configured", nil)
)
// PreObfuscator applies a reversible transformation to data before encryption.
@ -62,6 +62,8 @@ type PreObfuscator interface {
type XORObfuscator struct{}
// Obfuscate XORs the data with a key stream derived from the entropy.
//
// result := x.Obfuscate(...)
func (x *XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte {
if len(data) == 0 {
return data
@ -70,6 +72,8 @@ func (x *XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte {
}
// Deobfuscate reverses the XOR transformation (XOR is symmetric).
//
// result := x.Deobfuscate(...)
func (x *XORObfuscator) Deobfuscate(data []byte, entropy []byte) []byte {
if len(data) == 0 {
return data
@ -124,6 +128,8 @@ func (x *XORObfuscator) deriveKeyStream(entropy []byte, length int) []byte {
type ShuffleMaskObfuscator struct{}
// Obfuscate shuffles bytes and applies a mask derived from entropy.
//
// result := s.Obfuscate(...)
func (s *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte {
if len(data) == 0 {
return data
@ -151,6 +157,8 @@ func (s *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte {
}
// Deobfuscate reverses the shuffle and mask operations.
//
// result := s.Deobfuscate(...)
func (s *ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte) []byte {
if len(data) == 0 {
return data
@ -283,6 +291,8 @@ func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*Ch
// In encrypts the data with pre-obfuscation.
// The flow is: plaintext -> obfuscate -> encrypt
//
// result := s.In(...)
func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) {
if s.Key == nil {
return nil, ErrNoKeyConfigured
@ -293,7 +303,7 @@ func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) {
aead, err := chacha20poly1305.NewX(s.Key)
if err != nil {
return nil, err
return nil, core.E("sigil.ChaChaPolySigil.In", "create cipher", err)
}
// Generate nonce
@ -303,7 +313,7 @@ func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) {
reader = rand.Reader
}
if _, err := io.ReadFull(reader, nonce); err != nil {
return nil, err
return nil, core.E("sigil.ChaChaPolySigil.In", "read nonce", err)
}
// Pre-obfuscate the plaintext using nonce as entropy
@ -322,6 +332,8 @@ func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) {
// Out decrypts the data and reverses obfuscation.
// The flow is: decrypt -> deobfuscate -> plaintext
//
// result := s.Out(...)
func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) {
if s.Key == nil {
return nil, ErrNoKeyConfigured
@ -332,7 +344,7 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) {
aead, err := chacha20poly1305.NewX(s.Key)
if err != nil {
return nil, err
return nil, core.E("sigil.ChaChaPolySigil.Out", "create cipher", err)
}
minLen := aead.NonceSize() + aead.Overhead()
@ -347,7 +359,7 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) {
// Decrypt
obfuscated, err := aead.Open(nil, nonce, ciphertext, nil)
if err != nil {
return nil, ErrDecryptionFailed
return nil, core.E("sigil.ChaChaPolySigil.Out", "decrypt ciphertext", ErrDecryptionFailed)
}
// Deobfuscate using the same nonce as entropy
@ -366,6 +378,8 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) {
// GetNonceFromCiphertext extracts the nonce from encrypted output.
// This is provided for debugging/logging purposes only.
// The nonce should NOT be stored separately in headers.
//
// result := sigil.GetNonceFromCiphertext(...)
func GetNonceFromCiphertext(ciphertext []byte) ([]byte, error) {
nonceSize := chacha20poly1305.NonceSizeX
if len(ciphertext) < nonceSize {

View file

@ -3,17 +3,17 @@ package sigil
import (
"bytes"
"crypto/rand"
"errors"
"io"
"testing"
core "dappco.re/go/core"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// ── XORObfuscator ──────────────────────────────────────────────────
func TestXORObfuscator_Good_RoundTrip(t *testing.T) {
func TestCryptoSigil_XORObfuscator_RoundTrip_Good(t *testing.T) {
ob := &XORObfuscator{}
data := []byte("the axioms are in the weights")
entropy := []byte("deterministic-nonce-24bytes!")
@ -26,7 +26,7 @@ func TestXORObfuscator_Good_RoundTrip(t *testing.T) {
assert.Equal(t, data, restored)
}
func TestXORObfuscator_Good_DifferentEntropyDifferentOutput(t *testing.T) {
func TestCryptoSigil_XORObfuscator_DifferentEntropyDifferentOutput_Good(t *testing.T) {
ob := &XORObfuscator{}
data := []byte("same plaintext")
@ -35,7 +35,7 @@ func TestXORObfuscator_Good_DifferentEntropyDifferentOutput(t *testing.T) {
assert.NotEqual(t, out1, out2)
}
func TestXORObfuscator_Good_Deterministic(t *testing.T) {
func TestCryptoSigil_XORObfuscator_Deterministic_Good(t *testing.T) {
ob := &XORObfuscator{}
data := []byte("reproducible")
entropy := []byte("fixed-seed")
@ -45,7 +45,7 @@ func TestXORObfuscator_Good_Deterministic(t *testing.T) {
assert.Equal(t, out1, out2)
}
func TestXORObfuscator_Good_LargeData(t *testing.T) {
func TestCryptoSigil_XORObfuscator_LargeData_Good(t *testing.T) {
ob := &XORObfuscator{}
// Larger than one SHA-256 block (32 bytes) to test multi-block key stream.
data := make([]byte, 256)
@ -59,7 +59,7 @@ func TestXORObfuscator_Good_LargeData(t *testing.T) {
assert.Equal(t, data, restored)
}
func TestXORObfuscator_Good_EmptyData(t *testing.T) {
func TestCryptoSigil_XORObfuscator_EmptyData_Good(t *testing.T) {
ob := &XORObfuscator{}
result := ob.Obfuscate([]byte{}, []byte("entropy"))
assert.Equal(t, []byte{}, result)
@ -68,7 +68,7 @@ func TestXORObfuscator_Good_EmptyData(t *testing.T) {
assert.Equal(t, []byte{}, result)
}
func TestXORObfuscator_Good_SymmetricProperty(t *testing.T) {
func TestCryptoSigil_XORObfuscator_SymmetricProperty_Good(t *testing.T) {
ob := &XORObfuscator{}
data := []byte("XOR is its own inverse")
entropy := []byte("nonce")
@ -80,7 +80,7 @@ func TestXORObfuscator_Good_SymmetricProperty(t *testing.T) {
// ── ShuffleMaskObfuscator ──────────────────────────────────────────
func TestShuffleMaskObfuscator_Good_RoundTrip(t *testing.T) {
func TestCryptoSigil_ShuffleMaskObfuscator_RoundTrip_Good(t *testing.T) {
ob := &ShuffleMaskObfuscator{}
data := []byte("shuffle and mask protect patterns")
entropy := []byte("deterministic-entropy")
@ -93,7 +93,7 @@ func TestShuffleMaskObfuscator_Good_RoundTrip(t *testing.T) {
assert.Equal(t, data, restored)
}
func TestShuffleMaskObfuscator_Good_DifferentEntropy(t *testing.T) {
func TestCryptoSigil_ShuffleMaskObfuscator_DifferentEntropy_Good(t *testing.T) {
ob := &ShuffleMaskObfuscator{}
data := []byte("same data")
@ -102,7 +102,7 @@ func TestShuffleMaskObfuscator_Good_DifferentEntropy(t *testing.T) {
assert.NotEqual(t, out1, out2)
}
func TestShuffleMaskObfuscator_Good_Deterministic(t *testing.T) {
func TestCryptoSigil_ShuffleMaskObfuscator_Deterministic_Good(t *testing.T) {
ob := &ShuffleMaskObfuscator{}
data := []byte("reproducible shuffle")
entropy := []byte("fixed")
@ -112,7 +112,7 @@ func TestShuffleMaskObfuscator_Good_Deterministic(t *testing.T) {
assert.Equal(t, out1, out2)
}
func TestShuffleMaskObfuscator_Good_LargeData(t *testing.T) {
func TestCryptoSigil_ShuffleMaskObfuscator_LargeData_Good(t *testing.T) {
ob := &ShuffleMaskObfuscator{}
data := make([]byte, 512)
for i := range data {
@ -125,7 +125,7 @@ func TestShuffleMaskObfuscator_Good_LargeData(t *testing.T) {
assert.Equal(t, data, restored)
}
func TestShuffleMaskObfuscator_Good_EmptyData(t *testing.T) {
func TestCryptoSigil_ShuffleMaskObfuscator_EmptyData_Good(t *testing.T) {
ob := &ShuffleMaskObfuscator{}
result := ob.Obfuscate([]byte{}, []byte("entropy"))
assert.Equal(t, []byte{}, result)
@ -134,7 +134,7 @@ func TestShuffleMaskObfuscator_Good_EmptyData(t *testing.T) {
assert.Equal(t, []byte{}, result)
}
func TestShuffleMaskObfuscator_Good_SingleByte(t *testing.T) {
func TestCryptoSigil_ShuffleMaskObfuscator_SingleByte_Good(t *testing.T) {
ob := &ShuffleMaskObfuscator{}
data := []byte{0x42}
entropy := []byte("single")
@ -146,7 +146,7 @@ func TestShuffleMaskObfuscator_Good_SingleByte(t *testing.T) {
// ── NewChaChaPolySigil ─────────────────────────────────────────────
func TestNewChaChaPolySigil_Good(t *testing.T) {
func TestCryptoSigil_NewChaChaPolySigil_Good(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -157,7 +157,7 @@ func TestNewChaChaPolySigil_Good(t *testing.T) {
assert.NotNil(t, s.Obfuscator)
}
func TestNewChaChaPolySigil_Good_KeyIsCopied(t *testing.T) {
func TestCryptoSigil_NewChaChaPolySigil_KeyIsCopied_Good(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
original := make([]byte, 32)
@ -171,24 +171,24 @@ func TestNewChaChaPolySigil_Good_KeyIsCopied(t *testing.T) {
assert.Equal(t, original, s.Key)
}
func TestNewChaChaPolySigil_Bad_ShortKey(t *testing.T) {
func TestCryptoSigil_NewChaChaPolySigil_ShortKey_Bad(t *testing.T) {
_, err := NewChaChaPolySigil([]byte("too short"))
assert.ErrorIs(t, err, ErrInvalidKey)
}
func TestNewChaChaPolySigil_Bad_LongKey(t *testing.T) {
func TestCryptoSigil_NewChaChaPolySigil_LongKey_Bad(t *testing.T) {
_, err := NewChaChaPolySigil(make([]byte, 64))
assert.ErrorIs(t, err, ErrInvalidKey)
}
func TestNewChaChaPolySigil_Bad_EmptyKey(t *testing.T) {
func TestCryptoSigil_NewChaChaPolySigil_EmptyKey_Bad(t *testing.T) {
_, err := NewChaChaPolySigil(nil)
assert.ErrorIs(t, err, ErrInvalidKey)
}
// ── NewChaChaPolySigilWithObfuscator ───────────────────────────────
func TestNewChaChaPolySigilWithObfuscator_Good(t *testing.T) {
func TestCryptoSigil_NewChaChaPolySigilWithObfuscator_Good(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -198,7 +198,7 @@ func TestNewChaChaPolySigilWithObfuscator_Good(t *testing.T) {
assert.Equal(t, ob, s.Obfuscator)
}
func TestNewChaChaPolySigilWithObfuscator_Good_NilObfuscator(t *testing.T) {
func TestCryptoSigil_NewChaChaPolySigilWithObfuscator_NilObfuscator_Good(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -208,14 +208,14 @@ func TestNewChaChaPolySigilWithObfuscator_Good_NilObfuscator(t *testing.T) {
assert.IsType(t, &XORObfuscator{}, s.Obfuscator)
}
func TestNewChaChaPolySigilWithObfuscator_Bad_InvalidKey(t *testing.T) {
func TestCryptoSigil_NewChaChaPolySigilWithObfuscator_InvalidKey_Bad(t *testing.T) {
_, err := NewChaChaPolySigilWithObfuscator([]byte("bad"), &XORObfuscator{})
assert.ErrorIs(t, err, ErrInvalidKey)
}
// ── ChaChaPolySigil In/Out (encrypt/decrypt) ───────────────────────
func TestChaChaPolySigil_Good_RoundTrip(t *testing.T) {
func TestCryptoSigil_ChaChaPolySigil_RoundTrip_Good(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -233,7 +233,7 @@ func TestChaChaPolySigil_Good_RoundTrip(t *testing.T) {
assert.Equal(t, plaintext, decrypted)
}
func TestChaChaPolySigil_Good_WithShuffleMask(t *testing.T) {
func TestCryptoSigil_ChaChaPolySigil_WithShuffleMask_Good(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -249,7 +249,7 @@ func TestChaChaPolySigil_Good_WithShuffleMask(t *testing.T) {
assert.Equal(t, plaintext, decrypted)
}
func TestChaChaPolySigil_Good_NilData(t *testing.T) {
func TestCryptoSigil_ChaChaPolySigil_NilData_Good(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -265,7 +265,7 @@ func TestChaChaPolySigil_Good_NilData(t *testing.T) {
assert.Nil(t, dec)
}
func TestChaChaPolySigil_Good_EmptyPlaintext(t *testing.T) {
func TestCryptoSigil_ChaChaPolySigil_EmptyPlaintext_Good(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -281,7 +281,7 @@ func TestChaChaPolySigil_Good_EmptyPlaintext(t *testing.T) {
assert.Equal(t, []byte{}, decrypted)
}
func TestChaChaPolySigil_Good_DifferentCiphertextsPerCall(t *testing.T) {
func TestCryptoSigil_ChaChaPolySigil_DifferentCiphertextsPerCall_Good(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -296,7 +296,7 @@ func TestChaChaPolySigil_Good_DifferentCiphertextsPerCall(t *testing.T) {
assert.NotEqual(t, ct1, ct2)
}
func TestChaChaPolySigil_Bad_NoKey(t *testing.T) {
func TestCryptoSigil_ChaChaPolySigil_NoKey_Bad(t *testing.T) {
s := &ChaChaPolySigil{}
_, err := s.In([]byte("data"))
@ -306,7 +306,7 @@ func TestChaChaPolySigil_Bad_NoKey(t *testing.T) {
assert.ErrorIs(t, err, ErrNoKeyConfigured)
}
func TestChaChaPolySigil_Bad_WrongKey(t *testing.T) {
func TestCryptoSigil_ChaChaPolySigil_WrongKey_Bad(t *testing.T) {
key1 := make([]byte, 32)
key2 := make([]byte, 32)
_, _ = rand.Read(key1)
@ -322,7 +322,7 @@ func TestChaChaPolySigil_Bad_WrongKey(t *testing.T) {
assert.ErrorIs(t, err, ErrDecryptionFailed)
}
func TestChaChaPolySigil_Bad_TruncatedCiphertext(t *testing.T) {
func TestCryptoSigil_ChaChaPolySigil_TruncatedCiphertext_Bad(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -331,7 +331,7 @@ func TestChaChaPolySigil_Bad_TruncatedCiphertext(t *testing.T) {
assert.ErrorIs(t, err, ErrCiphertextTooShort)
}
func TestChaChaPolySigil_Bad_TamperedCiphertext(t *testing.T) {
func TestCryptoSigil_ChaChaPolySigil_TamperedCiphertext_Bad(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -349,10 +349,10 @@ func TestChaChaPolySigil_Bad_TamperedCiphertext(t *testing.T) {
type failReader struct{}
func (f *failReader) Read([]byte) (int, error) {
return 0, errors.New("entropy source failed")
return 0, core.NewError("entropy source failed")
}
func TestChaChaPolySigil_Bad_RandReaderFailure(t *testing.T) {
func TestCryptoSigil_ChaChaPolySigil_RandReaderFailure_Bad(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -365,7 +365,7 @@ func TestChaChaPolySigil_Bad_RandReaderFailure(t *testing.T) {
// ── ChaChaPolySigil without obfuscator ─────────────────────────────
func TestChaChaPolySigil_Good_NoObfuscator(t *testing.T) {
func TestCryptoSigil_ChaChaPolySigil_NoObfuscator_Good(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -383,7 +383,7 @@ func TestChaChaPolySigil_Good_NoObfuscator(t *testing.T) {
// ── GetNonceFromCiphertext ─────────────────────────────────────────
func TestGetNonceFromCiphertext_Good(t *testing.T) {
func TestCryptoSigil_GetNonceFromCiphertext_Good(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -398,7 +398,7 @@ func TestGetNonceFromCiphertext_Good(t *testing.T) {
assert.Equal(t, ciphertext[:24], nonce)
}
func TestGetNonceFromCiphertext_Good_NonceCopied(t *testing.T) {
func TestCryptoSigil_GetNonceFromCiphertext_NonceCopied_Good(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -414,19 +414,19 @@ func TestGetNonceFromCiphertext_Good_NonceCopied(t *testing.T) {
assert.Equal(t, original, ciphertext[:24])
}
func TestGetNonceFromCiphertext_Bad_TooShort(t *testing.T) {
func TestCryptoSigil_GetNonceFromCiphertext_TooShort_Bad(t *testing.T) {
_, err := GetNonceFromCiphertext([]byte("short"))
assert.ErrorIs(t, err, ErrCiphertextTooShort)
}
func TestGetNonceFromCiphertext_Bad_Empty(t *testing.T) {
func TestCryptoSigil_GetNonceFromCiphertext_Empty_Bad(t *testing.T) {
_, err := GetNonceFromCiphertext(nil)
assert.ErrorIs(t, err, ErrCiphertextTooShort)
}
// ── ChaChaPolySigil in Transmute pipeline ──────────────────────────
func TestChaChaPolySigil_Good_InTransmutePipeline(t *testing.T) {
func TestCryptoSigil_ChaChaPolySigil_InTransmutePipeline_Good(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)
@ -460,16 +460,16 @@ func isHex(data []byte) bool {
type failSigil struct{}
func (f *failSigil) In([]byte) ([]byte, error) { return nil, errors.New("fail in") }
func (f *failSigil) Out([]byte) ([]byte, error) { return nil, errors.New("fail out") }
func (f *failSigil) In([]byte) ([]byte, error) { return nil, core.NewError("fail in") }
func (f *failSigil) Out([]byte) ([]byte, error) { return nil, core.NewError("fail out") }
func TestTransmute_Bad_ErrorPropagation(t *testing.T) {
func TestCryptoSigil_Transmute_ErrorPropagation_Bad(t *testing.T) {
_, err := Transmute([]byte("data"), []Sigil{&failSigil{}})
assert.Error(t, err)
assert.Contains(t, err.Error(), "fail in")
}
func TestUntransmute_Bad_ErrorPropagation(t *testing.T) {
func TestCryptoSigil_Untransmute_ErrorPropagation_Bad(t *testing.T) {
_, err := Untransmute([]byte("data"), []Sigil{&failSigil{}})
assert.Error(t, err)
assert.Contains(t, err.Error(), "fail out")
@ -477,7 +477,7 @@ func TestUntransmute_Bad_ErrorPropagation(t *testing.T) {
// ── GzipSigil with custom writer (edge case) ──────────────────────
func TestGzipSigil_Good_CustomWriter(t *testing.T) {
func TestCryptoSigil_GzipSigil_CustomWriter_Good(t *testing.T) {
var buf bytes.Buffer
s := &GzipSigil{writer: &buf}
@ -490,7 +490,7 @@ func TestGzipSigil_Good_CustomWriter(t *testing.T) {
// ── deriveKeyStream edge: exactly 32 bytes ─────────────────────────
func TestDeriveKeyStream_Good_ExactBlockSize(t *testing.T) {
func TestCryptoSigil_DeriveKeyStream_ExactBlockSize_Good(t *testing.T) {
ob := &XORObfuscator{}
data := make([]byte, 32) // Exactly one SHA-256 block.
for i := range data {
@ -505,7 +505,7 @@ func TestDeriveKeyStream_Good_ExactBlockSize(t *testing.T) {
// ── io.Reader fallback in In ───────────────────────────────────────
func TestChaChaPolySigil_Good_NilRandReader(t *testing.T) {
func TestCryptoSigil_ChaChaPolySigil_NilRandReader_Good(t *testing.T) {
key := make([]byte, 32)
_, _ = rand.Read(key)

View file

@ -12,6 +12,8 @@
// result, _ := sigil.Transmute(data, []sigil.Sigil{hexSigil, base64Sigil})
package sigil
import core "dappco.re/go/core"
// Sigil defines the interface for a data transformer.
//
// A Sigil represents a single transformation unit that can be applied to byte data.
@ -43,12 +45,14 @@ type Sigil interface {
// stops immediately and returns nil with that error.
//
// To reverse a transmutation, call each sigil's Out method in reverse order.
//
// result := sigil.Transmute(...)
func Transmute(data []byte, sigils []Sigil) ([]byte, error) {
var err error
for _, s := range sigils {
data, err = s.In(data)
if err != nil {
return nil, err
return nil, core.E("sigil.Transmute", "sigil in failed", err)
}
}
return data, nil
@ -59,12 +63,14 @@ func Transmute(data []byte, sigils []Sigil) ([]byte, error) {
// Each sigil's Out method is called in reverse order, with the output of one sigil
// becoming the input of the next. If any sigil returns an error, Untransmute
// stops immediately and returns nil with that error.
//
// result := sigil.Untransmute(...)
func Untransmute(data []byte, sigils []Sigil) ([]byte, error) {
var err error
for i := len(sigils) - 1; i >= 0; i-- {
data, err = sigils[i].Out(data)
if err != nil {
return nil, err
return nil, core.E("sigil.Untransmute", "sigil out failed", err)
}
}
return data, nil

View file

@ -17,7 +17,7 @@ import (
// ReverseSigil
// ---------------------------------------------------------------------------
func TestReverseSigil_Good(t *testing.T) {
func TestSigil_ReverseSigil_Good(t *testing.T) {
s := &ReverseSigil{}
out, err := s.In([]byte("hello"))
@ -30,7 +30,7 @@ func TestReverseSigil_Good(t *testing.T) {
assert.Equal(t, []byte("hello"), restored)
}
func TestReverseSigil_Bad(t *testing.T) {
func TestSigil_ReverseSigil_Bad(t *testing.T) {
s := &ReverseSigil{}
// Empty input returns empty.
@ -39,7 +39,7 @@ func TestReverseSigil_Bad(t *testing.T) {
assert.Equal(t, []byte{}, out)
}
func TestReverseSigil_Ugly(t *testing.T) {
func TestSigil_ReverseSigil_Ugly(t *testing.T) {
s := &ReverseSigil{}
// Nil input returns nil.
@ -56,7 +56,7 @@ func TestReverseSigil_Ugly(t *testing.T) {
// HexSigil
// ---------------------------------------------------------------------------
func TestHexSigil_Good(t *testing.T) {
func TestSigil_HexSigil_Good(t *testing.T) {
s := &HexSigil{}
data := []byte("hello world")
@ -69,7 +69,7 @@ func TestHexSigil_Good(t *testing.T) {
assert.Equal(t, data, decoded)
}
func TestHexSigil_Bad(t *testing.T) {
func TestSigil_HexSigil_Bad(t *testing.T) {
s := &HexSigil{}
// Invalid hex input.
@ -82,7 +82,7 @@ func TestHexSigil_Bad(t *testing.T) {
assert.Equal(t, []byte{}, out)
}
func TestHexSigil_Ugly(t *testing.T) {
func TestSigil_HexSigil_Ugly(t *testing.T) {
s := &HexSigil{}
out, err := s.In(nil)
@ -98,7 +98,7 @@ func TestHexSigil_Ugly(t *testing.T) {
// Base64Sigil
// ---------------------------------------------------------------------------
func TestBase64Sigil_Good(t *testing.T) {
func TestSigil_Base64Sigil_Good(t *testing.T) {
s := &Base64Sigil{}
data := []byte("composable transforms")
@ -111,7 +111,7 @@ func TestBase64Sigil_Good(t *testing.T) {
assert.Equal(t, data, decoded)
}
func TestBase64Sigil_Bad(t *testing.T) {
func TestSigil_Base64Sigil_Bad(t *testing.T) {
s := &Base64Sigil{}
// Invalid base64 (wrong padding).
@ -124,7 +124,7 @@ func TestBase64Sigil_Bad(t *testing.T) {
assert.Equal(t, []byte{}, out)
}
func TestBase64Sigil_Ugly(t *testing.T) {
func TestSigil_Base64Sigil_Ugly(t *testing.T) {
s := &Base64Sigil{}
out, err := s.In(nil)
@ -140,7 +140,7 @@ func TestBase64Sigil_Ugly(t *testing.T) {
// GzipSigil
// ---------------------------------------------------------------------------
func TestGzipSigil_Good(t *testing.T) {
func TestSigil_GzipSigil_Good(t *testing.T) {
s := &GzipSigil{}
data := []byte("the quick brown fox jumps over the lazy dog")
@ -153,7 +153,7 @@ func TestGzipSigil_Good(t *testing.T) {
assert.Equal(t, data, decompressed)
}
func TestGzipSigil_Bad(t *testing.T) {
func TestSigil_GzipSigil_Bad(t *testing.T) {
s := &GzipSigil{}
// Invalid gzip data.
@ -170,7 +170,7 @@ func TestGzipSigil_Bad(t *testing.T) {
assert.Equal(t, []byte{}, decompressed)
}
func TestGzipSigil_Ugly(t *testing.T) {
func TestSigil_GzipSigil_Ugly(t *testing.T) {
s := &GzipSigil{}
out, err := s.In(nil)
@ -186,7 +186,7 @@ func TestGzipSigil_Ugly(t *testing.T) {
// JSONSigil
// ---------------------------------------------------------------------------
func TestJSONSigil_Good(t *testing.T) {
func TestSigil_JSONSigil_Good(t *testing.T) {
s := &JSONSigil{Indent: false}
data := []byte(`{ "key" : "value" }`)
@ -200,7 +200,7 @@ func TestJSONSigil_Good(t *testing.T) {
assert.Equal(t, compacted, passthrough)
}
func TestJSONSigil_Good_Indent(t *testing.T) {
func TestSigil_JSONSigil_Indent_Good(t *testing.T) {
s := &JSONSigil{Indent: true}
data := []byte(`{"key":"value"}`)
@ -210,7 +210,7 @@ func TestJSONSigil_Good_Indent(t *testing.T) {
assert.Contains(t, string(indented), " ")
}
func TestJSONSigil_Bad(t *testing.T) {
func TestSigil_JSONSigil_Bad(t *testing.T) {
s := &JSONSigil{Indent: false}
// Invalid JSON.
@ -218,15 +218,16 @@ func TestJSONSigil_Bad(t *testing.T) {
assert.Error(t, err)
}
func TestJSONSigil_Ugly(t *testing.T) {
func TestSigil_JSONSigil_Ugly(t *testing.T) {
s := &JSONSigil{Indent: false}
// json.Compact on nil/empty will produce an error (invalid JSON).
_, err := s.In(nil)
assert.Error(t, err)
// Nil input is passed through without error, matching the Sigil contract.
out, err := s.In(nil)
require.NoError(t, err)
assert.Nil(t, out)
// Out with nil is passthrough.
out, err := s.Out(nil)
out, err = s.Out(nil)
require.NoError(t, err)
assert.Nil(t, out)
}
@ -235,7 +236,7 @@ func TestJSONSigil_Ugly(t *testing.T) {
// HashSigil
// ---------------------------------------------------------------------------
func TestHashSigil_Good(t *testing.T) {
func TestSigil_HashSigil_Good(t *testing.T) {
data := []byte("hash me")
tests := []struct {
@ -280,7 +281,7 @@ func TestHashSigil_Good(t *testing.T) {
}
}
func TestHashSigil_Bad(t *testing.T) {
func TestSigil_HashSigil_Bad(t *testing.T) {
// Unsupported hash constant.
s := &HashSigil{Hash: 0}
_, err := s.In([]byte("data"))
@ -288,7 +289,7 @@ func TestHashSigil_Bad(t *testing.T) {
assert.Contains(t, err.Error(), "not available")
}
func TestHashSigil_Ugly(t *testing.T) {
func TestSigil_HashSigil_Ugly(t *testing.T) {
// Hashing empty data should still produce a valid digest.
s, err := NewSigil("sha256")
require.NoError(t, err)
@ -302,7 +303,7 @@ func TestHashSigil_Ugly(t *testing.T) {
// NewSigil factory
// ---------------------------------------------------------------------------
func TestNewSigil_Good(t *testing.T) {
func TestSigil_NewSigil_Good(t *testing.T) {
names := []string{
"reverse", "hex", "base64", "gzip", "json", "json-indent",
"md4", "md5", "sha1", "sha224", "sha256", "sha384", "sha512",
@ -321,13 +322,13 @@ func TestNewSigil_Good(t *testing.T) {
}
}
func TestNewSigil_Bad(t *testing.T) {
func TestSigil_NewSigil_Bad(t *testing.T) {
_, err := NewSigil("nonexistent")
assert.Error(t, err)
assert.Contains(t, err.Error(), "unknown sigil name")
}
func TestNewSigil_Ugly(t *testing.T) {
func TestSigil_NewSigil_Ugly(t *testing.T) {
_, err := NewSigil("")
assert.Error(t, err)
}
@ -336,7 +337,7 @@ func TestNewSigil_Ugly(t *testing.T) {
// Transmute / Untransmute
// ---------------------------------------------------------------------------
func TestTransmute_Good(t *testing.T) {
func TestSigil_Transmute_Good(t *testing.T) {
data := []byte("round trip")
hexSigil, err := NewSigil("hex")
@ -355,7 +356,7 @@ func TestTransmute_Good(t *testing.T) {
assert.Equal(t, data, decoded)
}
func TestTransmute_Good_MultiSigil(t *testing.T) {
func TestSigil_Transmute_MultiSigil_Good(t *testing.T) {
data := []byte("multi sigil pipeline test data")
reverseSigil, err := NewSigil("reverse")
@ -375,7 +376,7 @@ func TestTransmute_Good_MultiSigil(t *testing.T) {
assert.Equal(t, data, decoded)
}
func TestTransmute_Good_GzipRoundTrip(t *testing.T) {
func TestSigil_Transmute_GzipRoundTrip_Good(t *testing.T) {
data := []byte("compress then encode then decode then decompress")
gzipSigil, err := NewSigil("gzip")
@ -393,7 +394,7 @@ func TestTransmute_Good_GzipRoundTrip(t *testing.T) {
assert.Equal(t, data, decoded)
}
func TestTransmute_Bad(t *testing.T) {
func TestSigil_Transmute_Bad(t *testing.T) {
// Transmute with a sigil that will fail: hex decode on non-hex input.
hexSigil := &HexSigil{}
@ -402,7 +403,7 @@ func TestTransmute_Bad(t *testing.T) {
assert.Error(t, err)
}
func TestTransmute_Ugly(t *testing.T) {
func TestSigil_Transmute_Ugly(t *testing.T) {
// Empty sigil chain is a no-op.
data := []byte("unchanged")

View file

@ -10,10 +10,9 @@ import (
"crypto/sha512"
"encoding/base64"
"encoding/hex"
"encoding/json"
"io"
coreerr "forge.lthn.ai/core/go-log"
core "dappco.re/go/core"
"golang.org/x/crypto/blake2b"
"golang.org/x/crypto/blake2s"
"golang.org/x/crypto/md4"
@ -26,6 +25,8 @@ import (
type ReverseSigil struct{}
// In reverses the bytes of the data.
//
// result := s.In(...)
func (s *ReverseSigil) In(data []byte) ([]byte, error) {
if data == nil {
return nil, nil
@ -38,6 +39,8 @@ func (s *ReverseSigil) In(data []byte) ([]byte, error) {
}
// Out reverses the bytes of the data.
//
// result := s.Out(...)
func (s *ReverseSigil) Out(data []byte) ([]byte, error) {
	// Byte reversal is an involution, so decoding reuses In verbatim.
	return s.In(data)
}
@ -47,6 +50,8 @@ func (s *ReverseSigil) Out(data []byte) ([]byte, error) {
type HexSigil struct{}
// In encodes the data to hexadecimal.
//
// result := s.In(...)
func (s *HexSigil) In(data []byte) ([]byte, error) {
if data == nil {
return nil, nil
@ -57,6 +62,8 @@ func (s *HexSigil) In(data []byte) ([]byte, error) {
}
// Out decodes the data from hexadecimal.
//
// result := s.Out(...)
func (s *HexSigil) Out(data []byte) ([]byte, error) {
if data == nil {
return nil, nil
@ -71,6 +78,8 @@ func (s *HexSigil) Out(data []byte) ([]byte, error) {
type Base64Sigil struct{}
// In encodes the data to base64.
//
// result := s.In(...)
func (s *Base64Sigil) In(data []byte) ([]byte, error) {
if data == nil {
return nil, nil
@ -81,6 +90,8 @@ func (s *Base64Sigil) In(data []byte) ([]byte, error) {
}
// Out decodes the data from base64.
//
// result := s.Out(...)
func (s *Base64Sigil) Out(data []byte) ([]byte, error) {
if data == nil {
return nil, nil
@ -97,6 +108,8 @@ type GzipSigil struct {
}
// In compresses the data using gzip.
//
// result := s.In(...)
func (s *GzipSigil) In(data []byte) ([]byte, error) {
if data == nil {
return nil, nil
@ -108,25 +121,31 @@ func (s *GzipSigil) In(data []byte) ([]byte, error) {
}
gz := gzip.NewWriter(w)
if _, err := gz.Write(data); err != nil {
return nil, err
return nil, core.E("sigil.GzipSigil.In", "write gzip payload", err)
}
if err := gz.Close(); err != nil {
return nil, err
return nil, core.E("sigil.GzipSigil.In", "close gzip writer", err)
}
return b.Bytes(), nil
}
// Out decompresses the data using gzip.
//
// result := s.Out(...)
// Out decompresses gzip-encoded data.
//
// A nil input is passed through as nil without error, matching the
// Sigil contract used by the other transformers in this package.
// Errors from opening or draining the gzip stream are wrapped with
// call-site context via core.E.
//
// result := s.Out(...)
func (s *GzipSigil) Out(data []byte) ([]byte, error) {
	if data == nil {
		return nil, nil
	}
	r, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, core.E("sigil.GzipSigil.Out", "open gzip reader", err)
	}
	// Close releases the reader's state; decompression errors surface
	// through ReadAll below, so the Close error is intentionally ignored.
	defer r.Close()
	out, err := io.ReadAll(r)
	if err != nil {
		return nil, core.E("sigil.GzipSigil.Out", "read gzip payload", err)
	}
	return out, nil
}
// JSONSigil is a Sigil that compacts or indents JSON data.
@ -134,18 +153,32 @@ func (s *GzipSigil) Out(data []byte) ([]byte, error) {
type JSONSigil struct{ Indent bool }
// In compacts or indents the JSON data.
//
// result := s.In(...)
func (s *JSONSigil) In(data []byte) ([]byte, error) {
if s.Indent {
var out bytes.Buffer
err := json.Indent(&out, data, "", " ")
return out.Bytes(), err
if data == nil {
return nil, nil
}
var out bytes.Buffer
err := json.Compact(&out, data)
return out.Bytes(), err
var decoded any
result := core.JSONUnmarshal(data, &decoded)
if !result.OK {
if err, ok := result.Value.(error); ok {
return nil, core.E("sigil.JSONSigil.In", "decode json", err)
}
return nil, core.E("sigil.JSONSigil.In", "decode json", nil)
}
compact := core.JSONMarshalString(decoded)
if s.Indent {
return []byte(indentJSON(compact)), nil
}
return []byte(compact), nil
}
// Out is a no-op for JSONSigil.
//
// result := s.Out(...)
func (s *JSONSigil) Out(data []byte) ([]byte, error) {
// For simplicity, Out is a no-op. The primary use is formatting.
return data, nil
@ -158,11 +191,15 @@ type HashSigil struct {
}
// NewHashSigil creates a new HashSigil.
//
// result := sigil.NewHashSigil(...)
func NewHashSigil(h crypto.Hash) *HashSigil {
	// Build the sigil with the selected algorithm preset and hand back
	// a pointer to it.
	sigil := HashSigil{Hash: h}
	return &sigil
}
// In hashes the data.
//
// result := s.In(...)
func (s *HashSigil) In(data []byte) ([]byte, error) {
var h io.Writer
switch s.Hash {
@ -204,7 +241,7 @@ func (s *HashSigil) In(data []byte) ([]byte, error) {
h, _ = blake2b.New512(nil)
default:
// MD5SHA1 is not supported as a direct hash
return nil, coreerr.E("sigil.HashSigil.In", "hash algorithm not available", nil)
return nil, core.E("sigil.HashSigil.In", "hash algorithm not available", nil)
}
h.Write(data)
@ -212,12 +249,16 @@ func (s *HashSigil) In(data []byte) ([]byte, error) {
}
// Out is a no-op for HashSigil.
//
// result := s.Out(...)
func (s *HashSigil) Out(data []byte) ([]byte, error) {
	// Hashing is one-way, so Out passes data through untouched.
	return data, nil
}
// NewSigil is a factory function that returns a Sigil based on a string name.
// It is the primary way to create Sigil instances.
//
// result := sigil.NewSigil(...)
func NewSigil(name string) (Sigil, error) {
switch name {
case "reverse":
@ -269,6 +310,72 @@ func NewSigil(name string) (Sigil, error) {
case "blake2b-512":
return NewHashSigil(crypto.BLAKE2b_512), nil
default:
return nil, coreerr.E("sigil.NewSigil", "unknown sigil name: "+name, nil)
return nil, core.E("sigil.NewSigil", core.Concat("unknown sigil name: ", name), nil)
}
}
// indentJSON pretty-prints a compact JSON string with two-space
// indentation. It scans the input byte by byte and tracks whether the
// cursor sits inside a quoted string (honouring backslash escapes) so
// that structural characters appearing in string values are copied
// verbatim rather than reformatted.
func indentJSON(compact string) string {
	if len(compact) == 0 {
		return ""
	}
	out := core.NewBuilder()
	depth := 0
	quoted := false
	slash := false
	// pad emits two spaces per indentation level.
	pad := func(level int) {
		for j := 0; j < level; j++ {
			out.WriteString("  ")
		}
	}
	for idx := 0; idx < len(compact); idx++ {
		c := compact[idx]
		if quoted {
			out.WriteByte(c)
			switch {
			case slash:
				slash = false
			case c == '\\':
				slash = true
			case c == '"':
				quoted = false
			}
			continue
		}
		switch c {
		case '"':
			quoted = true
			out.WriteByte(c)
		case '{', '[':
			out.WriteByte(c)
			// Only open a new line when the container is non-empty.
			if idx+1 < len(compact) && compact[idx+1] != '}' && compact[idx+1] != ']' {
				depth++
				out.WriteByte('\n')
				pad(depth)
			}
		case '}', ']':
			// Only close onto a new line when the container was non-empty.
			if idx > 0 && compact[idx-1] != '{' && compact[idx-1] != '[' {
				depth--
				out.WriteByte('\n')
				pad(depth)
			}
			out.WriteByte(c)
		case ',':
			out.WriteByte(c)
			out.WriteByte('\n')
			pad(depth)
		case ':':
			out.WriteString(": ")
		default:
			out.WriteByte(c)
		}
	}
	return out.String()
}

View file

@ -7,10 +7,9 @@ import (
goio "io"
"io/fs"
"path"
"strings"
"time"
coreerr "forge.lthn.ai/core/go-log"
core "dappco.re/go/core"
_ "modernc.org/sqlite" // Pure Go SQLite driver
)
@ -25,6 +24,8 @@ type Medium struct {
type Option func(*Medium)
// WithTable sets the table name (default: "files").
//
// result := sqlite.WithTable(...)
func WithTable(table string) Option {
return func(m *Medium) {
m.table = table
@ -40,7 +41,7 @@ func WithTable(table string) Option {
// _ = m.Write("config/app.yaml", "port: 8080")
func New(dbPath string, opts ...Option) (*Medium, error) {
if dbPath == "" {
return nil, coreerr.E("sqlite.New", "database path is required", nil)
return nil, core.E("sqlite.New", "database path is required", nil)
}
m := &Medium{table: "files"}
@ -50,13 +51,13 @@ func New(dbPath string, opts ...Option) (*Medium, error) {
db, err := sql.Open("sqlite", dbPath)
if err != nil {
return nil, coreerr.E("sqlite.New", "failed to open database", err)
return nil, core.E("sqlite.New", "failed to open database", err)
}
// Enable WAL mode for better concurrency
if _, err := db.Exec("PRAGMA journal_mode=WAL"); err != nil {
db.Close()
return nil, coreerr.E("sqlite.New", "failed to set WAL mode", err)
return nil, core.E("sqlite.New", "failed to set WAL mode", err)
}
// Create the schema
@ -69,7 +70,7 @@ func New(dbPath string, opts ...Option) (*Medium, error) {
)`
if _, err := db.Exec(createSQL); err != nil {
db.Close()
return nil, coreerr.E("sqlite.New", "failed to create table", err)
return nil, core.E("sqlite.New", "failed to create table", err)
}
m.db = db
@ -77,6 +78,8 @@ func New(dbPath string, opts ...Option) (*Medium, error) {
}
// Close closes the underlying database connection.
//
// result := m.Close(...)
func (m *Medium) Close() error {
if m.db != nil {
return m.db.Close()
@ -91,14 +94,16 @@ func cleanPath(p string) string {
if clean == "/" {
return ""
}
return strings.TrimPrefix(clean, "/")
return core.TrimPrefix(clean, "/")
}
// Read retrieves the content of a file as a string.
//
// result := m.Read(...)
func (m *Medium) Read(p string) (string, error) {
key := cleanPath(p)
if key == "" {
return "", coreerr.E("sqlite.Read", "path is required", fs.ErrInvalid)
return "", core.E("sqlite.Read", "path is required", fs.ErrInvalid)
}
var content []byte
@ -107,22 +112,24 @@ func (m *Medium) Read(p string) (string, error) {
`SELECT content, is_dir FROM `+m.table+` WHERE path = ?`, key,
).Scan(&content, &isDir)
if err == sql.ErrNoRows {
return "", coreerr.E("sqlite.Read", "file not found: "+key, fs.ErrNotExist)
return "", core.E("sqlite.Read", core.Concat("file not found: ", key), fs.ErrNotExist)
}
if err != nil {
return "", coreerr.E("sqlite.Read", "query failed: "+key, err)
return "", core.E("sqlite.Read", core.Concat("query failed: ", key), err)
}
if isDir {
return "", coreerr.E("sqlite.Read", "path is a directory: "+key, fs.ErrInvalid)
return "", core.E("sqlite.Read", core.Concat("path is a directory: ", key), fs.ErrInvalid)
}
return string(content), nil
}
// Write saves the given content to a file, overwriting it if it exists.
//
// result := m.Write(...)
func (m *Medium) Write(p, content string) error {
key := cleanPath(p)
if key == "" {
return coreerr.E("sqlite.Write", "path is required", fs.ErrInvalid)
return core.E("sqlite.Write", "path is required", fs.ErrInvalid)
}
_, err := m.db.Exec(
@ -131,12 +138,14 @@ func (m *Medium) Write(p, content string) error {
key, []byte(content), time.Now().UTC(),
)
if err != nil {
return coreerr.E("sqlite.Write", "insert failed: "+key, err)
return core.E("sqlite.Write", core.Concat("insert failed: ", key), err)
}
return nil
}
// EnsureDir makes sure a directory exists, creating it if necessary.
//
// result := m.EnsureDir(...)
func (m *Medium) EnsureDir(p string) error {
key := cleanPath(p)
if key == "" {
@ -150,12 +159,14 @@ func (m *Medium) EnsureDir(p string) error {
key, time.Now().UTC(),
)
if err != nil {
return coreerr.E("sqlite.EnsureDir", "insert failed: "+key, err)
return core.E("sqlite.EnsureDir", core.Concat("insert failed: ", key), err)
}
return nil
}
// IsFile checks if a path exists and is a regular file.
//
// result := m.IsFile(...)
func (m *Medium) IsFile(p string) bool {
key := cleanPath(p)
if key == "" {
@ -173,20 +184,26 @@ func (m *Medium) IsFile(p string) bool {
}
// FileGet is a convenience function that reads a file from the medium.
// It is an alias for Read, provided to satisfy the io.Medium interface.
func (m *Medium) FileGet(p string) (string, error) {
	return m.Read(p)
}
// FileSet is a convenience function that writes a file to the medium.
// It is an alias for Write, provided to satisfy the io.Medium interface.
func (m *Medium) FileSet(p, content string) error {
	return m.Write(p, content)
}
// Delete removes a file or empty directory.
//
// result := m.Delete(...)
func (m *Medium) Delete(p string) error {
key := cleanPath(p)
if key == "" {
return coreerr.E("sqlite.Delete", "path is required", fs.ErrInvalid)
return core.E("sqlite.Delete", "path is required", fs.ErrInvalid)
}
// Check if it's a directory with children
@ -195,10 +212,10 @@ func (m *Medium) Delete(p string) error {
`SELECT is_dir FROM `+m.table+` WHERE path = ?`, key,
).Scan(&isDir)
if err == sql.ErrNoRows {
return coreerr.E("sqlite.Delete", "path not found: "+key, fs.ErrNotExist)
return core.E("sqlite.Delete", core.Concat("path not found: ", key), fs.ErrNotExist)
}
if err != nil {
return coreerr.E("sqlite.Delete", "query failed: "+key, err)
return core.E("sqlite.Delete", core.Concat("query failed: ", key), err)
}
if isDir {
@ -209,29 +226,31 @@ func (m *Medium) Delete(p string) error {
`SELECT COUNT(*) FROM `+m.table+` WHERE path LIKE ? AND path != ?`, prefix+"%", key,
).Scan(&count)
if err != nil {
return coreerr.E("sqlite.Delete", "count failed: "+key, err)
return core.E("sqlite.Delete", core.Concat("count failed: ", key), err)
}
if count > 0 {
return coreerr.E("sqlite.Delete", "directory not empty: "+key, fs.ErrExist)
return core.E("sqlite.Delete", core.Concat("directory not empty: ", key), fs.ErrExist)
}
}
res, err := m.db.Exec(`DELETE FROM `+m.table+` WHERE path = ?`, key)
if err != nil {
return coreerr.E("sqlite.Delete", "delete failed: "+key, err)
return core.E("sqlite.Delete", core.Concat("delete failed: ", key), err)
}
n, _ := res.RowsAffected()
if n == 0 {
return coreerr.E("sqlite.Delete", "path not found: "+key, fs.ErrNotExist)
return core.E("sqlite.Delete", core.Concat("path not found: ", key), fs.ErrNotExist)
}
return nil
}
// DeleteAll removes a file or directory and all its contents recursively.
//
// result := m.DeleteAll(...)
func (m *Medium) DeleteAll(p string) error {
key := cleanPath(p)
if key == "" {
return coreerr.E("sqlite.DeleteAll", "path is required", fs.ErrInvalid)
return core.E("sqlite.DeleteAll", "path is required", fs.ErrInvalid)
}
prefix := key + "/"
@ -242,26 +261,28 @@ func (m *Medium) DeleteAll(p string) error {
key, prefix+"%",
)
if err != nil {
return coreerr.E("sqlite.DeleteAll", "delete failed: "+key, err)
return core.E("sqlite.DeleteAll", core.Concat("delete failed: ", key), err)
}
n, _ := res.RowsAffected()
if n == 0 {
return coreerr.E("sqlite.DeleteAll", "path not found: "+key, fs.ErrNotExist)
return core.E("sqlite.DeleteAll", core.Concat("path not found: ", key), fs.ErrNotExist)
}
return nil
}
// Rename moves a file or directory from oldPath to newPath.
//
// result := m.Rename(...)
func (m *Medium) Rename(oldPath, newPath string) error {
oldKey := cleanPath(oldPath)
newKey := cleanPath(newPath)
if oldKey == "" || newKey == "" {
return coreerr.E("sqlite.Rename", "both old and new paths are required", fs.ErrInvalid)
return core.E("sqlite.Rename", "both old and new paths are required", fs.ErrInvalid)
}
tx, err := m.db.Begin()
if err != nil {
return coreerr.E("sqlite.Rename", "begin tx failed", err)
return core.E("sqlite.Rename", "begin tx failed", err)
}
defer tx.Rollback()
@ -274,10 +295,10 @@ func (m *Medium) Rename(oldPath, newPath string) error {
`SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, oldKey,
).Scan(&content, &mode, &isDir, &mtime)
if err == sql.ErrNoRows {
return coreerr.E("sqlite.Rename", "source not found: "+oldKey, fs.ErrNotExist)
return core.E("sqlite.Rename", core.Concat("source not found: ", oldKey), fs.ErrNotExist)
}
if err != nil {
return coreerr.E("sqlite.Rename", "query failed: "+oldKey, err)
return core.E("sqlite.Rename", core.Concat("query failed: ", oldKey), err)
}
// Insert or replace at new path
@ -287,13 +308,13 @@ func (m *Medium) Rename(oldPath, newPath string) error {
newKey, content, mode, isDir, mtime,
)
if err != nil {
return coreerr.E("sqlite.Rename", "insert at new path failed: "+newKey, err)
return core.E("sqlite.Rename", core.Concat("insert at new path failed: ", newKey), err)
}
// Delete old path
_, err = tx.Exec(`DELETE FROM `+m.table+` WHERE path = ?`, oldKey)
if err != nil {
return coreerr.E("sqlite.Rename", "delete old path failed: "+oldKey, err)
return core.E("sqlite.Rename", core.Concat("delete old path failed: ", oldKey), err)
}
// If it's a directory, move all children
@ -306,7 +327,7 @@ func (m *Medium) Rename(oldPath, newPath string) error {
oldPrefix+"%",
)
if err != nil {
return coreerr.E("sqlite.Rename", "query children failed", err)
return core.E("sqlite.Rename", "query children failed", err)
}
type child struct {
@ -321,28 +342,28 @@ func (m *Medium) Rename(oldPath, newPath string) error {
var c child
if err := rows.Scan(&c.path, &c.content, &c.mode, &c.isDir, &c.mtime); err != nil {
rows.Close()
return coreerr.E("sqlite.Rename", "scan child failed", err)
return core.E("sqlite.Rename", "scan child failed", err)
}
children = append(children, c)
}
rows.Close()
for _, c := range children {
newChildPath := newPrefix + strings.TrimPrefix(c.path, oldPrefix)
newChildPath := core.Concat(newPrefix, core.TrimPrefix(c.path, oldPrefix))
_, err = tx.Exec(
`INSERT INTO `+m.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, ?, ?)
ON CONFLICT(path) DO UPDATE SET content = excluded.content, mode = excluded.mode, is_dir = excluded.is_dir, mtime = excluded.mtime`,
newChildPath, c.content, c.mode, c.isDir, c.mtime,
)
if err != nil {
return coreerr.E("sqlite.Rename", "insert child failed", err)
return core.E("sqlite.Rename", "insert child failed", err)
}
}
// Delete old children
_, err = tx.Exec(`DELETE FROM `+m.table+` WHERE path LIKE ?`, oldPrefix+"%")
if err != nil {
return coreerr.E("sqlite.Rename", "delete old children failed", err)
return core.E("sqlite.Rename", "delete old children failed", err)
}
}
@ -350,6 +371,8 @@ func (m *Medium) Rename(oldPath, newPath string) error {
}
// List returns the directory entries for the given path.
//
// result := m.List(...)
func (m *Medium) List(p string) ([]fs.DirEntry, error) {
prefix := cleanPath(p)
if prefix != "" {
@ -362,7 +385,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) {
prefix+"%", prefix+"%",
)
if err != nil {
return nil, coreerr.E("sqlite.List", "query failed", err)
return nil, core.E("sqlite.List", "query failed", err)
}
defer rows.Close()
@ -376,18 +399,19 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) {
var isDir bool
var mtime time.Time
if err := rows.Scan(&rowPath, &content, &mode, &isDir, &mtime); err != nil {
return nil, coreerr.E("sqlite.List", "scan failed", err)
return nil, core.E("sqlite.List", "scan failed", err)
}
rest := strings.TrimPrefix(rowPath, prefix)
rest := core.TrimPrefix(rowPath, prefix)
if rest == "" {
continue
}
// Check if this is a direct child or nested
if idx := strings.Index(rest, "/"); idx >= 0 {
parts := core.SplitN(rest, "/", 2)
if len(parts) == 2 {
// Nested - register as a directory
dirName := rest[:idx]
dirName := parts[0]
if !seen[dirName] {
seen[dirName] = true
entries = append(entries, &dirEntry{
@ -425,10 +449,12 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) {
}
// Stat returns file information for the given path.
//
// result := m.Stat(...)
func (m *Medium) Stat(p string) (fs.FileInfo, error) {
key := cleanPath(p)
if key == "" {
return nil, coreerr.E("sqlite.Stat", "path is required", fs.ErrInvalid)
return nil, core.E("sqlite.Stat", "path is required", fs.ErrInvalid)
}
var content []byte
@ -439,10 +465,10 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) {
`SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, key,
).Scan(&content, &mode, &isDir, &mtime)
if err == sql.ErrNoRows {
return nil, coreerr.E("sqlite.Stat", "path not found: "+key, fs.ErrNotExist)
return nil, core.E("sqlite.Stat", core.Concat("path not found: ", key), fs.ErrNotExist)
}
if err != nil {
return nil, coreerr.E("sqlite.Stat", "query failed: "+key, err)
return nil, core.E("sqlite.Stat", core.Concat("query failed: ", key), err)
}
name := path.Base(key)
@ -456,10 +482,12 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) {
}
// Open opens the named file for reading.
//
// result := m.Open(...)
func (m *Medium) Open(p string) (fs.File, error) {
key := cleanPath(p)
if key == "" {
return nil, coreerr.E("sqlite.Open", "path is required", fs.ErrInvalid)
return nil, core.E("sqlite.Open", "path is required", fs.ErrInvalid)
}
var content []byte
@ -470,13 +498,13 @@ func (m *Medium) Open(p string) (fs.File, error) {
`SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, key,
).Scan(&content, &mode, &isDir, &mtime)
if err == sql.ErrNoRows {
return nil, coreerr.E("sqlite.Open", "file not found: "+key, fs.ErrNotExist)
return nil, core.E("sqlite.Open", core.Concat("file not found: ", key), fs.ErrNotExist)
}
if err != nil {
return nil, coreerr.E("sqlite.Open", "query failed: "+key, err)
return nil, core.E("sqlite.Open", core.Concat("query failed: ", key), err)
}
if isDir {
return nil, coreerr.E("sqlite.Open", "path is a directory: "+key, fs.ErrInvalid)
return nil, core.E("sqlite.Open", core.Concat("path is a directory: ", key), fs.ErrInvalid)
}
return &sqliteFile{
@ -488,10 +516,12 @@ func (m *Medium) Open(p string) (fs.File, error) {
}
// Create creates or truncates the named file.
//
// result := m.Create(...)
func (m *Medium) Create(p string) (goio.WriteCloser, error) {
key := cleanPath(p)
if key == "" {
return nil, coreerr.E("sqlite.Create", "path is required", fs.ErrInvalid)
return nil, core.E("sqlite.Create", "path is required", fs.ErrInvalid)
}
return &sqliteWriteCloser{
medium: m,
@ -500,10 +530,12 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) {
}
// Append opens the named file for appending, creating it if it doesn't exist.
//
// result := m.Append(...)
func (m *Medium) Append(p string) (goio.WriteCloser, error) {
key := cleanPath(p)
if key == "" {
return nil, coreerr.E("sqlite.Append", "path is required", fs.ErrInvalid)
return nil, core.E("sqlite.Append", "path is required", fs.ErrInvalid)
}
var existing []byte
@ -511,7 +543,7 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) {
`SELECT content FROM `+m.table+` WHERE path = ? AND is_dir = FALSE`, key,
).Scan(&existing)
if err != nil && err != sql.ErrNoRows {
return nil, coreerr.E("sqlite.Append", "query failed: "+key, err)
return nil, core.E("sqlite.Append", core.Concat("query failed: ", key), err)
}
return &sqliteWriteCloser{
@ -522,10 +554,12 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) {
}
// ReadStream returns a reader for the file content.
//
// result := m.ReadStream(...)
func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) {
key := cleanPath(p)
if key == "" {
return nil, coreerr.E("sqlite.ReadStream", "path is required", fs.ErrInvalid)
return nil, core.E("sqlite.ReadStream", "path is required", fs.ErrInvalid)
}
var content []byte
@ -534,24 +568,28 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) {
`SELECT content, is_dir FROM `+m.table+` WHERE path = ?`, key,
).Scan(&content, &isDir)
if err == sql.ErrNoRows {
return nil, coreerr.E("sqlite.ReadStream", "file not found: "+key, fs.ErrNotExist)
return nil, core.E("sqlite.ReadStream", core.Concat("file not found: ", key), fs.ErrNotExist)
}
if err != nil {
return nil, coreerr.E("sqlite.ReadStream", "query failed: "+key, err)
return nil, core.E("sqlite.ReadStream", core.Concat("query failed: ", key), err)
}
if isDir {
return nil, coreerr.E("sqlite.ReadStream", "path is a directory: "+key, fs.ErrInvalid)
return nil, core.E("sqlite.ReadStream", core.Concat("path is a directory: ", key), fs.ErrInvalid)
}
return goio.NopCloser(bytes.NewReader(content)), nil
}
// WriteStream returns a writer for the file content. It delegates to
// Create: data is buffered in memory and persisted to the database only
// when the returned WriteCloser is closed.
func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) {
	return m.Create(p)
}
// Exists checks if a path exists (file or directory).
//
// result := m.Exists(...)
func (m *Medium) Exists(p string) bool {
key := cleanPath(p)
if key == "" {
@ -570,6 +608,8 @@ func (m *Medium) Exists(p string) bool {
}
// IsDir checks if a path exists and is a directory.
//
// result := m.IsDir(...)
func (m *Medium) IsDir(p string) bool {
key := cleanPath(p)
if key == "" {
@ -597,11 +637,34 @@ type fileInfo struct {
isDir bool
}
// Name returns the base name of the file.
func (fi *fileInfo) Name() string { return fi.name }

// Size returns the length of the file content in bytes.
func (fi *fileInfo) Size() int64 { return fi.size }

// Mode returns the file's mode bits.
func (fi *fileInfo) Mode() fs.FileMode { return fi.mode }

// ModTime returns the file's last modification time.
func (fi *fileInfo) ModTime() time.Time { return fi.modTime }

// IsDir reports whether the entry describes a directory.
func (fi *fileInfo) IsDir() bool { return fi.isDir }

// Sys returns the underlying data source; it is always nil for
// SQLite-backed entries.
func (fi *fileInfo) Sys() any { return nil }
// dirEntry implements fs.DirEntry for SQLite listings.
@ -612,9 +675,24 @@ type dirEntry struct {
info fs.FileInfo
}
// Name returns the entry's base name.
func (de *dirEntry) Name() string { return de.name }

// IsDir reports whether the entry is a directory.
func (de *dirEntry) IsDir() bool { return de.isDir }

// Type returns the type bits of the entry's file mode.
func (de *dirEntry) Type() fs.FileMode { return de.mode.Type() }

// Info returns the FileInfo captured when the listing was built; the
// returned error is always nil.
func (de *dirEntry) Info() (fs.FileInfo, error) { return de.info, nil }
// sqliteFile implements fs.File for SQLite entries.
@ -626,6 +704,9 @@ type sqliteFile struct {
modTime time.Time
}
// Stat documents the Stat operation.
//
// result := f.Stat(...)
func (f *sqliteFile) Stat() (fs.FileInfo, error) {
return &fileInfo{
name: f.name,
@ -635,6 +716,9 @@ func (f *sqliteFile) Stat() (fs.FileInfo, error) {
}, nil
}
// Read documents the Read operation.
//
// result := f.Read(...)
func (f *sqliteFile) Read(b []byte) (int, error) {
if f.offset >= int64(len(f.content)) {
return 0, goio.EOF
@ -644,6 +728,9 @@ func (f *sqliteFile) Read(b []byte) (int, error) {
return n, nil
}
// Close releases the file. The content is held entirely in memory, so
// there is nothing to release and Close always returns nil.
func (f *sqliteFile) Close() error {
	return nil
}
@ -655,11 +742,17 @@ type sqliteWriteCloser struct {
data []byte
}
// Write appends p to the in-memory buffer and reports the full length
// written; it never fails. Data is persisted to the database only when
// Close is called.
func (w *sqliteWriteCloser) Write(p []byte) (int, error) {
	w.data = append(w.data, p...)
	return len(p), nil
}
// Close documents the Close operation.
//
// result := w.Close(...)
func (w *sqliteWriteCloser) Close() error {
_, err := w.medium.db.Exec(
`INSERT INTO `+w.medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, 420, FALSE, ?)
@ -667,7 +760,7 @@ func (w *sqliteWriteCloser) Close() error {
w.path, w.data, time.Now().UTC(),
)
if err != nil {
return coreerr.E("sqlite.WriteCloser.Close", "store failed: "+w.path, err)
return core.E("sqlite.WriteCloser.Close", core.Concat("store failed: ", w.path), err)
}
return nil
}

View file

@ -3,9 +3,9 @@ package sqlite
import (
goio "io"
"io/fs"
"strings"
"testing"
core "dappco.re/go/core"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -20,21 +20,21 @@ func newTestMedium(t *testing.T) *Medium {
// --- Constructor Tests ---
func TestNew_Good(t *testing.T) {
// TestSqlite_New_Good verifies that New with an in-memory database
// succeeds and defaults the table name to "files".
func TestSqlite_New_Good(t *testing.T) {
	m, err := New(":memory:")
	require.NoError(t, err)
	defer m.Close()
	assert.Equal(t, "files", m.table)
}
func TestNew_Good_WithTable(t *testing.T) {
// TestSqlite_New_WithTable_Good verifies that the WithTable option
// overrides the default table name.
func TestSqlite_New_WithTable_Good(t *testing.T) {
	m, err := New(":memory:", WithTable("custom"))
	require.NoError(t, err)
	defer m.Close()
	assert.Equal(t, "custom", m.table)
}
func TestNew_Bad_EmptyPath(t *testing.T) {
func TestSqlite_New_EmptyPath_Bad(t *testing.T) {
_, err := New("")
assert.Error(t, err)
assert.Contains(t, err.Error(), "database path is required")
@ -42,7 +42,7 @@ func TestNew_Bad_EmptyPath(t *testing.T) {
// --- Read/Write Tests ---
func TestReadWrite_Good(t *testing.T) {
func TestSqlite_ReadWrite_Good(t *testing.T) {
m := newTestMedium(t)
err := m.Write("hello.txt", "world")
@ -53,7 +53,7 @@ func TestReadWrite_Good(t *testing.T) {
assert.Equal(t, "world", content)
}
func TestReadWrite_Good_Overwrite(t *testing.T) {
func TestSqlite_ReadWrite_Overwrite_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("file.txt", "first"))
@ -64,7 +64,7 @@ func TestReadWrite_Good_Overwrite(t *testing.T) {
assert.Equal(t, "second", content)
}
func TestReadWrite_Good_NestedPath(t *testing.T) {
func TestSqlite_ReadWrite_NestedPath_Good(t *testing.T) {
m := newTestMedium(t)
err := m.Write("a/b/c.txt", "nested")
@ -75,28 +75,28 @@ func TestReadWrite_Good_NestedPath(t *testing.T) {
assert.Equal(t, "nested", content)
}
func TestRead_Bad_NotFound(t *testing.T) {
// TestSqlite_Read_NotFound_Bad verifies Read returns an error for a
// file that does not exist.
func TestSqlite_Read_NotFound_Bad(t *testing.T) {
	m := newTestMedium(t)
	_, err := m.Read("nonexistent.txt")
	assert.Error(t, err)
}
func TestRead_Bad_EmptyPath(t *testing.T) {
// TestSqlite_Read_EmptyPath_Bad verifies Read rejects an empty path.
func TestSqlite_Read_EmptyPath_Bad(t *testing.T) {
	m := newTestMedium(t)
	_, err := m.Read("")
	assert.Error(t, err)
}
func TestWrite_Bad_EmptyPath(t *testing.T) {
// TestSqlite_Write_EmptyPath_Bad verifies Write rejects an empty path.
func TestSqlite_Write_EmptyPath_Bad(t *testing.T) {
	m := newTestMedium(t)
	err := m.Write("", "content")
	assert.Error(t, err)
}
func TestRead_Bad_IsDirectory(t *testing.T) {
func TestSqlite_Read_IsDirectory_Bad(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.EnsureDir("mydir"))
@ -106,7 +106,7 @@ func TestRead_Bad_IsDirectory(t *testing.T) {
// --- EnsureDir Tests ---
func TestEnsureDir_Good(t *testing.T) {
func TestSqlite_EnsureDir_Good(t *testing.T) {
m := newTestMedium(t)
err := m.EnsureDir("mydir")
@ -114,14 +114,14 @@ func TestEnsureDir_Good(t *testing.T) {
assert.True(t, m.IsDir("mydir"))
}
func TestEnsureDir_Good_EmptyPath(t *testing.T) {
// TestSqlite_EnsureDir_EmptyPath_Good verifies EnsureDir treats the
// empty (root) path as a no-op success.
func TestSqlite_EnsureDir_EmptyPath_Good(t *testing.T) {
	m := newTestMedium(t)
	// Root always exists, no-op
	err := m.EnsureDir("")
	assert.NoError(t, err)
}
func TestEnsureDir_Good_Idempotent(t *testing.T) {
func TestSqlite_EnsureDir_Idempotent_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.EnsureDir("mydir"))
@ -131,7 +131,7 @@ func TestEnsureDir_Good_Idempotent(t *testing.T) {
// --- IsFile Tests ---
func TestIsFile_Good(t *testing.T) {
func TestSqlite_IsFile_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("file.txt", "content"))
@ -145,7 +145,7 @@ func TestIsFile_Good(t *testing.T) {
// --- FileGet/FileSet Tests ---
func TestFileGetFileSet_Good(t *testing.T) {
func TestSqlite_FileGetFileSet_Good(t *testing.T) {
m := newTestMedium(t)
err := m.FileSet("key.txt", "value")
@ -158,7 +158,7 @@ func TestFileGetFileSet_Good(t *testing.T) {
// --- Delete Tests ---
func TestDelete_Good(t *testing.T) {
func TestSqlite_Delete_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("to-delete.txt", "content"))
@ -169,7 +169,7 @@ func TestDelete_Good(t *testing.T) {
assert.False(t, m.Exists("to-delete.txt"))
}
func TestDelete_Good_EmptyDir(t *testing.T) {
func TestSqlite_Delete_EmptyDir_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.EnsureDir("emptydir"))
@ -180,21 +180,21 @@ func TestDelete_Good_EmptyDir(t *testing.T) {
assert.False(t, m.IsDir("emptydir"))
}
func TestDelete_Bad_NotFound(t *testing.T) {
// TestSqlite_Delete_NotFound_Bad verifies Delete returns an error for
// a path that does not exist.
func TestSqlite_Delete_NotFound_Bad(t *testing.T) {
	m := newTestMedium(t)
	err := m.Delete("nonexistent")
	assert.Error(t, err)
}
func TestDelete_Bad_EmptyPath(t *testing.T) {
// TestSqlite_Delete_EmptyPath_Bad verifies Delete rejects an empty path.
func TestSqlite_Delete_EmptyPath_Bad(t *testing.T) {
	m := newTestMedium(t)
	err := m.Delete("")
	assert.Error(t, err)
}
func TestDelete_Bad_NotEmpty(t *testing.T) {
func TestSqlite_Delete_NotEmpty_Bad(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.EnsureDir("mydir"))
@ -206,7 +206,7 @@ func TestDelete_Bad_NotEmpty(t *testing.T) {
// --- DeleteAll Tests ---
func TestDeleteAll_Good(t *testing.T) {
func TestSqlite_DeleteAll_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("dir/file1.txt", "a"))
@ -221,7 +221,7 @@ func TestDeleteAll_Good(t *testing.T) {
assert.True(t, m.Exists("other.txt"))
}
func TestDeleteAll_Good_SingleFile(t *testing.T) {
func TestSqlite_DeleteAll_SingleFile_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("file.txt", "content"))
@ -231,14 +231,14 @@ func TestDeleteAll_Good_SingleFile(t *testing.T) {
assert.False(t, m.Exists("file.txt"))
}
func TestDeleteAll_Bad_NotFound(t *testing.T) {
// TestSqlite_DeleteAll_NotFound_Bad verifies DeleteAll returns an error
// for a path that does not exist.
func TestSqlite_DeleteAll_NotFound_Bad(t *testing.T) {
	m := newTestMedium(t)
	err := m.DeleteAll("nonexistent")
	assert.Error(t, err)
}
func TestDeleteAll_Bad_EmptyPath(t *testing.T) {
func TestSqlite_DeleteAll_EmptyPath_Bad(t *testing.T) {
m := newTestMedium(t)
err := m.DeleteAll("")
@ -247,7 +247,7 @@ func TestDeleteAll_Bad_EmptyPath(t *testing.T) {
// --- Rename Tests ---
func TestRename_Good(t *testing.T) {
func TestSqlite_Rename_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("old.txt", "content"))
@ -263,7 +263,7 @@ func TestRename_Good(t *testing.T) {
assert.Equal(t, "content", content)
}
func TestRename_Good_Directory(t *testing.T) {
func TestSqlite_Rename_Directory_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.EnsureDir("olddir"))
@ -282,14 +282,14 @@ func TestRename_Good_Directory(t *testing.T) {
assert.Equal(t, "content", content)
}
func TestRename_Bad_SourceNotFound(t *testing.T) {
// TestSqlite_Rename_SourceNotFound_Bad verifies Rename returns an error
// when the source path does not exist.
func TestSqlite_Rename_SourceNotFound_Bad(t *testing.T) {
	m := newTestMedium(t)
	err := m.Rename("nonexistent", "new")
	assert.Error(t, err)
}
func TestRename_Bad_EmptyPath(t *testing.T) {
func TestSqlite_Rename_EmptyPath_Bad(t *testing.T) {
m := newTestMedium(t)
err := m.Rename("", "new")
@ -301,7 +301,7 @@ func TestRename_Bad_EmptyPath(t *testing.T) {
// --- List Tests ---
func TestList_Good(t *testing.T) {
func TestSqlite_List_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("dir/file1.txt", "a"))
@ -322,7 +322,7 @@ func TestList_Good(t *testing.T) {
assert.Len(t, entries, 3)
}
func TestList_Good_Root(t *testing.T) {
func TestSqlite_List_Root_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("root.txt", "content"))
@ -340,7 +340,7 @@ func TestList_Good_Root(t *testing.T) {
assert.True(t, names["dir"])
}
func TestList_Good_DirectoryEntry(t *testing.T) {
func TestSqlite_List_DirectoryEntry_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("dir/sub/file.txt", "content"))
@ -359,7 +359,7 @@ func TestList_Good_DirectoryEntry(t *testing.T) {
// --- Stat Tests ---
func TestStat_Good(t *testing.T) {
func TestSqlite_Stat_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("file.txt", "hello world"))
@ -371,7 +371,7 @@ func TestStat_Good(t *testing.T) {
assert.False(t, info.IsDir())
}
func TestStat_Good_Directory(t *testing.T) {
func TestSqlite_Stat_Directory_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.EnsureDir("mydir"))
@ -382,14 +382,14 @@ func TestStat_Good_Directory(t *testing.T) {
assert.True(t, info.IsDir())
}
func TestStat_Bad_NotFound(t *testing.T) {
// TestSqlite_Stat_NotFound_Bad verifies Stat returns an error for a
// path that does not exist.
func TestSqlite_Stat_NotFound_Bad(t *testing.T) {
	m := newTestMedium(t)
	_, err := m.Stat("nonexistent")
	assert.Error(t, err)
}
func TestStat_Bad_EmptyPath(t *testing.T) {
func TestSqlite_Stat_EmptyPath_Bad(t *testing.T) {
m := newTestMedium(t)
_, err := m.Stat("")
@ -398,7 +398,7 @@ func TestStat_Bad_EmptyPath(t *testing.T) {
// --- Open Tests ---
func TestOpen_Good(t *testing.T) {
func TestSqlite_Open_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("file.txt", "open me"))
@ -416,14 +416,14 @@ func TestOpen_Good(t *testing.T) {
assert.Equal(t, "file.txt", stat.Name())
}
func TestOpen_Bad_NotFound(t *testing.T) {
// TestSqlite_Open_NotFound_Bad verifies Open returns an error for a
// file that does not exist.
func TestSqlite_Open_NotFound_Bad(t *testing.T) {
	m := newTestMedium(t)
	_, err := m.Open("nonexistent.txt")
	assert.Error(t, err)
}
func TestOpen_Bad_IsDirectory(t *testing.T) {
func TestSqlite_Open_IsDirectory_Bad(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.EnsureDir("mydir"))
@ -433,7 +433,7 @@ func TestOpen_Bad_IsDirectory(t *testing.T) {
// --- Create Tests ---
func TestCreate_Good(t *testing.T) {
func TestSqlite_Create_Good(t *testing.T) {
m := newTestMedium(t)
w, err := m.Create("new.txt")
@ -451,7 +451,7 @@ func TestCreate_Good(t *testing.T) {
assert.Equal(t, "created", content)
}
func TestCreate_Good_Overwrite(t *testing.T) {
func TestSqlite_Create_Overwrite_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("file.txt", "old content"))
@ -467,7 +467,7 @@ func TestCreate_Good_Overwrite(t *testing.T) {
assert.Equal(t, "new", content)
}
func TestCreate_Bad_EmptyPath(t *testing.T) {
func TestSqlite_Create_EmptyPath_Bad(t *testing.T) {
m := newTestMedium(t)
_, err := m.Create("")
@ -476,7 +476,7 @@ func TestCreate_Bad_EmptyPath(t *testing.T) {
// --- Append Tests ---
func TestAppend_Good(t *testing.T) {
func TestSqlite_Append_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("append.txt", "hello"))
@ -493,7 +493,7 @@ func TestAppend_Good(t *testing.T) {
assert.Equal(t, "hello world", content)
}
func TestAppend_Good_NewFile(t *testing.T) {
func TestSqlite_Append_NewFile_Good(t *testing.T) {
m := newTestMedium(t)
w, err := m.Append("new.txt")
@ -508,7 +508,7 @@ func TestAppend_Good_NewFile(t *testing.T) {
assert.Equal(t, "fresh", content)
}
func TestAppend_Bad_EmptyPath(t *testing.T) {
func TestSqlite_Append_EmptyPath_Bad(t *testing.T) {
m := newTestMedium(t)
_, err := m.Append("")
@ -517,7 +517,7 @@ func TestAppend_Bad_EmptyPath(t *testing.T) {
// --- ReadStream Tests ---
func TestReadStream_Good(t *testing.T) {
func TestSqlite_ReadStream_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("stream.txt", "streaming content"))
@ -531,14 +531,14 @@ func TestReadStream_Good(t *testing.T) {
assert.Equal(t, "streaming content", string(data))
}
func TestReadStream_Bad_NotFound(t *testing.T) {
// TestSqlite_ReadStream_NotFound_Bad verifies ReadStream returns an
// error for a file that does not exist.
func TestSqlite_ReadStream_NotFound_Bad(t *testing.T) {
	m := newTestMedium(t)
	_, err := m.ReadStream("nonexistent.txt")
	assert.Error(t, err)
}
func TestReadStream_Bad_IsDirectory(t *testing.T) {
func TestSqlite_ReadStream_IsDirectory_Bad(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.EnsureDir("mydir"))
@ -548,13 +548,13 @@ func TestReadStream_Bad_IsDirectory(t *testing.T) {
// --- WriteStream Tests ---
func TestWriteStream_Good(t *testing.T) {
func TestSqlite_WriteStream_Good(t *testing.T) {
m := newTestMedium(t)
writer, err := m.WriteStream("output.txt")
require.NoError(t, err)
_, err = goio.Copy(writer, strings.NewReader("piped data"))
_, err = goio.Copy(writer, core.NewReader("piped data"))
require.NoError(t, err)
require.NoError(t, writer.Close())
@ -565,7 +565,7 @@ func TestWriteStream_Good(t *testing.T) {
// --- Exists Tests ---
func TestExists_Good(t *testing.T) {
func TestSqlite_Exists_Good(t *testing.T) {
m := newTestMedium(t)
assert.False(t, m.Exists("nonexistent"))
@ -577,7 +577,7 @@ func TestExists_Good(t *testing.T) {
assert.True(t, m.Exists("mydir"))
}
func TestExists_Good_EmptyPath(t *testing.T) {
func TestSqlite_Exists_EmptyPath_Good(t *testing.T) {
m := newTestMedium(t)
// Root always exists
assert.True(t, m.Exists(""))
@ -585,7 +585,7 @@ func TestExists_Good_EmptyPath(t *testing.T) {
// --- IsDir Tests ---
func TestIsDir_Good(t *testing.T) {
func TestSqlite_IsDir_Good(t *testing.T) {
m := newTestMedium(t)
require.NoError(t, m.Write("file.txt", "content"))
@ -599,7 +599,7 @@ func TestIsDir_Good(t *testing.T) {
// --- cleanPath Tests ---
func TestCleanPath_Good(t *testing.T) {
func TestSqlite_CleanPath_Good(t *testing.T) {
assert.Equal(t, "file.txt", cleanPath("file.txt"))
assert.Equal(t, "dir/file.txt", cleanPath("dir/file.txt"))
assert.Equal(t, "file.txt", cleanPath("/file.txt"))
@ -612,7 +612,7 @@ func TestCleanPath_Good(t *testing.T) {
// --- Interface Compliance ---
func TestInterfaceCompliance_Ugly(t *testing.T) {
func TestSqlite_InterfaceCompliance_Ugly(t *testing.T) {
m := newTestMedium(t)
// Verify all methods exist by asserting the interface shape.
@ -640,7 +640,7 @@ func TestInterfaceCompliance_Ugly(t *testing.T) {
// --- Custom Table ---
func TestCustomTable_Good(t *testing.T) {
func TestSqlite_CustomTable_Good(t *testing.T) {
m, err := New(":memory:", WithTable("my_files"))
require.NoError(t, err)
defer m.Close()

View file

@ -4,10 +4,9 @@ import (
goio "io"
"io/fs"
"path"
"strings"
"time"
coreerr "forge.lthn.ai/core/go-log"
core "dappco.re/go/core"
)
// Medium wraps a Store to satisfy the io.Medium interface.
@ -33,16 +32,22 @@ func NewMedium(dbPath string) (*Medium, error) {
}
// AsMedium returns a Medium adapter for an existing Store, allowing the
// KV store to be used through the io.Medium interface.
func (s *Store) AsMedium() *Medium {
	return &Medium{s: s}
}
// Store returns the underlying KV store for direct access, bypassing
// the path-based Medium API.
func (m *Medium) Store() *Store {
	return m.s
}
// Close closes the underlying store and releases its resources.
func (m *Medium) Close() error {
	return m.s.Close()
}
@ -51,11 +56,11 @@ func (m *Medium) Close() error {
// First segment = group, remainder = key.
func splitPath(p string) (group, key string) {
clean := path.Clean(p)
clean = strings.TrimPrefix(clean, "/")
clean = core.TrimPrefix(clean, "/")
if clean == "" || clean == "." {
return "", ""
}
parts := strings.SplitN(clean, "/", 2)
parts := core.SplitN(clean, "/", 2)
if len(parts) == 1 {
return parts[0], ""
}
@ -63,29 +68,37 @@ func splitPath(p string) (group, key string) {
}
// Read retrieves the value at group/key.
//
// result := m.Read(...)
func (m *Medium) Read(p string) (string, error) {
group, key := splitPath(p)
if key == "" {
return "", coreerr.E("store.Read", "path must include group/key", fs.ErrInvalid)
return "", core.E("store.Read", "path must include group/key", fs.ErrInvalid)
}
return m.s.Get(group, key)
}
// Write stores a value at group/key.
//
// result := m.Write(...)
func (m *Medium) Write(p, content string) error {
group, key := splitPath(p)
if key == "" {
return coreerr.E("store.Write", "path must include group/key", fs.ErrInvalid)
return core.E("store.Write", "path must include group/key", fs.ErrInvalid)
}
return m.s.Set(group, key, content)
}
// EnsureDir is a no-op — groups are created implicitly on Set.
//
// result := m.EnsureDir(...)
func (m *Medium) EnsureDir(_ string) error {
return nil
}
// IsFile returns true if a group/key pair exists.
//
// result := m.IsFile(...)
func (m *Medium) IsFile(p string) bool {
group, key := splitPath(p)
if key == "" {
@ -96,20 +109,26 @@ func (m *Medium) IsFile(p string) bool {
}
// FileGet is an alias for Read, provided for Medium interface parity.
func (m *Medium) FileGet(p string) (string, error) {
	return m.Read(p)
}
// FileSet is an alias for Write, provided for Medium interface parity.
func (m *Medium) FileSet(p, content string) error {
	return m.Write(p, content)
}
// Delete removes a key, or checks that a group is empty.
//
// result := m.Delete(...)
func (m *Medium) Delete(p string) error {
group, key := splitPath(p)
if group == "" {
return coreerr.E("store.Delete", "path is required", fs.ErrInvalid)
return core.E("store.Delete", "path is required", fs.ErrInvalid)
}
if key == "" {
n, err := m.s.Count(group)
@ -117,7 +136,7 @@ func (m *Medium) Delete(p string) error {
return err
}
if n > 0 {
return coreerr.E("store.Delete", "group not empty: "+group, fs.ErrExist)
return core.E("store.Delete", core.Concat("group not empty: ", group), fs.ErrExist)
}
return nil
}
@ -125,10 +144,12 @@ func (m *Medium) Delete(p string) error {
}
// DeleteAll removes a key, or all keys in a group.
//
// result := m.DeleteAll(...)
func (m *Medium) DeleteAll(p string) error {
group, key := splitPath(p)
if group == "" {
return coreerr.E("store.DeleteAll", "path is required", fs.ErrInvalid)
return core.E("store.DeleteAll", "path is required", fs.ErrInvalid)
}
if key == "" {
return m.s.DeleteGroup(group)
@ -137,11 +158,13 @@ func (m *Medium) DeleteAll(p string) error {
}
// Rename moves a key from one path to another.
//
// result := m.Rename(...)
func (m *Medium) Rename(oldPath, newPath string) error {
og, ok := splitPath(oldPath)
ng, nk := splitPath(newPath)
if ok == "" || nk == "" {
return coreerr.E("store.Rename", "both paths must include group/key", fs.ErrInvalid)
return core.E("store.Rename", "both paths must include group/key", fs.ErrInvalid)
}
val, err := m.s.Get(og, ok)
if err != nil {
@ -155,13 +178,15 @@ func (m *Medium) Rename(oldPath, newPath string) error {
// List returns directory entries. Empty path returns groups.
// A group path returns keys in that group.
//
// result := m.List(...)
func (m *Medium) List(p string) ([]fs.DirEntry, error) {
group, key := splitPath(p)
if group == "" {
rows, err := m.s.db.Query("SELECT DISTINCT grp FROM kv ORDER BY grp")
if err != nil {
return nil, coreerr.E("store.List", "query groups", err)
return nil, core.E("store.List", "query groups", err)
}
defer rows.Close()
@ -169,7 +194,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) {
for rows.Next() {
var g string
if err := rows.Scan(&g); err != nil {
return nil, coreerr.E("store.List", "scan", err)
return nil, core.E("store.List", "scan", err)
}
entries = append(entries, &kvDirEntry{name: g, isDir: true})
}
@ -192,10 +217,12 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) {
}
// Stat returns file info for a group (dir) or key (file).
//
// result := m.Stat(...)
func (m *Medium) Stat(p string) (fs.FileInfo, error) {
group, key := splitPath(p)
if group == "" {
return nil, coreerr.E("store.Stat", "path is required", fs.ErrInvalid)
return nil, core.E("store.Stat", "path is required", fs.ErrInvalid)
}
if key == "" {
n, err := m.s.Count(group)
@ -203,7 +230,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) {
return nil, err
}
if n == 0 {
return nil, coreerr.E("store.Stat", "group not found: "+group, fs.ErrNotExist)
return nil, core.E("store.Stat", core.Concat("group not found: ", group), fs.ErrNotExist)
}
return &kvFileInfo{name: group, isDir: true}, nil
}
@ -215,10 +242,12 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) {
}
// Open opens a key for reading.
//
// result := m.Open(...)
func (m *Medium) Open(p string) (fs.File, error) {
group, key := splitPath(p)
if key == "" {
return nil, coreerr.E("store.Open", "path must include group/key", fs.ErrInvalid)
return nil, core.E("store.Open", "path must include group/key", fs.ErrInvalid)
}
val, err := m.s.Get(group, key)
if err != nil {
@ -228,43 +257,53 @@ func (m *Medium) Open(p string) (fs.File, error) {
}
// Create creates or truncates a key. Bytes written to the returned
// writer are buffered in memory and persisted to the store on Close.
func (m *Medium) Create(p string) (goio.WriteCloser, error) {
	group, key := splitPath(p)
	if key == "" {
		return nil, core.E("store.Create", "path must include group/key", fs.ErrInvalid)
	}
	return &kvWriteCloser{s: m.s, group: group, key: key}, nil
}
// Append opens a key for appending. Content is stored on Close.
//
// The current value (if any) seeds the write buffer, so on Close the
// old and new bytes are written back together.
func (m *Medium) Append(p string) (goio.WriteCloser, error) {
	group, key := splitPath(p)
	if key == "" {
		return nil, core.E("store.Append", "path must include group/key", fs.ErrInvalid)
	}
	// Error deliberately ignored: a missing key yields "" and Append
	// then behaves like Create.
	existing, _ := m.s.Get(group, key)
	return &kvWriteCloser{s: m.s, group: group, key: key, data: []byte(existing)}, nil
}
// ReadStream returns a reader over the value stored at group/key.
//
// The value is fetched eagerly; the returned ReadCloser streams the
// in-memory copy and its Close is a no-op.
func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) {
	group, key := splitPath(p)
	if key == "" {
		return nil, core.E("store.ReadStream", "path must include group/key", fs.ErrInvalid)
	}
	val, err := m.s.Get(group, key)
	if err != nil {
		return nil, err
	}
	return goio.NopCloser(core.NewReader(val)), nil
}
// WriteStream returns a writer. Content is stored on Close.
//
// It delegates to Create, so an existing value is truncated.
func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) {
	return m.Create(p)
}
// Exists returns true if a group or key exists.
//
// result := m.Exists(...)
func (m *Medium) Exists(p string) bool {
group, key := splitPath(p)
if group == "" {
@ -279,6 +318,8 @@ func (m *Medium) Exists(p string) bool {
}
// IsDir returns true if the path is a group with entries.
//
// result := m.IsDir(...)
func (m *Medium) IsDir(p string) bool {
group, key := splitPath(p)
if key != "" || group == "" {
@ -296,16 +337,39 @@ type kvFileInfo struct {
isDir bool
}
// Name returns the entry's base name (the group or key name).
func (fi *kvFileInfo) Name() string { return fi.name }
// Size returns the stored value's length in bytes.
// Group (directory) infos leave this at the zero value.
func (fi *kvFileInfo) Size() int64 { return fi.size }
// Mode returns synthetic permission bits: groups are reported as
// directories with 0755, keys as regular files with 0644.
func (fi *kvFileInfo) Mode() fs.FileMode {
	mode := fs.FileMode(0644)
	if fi.isDir {
		mode = fs.ModeDir | 0755
	}
	return mode
}
// ModTime returns the zero time; the KV store does not track
// modification timestamps.
func (fi *kvFileInfo) ModTime() time.Time { return time.Time{} }
// IsDir reports whether this info describes a group (directory).
func (fi *kvFileInfo) IsDir() bool { return fi.isDir }
// Sys returns nil; there is no underlying system-specific data.
func (fi *kvFileInfo) Sys() any { return nil }
type kvDirEntry struct {
@ -314,14 +378,29 @@ type kvDirEntry struct {
size int64
}
// Name returns the entry's base name (the group or key name).
func (de *kvDirEntry) Name() string { return de.name }
// IsDir reports whether this entry is a group (directory).
func (de *kvDirEntry) IsDir() bool { return de.isDir }
// Type returns the entry's type bits: ModeDir for groups,
// zero (regular file) for keys.
func (de *kvDirEntry) Type() fs.FileMode {
	var mode fs.FileMode
	if de.isDir {
		mode = fs.ModeDir
	}
	return mode
}
// Info materializes a kvFileInfo carrying this entry's name, size and
// directory flag. It never returns an error.
func (de *kvDirEntry) Info() (fs.FileInfo, error) {
	fi := &kvFileInfo{
		name:  de.name,
		size:  de.size,
		isDir: de.isDir,
	}
	return fi, nil
}
@ -332,10 +411,16 @@ type kvFile struct {
offset int64
}
// Stat returns synthetic file info for this in-memory value: the key
// name and the byte length of its content (never a directory).
func (f *kvFile) Stat() (fs.FileInfo, error) {
	info := &kvFileInfo{
		name: f.name,
		size: int64(len(f.content)),
	}
	return info, nil
}
// Read documents the Read operation.
//
// result := f.Read(...)
func (f *kvFile) Read(b []byte) (int, error) {
if f.offset >= int64(len(f.content)) {
return 0, goio.EOF
@ -345,6 +430,9 @@ func (f *kvFile) Read(b []byte) (int, error) {
return n, nil
}
// Close is a no-op; kvFile reads from an in-memory copy and holds no
// resources to release.
func (f *kvFile) Close() error { return nil }
type kvWriteCloser struct {
@ -354,11 +442,17 @@ type kvWriteCloser struct {
data []byte
}
// Write buffers p in memory; nothing reaches the store until Close.
// It always reports the full length of p as written and never fails.
func (w *kvWriteCloser) Write(p []byte) (int, error) {
	n := len(p)
	w.data = append(w.data, p...)
	return n, nil
}
// Close flushes the buffered bytes to the store as the value for
// group/key, overwriting any existing value.
func (w *kvWriteCloser) Close() error {
	return w.s.Set(w.group, w.key, string(w.data))
}

View file

@ -16,7 +16,7 @@ func newTestMedium(t *testing.T) *Medium {
return m
}
func TestMedium_ReadWrite_Good(t *testing.T) {
func TestMedium_Medium_ReadWrite_Good(t *testing.T) {
m := newTestMedium(t)
err := m.Write("config/theme", "dark")
@ -27,19 +27,19 @@ func TestMedium_ReadWrite_Good(t *testing.T) {
assert.Equal(t, "dark", val)
}
func TestMedium_Read_Bad_NoKey(t *testing.T) {
func TestMedium_Medium_Read_NoKey_Bad(t *testing.T) {
m := newTestMedium(t)
_, err := m.Read("config")
assert.Error(t, err)
}
func TestMedium_Read_Bad_NotFound(t *testing.T) {
func TestMedium_Medium_Read_NotFound_Bad(t *testing.T) {
m := newTestMedium(t)
_, err := m.Read("config/missing")
assert.Error(t, err)
}
func TestMedium_IsFile_Good(t *testing.T) {
func TestMedium_Medium_IsFile_Good(t *testing.T) {
m := newTestMedium(t)
_ = m.Write("grp/key", "val")
@ -48,7 +48,7 @@ func TestMedium_IsFile_Good(t *testing.T) {
assert.False(t, m.IsFile("grp"))
}
func TestMedium_Delete_Good(t *testing.T) {
func TestMedium_Medium_Delete_Good(t *testing.T) {
m := newTestMedium(t)
_ = m.Write("grp/key", "val")
@ -57,7 +57,7 @@ func TestMedium_Delete_Good(t *testing.T) {
assert.False(t, m.IsFile("grp/key"))
}
func TestMedium_Delete_Bad_NonEmptyGroup(t *testing.T) {
func TestMedium_Medium_Delete_NonEmptyGroup_Bad(t *testing.T) {
m := newTestMedium(t)
_ = m.Write("grp/key", "val")
@ -65,7 +65,7 @@ func TestMedium_Delete_Bad_NonEmptyGroup(t *testing.T) {
assert.Error(t, err)
}
func TestMedium_DeleteAll_Good(t *testing.T) {
func TestMedium_Medium_DeleteAll_Good(t *testing.T) {
m := newTestMedium(t)
_ = m.Write("grp/a", "1")
_ = m.Write("grp/b", "2")
@ -75,7 +75,7 @@ func TestMedium_DeleteAll_Good(t *testing.T) {
assert.False(t, m.Exists("grp"))
}
func TestMedium_Rename_Good(t *testing.T) {
func TestMedium_Medium_Rename_Good(t *testing.T) {
m := newTestMedium(t)
_ = m.Write("old/key", "val")
@ -88,7 +88,7 @@ func TestMedium_Rename_Good(t *testing.T) {
assert.False(t, m.IsFile("old/key"))
}
func TestMedium_List_Good_Groups(t *testing.T) {
func TestMedium_Medium_List_Groups_Good(t *testing.T) {
m := newTestMedium(t)
_ = m.Write("alpha/a", "1")
_ = m.Write("beta/b", "2")
@ -106,7 +106,7 @@ func TestMedium_List_Good_Groups(t *testing.T) {
assert.True(t, names["beta"])
}
func TestMedium_List_Good_Keys(t *testing.T) {
func TestMedium_Medium_List_Keys_Good(t *testing.T) {
m := newTestMedium(t)
_ = m.Write("grp/a", "1")
_ = m.Write("grp/b", "22")
@ -116,7 +116,7 @@ func TestMedium_List_Good_Keys(t *testing.T) {
assert.Len(t, entries, 2)
}
func TestMedium_Stat_Good(t *testing.T) {
func TestMedium_Medium_Stat_Good(t *testing.T) {
m := newTestMedium(t)
_ = m.Write("grp/key", "hello")
@ -132,7 +132,7 @@ func TestMedium_Stat_Good(t *testing.T) {
assert.False(t, info.IsDir())
}
func TestMedium_Exists_IsDir_Good(t *testing.T) {
func TestMedium_Medium_Exists_IsDir_Good(t *testing.T) {
m := newTestMedium(t)
_ = m.Write("grp/key", "val")
@ -143,7 +143,7 @@ func TestMedium_Exists_IsDir_Good(t *testing.T) {
assert.False(t, m.Exists("nope"))
}
func TestMedium_Open_Read_Good(t *testing.T) {
func TestMedium_Medium_Open_Read_Good(t *testing.T) {
m := newTestMedium(t)
_ = m.Write("grp/key", "hello world")
@ -156,7 +156,7 @@ func TestMedium_Open_Read_Good(t *testing.T) {
assert.Equal(t, "hello world", string(data))
}
func TestMedium_CreateClose_Good(t *testing.T) {
func TestMedium_Medium_CreateClose_Good(t *testing.T) {
m := newTestMedium(t)
w, err := m.Create("grp/key")
@ -169,7 +169,7 @@ func TestMedium_CreateClose_Good(t *testing.T) {
assert.Equal(t, "streamed", val)
}
func TestMedium_Append_Good(t *testing.T) {
func TestMedium_Medium_Append_Good(t *testing.T) {
m := newTestMedium(t)
_ = m.Write("grp/key", "hello")
@ -183,7 +183,7 @@ func TestMedium_Append_Good(t *testing.T) {
assert.Equal(t, "hello world", val)
}
func TestMedium_AsMedium_Good(t *testing.T) {
func TestMedium_Medium_AsMedium_Good(t *testing.T) {
s, err := New(":memory:")
require.NoError(t, err)
defer s.Close()

View file

@ -2,16 +2,14 @@ package store
import (
"database/sql"
"errors"
"strings"
"text/template"
coreerr "forge.lthn.ai/core/go-log"
core "dappco.re/go/core"
_ "modernc.org/sqlite"
)
// ErrNotFound is returned when a key does not exist in the store.
var ErrNotFound = errors.New("store: not found")
var ErrNotFound = core.E("store.ErrNotFound", "key not found", nil)
// Store is a group-namespaced key-value store backed by SQLite.
type Store struct {
@ -27,11 +25,11 @@ type Store struct {
func New(dbPath string) (*Store, error) {
db, err := sql.Open("sqlite", dbPath)
if err != nil {
return nil, coreerr.E("store.New", "open db", err)
return nil, core.E("store.New", "open db", err)
}
if _, err := db.Exec("PRAGMA journal_mode=WAL"); err != nil {
db.Close()
return nil, coreerr.E("store.New", "WAL mode", err)
return nil, core.E("store.New", "WAL mode", err)
}
if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS kv (
grp TEXT NOT NULL,
@ -40,30 +38,36 @@ func New(dbPath string) (*Store, error) {
PRIMARY KEY (grp, key)
)`); err != nil {
db.Close()
return nil, coreerr.E("store.New", "create schema", err)
return nil, core.E("store.New", "create schema", err)
}
return &Store{db: db}, nil
}
// Close closes the underlying database connection.
func (s *Store) Close() error {
	return s.db.Close()
}
// Get retrieves a value by group and key.
//
// A missing row is reported as an error wrapping ErrNotFound, so
// callers can distinguish "absent" from a query failure.
func (s *Store) Get(group, key string) (string, error) {
	var val string
	err := s.db.QueryRow("SELECT value FROM kv WHERE grp = ? AND key = ?", group, key).Scan(&val)
	// database/sql returns sql.ErrNoRows unwrapped from Scan, so a
	// direct comparison is safe here.
	if err == sql.ErrNoRows {
		return "", core.E("store.Get", core.Concat("not found: ", group, "/", key), ErrNotFound)
	}
	if err != nil {
		return "", core.E("store.Get", "query", err)
	}
	return val, nil
}
// Set stores a value by group and key, overwriting if exists.
//
// result := s.Set(...)
func (s *Store) Set(group, key, value string) error {
_, err := s.db.Exec(
`INSERT INTO kv (grp, key, value) VALUES (?, ?, ?)
@ -71,44 +75,52 @@ func (s *Store) Set(group, key, value string) error {
group, key, value,
)
if err != nil {
return coreerr.E("store.Set", "exec", err)
return core.E("store.Set", "exec", err)
}
return nil
}
// Delete removes a single key from a group.
//
// Deleting a key that does not exist is not an error; the DELETE
// simply affects zero rows.
func (s *Store) Delete(group, key string) error {
	_, err := s.db.Exec("DELETE FROM kv WHERE grp = ? AND key = ?", group, key)
	if err != nil {
		return core.E("store.Delete", "exec", err)
	}
	return nil
}
// Count returns the number of keys in a group.
// An empty or unknown group counts as zero.
func (s *Store) Count(group string) (int, error) {
	var n int
	err := s.db.QueryRow("SELECT COUNT(*) FROM kv WHERE grp = ?", group).Scan(&n)
	if err != nil {
		return 0, core.E("store.Count", "query", err)
	}
	return n, nil
}
// DeleteGroup removes all keys in a group.
// Removing a group that has no keys is not an error.
func (s *Store) DeleteGroup(group string) error {
	_, err := s.db.Exec("DELETE FROM kv WHERE grp = ?", group)
	if err != nil {
		return core.E("store.DeleteGroup", "exec", err)
	}
	return nil
}
// GetAll returns all key-value pairs in a group.
//
// result := s.GetAll(...)
func (s *Store) GetAll(group string) (map[string]string, error) {
rows, err := s.db.Query("SELECT key, value FROM kv WHERE grp = ?", group)
if err != nil {
return nil, coreerr.E("store.GetAll", "query", err)
return nil, core.E("store.GetAll", "query", err)
}
defer rows.Close()
@ -116,12 +128,12 @@ func (s *Store) GetAll(group string) (map[string]string, error) {
for rows.Next() {
var k, v string
if err := rows.Scan(&k, &v); err != nil {
return nil, coreerr.E("store.GetAll", "scan", err)
return nil, core.E("store.GetAll", "scan", err)
}
result[k] = v
}
if err := rows.Err(); err != nil {
return nil, coreerr.E("store.GetAll", "rows", err)
return nil, core.E("store.GetAll", "rows", err)
}
return result, nil
}
@ -135,7 +147,7 @@ func (s *Store) GetAll(group string) (map[string]string, error) {
func (s *Store) Render(tmplStr, group string) (string, error) {
rows, err := s.db.Query("SELECT key, value FROM kv WHERE grp = ?", group)
if err != nil {
return "", coreerr.E("store.Render", "query", err)
return "", core.E("store.Render", "query", err)
}
defer rows.Close()
@ -143,21 +155,21 @@ func (s *Store) Render(tmplStr, group string) (string, error) {
for rows.Next() {
var k, v string
if err := rows.Scan(&k, &v); err != nil {
return "", coreerr.E("store.Render", "scan", err)
return "", core.E("store.Render", "scan", err)
}
vars[k] = v
}
if err := rows.Err(); err != nil {
return "", coreerr.E("store.Render", "rows", err)
return "", core.E("store.Render", "rows", err)
}
tmpl, err := template.New("render").Parse(tmplStr)
if err != nil {
return "", coreerr.E("store.Render", "parse template", err)
return "", core.E("store.Render", "parse template", err)
}
var b strings.Builder
if err := tmpl.Execute(&b, vars); err != nil {
return "", coreerr.E("store.Render", "execute template", err)
b := core.NewBuilder()
if err := tmpl.Execute(b, vars); err != nil {
return "", core.E("store.Render", "execute template", err)
}
return b.String(), nil
}

View file

@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/require"
)
func TestSetGet_Good(t *testing.T) {
func TestStore_SetGet_Good(t *testing.T) {
s, err := New(":memory:")
require.NoError(t, err)
defer s.Close()
@ -20,7 +20,7 @@ func TestSetGet_Good(t *testing.T) {
assert.Equal(t, "dark", val)
}
func TestGet_Bad_NotFound(t *testing.T) {
func TestStore_Get_NotFound_Bad(t *testing.T) {
s, _ := New(":memory:")
defer s.Close()
@ -28,7 +28,7 @@ func TestGet_Bad_NotFound(t *testing.T) {
assert.Error(t, err)
}
func TestDelete_Good(t *testing.T) {
func TestStore_Delete_Good(t *testing.T) {
s, _ := New(":memory:")
defer s.Close()
@ -40,7 +40,7 @@ func TestDelete_Good(t *testing.T) {
assert.Error(t, err)
}
func TestCount_Good(t *testing.T) {
func TestStore_Count_Good(t *testing.T) {
s, _ := New(":memory:")
defer s.Close()
@ -53,7 +53,7 @@ func TestCount_Good(t *testing.T) {
assert.Equal(t, 2, n)
}
func TestDeleteGroup_Good(t *testing.T) {
func TestStore_DeleteGroup_Good(t *testing.T) {
s, _ := New(":memory:")
defer s.Close()
@ -66,7 +66,7 @@ func TestDeleteGroup_Good(t *testing.T) {
assert.Equal(t, 0, n)
}
func TestGetAll_Good(t *testing.T) {
func TestStore_GetAll_Good(t *testing.T) {
s, _ := New(":memory:")
defer s.Close()
@ -79,7 +79,7 @@ func TestGetAll_Good(t *testing.T) {
assert.Equal(t, map[string]string{"a": "1", "b": "2"}, all)
}
func TestGetAll_Good_Empty(t *testing.T) {
func TestStore_GetAll_Empty_Good(t *testing.T) {
s, _ := New(":memory:")
defer s.Close()
@ -88,7 +88,7 @@ func TestGetAll_Good_Empty(t *testing.T) {
assert.Empty(t, all)
}
func TestRender_Good(t *testing.T) {
func TestStore_Render_Good(t *testing.T) {
s, _ := New(":memory:")
defer s.Close()

View file

@ -4,11 +4,9 @@ import (
"crypto/sha256"
"encoding/hex"
"io/fs"
"strings"
"sync"
core "dappco.re/go/core"
coreerr "forge.lthn.ai/core/go-log"
"dappco.re/go/core/io"
)
@ -46,7 +44,7 @@ type Service struct {
func New(c *core.Core, crypt ...cryptProvider) (any, error) {
home := workspaceHome()
if home == "" {
return nil, coreerr.E("workspace.New", "failed to determine home directory", fs.ErrNotExist)
return nil, core.E("workspace.New", "failed to determine home directory", fs.ErrNotExist)
}
rootPath := core.Path(home, ".core", "workspaces")
@ -61,7 +59,7 @@ func New(c *core.Core, crypt ...cryptProvider) (any, error) {
}
if err := s.medium.EnsureDir(rootPath); err != nil {
return nil, coreerr.E("workspace.New", "failed to ensure root directory", err)
return nil, core.E("workspace.New", "failed to ensure root directory", err)
}
return s, nil
@ -70,12 +68,14 @@ func New(c *core.Core, crypt ...cryptProvider) (any, error) {
// CreateWorkspace creates a new encrypted workspace.
// Identifier is hashed (SHA-256) to create the directory name.
// A PGP keypair is generated using the password.
//
// result := s.CreateWorkspace(...)
func (s *Service) CreateWorkspace(identifier, password string) (string, error) {
s.mu.Lock()
defer s.mu.Unlock()
if s.crypt == nil {
return "", coreerr.E("workspace.CreateWorkspace", "crypt service not available", nil)
return "", core.E("workspace.CreateWorkspace", "crypt service not available", nil)
}
hash := sha256.Sum256([]byte(identifier))
@ -86,28 +86,30 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) {
}
if s.medium.Exists(wsPath) {
return "", coreerr.E("workspace.CreateWorkspace", "workspace already exists", nil)
return "", core.E("workspace.CreateWorkspace", "workspace already exists", nil)
}
for _, d := range []string{"config", "log", "data", "files", "keys"} {
if err := s.medium.EnsureDir(core.Path(wsPath, d)); err != nil {
return "", coreerr.E("workspace.CreateWorkspace", "failed to create directory: "+d, err)
return "", core.E("workspace.CreateWorkspace", core.Concat("failed to create directory: ", d), err)
}
}
privKey, err := s.crypt.CreateKeyPair(identifier, password)
if err != nil {
return "", coreerr.E("workspace.CreateWorkspace", "failed to generate keys", err)
return "", core.E("workspace.CreateWorkspace", "failed to generate keys", err)
}
if err := s.medium.WriteMode(core.Path(wsPath, "keys", "private.key"), privKey, 0600); err != nil {
return "", coreerr.E("workspace.CreateWorkspace", "failed to save private key", err)
return "", core.E("workspace.CreateWorkspace", "failed to save private key", err)
}
return wsID, nil
}
// SwitchWorkspace changes the active workspace.
//
// result := s.SwitchWorkspace(...)
func (s *Service) SwitchWorkspace(name string) error {
s.mu.Lock()
defer s.mu.Unlock()
@ -117,7 +119,7 @@ func (s *Service) SwitchWorkspace(name string) error {
return err
}
if !s.medium.IsDir(wsPath) {
return coreerr.E("workspace.SwitchWorkspace", "workspace not found: "+name, nil)
return core.E("workspace.SwitchWorkspace", core.Concat("workspace not found: ", name), nil)
}
s.activeWorkspace = core.PathBase(wsPath)
@ -128,20 +130,22 @@ func (s *Service) SwitchWorkspace(name string) error {
// or an error if no workspace is active.
// activeFilePath resolves filename inside the active workspace's
// "files" directory. op labels errors with the calling operation.
//
// It rejects: no active workspace; a path that escapes the files root
// (traversal attempt); and an empty filename (which would resolve to
// the root itself).
func (s *Service) activeFilePath(op, filename string) (string, error) {
	if s.activeWorkspace == "" {
		return "", core.E(op, "no active workspace", nil)
	}
	filesRoot := core.Path(s.rootPath, s.activeWorkspace, "files")
	path, err := joinWithinRoot(filesRoot, filename)
	if err != nil {
		return "", core.E(op, "file path escapes workspace files", fs.ErrPermission)
	}
	if path == filesRoot {
		return "", core.E(op, "filename is required", fs.ErrInvalid)
	}
	return path, nil
}
// WorkspaceFileGet retrieves the content of a file from the active workspace.
//
// result := s.WorkspaceFileGet(...)
func (s *Service) WorkspaceFileGet(filename string) (string, error) {
s.mu.RLock()
defer s.mu.RUnlock()
@ -154,6 +158,8 @@ func (s *Service) WorkspaceFileGet(filename string) (string, error) {
}
// WorkspaceFileSet saves content to a file in the active workspace.
//
// result := s.WorkspaceFileSet(...)
func (s *Service) WorkspaceFileSet(filename, content string) error {
s.mu.Lock()
defer s.mu.Unlock()
@ -166,6 +172,8 @@ func (s *Service) WorkspaceFileSet(filename, content string) error {
}
// HandleIPCEvents handles workspace-related IPC messages.
//
// result := s.HandleIPCEvents(...)
func (s *Service) HandleIPCEvents(c *core.Core, msg core.Message) core.Result {
switch m := msg.(type) {
case map[string]any:
@ -203,7 +211,7 @@ func workspaceHome() string {
func joinWithinRoot(root string, parts ...string) (string, error) {
candidate := core.Path(append([]string{root}, parts...)...)
sep := core.Env("DS")
if candidate == root || strings.HasPrefix(candidate, root+sep) {
if candidate == root || core.HasPrefix(candidate, root+sep) {
return candidate, nil
}
return "", fs.ErrPermission
@ -211,14 +219,14 @@ func joinWithinRoot(root string, parts ...string) (string, error) {
// workspacePath resolves a workspace name to its directory under the
// service root. op labels errors with the calling operation.
//
// It rejects: an empty name; a path escaping the root (traversal); and
// any name that resolves deeper than one level below the root.
func (s *Service) workspacePath(op, name string) (string, error) {
	if name == "" {
		return "", core.E(op, "workspace name is required", fs.ErrInvalid)
	}
	path, err := joinWithinRoot(s.rootPath, name)
	if err != nil {
		return "", core.E(op, "workspace path escapes root", err)
	}
	if core.PathDir(path) != s.rootPath {
		return "", core.E(op, core.Concat("invalid workspace name: ", name), fs.ErrPermission)
	}
	return path, nil
}

View file

@ -31,7 +31,7 @@ func newTestService(t *testing.T) (*Service, string) {
return svc.(*Service), tempHome
}
func TestWorkspace_Good_RoundTrip(t *testing.T) {
func TestService_Workspace_RoundTrip_Good(t *testing.T) {
s, tempHome := newTestService(t)
id, err := s.CreateWorkspace("test-user", "pass123")
@ -55,7 +55,7 @@ func TestWorkspace_Good_RoundTrip(t *testing.T) {
assert.Equal(t, "top secret info", got)
}
func TestSwitchWorkspace_Bad_TraversalBlocked(t *testing.T) {
func TestService_SwitchWorkspace_TraversalBlocked_Bad(t *testing.T) {
s, tempHome := newTestService(t)
outside := core.Path(tempHome, ".core", "escaped")
@ -66,7 +66,7 @@ func TestSwitchWorkspace_Bad_TraversalBlocked(t *testing.T) {
assert.Empty(t, s.activeWorkspace)
}
func TestWorkspaceFileSet_Bad_TraversalBlocked(t *testing.T) {
func TestService_WorkspaceFileSet_TraversalBlocked_Bad(t *testing.T) {
s, tempHome := newTestService(t)
id, err := s.CreateWorkspace("test-user", "pass123")