refactor(ax): make docs and helpers example-driven
Some checks failed
CI / test (push) Failing after 2s
CI / auto-fix (push) Failing after 0s
CI / auto-merge (push) Failing after 0s

This commit is contained in:
Virgil 2026-03-30 20:47:41 +00:00
parent b19617c371
commit 9fb978dc75
12 changed files with 135 additions and 116 deletions

View file

@ -1,9 +1,14 @@
// Package datanode provides an in-memory io.Medium backed by Borg's DataNode.
//
// medium := datanode.New()
// _ = medium.Write("jobs/run.log", "started")
// snapshot, _ := medium.Snapshot()
// restored, _ := datanode.FromTar(snapshot)
//
// DataNode is an in-memory fs.FS that serialises to tar. Wrapping it as a
// Medium lets any code that works with io.Medium transparently operate on
// an in-memory filesystem that can be snapshotted, shipped as a crash report,
// or wrapped in a TIM container for runc execution.
// Medium lets any code that works with io.Medium transparently operate on an
// in-memory filesystem that can be snapshotted, shipped as a crash report, or
// wrapped in a TIM container for runc execution.
package datanode
import (
@ -39,9 +44,7 @@ type Medium struct {
mu sync.RWMutex
}
// Use New when you need an in-memory Medium that snapshots to tar.
//
// Example usage:
// New creates an in-memory Medium that snapshots to tar.
//
// medium := datanode.New()
// _ = medium.Write("jobs/run.log", "started")
@ -52,9 +55,7 @@ func New() *Medium {
}
}
// Use FromTar(snapshot) to restore a Medium from tar bytes.
//
// Example usage:
// FromTar restores a Medium from tar bytes.
//
// sourceMedium := datanode.New()
// snapshot, _ := sourceMedium.Snapshot()
@ -103,8 +104,8 @@ func (m *Medium) DataNode() *borgdatanode.DataNode {
return m.dataNode
}
// cleanPath normalises a path: strips leading slash, cleans traversal.
func cleanPath(filePath string) string {
// normaliseEntryPath normalises a path: strips the leading slash and cleans traversal.
func normaliseEntryPath(filePath string) string {
filePath = core.TrimPrefix(filePath, "/")
filePath = path.Clean(filePath)
if filePath == "." {
@ -119,7 +120,7 @@ func (m *Medium) Read(filePath string) (string, error) {
m.mu.RLock()
defer m.mu.RUnlock()
filePath = cleanPath(filePath)
filePath = normaliseEntryPath(filePath)
f, err := m.dataNode.Open(filePath)
if err != nil {
return "", core.E("datanode.Read", core.Concat("not found: ", filePath), fs.ErrNotExist)
@ -145,7 +146,7 @@ func (m *Medium) Write(filePath, content string) error {
m.mu.Lock()
defer m.mu.Unlock()
filePath = cleanPath(filePath)
filePath = normaliseEntryPath(filePath)
if filePath == "" {
return core.E("datanode.Write", "empty path", fs.ErrInvalid)
}
@ -164,7 +165,7 @@ func (m *Medium) EnsureDir(filePath string) error {
m.mu.Lock()
defer m.mu.Unlock()
filePath = cleanPath(filePath)
filePath = normaliseEntryPath(filePath)
if filePath == "" {
return nil
}
@ -188,7 +189,7 @@ func (m *Medium) IsFile(filePath string) bool {
m.mu.RLock()
defer m.mu.RUnlock()
filePath = cleanPath(filePath)
filePath = normaliseEntryPath(filePath)
info, err := m.dataNode.Stat(filePath)
return err == nil && !info.IsDir()
}
@ -205,7 +206,7 @@ func (m *Medium) Delete(filePath string) error {
m.mu.Lock()
defer m.mu.Unlock()
filePath = cleanPath(filePath)
filePath = normaliseEntryPath(filePath)
if filePath == "" {
return core.E("datanode.Delete", "cannot delete root", fs.ErrPermission)
}
@ -252,7 +253,7 @@ func (m *Medium) DeleteAll(filePath string) error {
m.mu.Lock()
defer m.mu.Unlock()
filePath = cleanPath(filePath)
filePath = normaliseEntryPath(filePath)
if filePath == "" {
return core.E("datanode.DeleteAll", "cannot delete root", fs.ErrPermission)
}
@ -301,8 +302,8 @@ func (m *Medium) Rename(oldPath, newPath string) error {
m.mu.Lock()
defer m.mu.Unlock()
oldPath = cleanPath(oldPath)
newPath = cleanPath(newPath)
oldPath = normaliseEntryPath(oldPath)
newPath = normaliseEntryPath(newPath)
// Check if source is a file
info, err := m.dataNode.Stat(oldPath)
@ -366,7 +367,7 @@ func (m *Medium) List(filePath string) ([]fs.DirEntry, error) {
m.mu.RLock()
defer m.mu.RUnlock()
filePath = cleanPath(filePath)
filePath = normaliseEntryPath(filePath)
entries, err := m.dataNode.ReadDir(filePath)
if err != nil {
@ -413,7 +414,7 @@ func (m *Medium) Stat(filePath string) (fs.FileInfo, error) {
m.mu.RLock()
defer m.mu.RUnlock()
filePath = cleanPath(filePath)
filePath = normaliseEntryPath(filePath)
if filePath == "" {
return &fileInfo{name: ".", isDir: true, mode: fs.ModeDir | 0755}, nil
}
@ -433,12 +434,12 @@ func (m *Medium) Open(filePath string) (fs.File, error) {
m.mu.RLock()
defer m.mu.RUnlock()
filePath = cleanPath(filePath)
filePath = normaliseEntryPath(filePath)
return m.dataNode.Open(filePath)
}
func (m *Medium) Create(filePath string) (goio.WriteCloser, error) {
filePath = cleanPath(filePath)
filePath = normaliseEntryPath(filePath)
if filePath == "" {
return nil, core.E("datanode.Create", "empty path", fs.ErrInvalid)
}
@ -446,7 +447,7 @@ func (m *Medium) Create(filePath string) (goio.WriteCloser, error) {
}
func (m *Medium) Append(filePath string) (goio.WriteCloser, error) {
filePath = cleanPath(filePath)
filePath = normaliseEntryPath(filePath)
if filePath == "" {
return nil, core.E("datanode.Append", "empty path", fs.ErrInvalid)
}
@ -471,7 +472,7 @@ func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) {
m.mu.RLock()
defer m.mu.RUnlock()
filePath = cleanPath(filePath)
filePath = normaliseEntryPath(filePath)
f, err := m.dataNode.Open(filePath)
if err != nil {
return nil, core.E("datanode.ReadStream", core.Concat("not found: ", filePath), fs.ErrNotExist)
@ -487,7 +488,7 @@ func (m *Medium) Exists(filePath string) bool {
m.mu.RLock()
defer m.mu.RUnlock()
filePath = cleanPath(filePath)
filePath = normaliseEntryPath(filePath)
if filePath == "" {
return true // root always exists
}
@ -502,7 +503,7 @@ func (m *Medium) IsDir(filePath string) bool {
m.mu.RLock()
defer m.mu.RUnlock()
filePath = cleanPath(filePath)
filePath = normaliseEntryPath(filePath)
if filePath == "" {
return true
}

7
doc.go
View file

@ -1,4 +1,9 @@
// Package io defines the storage abstraction used across CoreGO.
// Package io defines the storage boundary used across CoreGO.
//
// medium, _ := io.NewSandboxed("/srv/app")
// _ = medium.Write("config/app.yaml", "port: 8080")
// backup, _ := io.NewSandboxed("/srv/backup")
// _ = io.Copy(medium, "data/report.json", backup, "daily/report.json")
//
// Callers work against Medium so the same code can read and write state from
// sandboxed local paths, in-memory nodes, SQLite, S3, or other backends

View file

@ -1,4 +1,8 @@
// Package local provides a local filesystem implementation of the io.Medium interface.
// Package local provides the local filesystem implementation of io.Medium.
//
// medium, _ := local.New("/srv/app")
// _ = medium.Write("config/app.yaml", "port: 8080")
// content, _ := medium.Read("config/app.yaml")
package local
import (
@ -16,10 +20,9 @@ type Medium struct {
var unrestrictedFileSystem = (&core.Fs{}).NewUnrestricted()
// Use New to sandbox filesystem access under a root directory.
// Pass "/" for full filesystem access, or a specific path to sandbox.
// New creates a filesystem rooted at root.
//
// Example usage:
// Pass "/" for full filesystem access, or a project path to sandbox.
//
// medium, _ := local.New("/srv/app")
// _ = medium.Write("config/app.yaml", "port: 8080")

View file

@ -1,6 +1,12 @@
// Package node provides an in-memory filesystem implementation of io.Medium
// ported from Borg's DataNode. It stores files in memory with implicit
// directory structure and supports tar serialisation.
// Package node provides an in-memory filesystem implementation of io.Medium.
//
// nodeTree := node.New()
// nodeTree.AddData("config/app.yaml", []byte("port: 8080"))
// snapshot, _ := nodeTree.ToTar()
// restored, _ := node.FromTar(snapshot)
//
// It stores files in memory with implicit directory structure and supports
// tar serialisation.
package node
import (

View file

@ -1,15 +1,14 @@
// Package sigil provides the Sigil transformation framework for composable,
// reversible data transformations.
//
// Sigils are the core abstraction - each sigil implements a specific transformation
// (encoding, compression, hashing, encryption) with a uniform interface. Sigils can
// be chained together to create transformation pipelines.
//
// Example usage:
//
// hexSigil, _ := sigil.NewSigil("hex")
// base64Sigil, _ := sigil.NewSigil("base64")
// result, _ := sigil.Transmute(data, []sigil.Sigil{hexSigil, base64Sigil})
// gzipSigil, _ := sigil.NewSigil("gzip")
// encoded, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil})
// decoded, _ := sigil.Untransmute(encoded, []sigil.Sigil{hexSigil, gzipSigil})
//
// Sigils are the core abstraction - each sigil implements a specific
// transformation (encoding, compression, hashing, encryption) with a uniform
// interface. Sigils can be chained together to create transformation pipelines.
package sigil
import core "dappco.re/go/core"

View file

@ -1,4 +1,7 @@
// Package sqlite provides a SQLite-backed implementation of the io.Medium interface.
// Package sqlite persists io.Medium content in a SQLite database.
//
// medium, _ := sqlite.New(sqlite.Options{Path: ":memory:"})
// _ = medium.Write("config/app.yaml", "port: 8080")
package sqlite
import (
@ -23,7 +26,7 @@ type Medium struct {
var _ coreio.Medium = (*Medium)(nil)
// Options configures a Medium.
// Options configures a SQLite-backed Medium.
type Options struct {
// Path is the SQLite database path. Use ":memory:" for tests.
Path string
@ -38,10 +41,7 @@ func normaliseTableName(table string) string {
return table
}
// Use New to point the medium at a SQLite database path.
// Use ":memory:" for an in-memory database.
//
// Example usage:
// New opens a SQLite-backed Medium at the provided database path.
//
// medium, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"})
// _ = medium.Write("config/app.yaml", "port: 8080")
@ -88,9 +88,9 @@ func (m *Medium) Close() error {
return nil
}
// cleanPath normalises a path for consistent storage.
// normaliseEntryPath normalises a path for consistent storage.
// Uses a leading "/" before Clean to sandbox traversal attempts.
func cleanPath(filePath string) string {
func normaliseEntryPath(filePath string) string {
clean := path.Clean("/" + filePath)
if clean == "/" {
return ""
@ -99,7 +99,7 @@ func cleanPath(filePath string) string {
}
func (m *Medium) Read(filePath string) (string, error) {
key := cleanPath(filePath)
key := normaliseEntryPath(filePath)
if key == "" {
return "", core.E("sqlite.Read", "path is required", fs.ErrInvalid)
}
@ -127,7 +127,7 @@ func (m *Medium) Write(filePath, content string) error {
// WriteMode saves the given content with explicit permissions.
func (m *Medium) WriteMode(filePath, content string, mode fs.FileMode) error {
key := cleanPath(filePath)
key := normaliseEntryPath(filePath)
if key == "" {
return core.E("sqlite.WriteMode", "path is required", fs.ErrInvalid)
}
@ -145,7 +145,7 @@ func (m *Medium) WriteMode(filePath, content string, mode fs.FileMode) error {
// EnsureDir makes sure a directory exists, creating it if necessary.
func (m *Medium) EnsureDir(filePath string) error {
key := cleanPath(filePath)
key := normaliseEntryPath(filePath)
if key == "" {
// Root always "exists"
return nil
@ -163,7 +163,7 @@ func (m *Medium) EnsureDir(filePath string) error {
}
func (m *Medium) IsFile(filePath string) bool {
key := cleanPath(filePath)
key := normaliseEntryPath(filePath)
if key == "" {
return false
}
@ -188,7 +188,7 @@ func (m *Medium) FileSet(filePath, content string) error {
// Delete removes a file or empty directory.
func (m *Medium) Delete(filePath string) error {
key := cleanPath(filePath)
key := normaliseEntryPath(filePath)
if key == "" {
return core.E("sqlite.Delete", "path is required", fs.ErrInvalid)
}
@ -233,7 +233,7 @@ func (m *Medium) Delete(filePath string) error {
// DeleteAll removes a file or directory and all its contents recursively.
func (m *Medium) DeleteAll(filePath string) error {
key := cleanPath(filePath)
key := normaliseEntryPath(filePath)
if key == "" {
return core.E("sqlite.DeleteAll", "path is required", fs.ErrInvalid)
}
@ -257,8 +257,8 @@ func (m *Medium) DeleteAll(filePath string) error {
// Rename moves a file or directory from oldPath to newPath.
func (m *Medium) Rename(oldPath, newPath string) error {
oldKey := cleanPath(oldPath)
newKey := cleanPath(newPath)
oldKey := normaliseEntryPath(oldPath)
newKey := normaliseEntryPath(newPath)
if oldKey == "" || newKey == "" {
return core.E("sqlite.Rename", "both old and new paths are required", fs.ErrInvalid)
}
@ -355,7 +355,7 @@ func (m *Medium) Rename(oldPath, newPath string) error {
// List returns the directory entries for the given path.
func (m *Medium) List(filePath string) ([]fs.DirEntry, error) {
prefix := cleanPath(filePath)
prefix := normaliseEntryPath(filePath)
if prefix != "" {
prefix += "/"
}
@ -430,7 +430,7 @@ func (m *Medium) List(filePath string) ([]fs.DirEntry, error) {
}
func (m *Medium) Stat(filePath string) (fs.FileInfo, error) {
key := cleanPath(filePath)
key := normaliseEntryPath(filePath)
if key == "" {
return nil, core.E("sqlite.Stat", "path is required", fs.ErrInvalid)
}
@ -460,7 +460,7 @@ func (m *Medium) Stat(filePath string) (fs.FileInfo, error) {
}
func (m *Medium) Open(filePath string) (fs.File, error) {
key := cleanPath(filePath)
key := normaliseEntryPath(filePath)
if key == "" {
return nil, core.E("sqlite.Open", "path is required", fs.ErrInvalid)
}
@ -491,7 +491,7 @@ func (m *Medium) Open(filePath string) (fs.File, error) {
}
func (m *Medium) Create(filePath string) (goio.WriteCloser, error) {
key := cleanPath(filePath)
key := normaliseEntryPath(filePath)
if key == "" {
return nil, core.E("sqlite.Create", "path is required", fs.ErrInvalid)
}
@ -502,7 +502,7 @@ func (m *Medium) Create(filePath string) (goio.WriteCloser, error) {
}
func (m *Medium) Append(filePath string) (goio.WriteCloser, error) {
key := cleanPath(filePath)
key := normaliseEntryPath(filePath)
if key == "" {
return nil, core.E("sqlite.Append", "path is required", fs.ErrInvalid)
}
@ -523,7 +523,7 @@ func (m *Medium) Append(filePath string) (goio.WriteCloser, error) {
}
func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) {
key := cleanPath(filePath)
key := normaliseEntryPath(filePath)
if key == "" {
return nil, core.E("sqlite.ReadStream", "path is required", fs.ErrInvalid)
}
@ -551,7 +551,7 @@ func (m *Medium) WriteStream(filePath string) (goio.WriteCloser, error) {
}
func (m *Medium) Exists(filePath string) bool {
key := cleanPath(filePath)
key := normaliseEntryPath(filePath)
if key == "" {
// Root always exists
return true
@ -568,7 +568,7 @@ func (m *Medium) Exists(filePath string) bool {
}
func (m *Medium) IsDir(filePath string) bool {
key := cleanPath(filePath)
key := normaliseEntryPath(filePath)
if key == "" {
return false
}

View file

@ -597,17 +597,17 @@ func TestSqlite_IsDir_Good(t *testing.T) {
assert.False(t, m.IsDir(""))
}
// --- cleanPath Tests ---
// --- normaliseEntryPath Tests ---
func TestSqlite_CleanPath_Good(t *testing.T) {
assert.Equal(t, "file.txt", cleanPath("file.txt"))
assert.Equal(t, "dir/file.txt", cleanPath("dir/file.txt"))
assert.Equal(t, "file.txt", cleanPath("/file.txt"))
assert.Equal(t, "file.txt", cleanPath("../file.txt"))
assert.Equal(t, "file.txt", cleanPath("dir/../file.txt"))
assert.Equal(t, "", cleanPath(""))
assert.Equal(t, "", cleanPath("."))
assert.Equal(t, "", cleanPath("/"))
// TestSqlite_NormaliseEntryPath_Good verifies the path-normalisation rules of
// normaliseEntryPath: plain and nested paths pass through unchanged, a leading
// "/" is stripped, ".." traversal is resolved within the sandbox (so
// "../file.txt" and "dir/../file.txt" both collapse to "file.txt"), and
// root-like inputs ("", ".", "/") all map to the empty string used as the
// root key.
func TestSqlite_NormaliseEntryPath_Good(t *testing.T) {
assert.Equal(t, "file.txt", normaliseEntryPath("file.txt"))
assert.Equal(t, "dir/file.txt", normaliseEntryPath("dir/file.txt"))
// Leading slash is stripped so keys are stored relative to the root.
assert.Equal(t, "file.txt", normaliseEntryPath("/file.txt"))
// Traversal attempts are cleaned rather than rejected (sandboxing via
// path.Clean on a "/"-prefixed path — see normaliseEntryPath's doc comment).
assert.Equal(t, "file.txt", normaliseEntryPath("../file.txt"))
assert.Equal(t, "file.txt", normaliseEntryPath("dir/../file.txt"))
// All spellings of the root normalise to the empty string.
assert.Equal(t, "", normaliseEntryPath(""))
assert.Equal(t, "", normaliseEntryPath("."))
assert.Equal(t, "", normaliseEntryPath("/"))
}
// --- Interface Compliance ---

View file

@ -1,4 +1,9 @@
// Package store provides a group-namespaced key-value store backed by SQLite.
// Package store provides a SQLite-backed group-namespaced key-value store.
//
// kvStore, _ := store.New(":memory:")
// _ = kvStore.Set("app", "theme", "midnight")
// medium := kvStore.AsMedium()
// _ = medium.Write("app/theme", "midnight")
//
// It also exposes an io.Medium adapter so grouped values can participate in
// the same storage workflows as filesystem-backed mediums.

View file

@ -11,7 +11,7 @@ import (
)
// Medium wraps a Store to satisfy the io.Medium interface.
// Paths are mapped as group/key first segment is the group,
// Paths are mapped as group/key - the first segment is the group,
// the rest is the key. List("") returns groups as directories,
// List("group") returns keys as files.
type Medium struct {
@ -20,9 +20,7 @@ type Medium struct {
var _ coreio.Medium = (*Medium)(nil)
// Use NewMedium to expose a Store as an io.Medium.
//
// Example usage:
// NewMedium exposes a Store as an io.Medium.
//
// medium, _ := store.NewMedium("config.db")
// _ = medium.Write("app/theme", "midnight")
@ -49,9 +47,8 @@ func (m *Medium) Close() error {
return m.store.Close()
}
// splitPath splits a medium-style path into group and key.
// First segment = group, remainder = key.
func splitPath(entryPath string) (group, key string) {
// splitEntryPath splits a group/key path into store components.
func splitEntryPath(entryPath string) (group, key string) {
clean := path.Clean(entryPath)
clean = core.TrimPrefix(clean, "/")
if clean == "" || clean == "." {
@ -65,7 +62,7 @@ func splitPath(entryPath string) (group, key string) {
}
func (m *Medium) Read(entryPath string) (string, error) {
group, key := splitPath(entryPath)
group, key := splitEntryPath(entryPath)
if key == "" {
return "", core.E("store.Read", "path must include group/key", fs.ErrInvalid)
}
@ -73,7 +70,7 @@ func (m *Medium) Read(entryPath string) (string, error) {
}
func (m *Medium) Write(entryPath, content string) error {
group, key := splitPath(entryPath)
group, key := splitEntryPath(entryPath)
if key == "" {
return core.E("store.Write", "path must include group/key", fs.ErrInvalid)
}
@ -91,7 +88,7 @@ func (m *Medium) EnsureDir(_ string) error {
}
func (m *Medium) IsFile(entryPath string) bool {
group, key := splitPath(entryPath)
group, key := splitEntryPath(entryPath)
if key == "" {
return false
}
@ -108,7 +105,7 @@ func (m *Medium) FileSet(entryPath, content string) error {
}
func (m *Medium) Delete(entryPath string) error {
group, key := splitPath(entryPath)
group, key := splitEntryPath(entryPath)
if group == "" {
return core.E("store.Delete", "path is required", fs.ErrInvalid)
}
@ -126,7 +123,7 @@ func (m *Medium) Delete(entryPath string) error {
}
func (m *Medium) DeleteAll(entryPath string) error {
group, key := splitPath(entryPath)
group, key := splitEntryPath(entryPath)
if group == "" {
return core.E("store.DeleteAll", "path is required", fs.ErrInvalid)
}
@ -137,8 +134,8 @@ func (m *Medium) DeleteAll(entryPath string) error {
}
func (m *Medium) Rename(oldPath, newPath string) error {
oldGroup, oldKey := splitPath(oldPath)
newGroup, newKey := splitPath(newPath)
oldGroup, oldKey := splitEntryPath(oldPath)
newGroup, newKey := splitEntryPath(newPath)
if oldKey == "" || newKey == "" {
return core.E("store.Rename", "both paths must include group/key", fs.ErrInvalid)
}
@ -155,7 +152,7 @@ func (m *Medium) Rename(oldPath, newPath string) error {
// List returns directory entries. Empty path returns groups.
// A group path returns keys in that group.
func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) {
group, key := splitPath(entryPath)
group, key := splitEntryPath(entryPath)
if group == "" {
rows, err := m.store.database.Query("SELECT DISTINCT grp FROM kv ORDER BY grp")
@ -192,7 +189,7 @@ func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) {
// Stat returns file info for a group (dir) or key (file).
func (m *Medium) Stat(entryPath string) (fs.FileInfo, error) {
group, key := splitPath(entryPath)
group, key := splitEntryPath(entryPath)
if group == "" {
return nil, core.E("store.Stat", "path is required", fs.ErrInvalid)
}
@ -214,7 +211,7 @@ func (m *Medium) Stat(entryPath string) (fs.FileInfo, error) {
}
func (m *Medium) Open(entryPath string) (fs.File, error) {
group, key := splitPath(entryPath)
group, key := splitEntryPath(entryPath)
if key == "" {
return nil, core.E("store.Open", "path must include group/key", fs.ErrInvalid)
}
@ -226,7 +223,7 @@ func (m *Medium) Open(entryPath string) (fs.File, error) {
}
func (m *Medium) Create(entryPath string) (goio.WriteCloser, error) {
group, key := splitPath(entryPath)
group, key := splitEntryPath(entryPath)
if key == "" {
return nil, core.E("store.Create", "path must include group/key", fs.ErrInvalid)
}
@ -234,7 +231,7 @@ func (m *Medium) Create(entryPath string) (goio.WriteCloser, error) {
}
func (m *Medium) Append(entryPath string) (goio.WriteCloser, error) {
group, key := splitPath(entryPath)
group, key := splitEntryPath(entryPath)
if key == "" {
return nil, core.E("store.Append", "path must include group/key", fs.ErrInvalid)
}
@ -243,7 +240,7 @@ func (m *Medium) Append(entryPath string) (goio.WriteCloser, error) {
}
func (m *Medium) ReadStream(entryPath string) (goio.ReadCloser, error) {
group, key := splitPath(entryPath)
group, key := splitEntryPath(entryPath)
if key == "" {
return nil, core.E("store.ReadStream", "path must include group/key", fs.ErrInvalid)
}
@ -259,7 +256,7 @@ func (m *Medium) WriteStream(entryPath string) (goio.WriteCloser, error) {
}
func (m *Medium) Exists(entryPath string) bool {
group, key := splitPath(entryPath)
group, key := splitEntryPath(entryPath)
if group == "" {
return false
}
@ -272,7 +269,7 @@ func (m *Medium) Exists(entryPath string) bool {
}
func (m *Medium) IsDir(entryPath string) bool {
group, key := splitPath(entryPath)
group, key := splitEntryPath(entryPath)
if key != "" || group == "" {
return false
}

View file

@ -139,13 +139,13 @@ func (s *Store) Render(templateText, group string) (string, error) {
}
defer rows.Close()
vars := make(map[string]string)
templateValues := make(map[string]string)
for rows.Next() {
var key, value string
if err := rows.Scan(&key, &value); err != nil {
return "", core.E("store.Render", "scan", err)
}
vars[key] = value
templateValues[key] = value
}
if err := rows.Err(); err != nil {
return "", core.E("store.Render", "rows", err)
@ -155,9 +155,9 @@ func (s *Store) Render(templateText, group string) (string, error) {
if err != nil {
return "", core.E("store.Render", "parse template", err)
}
b := core.NewBuilder()
if err := tmpl.Execute(b, vars); err != nil {
builder := core.NewBuilder()
if err := tmpl.Execute(builder, templateValues); err != nil {
return "", core.E("store.Render", "execute template", err)
}
return b.String(), nil
return builder.String(), nil
}

View file

@ -1,5 +1,10 @@
// Package workspace provides encrypted user workspaces backed by io.Medium.
//
// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider})
// workspaceID, _ := service.CreateWorkspace("alice", "pass123")
// _ = service.SwitchWorkspace(workspaceID)
// _ = service.WorkspaceFileSet("notes/todo.txt", "ship it")
//
// Workspaces are rooted under the caller's configured home directory and keep
// file access constrained to the active workspace.
package workspace

View file

@ -44,14 +44,12 @@ type Service struct {
var _ Workspace = (*Service)(nil)
// Use New to manage encrypted user workspaces from a Core runtime.
// New creates an encrypted workspace service from a Core runtime.
//
// Example usage:
//
// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: myCryptProvider})
// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider})
// workspaceID, _ := service.CreateWorkspace("alice", "pass123")
func New(options Options) (*Service, error) {
home := workspaceHome()
home := resolveWorkspaceHomeDirectory()
if home == "" {
return nil, core.E("workspace.New", "failed to determine home directory", fs.ErrNotExist)
}
@ -135,14 +133,14 @@ func (s *Service) SwitchWorkspace(name string) error {
return nil
}
// activeFilePath returns the full path to a file in the active workspace,
// or an error if no workspace is active.
// activeFilePath resolves a filename inside the active workspace files root.
// It rejects empty names and traversal outside the workspace root.
func (s *Service) activeFilePath(operation, filename string) (string, error) {
if s.activeWorkspace == "" {
return "", core.E(operation, "no active workspace", nil)
}
filesRoot := core.Path(s.rootPath, s.activeWorkspace, "files")
filePath, err := joinWithinRoot(filesRoot, filename)
filePath, err := joinPathWithinRoot(filesRoot, filename)
if err != nil {
return "", core.E(operation, "file path escapes workspace files", fs.ErrPermission)
}
@ -209,7 +207,7 @@ func (s *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Resul
return core.Result{OK: true}
}
func workspaceHome() string {
func resolveWorkspaceHomeDirectory() string {
if home := core.Env("CORE_HOME"); home != "" {
return home
}
@ -219,7 +217,7 @@ func workspaceHome() string {
return core.Env("DIR_HOME")
}
func joinWithinRoot(root string, parts ...string) (string, error) {
func joinPathWithinRoot(root string, parts ...string) (string, error) {
candidate := core.Path(append([]string{root}, parts...)...)
sep := core.Env("DS")
if candidate == root || core.HasPrefix(candidate, root+sep) {
@ -232,7 +230,7 @@ func (s *Service) workspacePath(operation, workspaceName string) (string, error)
if workspaceName == "" {
return "", core.E(operation, "workspace name is required", fs.ErrInvalid)
}
workspaceDirectory, err := joinWithinRoot(s.rootPath, workspaceName)
workspaceDirectory, err := joinPathWithinRoot(s.rootPath, workspaceName)
if err != nil {
return "", core.E(operation, "workspace path escapes root", err)
}