fix: address CodeRabbit PR #2 findings
- datanode: add isFileLocked() helper to prevent RLock re-entry deadlock in Append
- io: MemoryMedium WriteMode rejects ancestor-is-file collision; EnsureDir rejects target-is-file collision
- io: copy fileModes during directory rename
- local: guard Delete/DeleteAll against removing sandbox root
- local: add TOCTOU TODO comment on validatePath symlink loop
- local: alias stdlib io→goio in medium_test.go
- datanode: alias stdlib io→goio in medium_test.go
- sqlite: add isValidTableName() whitelist to prevent table-name SQL injection in New()
- sqlite: remove duplicate WHERE clause args in List query
- sqlite: add mode field to sqliteWriteCloser; use it in Close (was hardcoded 420)
- sigil: GzipSigil.In returns nil when custom outputWriter is used (buffer was empty)
- sigil: capture hasher.Write error in HashSigil.In
- sigil: add comment explaining DecryptionFailedError hides raw AEAD error intentionally
- s3: add comment explaining WriteMode ignores mode (no POSIX on S3)
- s3_test: ListObjectsV2 mock sets IsTruncated+NextContinuationToken when maxKeys exceeded
- node: add comment explaining WriteMode ignores mode for in-memory nodes
- store: sort keys before building List entries for deterministic output
- store: add explanatory comment on NotFoundError sentinel
- workspace: replace sha256.Sum256 key derivation with HKDF (RFC 5869)
- docs: fix RFC-CORE-008 header (was RFC-025)
- docs: update import paths from forge.lthn.ai/core/go-io to dappco.re/go/core/io
- docs/RFC.md: remove duplicate Read/Write alias doc blocks

Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
parent
a6d473fcee
commit
a43a16fb0d
18 changed files with 174 additions and 72 deletions
|
|
@ -171,7 +171,11 @@ func (medium *Medium) IsFile(filePath string) bool {
|
|||
medium.lock.RLock()
|
||||
defer medium.lock.RUnlock()
|
||||
|
||||
filePath = normaliseEntryPath(filePath)
|
||||
return medium.isFileLocked(normaliseEntryPath(filePath))
|
||||
}
|
||||
|
||||
// isFileLocked reports whether filePath is a regular file. Caller must hold at least medium.lock.RLock.
|
||||
func (medium *Medium) isFileLocked(filePath string) bool {
|
||||
info, err := medium.dataNode.Stat(filePath)
|
||||
return err == nil && !info.IsDir()
|
||||
}
|
||||
|
|
@ -415,7 +419,7 @@ func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) {
|
|||
|
||||
var existing []byte
|
||||
medium.lock.RLock()
|
||||
if medium.IsFile(filePath) {
|
||||
if medium.isFileLocked(filePath) {
|
||||
data, err := medium.readFileLocked(filePath)
|
||||
if err != nil {
|
||||
medium.lock.RUnlock()
|
||||
|
|
@ -522,6 +526,10 @@ func (medium *Medium) readFileLocked(filePath string) ([]byte, error) {
|
|||
return data, nil
|
||||
}
|
||||
|
||||
// removeFileLocked rebuilds the entire DataNode excluding the target entry.
|
||||
// This is O(n) per call, leading to O(n²) behaviour when deleting many files in a loop.
|
||||
// TODO(perf): use a DataNode deletion API if borgdatanode ever exposes one, or batch deletions
|
||||
// by collecting targets before rebuilding once.
|
||||
func (medium *Medium) removeFileLocked(target string) error {
|
||||
entries, err := medium.collectAllLocked()
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
package datanode
|
||||
|
||||
import (
|
||||
"io"
|
||||
goio "io"
|
||||
"io/fs"
|
||||
"testing"
|
||||
|
||||
|
|
@ -155,7 +155,7 @@ func TestDataNode_Delete_RemoveFailure_Bad(t *testing.T) {
|
|||
require.NoError(t, dataNodeMedium.Write("bad.txt", "bad"))
|
||||
|
||||
original := dataNodeReadAll
|
||||
dataNodeReadAll = func(_ io.Reader) ([]byte, error) {
|
||||
dataNodeReadAll = func(_ goio.Reader) ([]byte, error) {
|
||||
return nil, core.NewError("read failed")
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
|
|
@ -203,7 +203,7 @@ func TestDataNode_RenameDir_ReadFailure_Bad(t *testing.T) {
|
|||
require.NoError(t, dataNodeMedium.Write("src/a.go", "package a"))
|
||||
|
||||
original := dataNodeReadAll
|
||||
dataNodeReadAll = func(_ io.Reader) ([]byte, error) {
|
||||
dataNodeReadAll = func(_ goio.Reader) ([]byte, error) {
|
||||
return nil, core.NewError("read failed")
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
|
|
@ -268,7 +268,7 @@ func TestDataNode_Open_Good(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
defer file.Close()
|
||||
|
||||
data, err := io.ReadAll(file)
|
||||
data, err := goio.ReadAll(file)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "opened", string(data))
|
||||
}
|
||||
|
|
@ -300,7 +300,7 @@ func TestDataNode_Append_ReadFailure_Bad(t *testing.T) {
|
|||
require.NoError(t, dataNodeMedium.Write("new.txt", "hello"))
|
||||
|
||||
original := dataNodeReadAll
|
||||
dataNodeReadAll = func(_ io.Reader) ([]byte, error) {
|
||||
dataNodeReadAll = func(_ goio.Reader) ([]byte, error) {
|
||||
return nil, core.NewError("read failed")
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
|
|
@ -322,7 +322,7 @@ func TestDataNode_Streams_Good(t *testing.T) {
|
|||
|
||||
readStream, err := dataNodeMedium.ReadStream("stream.txt")
|
||||
require.NoError(t, err)
|
||||
data, err := io.ReadAll(readStream)
|
||||
data, err := goio.ReadAll(readStream)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "streamed", string(data))
|
||||
require.NoError(t, readStream.Close())
|
||||
|
|
@ -382,7 +382,7 @@ func TestDataNode_DataNode_Good(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
defer file.Close()
|
||||
|
||||
data, err := io.ReadAll(file)
|
||||
data, err := goio.ReadAll(file)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "borg", string(data))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
# RFC-025: Agent Experience (AX) Design Principles
|
||||
# RFC-CORE-008: Agent Experience (AX) Design Principles
|
||||
|
||||
- **Status:** Draft
|
||||
- **Authors:** Snider, Cladius
|
||||
|
|
|
|||
37
docs/RFC.md
37
docs/RFC.md
|
|
@ -7,9 +7,9 @@ description: Complete API reference for go-io.
|
|||
|
||||
This document enumerates every exported type, function, method, and variable in go-io, with short usage examples.
|
||||
|
||||
Examples use the import paths from `docs/index.md` (`forge.lthn.ai/core/go-io`). Adjust paths if your module path differs.
|
||||
Examples use the import paths from `docs/index.md` (`dappco.re/go/core/io`). Adjust paths if your module path differs.
|
||||
|
||||
## Package io (`forge.lthn.ai/core/go-io`)
|
||||
## Package io (`dappco.re/go/core/io`)
|
||||
|
||||
Defines the `Medium` interface, helper functions, and in-memory `MemoryMedium` implementation.
|
||||
|
||||
|
|
@ -65,23 +65,6 @@ _ = m.Write("notes.txt", "hello")
|
|||
ok := m.IsFile("notes.txt")
|
||||
```
|
||||
|
||||
**Read(path string) (string, error)**
|
||||
Alias for `Read`.
|
||||
Example:
|
||||
```go
|
||||
m := io.NewMemoryMedium()
|
||||
_ = m.Write("notes.txt", "hello")
|
||||
value, _ := m.Read("notes.txt")
|
||||
```
|
||||
|
||||
**Write(path, content string) error**
|
||||
Alias for `Write`.
|
||||
Example:
|
||||
```go
|
||||
m := io.NewMemoryMedium()
|
||||
_ = m.Write("notes.txt", "hello")
|
||||
```
|
||||
|
||||
**Delete(path string) error**
|
||||
Deletes a file or empty directory.
|
||||
Example:
|
||||
|
|
@ -620,7 +603,7 @@ _, _ = w.Write([]byte("hello"))
|
|||
_ = w.Close()
|
||||
```
|
||||
|
||||
## Package local (`forge.lthn.ai/core/go-io/local`)
|
||||
## Package local (`dappco.re/go/core/io/local`)
|
||||
|
||||
Local filesystem backend with sandboxed roots and symlink-escape protection.
|
||||
|
||||
|
|
@ -798,7 +781,7 @@ m, _ := local.New("/srv/app")
|
|||
_ = m.Write("notes.txt", "hello")
|
||||
```
|
||||
|
||||
## Package node (`forge.lthn.ai/core/go-io/node`)
|
||||
## Package node (`dappco.re/go/core/io/node`)
|
||||
|
||||
In-memory filesystem implementing `io.Medium` and `fs.FS`, with tar serialisation.
|
||||
|
||||
|
|
@ -1087,7 +1070,7 @@ _, _ = w.Write([]byte("data"))
|
|||
_ = w.Close()
|
||||
```
|
||||
|
||||
## Package store (`forge.lthn.ai/core/go-io/store`)
|
||||
## Package store (`dappco.re/go/core/io/store`)
|
||||
|
||||
Group-namespaced key-value store backed by SQLite, plus a `Medium` adapter.
|
||||
|
||||
|
|
@ -1387,7 +1370,7 @@ _ = m.Write("config/theme", "midnight")
|
|||
ok := m.IsDir("config")
|
||||
```
|
||||
|
||||
## Package sqlite (`forge.lthn.ai/core/go-io/sqlite`)
|
||||
## Package sqlite (`dappco.re/go/core/io/sqlite`)
|
||||
|
||||
SQLite-backed `io.Medium` implementation using the pure-Go driver.
|
||||
|
||||
|
|
@ -1575,7 +1558,7 @@ _ = m.EnsureDir("config")
|
|||
ok := m.IsDir("config")
|
||||
```
|
||||
|
||||
## Package s3 (`forge.lthn.ai/core/go-io/s3`)
|
||||
## Package s3 (`dappco.re/go/core/io/s3`)
|
||||
|
||||
Amazon S3-backed `io.Medium` implementation.
|
||||
|
||||
|
|
@ -1773,7 +1756,7 @@ m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client})
|
|||
ok := m.IsDir("logs")
|
||||
```
|
||||
|
||||
## Package datanode (`forge.lthn.ai/core/go-io/datanode`)
|
||||
## Package datanode (`dappco.re/go/core/io/datanode`)
|
||||
|
||||
In-memory `io.Medium` backed by Borg's DataNode, with tar snapshot/restore support.
|
||||
|
||||
|
|
@ -1986,7 +1969,7 @@ _ = m.EnsureDir("config")
|
|||
ok := m.IsDir("config")
|
||||
```
|
||||
|
||||
## Package workspace (`forge.lthn.ai/core/go-io/workspace`)
|
||||
## Package workspace (`dappco.re/go/core/io/workspace`)
|
||||
|
||||
Encrypted user workspace management.
|
||||
|
||||
|
|
@ -2123,7 +2106,7 @@ result := service.HandleWorkspaceMessage(core.New(), workspace.WorkspaceCommand{
|
|||
_ = result.OK
|
||||
```
|
||||
|
||||
## Package sigil (`forge.lthn.ai/core/go-io/sigil`)
|
||||
## Package sigil (`dappco.re/go/core/io/sigil`)
|
||||
|
||||
Composable data-transformation sigils for encoding, compression, hashing, and encryption.
|
||||
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ description: Unified storage abstraction for Go with pluggable backends — loca
|
|||
|
||||
# go-io
|
||||
|
||||
`forge.lthn.ai/core/go-io` is a storage abstraction library that provides a single `Medium` interface for reading and writing files across different backends. Write your code against `Medium` once, then swap between local disk, S3, SQLite, or in-memory storage without changing a line of business logic.
|
||||
`dappco.re/go/core/io` is a storage abstraction library that provides a single `Medium` interface for reading and writing files across different backends. Write your code against `Medium` once, then swap between local disk, S3, SQLite, or in-memory storage without changing a line of business logic.
|
||||
|
||||
The library also includes `sigil`, a composable data-transformation pipeline for encoding, compression, hashing, and authenticated encryption.
|
||||
|
||||
|
|
@ -14,9 +14,9 @@ The library also includes `sigil`, a composable data-transformation pipeline for
|
|||
|
||||
```go
|
||||
import (
|
||||
io "forge.lthn.ai/core/go-io"
|
||||
"forge.lthn.ai/core/go-io/s3"
|
||||
"forge.lthn.ai/core/go-io/node"
|
||||
io "dappco.re/go/core/io"
|
||||
"dappco.re/go/core/io/s3"
|
||||
"dappco.re/go/core/io/node"
|
||||
)
|
||||
|
||||
content, _ := io.Local.Read("/etc/hostname")
|
||||
|
|
@ -37,15 +37,15 @@ _ = s3Medium.Write("photo.jpg", rawData)
|
|||
|
||||
| Package | Import Path | Purpose |
|
||||
|---------|-------------|---------|
|
||||
| `io` (root) | `forge.lthn.ai/core/go-io` | `Medium` interface, helper functions, `MemoryMedium` for tests |
|
||||
| `local` | `forge.lthn.ai/core/go-io/local` | Local filesystem backend with path sandboxing and symlink-escape protection |
|
||||
| `s3` | `forge.lthn.ai/core/go-io/s3` | Amazon S3 / S3-compatible backend (Garage, MinIO, etc.) |
|
||||
| `sqlite` | `forge.lthn.ai/core/go-io/sqlite` | SQLite-backed virtual filesystem (pure Go driver, no CGO) |
|
||||
| `node` | `forge.lthn.ai/core/go-io/node` | In-memory filesystem implementing both `Medium` and `fs.FS`, with tar round-tripping |
|
||||
| `datanode` | `forge.lthn.ai/core/go-io/datanode` | Thread-safe in-memory `Medium` backed by Borg's DataNode, with snapshot/restore |
|
||||
| `store` | `forge.lthn.ai/core/go-io/store` | Group-namespaced key-value store (SQLite), with a `Medium` adapter and Go template rendering |
|
||||
| `sigil` | `forge.lthn.ai/core/go-io/sigil` | Composable data transformations: encoding, compression, hashing, XChaCha20-Poly1305 encryption |
|
||||
| `workspace` | `forge.lthn.ai/core/go-io/workspace` | Encrypted workspace service integrated with the Core DI container |
|
||||
| `io` (root) | `dappco.re/go/core/io` | `Medium` interface, helper functions, `MemoryMedium` for tests |
|
||||
| `local` | `dappco.re/go/core/io/local` | Local filesystem backend with path sandboxing and symlink-escape protection |
|
||||
| `s3` | `dappco.re/go/core/io/s3` | Amazon S3 / S3-compatible backend (Garage, MinIO, etc.) |
|
||||
| `sqlite` | `dappco.re/go/core/io/sqlite` | SQLite-backed virtual filesystem (pure Go driver, no CGO) |
|
||||
| `node` | `dappco.re/go/core/io/node` | In-memory filesystem implementing both `Medium` and `fs.FS`, with tar round-tripping |
|
||||
| `datanode` | `dappco.re/go/core/io/datanode` | Thread-safe in-memory `Medium` backed by Borg's DataNode, with snapshot/restore |
|
||||
| `store` | `dappco.re/go/core/io/store` | Group-namespaced key-value store (SQLite), with a `Medium` adapter and Go template rendering |
|
||||
| `sigil` | `dappco.re/go/core/io/sigil` | Composable data transformations: encoding, compression, hashing, XChaCha20-Poly1305 encryption |
|
||||
| `workspace` | `dappco.re/go/core/io/workspace` | Encrypted workspace service integrated with the Core DI container |
|
||||
|
||||
|
||||
## The Medium Interface
|
||||
|
|
|
|||
19
io.go
19
io.go
|
|
@ -272,6 +272,18 @@ func (medium *MemoryMedium) Write(path, content string) error {
|
|||
|
||||
// Example: _ = io.NewMemoryMedium().WriteMode("keys/private.key", "secret", 0600)
|
||||
func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) error {
|
||||
// Verify no ancestor directory component is stored as a file.
|
||||
ancestor := path.Dir(path)
|
||||
for ancestor != "." && ancestor != "" {
|
||||
if _, ok := medium.fileContents[ancestor]; ok {
|
||||
return core.E("io.MemoryMedium.WriteMode", core.Concat("ancestor path is a file: ", ancestor), fs.ErrExist)
|
||||
}
|
||||
next := path.Dir(ancestor)
|
||||
if next == ancestor {
|
||||
break
|
||||
}
|
||||
ancestor = next
|
||||
}
|
||||
medium.ensureAncestorDirectories(path)
|
||||
medium.fileContents[path] = content
|
||||
medium.fileModes[path] = mode
|
||||
|
|
@ -281,6 +293,9 @@ func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) er
|
|||
|
||||
// Example: _ = io.NewMemoryMedium().EnsureDir("config/app")
|
||||
func (medium *MemoryMedium) EnsureDir(path string) error {
|
||||
if _, ok := medium.fileContents[path]; ok {
|
||||
return core.E("io.MemoryMedium.EnsureDir", core.Concat("path is already a file: ", path), fs.ErrExist)
|
||||
}
|
||||
medium.ensureAncestorDirectories(path)
|
||||
medium.directories[path] = true
|
||||
return nil
|
||||
|
|
@ -411,6 +426,10 @@ func (medium *MemoryMedium) Rename(oldPath, newPath string) error {
|
|||
medium.modificationTimes[newFilePath] = modTime
|
||||
delete(medium.modificationTimes, oldFilePath)
|
||||
}
|
||||
if fileMode, ok := medium.fileModes[oldFilePath]; ok {
|
||||
medium.fileModes[newFilePath] = fileMode
|
||||
delete(medium.fileModes, oldFilePath)
|
||||
}
|
||||
}
|
||||
|
||||
dirsToMove := make(map[string]string)
|
||||
|
|
|
|||
|
|
@ -197,6 +197,14 @@ func (medium *Medium) sandboxedPath(path string) string {
|
|||
return core.Path(medium.filesystemRoot, core.TrimPrefix(clean, dirSeparator()))
|
||||
}
|
||||
|
||||
// validatePath resolves the caller-supplied path against the sandbox root, rejecting any path
|
||||
// that would escape via symlinks.
|
||||
//
|
||||
// TODO(security): the per-component Lstat + join loop is subject to a TOCTOU race: a symlink
|
||||
// could be swapped between the Lstat and the subsequent open. A proper fix requires opening each
|
||||
// directory component with O_NOFOLLOW (openat-style) so that the resolved fd is used for the
|
||||
// next step rather than re-resolving from a path string. Until then, symlink-based escape is
|
||||
// only possible on systems where an attacker can swap filesystem objects between syscalls.
|
||||
func (medium *Medium) validatePath(path string) (string, error) {
|
||||
if medium.filesystemRoot == dirSeparator() {
|
||||
return medium.sandboxedPath(path), nil
|
||||
|
|
@ -358,6 +366,9 @@ func (medium *Medium) Delete(path string) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resolvedPath == medium.filesystemRoot {
|
||||
return core.E("local.Delete", "refusing to delete sandbox root", nil)
|
||||
}
|
||||
if isProtectedPath(resolvedPath) {
|
||||
return core.E("local.Delete", core.Concat("refusing to delete protected path: ", resolvedPath), nil)
|
||||
}
|
||||
|
|
@ -370,6 +381,9 @@ func (medium *Medium) DeleteAll(path string) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resolvedPath == medium.filesystemRoot {
|
||||
return core.E("local.DeleteAll", "refusing to delete sandbox root", nil)
|
||||
}
|
||||
if isProtectedPath(resolvedPath) {
|
||||
return core.E("local.DeleteAll", core.Concat("refusing to delete protected path: ", resolvedPath), nil)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
package local
|
||||
|
||||
import (
|
||||
"io"
|
||||
goio "io"
|
||||
"io/fs"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
|
@ -379,8 +379,8 @@ func TestLocal_ReadStream_Basic_Good(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
defer reader.Close()
|
||||
|
||||
limitReader := io.LimitReader(reader, 9)
|
||||
data, err := io.ReadAll(limitReader)
|
||||
limitReader := goio.LimitReader(reader, 9)
|
||||
data, err := goio.ReadAll(limitReader)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "streaming", string(data))
|
||||
}
|
||||
|
|
@ -392,7 +392,7 @@ func TestLocal_WriteStream_Basic_Good(t *testing.T) {
|
|||
writer, err := localMedium.WriteStream("output.txt")
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, err = io.Copy(writer, core.NewReader("piped data"))
|
||||
_, err = goio.Copy(writer, core.NewReader("piped data"))
|
||||
assert.NoError(t, err)
|
||||
err = writer.Close()
|
||||
assert.NoError(t, err)
|
||||
|
|
|
|||
|
|
@ -427,6 +427,6 @@ func TestIO_Copy_Bad(t *testing.T) {
|
|||
func TestIO_LocalGlobal_Good(t *testing.T) {
|
||||
assert.NotNil(t, Local, "io.Local should be initialised")
|
||||
|
||||
var memoryMedium = Local
|
||||
assert.NotNil(t, memoryMedium)
|
||||
var localMedium = Local
|
||||
assert.NotNil(t, localMedium)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -334,6 +334,7 @@ func (node *Node) Write(filePath, content string) error {
|
|||
}
|
||||
|
||||
// Example: _ = nodeTree.WriteMode("keys/private.key", key, 0600)
|
||||
// Note: mode is intentionally ignored — in-memory nodes have no filesystem permission model.
|
||||
func (node *Node) WriteMode(filePath, content string, mode fs.FileMode) error {
|
||||
return node.Write(filePath, content)
|
||||
}
|
||||
|
|
|
|||
2
s3/s3.go
2
s3/s3.go
|
|
@ -161,6 +161,8 @@ func (medium *Medium) Write(filePath, content string) error {
|
|||
}
|
||||
|
||||
// Example: _ = medium.WriteMode("keys/private.key", key, 0600)
|
||||
// Note: mode is intentionally ignored — S3 has no POSIX permission model.
|
||||
// Use S3 bucket policies and IAM for access control.
|
||||
func (medium *Medium) WriteMode(filePath, content string, mode fs.FileMode) error {
|
||||
return medium.Write(filePath, content)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -131,10 +131,22 @@ func (client *testS3Client) ListObjectsV2(operationContext context.Context, para
|
|||
}
|
||||
sort.Strings(allKeys)
|
||||
|
||||
continuationToken := aws.ToString(params.ContinuationToken)
|
||||
|
||||
var contents []types.Object
|
||||
commonPrefixes := make(map[string]bool)
|
||||
truncated := false
|
||||
var nextToken string
|
||||
|
||||
past := continuationToken == ""
|
||||
for _, k := range allKeys {
|
||||
if !past {
|
||||
if k == continuationToken {
|
||||
past = true
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
rest := core.TrimPrefix(k, prefix)
|
||||
|
||||
if delimiter != "" {
|
||||
|
|
@ -147,6 +159,8 @@ func (client *testS3Client) ListObjectsV2(operationContext context.Context, para
|
|||
}
|
||||
|
||||
if int32(len(contents)) >= maxKeys {
|
||||
truncated = true
|
||||
nextToken = k
|
||||
break
|
||||
}
|
||||
|
||||
|
|
@ -169,11 +183,15 @@ func (client *testS3Client) ListObjectsV2(operationContext context.Context, para
|
|||
cpSlice = append(cpSlice, types.CommonPrefix{Prefix: aws.String(cp)})
|
||||
}
|
||||
|
||||
return &awss3.ListObjectsV2Output{
|
||||
out := &awss3.ListObjectsV2Output{
|
||||
Contents: contents,
|
||||
CommonPrefixes: cpSlice,
|
||||
IsTruncated: aws.Bool(false),
|
||||
}, nil
|
||||
IsTruncated: aws.Bool(truncated),
|
||||
}
|
||||
if truncated {
|
||||
out.NextContinuationToken = aws.String(nextToken)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (client *testS3Client) CopyObject(operationContext context.Context, params *awss3.CopyObjectInput, optionFns ...func(*awss3.Options)) (*awss3.CopyObjectOutput, error) {
|
||||
|
|
|
|||
|
|
@ -264,6 +264,8 @@ func (sigil *ChaChaPolySigil) Out(data []byte) ([]byte, error) {
|
|||
|
||||
obfuscated, err := aead.Open(nil, nonce, ciphertext, nil)
|
||||
if err != nil {
|
||||
// The underlying aead error is intentionally hidden: surfacing raw AEAD errors can
|
||||
// leak oracle information to an attacker. DecryptionFailedError is the safe sentinel.
|
||||
return nil, core.E("sigil.ChaChaPolySigil.Out", "decrypt ciphertext", DecryptionFailedError)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -102,6 +102,11 @@ func (sigil *GzipSigil) In(data []byte) ([]byte, error) {
|
|||
if err := gzipWriter.Close(); err != nil {
|
||||
return nil, core.E("sigil.GzipSigil.In", "close gzip writer", err)
|
||||
}
|
||||
// When a custom outputWriter was supplied the caller owns the bytes; return nil so the
|
||||
// pipeline does not propagate a stale empty-buffer value.
|
||||
if sigil.outputWriter != nil {
|
||||
return nil, nil
|
||||
}
|
||||
return buffer.Bytes(), nil
|
||||
}
|
||||
|
||||
|
|
@ -203,7 +208,9 @@ func (sigil *HashSigil) In(data []byte) ([]byte, error) {
|
|||
return nil, core.E("sigil.HashSigil.In", "hash algorithm not available", fs.ErrInvalid)
|
||||
}
|
||||
|
||||
hasher.Write(data)
|
||||
if _, err := hasher.Write(data); err != nil {
|
||||
return nil, core.E("sigil.HashSigil.In", "write hash input", err)
|
||||
}
|
||||
return hasher.(interface{ Sum([]byte) []byte }).Sum(nil), nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -38,6 +38,27 @@ func normaliseTableName(table string) string {
|
|||
return table
|
||||
}
|
||||
|
||||
// isValidTableName reports whether name is a safe SQL identifier: only
// ASCII letters, digits, and underscores, and it must not start with a
// digit (or be empty). This prevents SQL injection via table-name
// concatenation, since table names cannot be bound as query parameters.
func isValidTableName(name string) bool {
	for i := 0; i < len(name); i++ {
		c := name[i]
		letterOrUnderscore := (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_'
		digit := c >= '0' && c <= '9'
		// A digit is only acceptable after the first character; anything
		// outside the whitelist (including non-ASCII bytes) is rejected.
		if !letterOrUnderscore && !(digit && i > 0) {
			return false
		}
	}
	return len(name) > 0
}
|
||||
|
||||
// Example: medium, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"})
|
||||
// Example: _ = medium.Write("config/app.yaml", "port: 8080")
|
||||
func New(options Options) (*Medium, error) {
|
||||
|
|
@ -45,7 +66,12 @@ func New(options Options) (*Medium, error) {
|
|||
return nil, core.E("sqlite.New", "database path is required", fs.ErrInvalid)
|
||||
}
|
||||
|
||||
medium := &Medium{table: normaliseTableName(options.Table)}
|
||||
tableName := normaliseTableName(options.Table)
|
||||
if !isValidTableName(tableName) {
|
||||
return nil, core.E("sqlite.New", core.Concat("table name contains invalid characters: ", tableName), fs.ErrInvalid)
|
||||
}
|
||||
|
||||
medium := &Medium{table: tableName}
|
||||
|
||||
database, err := sql.Open("sqlite", options.Path)
|
||||
if err != nil {
|
||||
|
|
@ -338,8 +364,8 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) {
|
|||
}
|
||||
|
||||
rows, err := medium.database.Query(
|
||||
`SELECT path, content, mode, is_dir, mtime FROM `+medium.table+` WHERE path LIKE ? OR path LIKE ?`,
|
||||
prefix+"%", prefix+"%",
|
||||
`SELECT path, content, mode, is_dir, mtime FROM `+medium.table+` WHERE path LIKE ?`,
|
||||
prefix+"%",
|
||||
)
|
||||
if err != nil {
|
||||
return nil, core.E("sqlite.List", "query failed", err)
|
||||
|
|
@ -635,6 +661,7 @@ type sqliteWriteCloser struct {
|
|||
medium *Medium
|
||||
path string
|
||||
data []byte
|
||||
mode fs.FileMode
|
||||
}
|
||||
|
||||
func (writer *sqliteWriteCloser) Write(data []byte) (int, error) {
|
||||
|
|
@ -643,10 +670,14 @@ func (writer *sqliteWriteCloser) Write(data []byte) (int, error) {
|
|||
}
|
||||
|
||||
func (writer *sqliteWriteCloser) Close() error {
|
||||
mode := writer.mode
|
||||
if mode == 0 {
|
||||
mode = 0644
|
||||
}
|
||||
_, err := writer.medium.database.Exec(
|
||||
`INSERT INTO `+writer.medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, 420, FALSE, ?)
|
||||
ON CONFLICT(path) DO UPDATE SET content = excluded.content, is_dir = FALSE, mtime = excluded.mtime`,
|
||||
writer.path, writer.data, time.Now().UTC(),
|
||||
`INSERT INTO `+writer.medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, FALSE, ?)
|
||||
ON CONFLICT(path) DO UPDATE SET content = excluded.content, mode = excluded.mode, is_dir = FALSE, mtime = excluded.mtime`,
|
||||
writer.path, writer.data, int(mode), time.Now().UTC(),
|
||||
)
|
||||
if err != nil {
|
||||
return core.E("sqlite.WriteCloser.Close", core.Concat("store failed: ", writer.path), err)
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ import (
|
|||
goio "io"
|
||||
"io/fs"
|
||||
"path"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
|
|
@ -171,9 +172,15 @@ func (medium *Medium) List(entryPath string) ([]fs.DirEntry, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Sort keys so that List returns entries in a deterministic order.
|
||||
keys := make([]string, 0, len(all))
|
||||
for k := range all {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
slices.Sort(keys)
|
||||
var entries []fs.DirEntry
|
||||
for key, value := range all {
|
||||
entries = append(entries, &keyValueDirEntry{name: key, size: int64(len(value))})
|
||||
for _, k := range keys {
|
||||
entries = append(entries, &keyValueDirEntry{name: k, size: int64(len(all[k]))})
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,7 +10,10 @@ import (
|
|||
_ "modernc.org/sqlite"
|
||||
)
|
||||
|
||||
// Example: _, err := keyValueStore.Get("app", "theme")
|
||||
// NotFoundError is the sentinel returned when a key does not exist in the store.
|
||||
// Callers test for it with errors.Is. It is defined with errors.New so that
|
||||
// identity comparison works correctly across package boundaries.
|
||||
// Example: _, err := keyValueStore.Get("app", "theme"); errors.Is(err, store.NotFoundError)
|
||||
var NotFoundError = errors.New("key not found")
|
||||
|
||||
// Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"})
|
||||
|
|
|
|||
|
|
@ -2,11 +2,12 @@ package workspace
|
|||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
goio "io"
|
||||
"io/fs"
|
||||
"sync"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"golang.org/x/crypto/hkdf"
|
||||
|
||||
"dappco.re/go/core/io"
|
||||
"dappco.re/go/core/io/sigil"
|
||||
|
|
@ -188,8 +189,14 @@ func (service *Service) workspaceCipherSigil(operation string) (*sigil.ChaChaPol
|
|||
if err != nil {
|
||||
return nil, core.E(operation, "failed to read workspace key", err)
|
||||
}
|
||||
derived := sha256.Sum256([]byte(rawKey))
|
||||
cipherSigil, err := sigil.NewChaChaPolySigil(derived[:], nil)
|
||||
// Use HKDF (RFC 5869) for key derivation: it is purpose-bound, domain-separated,
|
||||
// and more resistant to length-extension attacks than a bare SHA-256 hash.
|
||||
hkdfReader := hkdf.New(sha256.New, []byte(rawKey), nil, []byte("workspace-cipher-key"))
|
||||
derived := make([]byte, 32)
|
||||
if _, err := goio.ReadFull(hkdfReader, derived); err != nil {
|
||||
return nil, core.E(operation, "failed to derive workspace key", err)
|
||||
}
|
||||
cipherSigil, err := sigil.NewChaChaPolySigil(derived, nil)
|
||||
if err != nil {
|
||||
return nil, core.E(operation, "failed to create cipher sigil", err)
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue