refactor(store): tighten AX naming and examples
All checks were successful
Security Scan / security (push) Successful in 10s
Test / test (push) Successful in 1m41s

Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
Virgil 2026-03-30 18:17:07 +00:00
parent 5b944410e7
commit 2eedf1e937
8 changed files with 98 additions and 100 deletions

View file

@@ -131,7 +131,7 @@ func main() {
## Adding a New Method
1. Implement on `*Store` in `store.go`
2. If mutating, call `s.notify(Event{...})` after successful database write
2. If mutating, call `storeInstance.notify(Event{...})` after successful database write
3. Add delegation method on `ScopedStore` in `scope.go` (prefix the group)
4. Update `checkQuota` in `scope.go` if it affects key/group counts
5. Write `Test<File>_<Function>_<Good|Bad|Ugly>` tests

View file

@@ -42,7 +42,7 @@ func main() {
}
fmt.Println(themeValue)
// Watch config mutations on a buffered channel.
// Watch "config" changes and print each event as it arrives.
watcher := storeInstance.Watch("config", "*")
defer storeInstance.Unwatch(watcher)
go func() {

View file

@@ -16,32 +16,32 @@ func BenchmarkGetAll_VaryingSize(b *testing.B) {
for _, size := range sizes {
b.Run(core.Sprintf("size=%d", size), func(b *testing.B) {
s, err := New(":memory:")
storeInstance, err := New(":memory:")
if err != nil {
b.Fatal(err)
}
defer s.Close()
defer storeInstance.Close()
for i := range size {
_ = s.Set("bench", core.Sprintf("key-%d", i), "value")
_ = storeInstance.Set("bench", core.Sprintf("key-%d", i), "value")
}
b.ReportAllocs()
b.ResetTimer()
for range b.N {
_, _ = s.GetAll("bench")
_, _ = storeInstance.GetAll("bench")
}
})
}
}
func BenchmarkSetGet_Parallel(b *testing.B) {
s, err := New(":memory:")
storeInstance, err := New(":memory:")
if err != nil {
b.Fatal(err)
}
defer s.Close()
defer storeInstance.Close()
b.ReportAllocs()
b.ResetTimer()
@@ -50,84 +50,84 @@ func BenchmarkSetGet_Parallel(b *testing.B) {
i := 0
for pb.Next() {
key := core.Sprintf("key-%d", i)
_ = s.Set("parallel", key, "value")
_, _ = s.Get("parallel", key)
_ = storeInstance.Set("parallel", key, "value")
_, _ = storeInstance.Get("parallel", key)
i++
}
})
}
func BenchmarkCount_10K(b *testing.B) {
s, err := New(":memory:")
storeInstance, err := New(":memory:")
if err != nil {
b.Fatal(err)
}
defer s.Close()
defer storeInstance.Close()
for i := range 10_000 {
_ = s.Set("bench", core.Sprintf("key-%d", i), "value")
_ = storeInstance.Set("bench", core.Sprintf("key-%d", i), "value")
}
b.ReportAllocs()
b.ResetTimer()
for range b.N {
_, _ = s.Count("bench")
_, _ = storeInstance.Count("bench")
}
}
func BenchmarkDelete(b *testing.B) {
s, err := New(":memory:")
storeInstance, err := New(":memory:")
if err != nil {
b.Fatal(err)
}
defer s.Close()
defer storeInstance.Close()
// Pre-populate keys that will be deleted.
for i := range b.N {
_ = s.Set("bench", core.Sprintf("key-%d", i), "value")
_ = storeInstance.Set("bench", core.Sprintf("key-%d", i), "value")
}
b.ReportAllocs()
b.ResetTimer()
for i := range b.N {
_ = s.Delete("bench", core.Sprintf("key-%d", i))
_ = storeInstance.Delete("bench", core.Sprintf("key-%d", i))
}
}
func BenchmarkSetWithTTL(b *testing.B) {
s, err := New(":memory:")
storeInstance, err := New(":memory:")
if err != nil {
b.Fatal(err)
}
defer s.Close()
defer storeInstance.Close()
b.ReportAllocs()
b.ResetTimer()
for i := range b.N {
_ = s.SetWithTTL("bench", core.Sprintf("key-%d", i), "value", 60_000_000_000) // 60s
_ = storeInstance.SetWithTTL("bench", core.Sprintf("key-%d", i), "value", 60_000_000_000) // 60s
}
}
func BenchmarkRender(b *testing.B) {
s, err := New(":memory:")
storeInstance, err := New(":memory:")
if err != nil {
b.Fatal(err)
}
defer s.Close()
defer storeInstance.Close()
for i := range 50 {
_ = s.Set("bench", core.Sprintf("key%d", i), core.Sprintf("val%d", i))
_ = storeInstance.Set("bench", core.Sprintf("key%d", i), core.Sprintf("val%d", i))
}
tmpl := `{{ .key0 }} {{ .key25 }} {{ .key49 }}`
templateSource := `{{ .key0 }} {{ .key25 }} {{ .key49 }}`
b.ReportAllocs()
b.ResetTimer()
for range b.N {
_, _ = s.Render(tmpl, "bench")
_, _ = storeInstance.Render(templateSource, "bench")
}
}

View file

@@ -21,20 +21,20 @@ func TestCoverage_New_Bad_SchemaConflict(t *testing.T) {
// Pre-create a database with an INDEX named "entries". When New() runs
// CREATE TABLE IF NOT EXISTS entries, SQLite returns an error because the
// name "entries" is already taken by the index.
dbPath := testPath(t, "conflict.db")
databasePath := testPath(t, "conflict.db")
db, err := sql.Open("sqlite", dbPath)
database, err := sql.Open("sqlite", databasePath)
require.NoError(t, err)
db.SetMaxOpenConns(1)
_, err = db.Exec("PRAGMA journal_mode=WAL")
database.SetMaxOpenConns(1)
_, err = database.Exec("PRAGMA journal_mode=WAL")
require.NoError(t, err)
_, err = db.Exec("CREATE TABLE dummy (id INTEGER)")
_, err = database.Exec("CREATE TABLE dummy (id INTEGER)")
require.NoError(t, err)
_, err = db.Exec("CREATE INDEX entries ON dummy(id)")
_, err = database.Exec("CREATE INDEX entries ON dummy(id)")
require.NoError(t, err)
require.NoError(t, db.Close())
require.NoError(t, database.Close())
_, err = New(dbPath)
_, err = New(databasePath)
require.Error(t, err, "New should fail when an index named entries already exists")
assert.Contains(t, err.Error(), "store.New: schema")
}
@@ -46,31 +46,31 @@ func TestCoverage_New_Bad_SchemaConflict(t *testing.T) {
func TestCoverage_GetAll_Bad_ScanError(t *testing.T) {
// Trigger a scan error by inserting a row with a NULL key. The production
// code scans into plain strings, which cannot represent NULL.
s, err := New(":memory:")
storeInstance, err := New(":memory:")
require.NoError(t, err)
defer s.Close()
defer storeInstance.Close()
// Insert a normal row first so the query returns results.
require.NoError(t, s.Set("g", "good", "value"))
require.NoError(t, storeInstance.Set("g", "good", "value"))
// Restructure the table to allow NULLs, then insert a NULL-key row.
_, err = s.database.Exec("ALTER TABLE entries RENAME TO entries_backup")
_, err = storeInstance.database.Exec("ALTER TABLE entries RENAME TO entries_backup")
require.NoError(t, err)
_, err = s.database.Exec(`CREATE TABLE entries (
_, err = storeInstance.database.Exec(`CREATE TABLE entries (
group_name TEXT,
entry_key TEXT,
entry_value TEXT,
expires_at INTEGER
)`)
require.NoError(t, err)
_, err = s.database.Exec("INSERT INTO entries SELECT * FROM entries_backup")
_, err = storeInstance.database.Exec("INSERT INTO entries SELECT * FROM entries_backup")
require.NoError(t, err)
_, err = s.database.Exec("INSERT INTO entries (group_name, entry_key, entry_value) VALUES ('g', NULL, 'null-key-val')")
_, err = storeInstance.database.Exec("INSERT INTO entries (group_name, entry_key, entry_value) VALUES ('g', NULL, 'null-key-val')")
require.NoError(t, err)
_, err = s.database.Exec("DROP TABLE entries_backup")
_, err = storeInstance.database.Exec("DROP TABLE entries_backup")
require.NoError(t, err)
_, err = s.GetAll("g")
_, err = storeInstance.GetAll("g")
require.Error(t, err, "GetAll should fail when a row contains a NULL key")
assert.Contains(t, err.Error(), "store.All: scan")
}
@@ -82,31 +82,31 @@ func TestCoverage_GetAll_Bad_ScanError(t *testing.T) {
func TestCoverage_GetAll_Bad_RowsError(t *testing.T) {
// Trigger rows.Err() by corrupting the database file so that iteration
// starts successfully but encounters a malformed page mid-scan.
dbPath := testPath(t, "corrupt-getall.db")
databasePath := testPath(t, "corrupt-getall.db")
s, err := New(dbPath)
storeInstance, err := New(databasePath)
require.NoError(t, err)
// Insert enough rows to span multiple database pages.
const rows = 5000
for i := range rows {
require.NoError(t, s.Set("g",
require.NoError(t, storeInstance.Set("g",
core.Sprintf("key-%06d", i),
core.Sprintf("value-with-padding-%06d-xxxxxxxxxxxxxxxxxxxxxxxx", i)))
}
s.Close()
storeInstance.Close()
// Force a WAL checkpoint so all data is in the main database file.
raw, err := sql.Open("sqlite", dbPath)
rawDatabase, err := sql.Open("sqlite", databasePath)
require.NoError(t, err)
raw.SetMaxOpenConns(1)
_, err = raw.Exec("PRAGMA wal_checkpoint(TRUNCATE)")
rawDatabase.SetMaxOpenConns(1)
_, err = rawDatabase.Exec("PRAGMA wal_checkpoint(TRUNCATE)")
require.NoError(t, err)
require.NoError(t, raw.Close())
require.NoError(t, rawDatabase.Close())
// Corrupt data pages in the latter portion of the file (skip the first
// pages which hold the schema).
data := requireCoreReadBytes(t, dbPath)
data := requireCoreReadBytes(t, databasePath)
garbage := make([]byte, 4096)
for i := range garbage {
garbage[i] = 0xFF
@@ -119,17 +119,17 @@ func TestCoverage_GetAll_Bad_RowsError(t *testing.T) {
}
copy(data[offset:offset+len(garbage)], garbage)
copy(data[offset+len(garbage):offset+(len(garbage)*2)], garbage)
requireCoreWriteBytes(t, dbPath, data)
requireCoreWriteBytes(t, databasePath, data)
// Remove WAL/SHM so the reopened connection reads from the main file.
_ = testFilesystem().Delete(dbPath + "-wal")
_ = testFilesystem().Delete(dbPath + "-shm")
_ = testFilesystem().Delete(databasePath + "-wal")
_ = testFilesystem().Delete(databasePath + "-shm")
s2, err := New(dbPath)
reopenedStore, err := New(databasePath)
require.NoError(t, err)
defer s2.Close()
defer reopenedStore.Close()
_, err = s2.GetAll("g")
_, err = reopenedStore.GetAll("g")
require.Error(t, err, "GetAll should fail on corrupted database pages")
assert.Contains(t, err.Error(), "store.All: rows")
}
@@ -140,29 +140,29 @@ func TestCoverage_GetAll_Bad_RowsError(t *testing.T) {
func TestCoverage_Render_Bad_ScanError(t *testing.T) {
// Same NULL-key technique as TestCoverage_GetAll_Bad_ScanError.
s, err := New(":memory:")
storeInstance, err := New(":memory:")
require.NoError(t, err)
defer s.Close()
defer storeInstance.Close()
require.NoError(t, s.Set("g", "good", "value"))
require.NoError(t, storeInstance.Set("g", "good", "value"))
_, err = s.database.Exec("ALTER TABLE entries RENAME TO entries_backup")
_, err = storeInstance.database.Exec("ALTER TABLE entries RENAME TO entries_backup")
require.NoError(t, err)
_, err = s.database.Exec(`CREATE TABLE entries (
_, err = storeInstance.database.Exec(`CREATE TABLE entries (
group_name TEXT,
entry_key TEXT,
entry_value TEXT,
expires_at INTEGER
)`)
require.NoError(t, err)
_, err = s.database.Exec("INSERT INTO entries SELECT * FROM entries_backup")
_, err = storeInstance.database.Exec("INSERT INTO entries SELECT * FROM entries_backup")
require.NoError(t, err)
_, err = s.database.Exec("INSERT INTO entries (group_name, entry_key, entry_value) VALUES ('g', NULL, 'null-key-val')")
_, err = storeInstance.database.Exec("INSERT INTO entries (group_name, entry_key, entry_value) VALUES ('g', NULL, 'null-key-val')")
require.NoError(t, err)
_, err = s.database.Exec("DROP TABLE entries_backup")
_, err = storeInstance.database.Exec("DROP TABLE entries_backup")
require.NoError(t, err)
_, err = s.Render("{{ .good }}", "g")
_, err = storeInstance.Render("{{ .good }}", "g")
require.Error(t, err, "Render should fail when a row contains a NULL key")
assert.Contains(t, err.Error(), "store.All: scan")
}
@@ -173,27 +173,27 @@ func TestCoverage_Render_Bad_ScanError(t *testing.T) {
func TestCoverage_Render_Bad_RowsError(t *testing.T) {
// Same corruption technique as TestCoverage_GetAll_Bad_RowsError.
dbPath := testPath(t, "corrupt-render.db")
databasePath := testPath(t, "corrupt-render.db")
s, err := New(dbPath)
storeInstance, err := New(databasePath)
require.NoError(t, err)
const rows = 5000
for i := range rows {
require.NoError(t, s.Set("g",
require.NoError(t, storeInstance.Set("g",
core.Sprintf("key-%06d", i),
core.Sprintf("value-with-padding-%06d-xxxxxxxxxxxxxxxxxxxxxxxx", i)))
}
s.Close()
storeInstance.Close()
raw, err := sql.Open("sqlite", dbPath)
rawDatabase, err := sql.Open("sqlite", databasePath)
require.NoError(t, err)
raw.SetMaxOpenConns(1)
_, err = raw.Exec("PRAGMA wal_checkpoint(TRUNCATE)")
rawDatabase.SetMaxOpenConns(1)
_, err = rawDatabase.Exec("PRAGMA wal_checkpoint(TRUNCATE)")
require.NoError(t, err)
require.NoError(t, raw.Close())
require.NoError(t, rawDatabase.Close())
data := requireCoreReadBytes(t, dbPath)
data := requireCoreReadBytes(t, databasePath)
garbage := make([]byte, 4096)
for i := range garbage {
garbage[i] = 0xFF
@@ -206,16 +206,16 @@ func TestCoverage_Render_Bad_RowsError(t *testing.T) {
}
copy(data[offset:offset+len(garbage)], garbage)
copy(data[offset+len(garbage):offset+(len(garbage)*2)], garbage)
requireCoreWriteBytes(t, dbPath, data)
requireCoreWriteBytes(t, databasePath, data)
_ = testFilesystem().Delete(dbPath + "-wal")
_ = testFilesystem().Delete(dbPath + "-shm")
_ = testFilesystem().Delete(databasePath + "-wal")
_ = testFilesystem().Delete(databasePath + "-shm")
s2, err := New(dbPath)
reopenedStore, err := New(databasePath)
require.NoError(t, err)
defer s2.Close()
defer reopenedStore.Close()
_, err = s2.Render("{{ . }}", "g")
_, err = reopenedStore.Render("{{ . }}", "g")
require.Error(t, err, "Render should fail on corrupted database pages")
assert.Contains(t, err.Error(), "store.All: rows")
}
@@ -227,27 +227,27 @@ func TestCoverage_Render_Bad_RowsError(t *testing.T) {
func TestCoverage_GroupsSeq_Bad_ScanError(t *testing.T) {
// Trigger a scan error by inserting a row with a NULL group name. The
// production code scans into a plain string, which cannot represent NULL.
s, err := New(":memory:")
storeInstance, err := New(":memory:")
require.NoError(t, err)
defer s.Close()
defer storeInstance.Close()
_, err = s.database.Exec("ALTER TABLE entries RENAME TO entries_backup")
_, err = storeInstance.database.Exec("ALTER TABLE entries RENAME TO entries_backup")
require.NoError(t, err)
_, err = s.database.Exec(`CREATE TABLE entries (
_, err = storeInstance.database.Exec(`CREATE TABLE entries (
group_name TEXT,
entry_key TEXT,
entry_value TEXT,
expires_at INTEGER
)`)
require.NoError(t, err)
_, err = s.database.Exec("INSERT INTO entries SELECT * FROM entries_backup")
_, err = storeInstance.database.Exec("INSERT INTO entries SELECT * FROM entries_backup")
require.NoError(t, err)
_, err = s.database.Exec("INSERT INTO entries (group_name, entry_key, entry_value) VALUES (NULL, 'k', 'v')")
_, err = storeInstance.database.Exec("INSERT INTO entries (group_name, entry_key, entry_value) VALUES (NULL, 'k', 'v')")
require.NoError(t, err)
_, err = s.database.Exec("DROP TABLE entries_backup")
_, err = storeInstance.database.Exec("DROP TABLE entries_backup")
require.NoError(t, err)
for groupName, iterationErr := range s.GroupsSeq("") {
for groupName, iterationErr := range storeInstance.GroupsSeq("") {
require.Error(t, iterationErr)
assert.Empty(t, groupName)
break

View file

@@ -144,7 +144,7 @@ The only permitted runtime dependency is `modernc.org/sqlite`. Test-only depende
## Adding a New Method
1. Implement the method on `*Store` in `store.go` (or `scope.go` if it is namespace-scoped).
2. If it is a mutating operation, call `s.notify(Event{...})` after the successful database write.
2. If it is a mutating operation, call `storeInstance.notify(Event{...})` after the successful database write.
3. Add a corresponding delegation method to `ScopedStore` in `scope.go` that prefixes the group.
4. Write tests covering the happy path, error conditions, and closed-store behaviour.
5. Update quota checks in `checkQuota` if the operation affects key or group counts.

View file

@@ -88,7 +88,7 @@ func main() {
return
}
// Watch the config group via a buffered channel.
// Watch "config" changes and print each event as it arrives.
watcher := storeInstance.Watch("config", "*")
defer storeInstance.Unwatch(watcher)
go func() {

View file

@@ -68,8 +68,8 @@ type changeCallbackRegistration struct {
callback func(Event)
}
// Each watcher keeps 16 pending events before non-blocking sends start
// dropping new ones.
// Watch("config", "*") can hold 16 pending events before non-blocking sends
// start dropping new ones.
const watcherEventBufferCapacity = 16
// Usage example: `watcher := storeInstance.Watch("*", "*")`

View file

@@ -355,8 +355,8 @@ func (storeInstance *Store) PurgeExpired() (int64, error) {
return removedRows, nil
}
// startBackgroundPurge(purgeContext) keeps PurgeExpired running every 60
// seconds until Close cancels the context.
// New(":memory:") starts a background goroutine that calls PurgeExpired every
// 60 seconds until Close stops the store.
func (storeInstance *Store) startBackgroundPurge(purgeContext context.Context) {
storeInstance.purgeWaitGroup.Go(func() {
ticker := time.NewTicker(storeInstance.purgeInterval)
@@ -367,10 +367,8 @@ func (storeInstance *Store) startBackgroundPurge(purgeContext context.Context) {
return
case <-ticker.C:
if _, err := storeInstance.PurgeExpired(); err != nil {
// We can't return the error as we are in a background goroutine,
// but we should at least prevent it from being completely silent
// in a real app (e.g. by logging it). For this module, we keep it
// running to try again on the next tick.
// For example, a logger could record the failure here. The loop
// keeps running so the next tick can retry.
}
}
}