go-store/bench_test.go
Snider 4399f8b6d2 bench: add supplemental benchmarks for GetAll scaling, parallel, TTL, Render
Add bench_test.go with benchmarks not covered by store_test.go:
- BenchmarkGetAll_VaryingSize: 4 sub-benchmarks (10/100/1K/10K keys)
  to measure allocation scaling
- BenchmarkSetGet_Parallel: b.RunParallel for throughput under contention
- BenchmarkCount_10K: COUNT(*) on large groups
- BenchmarkDelete: delete throughput
- BenchmarkSetWithTTL: TTL write overhead vs plain Set
- BenchmarkRender: template rendering with 50 keys
All benchmarks use b.ReportAllocs().

Co-Authored-By: Virgil <virgil@lethean.io>
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-20 04:53:52 +00:00

132 lines
2.3 KiB
Go

// SPDX-License-Identifier: EUPL-1.2
package store
import (
	"fmt"
	"testing"
	"time"
)
// Supplemental benchmarks beyond the core Set/Get/GetAll/FileBacked benchmarks
// in store_test.go. These add: varying group sizes, parallel throughput,
// Count on large groups, Delete throughput, TTL write overhead, and Render.
// BenchmarkGetAll_VaryingSize measures how GetAll's cost and allocations
// scale with the number of keys in a group (10 through 10,000).
func BenchmarkGetAll_VaryingSize(b *testing.B) {
	for _, keyCount := range []int{10, 100, 1_000, 10_000} {
		b.Run(fmt.Sprintf("size=%d", keyCount), func(b *testing.B) {
			s, err := New(":memory:")
			if err != nil {
				b.Fatal(err)
			}
			defer s.Close()
			// Seed the group before the timer starts.
			for k := 0; k < keyCount; k++ {
				_ = s.Set("bench", fmt.Sprintf("key-%d", k), "value")
			}
			b.ReportAllocs()
			b.ResetTimer()
			for n := 0; n < b.N; n++ {
				_, _ = s.GetAll("bench")
			}
		})
	}
}
// BenchmarkSetGet_Parallel measures Set+Get throughput when multiple
// goroutines drive the store concurrently via b.RunParallel.
func BenchmarkSetGet_Parallel(b *testing.B) {
	s, err := New(":memory:")
	if err != nil {
		b.Fatal(err)
	}
	defer s.Close()
	b.ReportAllocs()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		// Each worker walks its own key sequence from zero; sequences
		// overlap across workers, so the same rows see contention.
		seq := 0
		for pb.Next() {
			k := fmt.Sprintf("key-%d", seq)
			_ = s.Set("parallel", k, "value")
			_, _ = s.Get("parallel", k)
			seq++
		}
	})
}
// BenchmarkCount_10K measures Count (a COUNT(*) query) against a group
// holding 10,000 keys.
func BenchmarkCount_10K(b *testing.B) {
	s, err := New(":memory:")
	if err != nil {
		b.Fatal(err)
	}
	defer s.Close()
	// Populate 10,000 rows before the timer starts.
	for k := 0; k < 10_000; k++ {
		_ = s.Set("bench", fmt.Sprintf("key-%d", k), "value")
	}
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_, _ = s.Count("bench")
	}
}
// BenchmarkDelete measures delete throughput. Exactly b.N keys are
// inserted up front so every timed iteration removes a live row.
func BenchmarkDelete(b *testing.B) {
	s, err := New(":memory:")
	if err != nil {
		b.Fatal(err)
	}
	defer s.Close()
	for n := 0; n < b.N; n++ {
		_ = s.Set("bench", fmt.Sprintf("key-%d", n), "value")
	}
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_ = s.Delete("bench", fmt.Sprintf("key-%d", n))
	}
}
// BenchmarkSetWithTTL measures the write overhead of attaching an expiry,
// for comparison against the plain Set benchmark in store_test.go.
func BenchmarkSetWithTTL(b *testing.B) {
	s, err := New(":memory:")
	if err != nil {
		b.Fatal(err)
	}
	defer s.Close()
	b.ReportAllocs()
	b.ResetTimer()
	for i := range b.N {
		// 60*time.Second replaces the raw nanosecond literal
		// 60_000_000_000, which was easy to misread by a digit.
		_ = s.SetWithTTL("bench", fmt.Sprintf("key-%d", i), "value", 60*time.Second)
	}
}
// BenchmarkRender measures template rendering against a group of 50 keys,
// with the template referencing the first, middle, and last key.
func BenchmarkRender(b *testing.B) {
	s, err := New(":memory:")
	if err != nil {
		b.Fatal(err)
	}
	defer s.Close()
	for k := 0; k < 50; k++ {
		_ = s.Set("bench", fmt.Sprintf("key%d", k), fmt.Sprintf("val%d", k))
	}
	const tmpl = `{{ .key0 }} {{ .key25 }} {{ .key49 }}`
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_, _ = s.Render(tmpl, "bench")
	}
}