refactor: delete pkg/io, slim pkg/log to go-io/go-log re-exports
- Delete pkg/io/ entirely (all consumers now use go-io)
- Delete pkg/log/{errors.go,log.go} duplicates (now in go-log)
- Rewrite pkg/log/log.go as thin re-export layer over go-log
- Keep pkg/log/{service.go,rotation.go} (framework/go-io deps)
- Swap internal pkg/ imports to go-io/go-log across ~30 files
Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
parent
ddc8582d7f
commit
ef5c83c04e
45 changed files with 183 additions and 9277 deletions
35
go.mod
35
go.mod
|
|
@ -7,30 +7,29 @@ require (
|
|||
forge.lthn.ai/core/cli v0.1.0
|
||||
forge.lthn.ai/core/go-crypt v0.1.0
|
||||
forge.lthn.ai/core/go-devops v0.1.0
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0
|
||||
forge.lthn.ai/core/go-io v0.0.1
|
||||
forge.lthn.ai/core/go-log v0.0.1
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.3
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/spf13/viper v1.21.0
|
||||
github.com/stretchr/testify v1.11.1
|
||||
golang.org/x/crypto v0.48.0
|
||||
golang.org/x/text v0.34.0
|
||||
google.golang.org/grpc v1.79.1
|
||||
google.golang.org/protobuf v1.36.11
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
modernc.org/sqlite v1.46.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/ProtonMail/go-crypto v1.3.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect
|
||||
github.com/aws/smithy-go v1.24.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19 // indirect
|
||||
github.com/aws/smithy-go v1.24.2 // indirect
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
||||
github.com/charmbracelet/bubbletea v1.3.10 // indirect
|
||||
github.com/charmbracelet/colorprofile v0.4.2 // indirect
|
||||
|
|
@ -60,7 +59,6 @@ require (
|
|||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||
github.com/sagikazarmark/locafero v0.12.0 // indirect
|
||||
github.com/spf13/afero v1.15.0 // indirect
|
||||
github.com/spf13/cast v1.10.0 // indirect
|
||||
|
|
@ -68,16 +66,11 @@ require (
|
|||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go.opentelemetry.io/otel v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa // indirect
|
||||
golang.org/x/net v0.50.0 // indirect
|
||||
golang.org/x/sys v0.41.0 // indirect
|
||||
golang.org/x/term v0.40.0 // indirect
|
||||
gonum.org/v1/gonum v0.17.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||
golang.org/x/text v0.34.0 // indirect
|
||||
modernc.org/libc v1.68.0 // indirect
|
||||
modernc.org/mathutil v1.7.1 // indirect
|
||||
modernc.org/memory v1.11.0 // indirect
|
||||
|
|
|
|||
81
go.sum
81
go.sum
|
|
@ -6,34 +6,36 @@ forge.lthn.ai/core/go-crypt v0.1.0 h1:92gwdQi7iAwktpvZhL/8Cu+QS6xKCtGP4FJfyInPGn
|
|||
forge.lthn.ai/core/go-crypt v0.1.0/go.mod h1:zVAgx6ZiGtC+dbX4R/VKvEPqsEqjyuLl4gQZH9SXBUw=
|
||||
forge.lthn.ai/core/go-devops v0.1.0 h1:xT3J//gilwVz15ju63xhg/Lz700cOYjqQkRWhTZDHLk=
|
||||
forge.lthn.ai/core/go-devops v0.1.0/go.mod h1:V5/YaRsrDsYlSnCCJXKX7h1zSbaGyRdRQApPF5XwGAo=
|
||||
forge.lthn.ai/core/go-io v0.0.1 h1:N/GCl6Asusfr4gs53JZixJVtqcnerQ6GcxSN8F8iJXY=
|
||||
forge.lthn.ai/core/go-io v0.0.1/go.mod h1:l+gG/G5TMIOTG8G7y0dg4fh1a7Suy8wCYVwsz4duV7M=
|
||||
forge.lthn.ai/core/go-log v0.0.1 h1:x/E6EfF9vixzqiLHQOl2KT25HyBcMc9qiBkomqVlpPg=
|
||||
forge.lthn.ai/core/go-log v0.0.1/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw=
|
||||
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
|
||||
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17/go.mod h1:CO+WeGmIdj/MlPel2KwID9Gt7CNq4M65HUfBW97liM0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 h1:Z5EiPIzXKewUQK0QTMkutjiaPVeVYXX7KIqhXu/0fXs=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8/go.mod h1:FsTpJtvC4U1fyDXk7c71XoDv3HlRm8V3NiYLeYLh5YE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 h1:bGeHBsGZx0Dvu/eJC0Lh9adJa3M1xREcndxLNZlve2U=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17/go.mod h1:dcW24lbU0CzHusTE8LLHhRLI42ejmINN8Lcr22bwh/g=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 h1:oeu8VPlOre74lBA/PMhxa5vewaMIMmILM+RraSyB8KA=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo=
|
||||
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
|
||||
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.3 h1:4kQ/fa22KjDt13QCy1+bYADvdgcxpfH18f0zP542kZA=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.3/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6 h1:N4lRUXZpZ1KVEUn6hxtco/1d2lgYhNn1fHkkl8WhlyQ=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6/go.mod h1:lyw7GFp3qENLh7kwzf7iMzAxDn+NzjXEAGjKS2UOKqI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19 h1:/sECfyq2JTifMI2JPyZ4bdRN77zJmr6SrS1eL3augIA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19/go.mod h1:dMf8A5oAqr9/oxOfLkC/c2LU/uMcALP0Rgn2BD5LWn0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19 h1:AWeJMk33GTBf6J20XJe6qZoRSJo0WfUhsMdUKhoODXE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19/go.mod h1:+GWrYoaAsV7/4pNHpwh1kiNLXkKaSoppxQq9lbH8Ejw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20 h1:qi3e/dmpdONhj1RyIZdi6DKKpDXS5Lb8ftr3p7cyHJc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20/go.mod h1:V1K+TeJVD5JOk3D9e5tsX2KUdL7BlB+FV6cBhdobN8c=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6 h1:XAq62tBTJP/85lFD5oqOOe7YYgWxY9LvWq8plyDvDVg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11 h1:BYf7XNsJMzl4mObARUBUib+j2tf0U//JAAtTnYqvqCw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11/go.mod h1:aEUS4WrNk/+FxkBZZa7tVgp4pGH+kFGW40Y8rCPqt5g=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19 h1:X1Tow7suZk9UCJHE1Iw9GMZJJl0dAnKXXP1NaSDHwmw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19/go.mod h1:/rARO8psX+4sfjUQXp5LLifjUt8DuATZ31WptNJTyQA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19 h1:JnQeStZvPHFHeyky/7LbMlyQjUa+jIBj36OlWm0pzIk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19/go.mod h1:HGyasyHvYdFQeJhvDHfH7HXkHh57htcJGKDZ+7z+I24=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4 h1:4ExZyubQ6LQQVuF2Qp9OsfEvsTdAWh5Gfwf6PgIdLdk=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4/go.mod h1:NF3JcMGOiARAss1ld3WGORCw71+4ExDD2cbbdKS5PpA=
|
||||
github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng=
|
||||
github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
|
||||
github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
|
||||
github.com/charmbracelet/colorprofile v0.4.2 h1:BdSNuMjRbotnxHSfxy+PCSa4xAmz7szw70ktAtWRYrY=
|
||||
|
|
@ -63,14 +65,8 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk
|
|||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
|
||||
github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
|
||||
|
|
@ -83,11 +79,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs
|
|||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
|
||||
|
|
@ -136,18 +129,6 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8
|
|||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
|
||||
go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
|
||||
go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
|
||||
go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
|
||||
go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
|
||||
go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
|
||||
go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
|
||||
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
|
||||
|
|
@ -156,8 +137,6 @@ golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa h1:Zt3DZoOFFYkKhDT3v7Lm9FDME
|
|||
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA=
|
||||
golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=
|
||||
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
|
||||
golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60=
|
||||
golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
|
@ -170,14 +149,6 @@ golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
|||
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
||||
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
|
||||
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
|
||||
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
|
||||
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
|
||||
google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY=
|
||||
google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
|
||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
|
|
|||
2
pkg/cache/cache.go
vendored
2
pkg/cache/cache.go
vendored
|
|
@ -8,7 +8,7 @@ import (
|
|||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// DefaultTTL is the default cache expiry time.
|
||||
|
|
|
|||
2
pkg/cache/cache_test.go
vendored
2
pkg/cache/cache_test.go
vendored
|
|
@ -5,7 +5,7 @@ import (
|
|||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/cache"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
func TestCache(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -17,8 +17,9 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
core "forge.lthn.ai/core/go/pkg/framework/core"
|
||||
coreio "forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/spf13/viper"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
|
@ -79,7 +80,7 @@ func New(opts ...Option) (*Config, error) {
|
|||
if c.path == "" {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return nil, core.E("config.New", "failed to determine home directory", err)
|
||||
return nil, coreerr.E("config.New", "failed to determine home directory", err)
|
||||
}
|
||||
c.path = filepath.Join(home, ".core", "config.yaml")
|
||||
}
|
||||
|
|
@ -89,7 +90,7 @@ func New(opts ...Option) (*Config, error) {
|
|||
// Load existing config file if it exists
|
||||
if c.medium.Exists(c.path) {
|
||||
if err := c.LoadFile(c.medium, c.path); err != nil {
|
||||
return nil, core.E("config.New", "failed to load config file", err)
|
||||
return nil, coreerr.E("config.New", "failed to load config file", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -104,7 +105,7 @@ func (c *Config) LoadFile(m coreio.Medium, path string) error {
|
|||
|
||||
content, err := m.Read(path)
|
||||
if err != nil {
|
||||
return core.E("config.LoadFile", "failed to read config file: "+path, err)
|
||||
return coreerr.E("config.LoadFile", "failed to read config file: "+path, err)
|
||||
}
|
||||
|
||||
ext := filepath.Ext(path)
|
||||
|
|
@ -117,7 +118,7 @@ func (c *Config) LoadFile(m coreio.Medium, path string) error {
|
|||
}
|
||||
|
||||
if err := c.v.MergeConfig(strings.NewReader(content)); err != nil {
|
||||
return core.E("config.LoadFile", "failed to parse config file: "+path, err)
|
||||
return coreerr.E("config.LoadFile", "failed to parse config file: "+path, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -135,7 +136,7 @@ func (c *Config) Get(key string, out any) error {
|
|||
}
|
||||
|
||||
if !c.v.IsSet(key) {
|
||||
return core.E("config.Get", fmt.Sprintf("key not found: %s", key), nil)
|
||||
return coreerr.E("config.Get", fmt.Sprintf("key not found: %s", key), nil)
|
||||
}
|
||||
|
||||
return c.v.UnmarshalKey(key, out)
|
||||
|
|
@ -150,7 +151,7 @@ func (c *Config) Set(key string, v any) error {
|
|||
|
||||
// Persist to disk
|
||||
if err := Save(c.medium, c.path, c.v.AllSettings()); err != nil {
|
||||
return core.E("config.Set", "failed to save config", err)
|
||||
return coreerr.E("config.Set", "failed to save config", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -175,13 +176,13 @@ func (c *Config) Path() string {
|
|||
func Load(m coreio.Medium, path string) (map[string]any, error) {
|
||||
content, err := m.Read(path)
|
||||
if err != nil {
|
||||
return nil, core.E("config.Load", "failed to read config file: "+path, err)
|
||||
return nil, coreerr.E("config.Load", "failed to read config file: "+path, err)
|
||||
}
|
||||
|
||||
v := viper.New()
|
||||
v.SetConfigType("yaml")
|
||||
if err := v.ReadConfig(strings.NewReader(content)); err != nil {
|
||||
return nil, core.E("config.Load", "failed to parse config file: "+path, err)
|
||||
return nil, coreerr.E("config.Load", "failed to parse config file: "+path, err)
|
||||
}
|
||||
|
||||
return v.AllSettings(), nil
|
||||
|
|
@ -192,16 +193,16 @@ func Load(m coreio.Medium, path string) (map[string]any, error) {
|
|||
func Save(m coreio.Medium, path string, data map[string]any) error {
|
||||
out, err := yaml.Marshal(data)
|
||||
if err != nil {
|
||||
return core.E("config.Save", "failed to marshal config", err)
|
||||
return coreerr.E("config.Save", "failed to marshal config", err)
|
||||
}
|
||||
|
||||
dir := filepath.Dir(path)
|
||||
if err := m.EnsureDir(dir); err != nil {
|
||||
return core.E("config.Save", "failed to create config directory: "+dir, err)
|
||||
return coreerr.E("config.Save", "failed to create config directory: "+dir, err)
|
||||
}
|
||||
|
||||
if err := m.Write(path, string(out)); err != nil {
|
||||
return core.E("config.Save", "failed to write config file: "+path, err)
|
||||
return coreerr.E("config.Save", "failed to write config file: "+path, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ import (
|
|||
"os"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -3,8 +3,9 @@ package config
|
|||
import (
|
||||
"context"
|
||||
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
core "forge.lthn.ai/core/go/pkg/framework/core"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// Service wraps Config as a framework service with lifecycle support.
|
||||
|
|
@ -54,7 +55,7 @@ func (s *Service) OnStartup(_ context.Context) error {
|
|||
// Get retrieves a configuration value by key.
|
||||
func (s *Service) Get(key string, out any) error {
|
||||
if s.config == nil {
|
||||
return core.E("config.Service.Get", "config not loaded", nil)
|
||||
return coreerr.E("config.Service.Get", "config not loaded", nil)
|
||||
}
|
||||
return s.config.Get(key, out)
|
||||
}
|
||||
|
|
@ -62,7 +63,7 @@ func (s *Service) Get(key string, out any) error {
|
|||
// Set stores a configuration value by key.
|
||||
func (s *Service) Set(key string, v any) error {
|
||||
if s.config == nil {
|
||||
return core.E("config.Service.Set", "config not loaded", nil)
|
||||
return coreerr.E("config.Service.Set", "config not loaded", nil)
|
||||
}
|
||||
return s.config.Set(key, v)
|
||||
}
|
||||
|
|
@ -70,7 +71,7 @@ func (s *Service) Set(key string, v any) error {
|
|||
// LoadFile merges a configuration file into the central configuration.
|
||||
func (s *Service) LoadFile(m io.Medium, path string) error {
|
||||
if s.config == nil {
|
||||
return core.E("config.Service.LoadFile", "config not loaded", nil)
|
||||
return coreerr.E("config.Service.LoadFile", "config not loaded", nil)
|
||||
}
|
||||
return s.config.LoadFile(m, path)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,34 +0,0 @@
|
|||
package io
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkMockMedium_Write(b *testing.B) {
|
||||
m := NewMockMedium()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = m.Write("test.txt", "some content")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMockMedium_Read(b *testing.B) {
|
||||
m := NewMockMedium()
|
||||
_ = m.Write("test.txt", "some content")
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _ = m.Read("test.txt")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMockMedium_List(b *testing.B) {
|
||||
m := NewMockMedium()
|
||||
_ = m.EnsureDir("dir")
|
||||
for i := 0; i < 100; i++ {
|
||||
_ = m.Write("dir/file"+string(rune(i))+".txt", "content")
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _ = m.List("dir")
|
||||
}
|
||||
}
|
||||
|
|
@ -1,260 +0,0 @@
|
|||
package io
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// --- MockMedium Tests ---
|
||||
|
||||
func TestNewMockMedium_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
assert.NotNil(t, m)
|
||||
assert.NotNil(t, m.Files)
|
||||
assert.NotNil(t, m.Dirs)
|
||||
assert.Empty(t, m.Files)
|
||||
assert.Empty(t, m.Dirs)
|
||||
}
|
||||
|
||||
func TestMockMedium_Read_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Files["test.txt"] = "hello world"
|
||||
content, err := m.Read("test.txt")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "hello world", content)
|
||||
}
|
||||
|
||||
func TestMockMedium_Read_Bad(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
_, err := m.Read("nonexistent.txt")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestMockMedium_Write_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
err := m.Write("test.txt", "content")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "content", m.Files["test.txt"])
|
||||
|
||||
// Overwrite existing file
|
||||
err = m.Write("test.txt", "new content")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "new content", m.Files["test.txt"])
|
||||
}
|
||||
|
||||
func TestMockMedium_EnsureDir_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
err := m.EnsureDir("/path/to/dir")
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, m.Dirs["/path/to/dir"])
|
||||
}
|
||||
|
||||
func TestMockMedium_IsFile_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Files["exists.txt"] = "content"
|
||||
|
||||
assert.True(t, m.IsFile("exists.txt"))
|
||||
assert.False(t, m.IsFile("nonexistent.txt"))
|
||||
}
|
||||
|
||||
func TestMockMedium_FileGet_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Files["test.txt"] = "content"
|
||||
content, err := m.FileGet("test.txt")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "content", content)
|
||||
}
|
||||
|
||||
func TestMockMedium_FileSet_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
err := m.FileSet("test.txt", "content")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "content", m.Files["test.txt"])
|
||||
}
|
||||
|
||||
func TestMockMedium_Delete_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Files["test.txt"] = "content"
|
||||
|
||||
err := m.Delete("test.txt")
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, m.IsFile("test.txt"))
|
||||
}
|
||||
|
||||
func TestMockMedium_Delete_Bad_NotFound(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
err := m.Delete("nonexistent.txt")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestMockMedium_Delete_Bad_DirNotEmpty(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Dirs["mydir"] = true
|
||||
m.Files["mydir/file.txt"] = "content"
|
||||
|
||||
err := m.Delete("mydir")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestMockMedium_DeleteAll_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Dirs["mydir"] = true
|
||||
m.Dirs["mydir/subdir"] = true
|
||||
m.Files["mydir/file.txt"] = "content"
|
||||
m.Files["mydir/subdir/nested.txt"] = "nested"
|
||||
|
||||
err := m.DeleteAll("mydir")
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, m.Dirs)
|
||||
assert.Empty(t, m.Files)
|
||||
}
|
||||
|
||||
func TestMockMedium_Rename_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Files["old.txt"] = "content"
|
||||
|
||||
err := m.Rename("old.txt", "new.txt")
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, m.IsFile("old.txt"))
|
||||
assert.True(t, m.IsFile("new.txt"))
|
||||
assert.Equal(t, "content", m.Files["new.txt"])
|
||||
}
|
||||
|
||||
func TestMockMedium_Rename_Good_Dir(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Dirs["olddir"] = true
|
||||
m.Files["olddir/file.txt"] = "content"
|
||||
|
||||
err := m.Rename("olddir", "newdir")
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, m.Dirs["olddir"])
|
||||
assert.True(t, m.Dirs["newdir"])
|
||||
assert.Equal(t, "content", m.Files["newdir/file.txt"])
|
||||
}
|
||||
|
||||
func TestMockMedium_List_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Dirs["mydir"] = true
|
||||
m.Files["mydir/file1.txt"] = "content1"
|
||||
m.Files["mydir/file2.txt"] = "content2"
|
||||
m.Dirs["mydir/subdir"] = true
|
||||
|
||||
entries, err := m.List("mydir")
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, entries, 3)
|
||||
|
||||
names := make(map[string]bool)
|
||||
for _, e := range entries {
|
||||
names[e.Name()] = true
|
||||
}
|
||||
assert.True(t, names["file1.txt"])
|
||||
assert.True(t, names["file2.txt"])
|
||||
assert.True(t, names["subdir"])
|
||||
}
|
||||
|
||||
func TestMockMedium_Stat_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Files["test.txt"] = "hello world"
|
||||
|
||||
info, err := m.Stat("test.txt")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "test.txt", info.Name())
|
||||
assert.Equal(t, int64(11), info.Size())
|
||||
assert.False(t, info.IsDir())
|
||||
}
|
||||
|
||||
func TestMockMedium_Stat_Good_Dir(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Dirs["mydir"] = true
|
||||
|
||||
info, err := m.Stat("mydir")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "mydir", info.Name())
|
||||
assert.True(t, info.IsDir())
|
||||
}
|
||||
|
||||
func TestMockMedium_Exists_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Files["file.txt"] = "content"
|
||||
m.Dirs["mydir"] = true
|
||||
|
||||
assert.True(t, m.Exists("file.txt"))
|
||||
assert.True(t, m.Exists("mydir"))
|
||||
assert.False(t, m.Exists("nonexistent"))
|
||||
}
|
||||
|
||||
func TestMockMedium_IsDir_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Files["file.txt"] = "content"
|
||||
m.Dirs["mydir"] = true
|
||||
|
||||
assert.False(t, m.IsDir("file.txt"))
|
||||
assert.True(t, m.IsDir("mydir"))
|
||||
assert.False(t, m.IsDir("nonexistent"))
|
||||
}
|
||||
|
||||
// --- Wrapper Function Tests ---
|
||||
|
||||
func TestRead_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Files["test.txt"] = "hello"
|
||||
content, err := Read(m, "test.txt")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "hello", content)
|
||||
}
|
||||
|
||||
func TestWrite_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
err := Write(m, "test.txt", "hello")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "hello", m.Files["test.txt"])
|
||||
}
|
||||
|
||||
func TestEnsureDir_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
err := EnsureDir(m, "/my/dir")
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, m.Dirs["/my/dir"])
|
||||
}
|
||||
|
||||
func TestIsFile_Good(t *testing.T) {
|
||||
m := NewMockMedium()
|
||||
m.Files["exists.txt"] = "content"
|
||||
|
||||
assert.True(t, IsFile(m, "exists.txt"))
|
||||
assert.False(t, IsFile(m, "nonexistent.txt"))
|
||||
}
|
||||
|
||||
func TestCopy_Good(t *testing.T) {
|
||||
source := NewMockMedium()
|
||||
dest := NewMockMedium()
|
||||
source.Files["test.txt"] = "hello"
|
||||
err := Copy(source, "test.txt", dest, "test.txt")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "hello", dest.Files["test.txt"])
|
||||
|
||||
// Copy to different path
|
||||
source.Files["original.txt"] = "content"
|
||||
err = Copy(source, "original.txt", dest, "copied.txt")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "content", dest.Files["copied.txt"])
|
||||
}
|
||||
|
||||
func TestCopy_Bad(t *testing.T) {
|
||||
source := NewMockMedium()
|
||||
dest := NewMockMedium()
|
||||
err := Copy(source, "nonexistent.txt", dest, "dest.txt")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// --- Local Global Tests ---
|
||||
|
||||
func TestLocalGlobal_Good(t *testing.T) {
|
||||
// io.Local should be initialized by init()
|
||||
assert.NotNil(t, Local, "io.Local should be initialized")
|
||||
|
||||
// Should be able to use it as a Medium
|
||||
var m = Local
|
||||
assert.NotNil(t, m)
|
||||
}
|
||||
|
|
@ -1,576 +0,0 @@
|
|||
// Package datanode provides an in-memory io.Medium backed by Borg's DataNode.
|
||||
//
|
||||
// DataNode is an in-memory fs.FS that serializes to tar. Wrapping it as a
|
||||
// Medium lets any code that works with io.Medium transparently operate on
|
||||
// an in-memory filesystem that can be snapshotted, shipped as a crash report,
|
||||
// or wrapped in a TIM container for runc execution.
|
||||
package datanode
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
goio "io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
coreerr "forge.lthn.ai/core/go/pkg/framework/core"
|
||||
"forge.lthn.ai/Snider/Borg/pkg/datanode"
|
||||
)
|
||||
|
||||
// Medium is an in-memory storage backend backed by a Borg DataNode.
|
||||
// All paths are relative (no leading slash). Thread-safe via RWMutex.
|
||||
type Medium struct {
|
||||
dn *datanode.DataNode
|
||||
dirs map[string]bool // explicit directory tracking
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// New creates a new empty DataNode Medium.
|
||||
func New() *Medium {
|
||||
return &Medium{
|
||||
dn: datanode.New(),
|
||||
dirs: make(map[string]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// FromTar creates a Medium from a tarball, restoring all files.
|
||||
func FromTar(data []byte) (*Medium, error) {
|
||||
dn, err := datanode.FromTar(data)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("datanode.FromTar", "failed to restore", err)
|
||||
}
|
||||
return &Medium{
|
||||
dn: dn,
|
||||
dirs: make(map[string]bool),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Snapshot serializes the entire filesystem to a tarball.
|
||||
// Use this for crash reports, workspace packaging, or TIM creation.
|
||||
func (m *Medium) Snapshot() ([]byte, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
data, err := m.dn.ToTar()
|
||||
if err != nil {
|
||||
return nil, coreerr.E("datanode.Snapshot", "tar failed", err)
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Restore replaces the filesystem contents from a tarball.
|
||||
func (m *Medium) Restore(data []byte) error {
|
||||
dn, err := datanode.FromTar(data)
|
||||
if err != nil {
|
||||
return coreerr.E("datanode.Restore", "tar failed", err)
|
||||
}
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.dn = dn
|
||||
m.dirs = make(map[string]bool)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DataNode returns the underlying Borg DataNode.
|
||||
// Use this to wrap the filesystem in a TIM container.
|
||||
func (m *Medium) DataNode() *datanode.DataNode {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
return m.dn
|
||||
}
|
||||
|
||||
// clean normalizes a path: strips leading slash, cleans traversal.
|
||||
func clean(p string) string {
|
||||
p = strings.TrimPrefix(p, "/")
|
||||
p = path.Clean(p)
|
||||
if p == "." {
|
||||
return ""
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// --- io.Medium interface ---
|
||||
|
||||
func (m *Medium) Read(p string) (string, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
p = clean(p)
|
||||
f, err := m.dn.Open(p)
|
||||
if err != nil {
|
||||
return "", coreerr.E("datanode.Read", "not found: "+p, os.ErrNotExist)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
info, err := f.Stat()
|
||||
if err != nil {
|
||||
return "", coreerr.E("datanode.Read", "stat failed: "+p, err)
|
||||
}
|
||||
if info.IsDir() {
|
||||
return "", coreerr.E("datanode.Read", "is a directory: "+p, os.ErrInvalid)
|
||||
}
|
||||
|
||||
data, err := goio.ReadAll(f)
|
||||
if err != nil {
|
||||
return "", coreerr.E("datanode.Read", "read failed: "+p, err)
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
||||
|
||||
func (m *Medium) Write(p, content string) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
p = clean(p)
|
||||
if p == "" {
|
||||
return coreerr.E("datanode.Write", "empty path", os.ErrInvalid)
|
||||
}
|
||||
m.dn.AddData(p, []byte(content))
|
||||
|
||||
// ensure parent dirs are tracked
|
||||
m.ensureDirsLocked(path.Dir(p))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Medium) EnsureDir(p string) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
p = clean(p)
|
||||
if p == "" {
|
||||
return nil
|
||||
}
|
||||
m.ensureDirsLocked(p)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureDirsLocked marks a directory and all ancestors as existing.
|
||||
// Caller must hold m.mu.
|
||||
func (m *Medium) ensureDirsLocked(p string) {
|
||||
for p != "" && p != "." {
|
||||
m.dirs[p] = true
|
||||
p = path.Dir(p)
|
||||
if p == "." {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Medium) IsFile(p string) bool {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
p = clean(p)
|
||||
info, err := m.dn.Stat(p)
|
||||
return err == nil && !info.IsDir()
|
||||
}
|
||||
|
||||
func (m *Medium) FileGet(p string) (string, error) {
|
||||
return m.Read(p)
|
||||
}
|
||||
|
||||
func (m *Medium) FileSet(p, content string) error {
|
||||
return m.Write(p, content)
|
||||
}
|
||||
|
||||
func (m *Medium) Delete(p string) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
p = clean(p)
|
||||
if p == "" {
|
||||
return coreerr.E("datanode.Delete", "cannot delete root", os.ErrPermission)
|
||||
}
|
||||
|
||||
// Check if it's a file in the DataNode
|
||||
info, err := m.dn.Stat(p)
|
||||
if err != nil {
|
||||
// Check explicit dirs
|
||||
if m.dirs[p] {
|
||||
// Check if dir is empty
|
||||
if m.hasPrefixLocked(p + "/") {
|
||||
return coreerr.E("datanode.Delete", "directory not empty: "+p, os.ErrExist)
|
||||
}
|
||||
delete(m.dirs, p)
|
||||
return nil
|
||||
}
|
||||
return coreerr.E("datanode.Delete", "not found: "+p, os.ErrNotExist)
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
if m.hasPrefixLocked(p + "/") {
|
||||
return coreerr.E("datanode.Delete", "directory not empty: "+p, os.ErrExist)
|
||||
}
|
||||
delete(m.dirs, p)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove the file by creating a new DataNode without it
|
||||
m.removeFileLocked(p)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Medium) DeleteAll(p string) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
p = clean(p)
|
||||
if p == "" {
|
||||
return coreerr.E("datanode.DeleteAll", "cannot delete root", os.ErrPermission)
|
||||
}
|
||||
|
||||
prefix := p + "/"
|
||||
found := false
|
||||
|
||||
// Check if p itself is a file
|
||||
info, err := m.dn.Stat(p)
|
||||
if err == nil && !info.IsDir() {
|
||||
m.removeFileLocked(p)
|
||||
found = true
|
||||
}
|
||||
|
||||
// Remove all files under prefix
|
||||
entries, _ := m.collectAllLocked()
|
||||
for _, name := range entries {
|
||||
if name == p || strings.HasPrefix(name, prefix) {
|
||||
m.removeFileLocked(name)
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
// Remove explicit dirs under prefix
|
||||
for d := range m.dirs {
|
||||
if d == p || strings.HasPrefix(d, prefix) {
|
||||
delete(m.dirs, d)
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return coreerr.E("datanode.DeleteAll", "not found: "+p, os.ErrNotExist)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Medium) Rename(oldPath, newPath string) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
oldPath = clean(oldPath)
|
||||
newPath = clean(newPath)
|
||||
|
||||
// Check if source is a file
|
||||
info, err := m.dn.Stat(oldPath)
|
||||
if err != nil {
|
||||
return coreerr.E("datanode.Rename", "not found: "+oldPath, os.ErrNotExist)
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
// Read old, write new, delete old
|
||||
f, err := m.dn.Open(oldPath)
|
||||
if err != nil {
|
||||
return coreerr.E("datanode.Rename", "open failed: "+oldPath, err)
|
||||
}
|
||||
data, err := goio.ReadAll(f)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return coreerr.E("datanode.Rename", "read failed: "+oldPath, err)
|
||||
}
|
||||
m.dn.AddData(newPath, data)
|
||||
m.ensureDirsLocked(path.Dir(newPath))
|
||||
m.removeFileLocked(oldPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Directory rename: move all files under oldPath to newPath
|
||||
oldPrefix := oldPath + "/"
|
||||
newPrefix := newPath + "/"
|
||||
|
||||
entries, _ := m.collectAllLocked()
|
||||
for _, name := range entries {
|
||||
if strings.HasPrefix(name, oldPrefix) {
|
||||
newName := newPrefix + strings.TrimPrefix(name, oldPrefix)
|
||||
f, err := m.dn.Open(name)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
data, _ := goio.ReadAll(f)
|
||||
f.Close()
|
||||
m.dn.AddData(newName, data)
|
||||
m.removeFileLocked(name)
|
||||
}
|
||||
}
|
||||
|
||||
// Move explicit dirs
|
||||
dirsToMove := make(map[string]string)
|
||||
for d := range m.dirs {
|
||||
if d == oldPath || strings.HasPrefix(d, oldPrefix) {
|
||||
newD := newPath + strings.TrimPrefix(d, oldPath)
|
||||
dirsToMove[d] = newD
|
||||
}
|
||||
}
|
||||
for old, nw := range dirsToMove {
|
||||
delete(m.dirs, old)
|
||||
m.dirs[nw] = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Medium) List(p string) ([]fs.DirEntry, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
p = clean(p)
|
||||
|
||||
entries, err := m.dn.ReadDir(p)
|
||||
if err != nil {
|
||||
// Check explicit dirs
|
||||
if p == "" || m.dirs[p] {
|
||||
return []fs.DirEntry{}, nil
|
||||
}
|
||||
return nil, coreerr.E("datanode.List", "not found: "+p, os.ErrNotExist)
|
||||
}
|
||||
|
||||
// Also include explicit subdirectories not discovered via files
|
||||
prefix := p
|
||||
if prefix != "" {
|
||||
prefix += "/"
|
||||
}
|
||||
seen := make(map[string]bool)
|
||||
for _, e := range entries {
|
||||
seen[e.Name()] = true
|
||||
}
|
||||
|
||||
for d := range m.dirs {
|
||||
if !strings.HasPrefix(d, prefix) {
|
||||
continue
|
||||
}
|
||||
rest := strings.TrimPrefix(d, prefix)
|
||||
if rest == "" {
|
||||
continue
|
||||
}
|
||||
first := strings.SplitN(rest, "/", 2)[0]
|
||||
if !seen[first] {
|
||||
seen[first] = true
|
||||
entries = append(entries, &dirEntry{name: first})
|
||||
}
|
||||
}
|
||||
|
||||
slices.SortFunc(entries, func(a, b fs.DirEntry) int {
|
||||
return cmp.Compare(a.Name(), b.Name())
|
||||
})
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
func (m *Medium) Stat(p string) (fs.FileInfo, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
p = clean(p)
|
||||
if p == "" {
|
||||
return &fileInfo{name: ".", isDir: true, mode: fs.ModeDir | 0755}, nil
|
||||
}
|
||||
|
||||
info, err := m.dn.Stat(p)
|
||||
if err == nil {
|
||||
return info, nil
|
||||
}
|
||||
|
||||
if m.dirs[p] {
|
||||
return &fileInfo{name: path.Base(p), isDir: true, mode: fs.ModeDir | 0755}, nil
|
||||
}
|
||||
return nil, coreerr.E("datanode.Stat", "not found: "+p, os.ErrNotExist)
|
||||
}
|
||||
|
||||
func (m *Medium) Open(p string) (fs.File, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
p = clean(p)
|
||||
return m.dn.Open(p)
|
||||
}
|
||||
|
||||
func (m *Medium) Create(p string) (goio.WriteCloser, error) {
|
||||
p = clean(p)
|
||||
if p == "" {
|
||||
return nil, coreerr.E("datanode.Create", "empty path", os.ErrInvalid)
|
||||
}
|
||||
return &writeCloser{m: m, path: p}, nil
|
||||
}
|
||||
|
||||
func (m *Medium) Append(p string) (goio.WriteCloser, error) {
|
||||
p = clean(p)
|
||||
if p == "" {
|
||||
return nil, coreerr.E("datanode.Append", "empty path", os.ErrInvalid)
|
||||
}
|
||||
|
||||
// Read existing content
|
||||
var existing []byte
|
||||
m.mu.RLock()
|
||||
f, err := m.dn.Open(p)
|
||||
if err == nil {
|
||||
existing, _ = goio.ReadAll(f)
|
||||
f.Close()
|
||||
}
|
||||
m.mu.RUnlock()
|
||||
|
||||
return &writeCloser{m: m, path: p, buf: existing}, nil
|
||||
}
|
||||
|
||||
func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
p = clean(p)
|
||||
f, err := m.dn.Open(p)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("datanode.ReadStream", "not found: "+p, os.ErrNotExist)
|
||||
}
|
||||
return f.(goio.ReadCloser), nil
|
||||
}
|
||||
|
||||
func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) {
|
||||
return m.Create(p)
|
||||
}
|
||||
|
||||
func (m *Medium) Exists(p string) bool {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
p = clean(p)
|
||||
if p == "" {
|
||||
return true // root always exists
|
||||
}
|
||||
_, err := m.dn.Stat(p)
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
return m.dirs[p]
|
||||
}
|
||||
|
||||
func (m *Medium) IsDir(p string) bool {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
p = clean(p)
|
||||
if p == "" {
|
||||
return true
|
||||
}
|
||||
info, err := m.dn.Stat(p)
|
||||
if err == nil {
|
||||
return info.IsDir()
|
||||
}
|
||||
return m.dirs[p]
|
||||
}
|
||||
|
||||
// --- internal helpers ---
|
||||
|
||||
// hasPrefixLocked checks if any file path starts with prefix. Caller holds lock.
|
||||
func (m *Medium) hasPrefixLocked(prefix string) bool {
|
||||
entries, _ := m.collectAllLocked()
|
||||
for _, name := range entries {
|
||||
if strings.HasPrefix(name, prefix) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for d := range m.dirs {
|
||||
if strings.HasPrefix(d, prefix) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// collectAllLocked returns all file paths in the DataNode. Caller holds lock.
|
||||
func (m *Medium) collectAllLocked() ([]string, error) {
|
||||
var names []string
|
||||
err := fs.WalkDir(m.dn, ".", func(p string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if !d.IsDir() {
|
||||
names = append(names, p)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return names, err
|
||||
}
|
||||
|
||||
// removeFileLocked removes a single file by rebuilding the DataNode.
|
||||
// This is necessary because Borg's DataNode doesn't expose a Remove method.
|
||||
// Caller must hold m.mu write lock.
|
||||
func (m *Medium) removeFileLocked(target string) {
|
||||
entries, _ := m.collectAllLocked()
|
||||
newDN := datanode.New()
|
||||
for _, name := range entries {
|
||||
if name == target {
|
||||
continue
|
||||
}
|
||||
f, err := m.dn.Open(name)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
data, err := goio.ReadAll(f)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
newDN.AddData(name, data)
|
||||
}
|
||||
m.dn = newDN
|
||||
}
|
||||
|
||||
// --- writeCloser buffers writes and flushes to DataNode on Close ---
|
||||
|
||||
type writeCloser struct {
|
||||
m *Medium
|
||||
path string
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func (w *writeCloser) Write(p []byte) (int, error) {
|
||||
w.buf = append(w.buf, p...)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (w *writeCloser) Close() error {
|
||||
w.m.mu.Lock()
|
||||
defer w.m.mu.Unlock()
|
||||
|
||||
w.m.dn.AddData(w.path, w.buf)
|
||||
w.m.ensureDirsLocked(path.Dir(w.path))
|
||||
return nil
|
||||
}
|
||||
|
||||
// --- fs types for explicit directories ---
|
||||
|
||||
type dirEntry struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func (d *dirEntry) Name() string { return d.name }
|
||||
func (d *dirEntry) IsDir() bool { return true }
|
||||
func (d *dirEntry) Type() fs.FileMode { return fs.ModeDir }
|
||||
func (d *dirEntry) Info() (fs.FileInfo, error) {
|
||||
return &fileInfo{name: d.name, isDir: true, mode: fs.ModeDir | 0755}, nil
|
||||
}
|
||||
|
||||
type fileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode fs.FileMode
|
||||
modTime time.Time
|
||||
isDir bool
|
||||
}
|
||||
|
||||
func (fi *fileInfo) Name() string { return fi.name }
|
||||
func (fi *fileInfo) Size() int64 { return fi.size }
|
||||
func (fi *fileInfo) Mode() fs.FileMode { return fi.mode }
|
||||
func (fi *fileInfo) ModTime() time.Time { return fi.modTime }
|
||||
func (fi *fileInfo) IsDir() bool { return fi.isDir }
|
||||
func (fi *fileInfo) Sys() any { return nil }
|
||||
|
|
@ -1,352 +0,0 @@
|
|||
package datanode
|
||||
|
||||
import (
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
coreio "forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Compile-time check: Medium implements io.Medium.
|
||||
var _ coreio.Medium = (*Medium)(nil)
|
||||
|
||||
func TestReadWrite_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
err := m.Write("hello.txt", "world")
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := m.Read("hello.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "world", got)
|
||||
}
|
||||
|
||||
func TestReadWrite_Bad(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
_, err := m.Read("missing.txt")
|
||||
assert.Error(t, err)
|
||||
|
||||
err = m.Write("", "content")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestNestedPaths_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.Write("a/b/c/deep.txt", "deep"))
|
||||
|
||||
got, err := m.Read("a/b/c/deep.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "deep", got)
|
||||
|
||||
assert.True(t, m.IsDir("a"))
|
||||
assert.True(t, m.IsDir("a/b"))
|
||||
assert.True(t, m.IsDir("a/b/c"))
|
||||
}
|
||||
|
||||
func TestLeadingSlash_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.Write("/leading/file.txt", "stripped"))
|
||||
got, err := m.Read("leading/file.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "stripped", got)
|
||||
|
||||
got, err = m.Read("/leading/file.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "stripped", got)
|
||||
}
|
||||
|
||||
func TestIsFile_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.Write("file.go", "package main"))
|
||||
|
||||
assert.True(t, m.IsFile("file.go"))
|
||||
assert.False(t, m.IsFile("missing.go"))
|
||||
assert.False(t, m.IsFile("")) // empty path
|
||||
}
|
||||
|
||||
func TestEnsureDir_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.EnsureDir("foo/bar/baz"))
|
||||
|
||||
assert.True(t, m.IsDir("foo"))
|
||||
assert.True(t, m.IsDir("foo/bar"))
|
||||
assert.True(t, m.IsDir("foo/bar/baz"))
|
||||
assert.True(t, m.Exists("foo/bar/baz"))
|
||||
}
|
||||
|
||||
func TestDelete_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.Write("delete-me.txt", "bye"))
|
||||
assert.True(t, m.Exists("delete-me.txt"))
|
||||
|
||||
require.NoError(t, m.Delete("delete-me.txt"))
|
||||
assert.False(t, m.Exists("delete-me.txt"))
|
||||
}
|
||||
|
||||
func TestDelete_Bad(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
// Delete non-existent
|
||||
assert.Error(t, m.Delete("ghost.txt"))
|
||||
|
||||
// Delete non-empty dir
|
||||
require.NoError(t, m.Write("dir/file.txt", "content"))
|
||||
assert.Error(t, m.Delete("dir"))
|
||||
}
|
||||
|
||||
func TestDeleteAll_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.Write("tree/a.txt", "a"))
|
||||
require.NoError(t, m.Write("tree/sub/b.txt", "b"))
|
||||
require.NoError(t, m.Write("keep.txt", "keep"))
|
||||
|
||||
require.NoError(t, m.DeleteAll("tree"))
|
||||
|
||||
assert.False(t, m.Exists("tree/a.txt"))
|
||||
assert.False(t, m.Exists("tree/sub/b.txt"))
|
||||
assert.True(t, m.Exists("keep.txt"))
|
||||
}
|
||||
|
||||
func TestRename_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.Write("old.txt", "content"))
|
||||
require.NoError(t, m.Rename("old.txt", "new.txt"))
|
||||
|
||||
assert.False(t, m.Exists("old.txt"))
|
||||
got, err := m.Read("new.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "content", got)
|
||||
}
|
||||
|
||||
func TestRenameDir_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.Write("src/a.go", "package a"))
|
||||
require.NoError(t, m.Write("src/sub/b.go", "package b"))
|
||||
|
||||
require.NoError(t, m.Rename("src", "dst"))
|
||||
|
||||
assert.False(t, m.Exists("src/a.go"))
|
||||
|
||||
got, err := m.Read("dst/a.go")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "package a", got)
|
||||
|
||||
got, err = m.Read("dst/sub/b.go")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "package b", got)
|
||||
}
|
||||
|
||||
func TestList_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.Write("root.txt", "r"))
|
||||
require.NoError(t, m.Write("pkg/a.go", "a"))
|
||||
require.NoError(t, m.Write("pkg/b.go", "b"))
|
||||
require.NoError(t, m.Write("pkg/sub/c.go", "c"))
|
||||
|
||||
entries, err := m.List("")
|
||||
require.NoError(t, err)
|
||||
|
||||
names := make([]string, len(entries))
|
||||
for i, e := range entries {
|
||||
names[i] = e.Name()
|
||||
}
|
||||
assert.Contains(t, names, "root.txt")
|
||||
assert.Contains(t, names, "pkg")
|
||||
|
||||
entries, err = m.List("pkg")
|
||||
require.NoError(t, err)
|
||||
names = make([]string, len(entries))
|
||||
for i, e := range entries {
|
||||
names[i] = e.Name()
|
||||
}
|
||||
assert.Contains(t, names, "a.go")
|
||||
assert.Contains(t, names, "b.go")
|
||||
assert.Contains(t, names, "sub")
|
||||
}
|
||||
|
||||
func TestStat_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.Write("stat.txt", "hello"))
|
||||
|
||||
info, err := m.Stat("stat.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(5), info.Size())
|
||||
assert.False(t, info.IsDir())
|
||||
|
||||
// Root stat
|
||||
info, err = m.Stat("")
|
||||
require.NoError(t, err)
|
||||
assert.True(t, info.IsDir())
|
||||
}
|
||||
|
||||
func TestOpen_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.Write("open.txt", "opened"))
|
||||
|
||||
f, err := m.Open("open.txt")
|
||||
require.NoError(t, err)
|
||||
defer f.Close()
|
||||
|
||||
data, err := io.ReadAll(f)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "opened", string(data))
|
||||
}
|
||||
|
||||
func TestCreateAppend_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
// Create
|
||||
w, err := m.Create("new.txt")
|
||||
require.NoError(t, err)
|
||||
w.Write([]byte("hello"))
|
||||
w.Close()
|
||||
|
||||
got, err := m.Read("new.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "hello", got)
|
||||
|
||||
// Append
|
||||
w, err = m.Append("new.txt")
|
||||
require.NoError(t, err)
|
||||
w.Write([]byte(" world"))
|
||||
w.Close()
|
||||
|
||||
got, err = m.Read("new.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "hello world", got)
|
||||
}
|
||||
|
||||
func TestStreams_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
// WriteStream
|
||||
ws, err := m.WriteStream("stream.txt")
|
||||
require.NoError(t, err)
|
||||
ws.Write([]byte("streamed"))
|
||||
ws.Close()
|
||||
|
||||
// ReadStream
|
||||
rs, err := m.ReadStream("stream.txt")
|
||||
require.NoError(t, err)
|
||||
data, err := io.ReadAll(rs)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "streamed", string(data))
|
||||
rs.Close()
|
||||
}
|
||||
|
||||
func TestFileGetFileSet_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.FileSet("alias.txt", "via set"))
|
||||
|
||||
got, err := m.FileGet("alias.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "via set", got)
|
||||
}
|
||||
|
||||
func TestSnapshotRestore_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.Write("a.txt", "alpha"))
|
||||
require.NoError(t, m.Write("b/c.txt", "charlie"))
|
||||
|
||||
snap, err := m.Snapshot()
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, snap)
|
||||
|
||||
// Restore into a new Medium
|
||||
m2, err := FromTar(snap)
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := m2.Read("a.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "alpha", got)
|
||||
|
||||
got, err = m2.Read("b/c.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "charlie", got)
|
||||
}
|
||||
|
||||
func TestRestore_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.Write("original.txt", "before"))
|
||||
|
||||
snap, err := m.Snapshot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Modify
|
||||
require.NoError(t, m.Write("original.txt", "after"))
|
||||
require.NoError(t, m.Write("extra.txt", "extra"))
|
||||
|
||||
// Restore to snapshot
|
||||
require.NoError(t, m.Restore(snap))
|
||||
|
||||
got, err := m.Read("original.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "before", got)
|
||||
|
||||
assert.False(t, m.Exists("extra.txt"))
|
||||
}
|
||||
|
||||
func TestDataNode_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.Write("test.txt", "borg"))
|
||||
|
||||
dn := m.DataNode()
|
||||
assert.NotNil(t, dn)
|
||||
|
||||
// Verify we can use the DataNode directly
|
||||
f, err := dn.Open("test.txt")
|
||||
require.NoError(t, err)
|
||||
defer f.Close()
|
||||
|
||||
data, err := io.ReadAll(f)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "borg", string(data))
|
||||
}
|
||||
|
||||
func TestOverwrite_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
require.NoError(t, m.Write("file.txt", "v1"))
|
||||
require.NoError(t, m.Write("file.txt", "v2"))
|
||||
|
||||
got, err := m.Read("file.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v2", got)
|
||||
}
|
||||
|
||||
func TestExists_Good(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
assert.True(t, m.Exists("")) // root
|
||||
assert.False(t, m.Exists("x"))
|
||||
|
||||
require.NoError(t, m.Write("x", "y"))
|
||||
assert.True(t, m.Exists("x"))
|
||||
}
|
||||
|
||||
func TestReadDir_Ugly(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
// Read from a file path (not a dir) should return empty or error
|
||||
require.NoError(t, m.Write("file.txt", "content"))
|
||||
_, err := m.Read("file.txt")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
581
pkg/io/io.go
581
pkg/io/io.go
|
|
@ -1,581 +0,0 @@
|
|||
package io
|
||||
|
||||
import (
|
||||
goio "io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
coreerr "forge.lthn.ai/core/go/pkg/framework/core"
|
||||
"forge.lthn.ai/core/go/pkg/io/local"
|
||||
)
|
||||
|
||||
// Medium defines the standard interface for a storage backend.
|
||||
// This allows for different implementations (e.g., local disk, S3, SFTP)
|
||||
// to be used interchangeably.
|
||||
type Medium interface {
|
||||
// Read retrieves the content of a file as a string.
|
||||
Read(path string) (string, error)
|
||||
|
||||
// Write saves the given content to a file, overwriting it if it exists.
|
||||
Write(path, content string) error
|
||||
|
||||
// EnsureDir makes sure a directory exists, creating it if necessary.
|
||||
EnsureDir(path string) error
|
||||
|
||||
// IsFile checks if a path exists and is a regular file.
|
||||
IsFile(path string) bool
|
||||
|
||||
// FileGet is a convenience function that reads a file from the medium.
|
||||
FileGet(path string) (string, error)
|
||||
|
||||
// FileSet is a convenience function that writes a file to the medium.
|
||||
FileSet(path, content string) error
|
||||
|
||||
// Delete removes a file or empty directory.
|
||||
Delete(path string) error
|
||||
|
||||
// DeleteAll removes a file or directory and all its contents recursively.
|
||||
DeleteAll(path string) error
|
||||
|
||||
// Rename moves a file or directory from oldPath to newPath.
|
||||
Rename(oldPath, newPath string) error
|
||||
|
||||
// List returns the directory entries for the given path.
|
||||
List(path string) ([]fs.DirEntry, error)
|
||||
|
||||
// Stat returns file information for the given path.
|
||||
Stat(path string) (fs.FileInfo, error)
|
||||
|
||||
// Open opens the named file for reading.
|
||||
Open(path string) (fs.File, error)
|
||||
|
||||
// Create creates or truncates the named file.
|
||||
Create(path string) (goio.WriteCloser, error)
|
||||
|
||||
// Append opens the named file for appending, creating it if it doesn't exist.
|
||||
Append(path string) (goio.WriteCloser, error)
|
||||
|
||||
// ReadStream returns a reader for the file content.
|
||||
// Use this for large files to avoid loading the entire content into memory.
|
||||
ReadStream(path string) (goio.ReadCloser, error)
|
||||
|
||||
// WriteStream returns a writer for the file content.
|
||||
// Use this for large files to avoid loading the entire content into memory.
|
||||
WriteStream(path string) (goio.WriteCloser, error)
|
||||
|
||||
// Exists checks if a path exists (file or directory).
|
||||
Exists(path string) bool
|
||||
|
||||
// IsDir checks if a path exists and is a directory.
|
||||
IsDir(path string) bool
|
||||
}
|
||||
|
||||
// FileInfo provides a simple implementation of fs.FileInfo for mock testing.
|
||||
type FileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode fs.FileMode
|
||||
modTime time.Time
|
||||
isDir bool
|
||||
}
|
||||
|
||||
func (fi FileInfo) Name() string { return fi.name }
|
||||
func (fi FileInfo) Size() int64 { return fi.size }
|
||||
func (fi FileInfo) Mode() fs.FileMode { return fi.mode }
|
||||
func (fi FileInfo) ModTime() time.Time { return fi.modTime }
|
||||
func (fi FileInfo) IsDir() bool { return fi.isDir }
|
||||
func (fi FileInfo) Sys() any { return nil }
|
||||
|
||||
// DirEntry provides a simple implementation of fs.DirEntry for mock testing.
|
||||
type DirEntry struct {
|
||||
name string
|
||||
isDir bool
|
||||
mode fs.FileMode
|
||||
info fs.FileInfo
|
||||
}
|
||||
|
||||
func (de DirEntry) Name() string { return de.name }
|
||||
func (de DirEntry) IsDir() bool { return de.isDir }
|
||||
func (de DirEntry) Type() fs.FileMode { return de.mode.Type() }
|
||||
func (de DirEntry) Info() (fs.FileInfo, error) { return de.info, nil }
|
||||
|
||||
// Local is a pre-initialized medium for the local filesystem.
|
||||
// It uses "/" as root, providing unsandboxed access to the filesystem.
|
||||
// For sandboxed access, use NewSandboxed with a specific root path.
|
||||
var Local Medium
|
||||
|
||||
func init() {
|
||||
var err error
|
||||
Local, err = local.New("/")
|
||||
if err != nil {
|
||||
panic("io: failed to initialize Local medium: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// NewSandboxed creates a new Medium sandboxed to the given root directory.
|
||||
// All file operations are restricted to paths within the root.
|
||||
// The root directory will be created if it doesn't exist.
|
||||
func NewSandboxed(root string) (Medium, error) {
|
||||
return local.New(root)
|
||||
}
|
||||
|
||||
// --- Helper Functions ---
//
// These free functions delegate one-to-one to the corresponding Medium
// methods; they exist so call sites can read io.Read(m, p) style.

// Read retrieves the content of a file from the given medium.
func Read(m Medium, path string) (string, error) {
	return m.Read(path)
}

// Write saves the given content to a file in the given medium.
func Write(m Medium, path, content string) error {
	return m.Write(path, content)
}

// ReadStream returns a reader for the file content from the given medium.
func ReadStream(m Medium, path string) (goio.ReadCloser, error) {
	return m.ReadStream(path)
}

// WriteStream returns a writer for the file content in the given medium.
func WriteStream(m Medium, path string) (goio.WriteCloser, error) {
	return m.WriteStream(path)
}

// EnsureDir makes sure a directory exists in the given medium.
func EnsureDir(m Medium, path string) error {
	return m.EnsureDir(path)
}

// IsFile checks if a path exists and is a regular file in the given medium.
func IsFile(m Medium, path string) bool {
	return m.IsFile(path)
}
|
||||
|
||||
// Copy copies a file from one medium to another.
//
// It streams the content via ReadStream/WriteStream with goio.Copy, so
// memory use is constant regardless of file size (the previous version
// buffered the whole file in a string). Errors are wrapped with the
// operation name and the path involved.
func Copy(src Medium, srcPath string, dst Medium, dstPath string) error {
	r, err := src.ReadStream(srcPath)
	if err != nil {
		return coreerr.E("io.Copy", "read failed: "+srcPath, err)
	}
	// Best-effort close of the source; a close error after a full copy
	// does not invalidate the data already written.
	defer func() { _ = r.Close() }()

	w, err := dst.WriteStream(dstPath)
	if err != nil {
		return coreerr.E("io.Copy", "write failed: "+dstPath, err)
	}
	if _, err := goio.Copy(w, r); err != nil {
		_ = w.Close()
		return coreerr.E("io.Copy", "write failed: "+dstPath, err)
	}
	// Close errors on the destination matter: some media commit on Close.
	if err := w.Close(); err != nil {
		return coreerr.E("io.Copy", "write failed: "+dstPath, err)
	}
	return nil
}
|
||||
|
||||
// --- MockMedium ---

// MockMedium is an in-memory implementation of Medium for testing.
// State lives in exported maps so tests can seed and inspect it directly.
// Plain maps with no locking: not safe for concurrent use.
type MockMedium struct {
	Files    map[string]string    // path -> file content
	Dirs     map[string]bool      // path -> directory marker (presence is what matters)
	ModTimes map[string]time.Time // path -> last write time
}
|
||||
|
||||
// NewMockMedium creates a new MockMedium instance with empty state maps.
func NewMockMedium() *MockMedium {
	m := &MockMedium{}
	m.Files = make(map[string]string)
	m.Dirs = make(map[string]bool)
	m.ModTimes = make(map[string]time.Time)
	return m
}
|
||||
|
||||
// Read retrieves the content of a file from the mock filesystem.
// A missing path yields an error wrapping os.ErrNotExist.
func (m *MockMedium) Read(path string) (string, error) {
	if content, found := m.Files[path]; found {
		return content, nil
	}
	return "", coreerr.E("io.MockMedium.Read", "file not found: "+path, os.ErrNotExist)
}
|
||||
|
||||
// Write saves the given content to a file in the mock filesystem and
// stamps the current time as its modification time. It never fails.
func (m *MockMedium) Write(path, content string) error {
	now := time.Now()
	m.Files[path] = content
	m.ModTimes[path] = now
	return nil
}
|
||||
|
||||
// EnsureDir records that a directory exists in the mock filesystem.
// Only the exact path is marked; parent directories are not created
// implicitly. It never fails.
func (m *MockMedium) EnsureDir(path string) error {
	m.Dirs[path] = true
	return nil
}
|
||||
|
||||
// IsFile checks if a path exists as a file in the mock filesystem.
|
||||
func (m *MockMedium) IsFile(path string) bool {
|
||||
_, ok := m.Files[path]
|
||||
return ok
|
||||
}
|
||||
|
||||
// FileGet is a convenience function that reads a file from the mock
// filesystem. It is a strict alias for Read.
func (m *MockMedium) FileGet(path string) (string, error) {
	return m.Read(path)
}

// FileSet is a convenience function that writes a file to the mock
// filesystem. It is a strict alias for Write.
func (m *MockMedium) FileSet(path, content string) error {
	return m.Write(path, content)
}
|
||||
|
||||
// Delete removes a file or empty directory from the mock filesystem.
// Deleting a directory fails (os.ErrExist-wrapped) while any file or
// subdirectory remains under it; use DeleteAll for recursive removal.
// NOTE(review): a deleted file's ModTimes entry is left behind.
func (m *MockMedium) Delete(path string) error {
	if _, ok := m.Files[path]; ok {
		delete(m.Files, path)
		return nil
	}
	if _, ok := m.Dirs[path]; ok {
		// Check if directory is empty (no files or subdirs with this prefix)
		prefix := path
		if !strings.HasSuffix(prefix, "/") {
			prefix += "/"
		}
		for f := range m.Files {
			if strings.HasPrefix(f, prefix) {
				return coreerr.E("io.MockMedium.Delete", "directory not empty: "+path, os.ErrExist)
			}
		}
		for d := range m.Dirs {
			// d != path guards against matching the directory itself.
			if d != path && strings.HasPrefix(d, prefix) {
				return coreerr.E("io.MockMedium.Delete", "directory not empty: "+path, os.ErrExist)
			}
		}
		delete(m.Dirs, path)
		return nil
	}
	return coreerr.E("io.MockMedium.Delete", "path not found: "+path, os.ErrNotExist)
}
|
||||
|
||||
// DeleteAll removes a file or directory and all contents from the mock
// filesystem. It returns an os.ErrNotExist-wrapped error only when
// neither the path itself nor anything under it existed.
// NOTE(review): ModTimes entries for removed files are not purged.
func (m *MockMedium) DeleteAll(path string) error {
	found := false
	if _, ok := m.Files[path]; ok {
		delete(m.Files, path)
		found = true
	}
	if _, ok := m.Dirs[path]; ok {
		delete(m.Dirs, path)
		found = true
	}

	// Delete all entries under this path
	prefix := path
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}
	for f := range m.Files {
		if strings.HasPrefix(f, prefix) {
			delete(m.Files, f)
			found = true
		}
	}
	for d := range m.Dirs {
		if strings.HasPrefix(d, prefix) {
			delete(m.Dirs, d)
			found = true
		}
	}

	if !found {
		return coreerr.E("io.MockMedium.DeleteAll", "path not found: "+path, os.ErrNotExist)
	}
	return nil
}
|
||||
|
||||
// Rename moves a file or directory in the mock filesystem.
// Files carry their ModTimes entry along. Directory moves relocate every
// file and subdirectory under the old prefix; candidates are collected
// into temporary maps first so the Files/Dirs maps are never mutated
// while being iterated.
func (m *MockMedium) Rename(oldPath, newPath string) error {
	if content, ok := m.Files[oldPath]; ok {
		m.Files[newPath] = content
		delete(m.Files, oldPath)
		if mt, ok := m.ModTimes[oldPath]; ok {
			m.ModTimes[newPath] = mt
			delete(m.ModTimes, oldPath)
		}
		return nil
	}
	if _, ok := m.Dirs[oldPath]; ok {
		// Move directory and all contents
		m.Dirs[newPath] = true
		delete(m.Dirs, oldPath)

		oldPrefix := oldPath
		if !strings.HasSuffix(oldPrefix, "/") {
			oldPrefix += "/"
		}
		newPrefix := newPath
		if !strings.HasSuffix(newPrefix, "/") {
			newPrefix += "/"
		}

		// Collect files to move first (don't mutate during iteration)
		filesToMove := make(map[string]string)
		for f := range m.Files {
			if strings.HasPrefix(f, oldPrefix) {
				newF := newPrefix + strings.TrimPrefix(f, oldPrefix)
				filesToMove[f] = newF
			}
		}
		for oldF, newF := range filesToMove {
			m.Files[newF] = m.Files[oldF]
			delete(m.Files, oldF)
			if mt, ok := m.ModTimes[oldF]; ok {
				m.ModTimes[newF] = mt
				delete(m.ModTimes, oldF)
			}
		}

		// Collect directories to move first
		dirsToMove := make(map[string]string)
		for d := range m.Dirs {
			if strings.HasPrefix(d, oldPrefix) {
				newD := newPrefix + strings.TrimPrefix(d, oldPrefix)
				dirsToMove[d] = newD
			}
		}
		for oldD, newD := range dirsToMove {
			m.Dirs[newD] = true
			delete(m.Dirs, oldD)
		}
		return nil
	}
	return coreerr.E("io.MockMedium.Rename", "path not found: "+oldPath, os.ErrNotExist)
}
|
||||
|
||||
// Open opens a file from the mock filesystem for reading.
// The returned fs.File reads a snapshot of the content taken here.
func (m *MockMedium) Open(path string) (fs.File, error) {
	content, found := m.Files[path]
	if !found {
		return nil, coreerr.E("io.MockMedium.Open", "file not found: "+path, os.ErrNotExist)
	}
	f := &MockFile{name: filepath.Base(path)}
	f.content = []byte(content)
	return f, nil
}
|
||||
|
||||
// Create creates a file in the mock filesystem. The file only appears in
// Files once the returned writer's Close is called; it starts empty,
// truncating any existing content at that point.
func (m *MockMedium) Create(path string) (goio.WriteCloser, error) {
	return &MockWriteCloser{
		medium: m,
		path:   path,
	}, nil
}
|
||||
|
||||
// Append opens a file for appending in the mock filesystem. The writer
// is pre-seeded with the existing content (empty string if the file does
// not exist), so Close rewrites old content plus the appended bytes.
func (m *MockMedium) Append(path string) (goio.WriteCloser, error) {
	content := m.Files[path]
	return &MockWriteCloser{
		medium: m,
		path:   path,
		data:   []byte(content),
	}, nil
}
|
||||
|
||||
// ReadStream returns a reader for the file content in the mock
// filesystem. It delegates to Open.
func (m *MockMedium) ReadStream(path string) (goio.ReadCloser, error) {
	return m.Open(path)
}

// WriteStream returns a writer for the file content in the mock
// filesystem. It delegates to Create (truncating semantics).
func (m *MockMedium) WriteStream(path string) (goio.WriteCloser, error) {
	return m.Create(path)
}
|
||||
|
||||
// MockFile implements fs.File for MockMedium.
type MockFile struct {
	name    string // base name reported by Stat
	content []byte // full file contents snapshot
	offset  int64  // current read position
}

// Stat returns minimal file info: name and size only (mode, mod time and
// isDir stay at their zero values).
func (f *MockFile) Stat() (fs.FileInfo, error) {
	return FileInfo{
		name: f.name,
		size: int64(len(f.content)),
	}, nil
}

// Read copies bytes from the current offset into b and advances the
// offset. Once the offset reaches the end it returns io.EOF.
func (f *MockFile) Read(b []byte) (int, error) {
	if f.offset >= int64(len(f.content)) {
		return 0, goio.EOF
	}
	n := copy(b, f.content[f.offset:])
	f.offset += int64(n)
	return n, nil
}

// Close is a no-op for mock files.
func (f *MockFile) Close() error {
	return nil
}
|
||||
|
||||
// MockWriteCloser implements WriteCloser for MockMedium. Writes
// accumulate in memory; the data only becomes visible in the medium when
// Close commits it.
type MockWriteCloser struct {
	medium *MockMedium // destination medium, updated on Close
	path   string      // destination path
	data   []byte      // buffered content (pre-seeded by Append)
}

// Write buffers p in memory. It never fails.
func (w *MockWriteCloser) Write(p []byte) (int, error) {
	w.data = append(w.data, p...)
	return len(p), nil
}

// Close commits the buffered data to the medium and stamps the mod time.
// Calling Close more than once recommits the same data harmlessly.
func (w *MockWriteCloser) Close() error {
	w.medium.Files[w.path] = string(w.data)
	w.medium.ModTimes[w.path] = time.Now()
	return nil
}
|
||||
|
||||
// List returns the immediate children (files and subdirectories) of path
// in the mock filesystem. An empty path lists the root and always
// succeeds; any other path that is not a registered directory and has no
// entries beneath it yields an os.ErrNotExist-wrapped error.
// Entry order is nondeterministic (map iteration).
func (m *MockMedium) List(path string) ([]fs.DirEntry, error) {
	if _, ok := m.Dirs[path]; !ok {
		// Check if it's the root or has children
		hasChildren := false
		prefix := path
		if path != "" && !strings.HasSuffix(prefix, "/") {
			prefix += "/"
		}
		for f := range m.Files {
			if strings.HasPrefix(f, prefix) {
				hasChildren = true
				break
			}
		}
		if !hasChildren {
			for d := range m.Dirs {
				if strings.HasPrefix(d, prefix) {
					hasChildren = true
					break
				}
			}
		}
		if !hasChildren && path != "" {
			return nil, coreerr.E("io.MockMedium.List", "directory not found: "+path, os.ErrNotExist)
		}
	}

	prefix := path
	if path != "" && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	// seen dedupes names discovered both via Files and via Dirs.
	seen := make(map[string]bool)
	var entries []fs.DirEntry

	// Find immediate children (files)
	for f, content := range m.Files {
		if !strings.HasPrefix(f, prefix) {
			continue
		}
		rest := strings.TrimPrefix(f, prefix)
		if rest == "" || strings.Contains(rest, "/") {
			// Skip if it's not an immediate child
			if idx := strings.Index(rest, "/"); idx != -1 {
				// This is a subdirectory implied by a deeper file path.
				dirName := rest[:idx]
				if !seen[dirName] {
					seen[dirName] = true
					entries = append(entries, DirEntry{
						name:  dirName,
						isDir: true,
						mode:  fs.ModeDir | 0755,
						info: FileInfo{
							name:  dirName,
							isDir: true,
							mode:  fs.ModeDir | 0755,
						},
					})
				}
			}
			continue
		}
		if !seen[rest] {
			seen[rest] = true
			entries = append(entries, DirEntry{
				name:  rest,
				isDir: false,
				mode:  0644,
				info: FileInfo{
					name: rest,
					size: int64(len(content)),
					mode: 0644,
				},
			})
		}
	}

	// Find immediate subdirectories
	for d := range m.Dirs {
		if !strings.HasPrefix(d, prefix) {
			continue
		}
		rest := strings.TrimPrefix(d, prefix)
		if rest == "" {
			continue
		}
		// Get only immediate child
		if idx := strings.Index(rest, "/"); idx != -1 {
			rest = rest[:idx]
		}
		if !seen[rest] {
			seen[rest] = true
			entries = append(entries, DirEntry{
				name:  rest,
				isDir: true,
				mode:  fs.ModeDir | 0755,
				info: FileInfo{
					name:  rest,
					isDir: true,
					mode:  fs.ModeDir | 0755,
				},
			})
		}
	}

	return entries, nil
}
|
||||
|
||||
// Stat returns file information for the mock filesystem. Files report
// mode 0644 with their recorded mod time (or the current time when none
// was recorded); directories report fs.ModeDir|0755 with a zero time.
func (m *MockMedium) Stat(path string) (fs.FileInfo, error) {
	if content, found := m.Files[path]; found {
		mt, recorded := m.ModTimes[path]
		if !recorded {
			mt = time.Now()
		}
		info := FileInfo{
			name:    filepath.Base(path),
			size:    int64(len(content)),
			mode:    0644,
			modTime: mt,
		}
		return info, nil
	}
	if _, found := m.Dirs[path]; found {
		info := FileInfo{
			name:  filepath.Base(path),
			isDir: true,
			mode:  fs.ModeDir | 0755,
		}
		return info, nil
	}
	return nil, coreerr.E("io.MockMedium.Stat", "path not found: "+path, os.ErrNotExist)
}
|
||||
|
||||
// Exists checks if a path exists in the mock filesystem.
|
||||
func (m *MockMedium) Exists(path string) bool {
|
||||
if _, ok := m.Files[path]; ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := m.Dirs[path]; ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsDir checks if a path is a directory in the mock filesystem.
// Presence in the Dirs map is what counts, not the stored boolean value.
func (m *MockMedium) IsDir(path string) bool {
	_, ok := m.Dirs[path]
	return ok
}
|
||||
|
|
@ -1,297 +0,0 @@
|
|||
// Package local provides a local filesystem implementation of the io.Medium interface.
|
||||
package local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
goio "io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Medium is a local filesystem storage backend.
type Medium struct {
	// root is the sandbox root; New stores it absolute and
	// symlink-resolved. "/" means unsandboxed access.
	root string
}
|
||||
|
||||
// New creates a new local Medium rooted at the given directory.
// Pass "/" for full filesystem access, or a specific path to sandbox.
// The root is made absolute and symlink-resolved; a resolution failure
// (e.g. the root does not exist yet) is tolerated and the absolute path
// is used as-is.
func New(root string) (*Medium, error) {
	abs, err := filepath.Abs(root)
	if err != nil {
		return nil, err
	}
	// Resolve symlinks so sandbox checks compare like-for-like.
	// On macOS, /var is a symlink to /private/var — without this,
	// EvalSymlinks on child paths resolves to /private/var/... while
	// root stays /var/..., causing false sandbox escape detections.
	if resolved, err := filepath.EvalSymlinks(abs); err == nil {
		abs = resolved
	}
	return &Medium{root: abs}, nil
}
|
||||
|
||||
// path sanitizes and returns the full path for p under the sandbox root.
// Absolute paths are re-rooted under root (unless root is "/"), and any
// ".."/"." segments are collapsed before joining, so lexical traversal
// cannot escape the root. Symlink-based escapes are handled separately
// by validatePath.
func (m *Medium) path(p string) string {
	if p == "" {
		return m.root
	}

	// If the path is relative and the medium is rooted at "/",
	// treat it as relative to the current working directory.
	// This makes io.Local behave more like the standard 'os' package.
	if m.root == "/" && !filepath.IsAbs(p) {
		// Getwd error deliberately ignored: cwd falls back to "".
		cwd, _ := os.Getwd()
		return filepath.Join(cwd, p)
	}

	// Use filepath.Clean with a leading slash to resolve all .. and . internally
	// before joining with the root. This is a standard way to sandbox paths.
	clean := filepath.Clean("/" + p)

	// If root is "/", allow absolute paths through
	if m.root == "/" {
		return clean
	}

	// Join cleaned relative path with root
	return filepath.Join(m.root, clean)
}
|
||||
|
||||
// validatePath ensures the path is within the sandbox, following
// symlinks if they exist. It walks the cleaned path one component at a
// time, resolving each through EvalSymlinks and checking that the
// resolved location is still under root. On a detected escape it logs a
// security event to stderr and returns os.ErrPermission. When root is
// "/" there is no sandbox and the lexically-cleaned path is returned
// directly.
func (m *Medium) validatePath(p string) (string, error) {
	if m.root == "/" {
		return m.path(p), nil
	}

	// Split the cleaned path into components
	parts := strings.Split(filepath.Clean("/"+p), string(os.PathSeparator))
	current := m.root

	for _, part := range parts {
		if part == "" {
			continue
		}

		next := filepath.Join(current, part)
		realNext, err := filepath.EvalSymlinks(next)
		if err != nil {
			if os.IsNotExist(err) {
				// Part doesn't exist, we can't follow symlinks anymore.
				// Since the path is already Cleaned and current is safe,
				// appending a component to current will not escape.
				current = next
				continue
			}
			return "", err
		}

		// Verify the resolved part is still within the root
		rel, err := filepath.Rel(m.root, realNext)
		if err != nil || strings.HasPrefix(rel, "..") {
			// Security event: sandbox escape attempt
			username := "unknown"
			if u, err := user.Current(); err == nil {
				username = u.Username
			}
			fmt.Fprintf(os.Stderr, "[%s] SECURITY sandbox escape detected root=%s path=%s attempted=%s user=%s\n",
				time.Now().Format(time.RFC3339), m.root, p, realNext, username)
			return "", os.ErrPermission // Path escapes sandbox
		}
		current = realNext
	}

	return current, nil
}
|
||||
|
||||
// Read returns file contents as string after sandbox validation.
func (m *Medium) Read(p string) (string, error) {
	full, err := m.validatePath(p)
	if err != nil {
		return "", err
	}
	raw, readErr := os.ReadFile(full)
	if readErr != nil {
		return "", readErr
	}
	return string(raw), nil
}
|
||||
|
||||
// Write saves content to file (mode 0644), creating parent directories
// (mode 0755) as needed.
func (m *Medium) Write(p, content string) error {
	full, err := m.validatePath(p)
	if err != nil {
		return err
	}
	parent := filepath.Dir(full)
	if mkErr := os.MkdirAll(parent, 0755); mkErr != nil {
		return mkErr
	}
	return os.WriteFile(full, []byte(content), 0644)
}
|
||||
|
||||
// EnsureDir creates the directory (and any missing parents, mode 0755)
// if it doesn't exist.
func (m *Medium) EnsureDir(p string) error {
	full, err := m.validatePath(p)
	if err != nil {
		return err
	}
	return os.MkdirAll(full, 0755)
}
|
||||
|
||||
// IsDir returns true if path is a directory. The empty path and any
// path failing sandbox validation report false.
func (m *Medium) IsDir(p string) bool {
	if p == "" {
		return false
	}
	full, err := m.validatePath(p)
	if err != nil {
		return false
	}
	if info, statErr := os.Stat(full); statErr == nil {
		return info.IsDir()
	}
	return false
}
|
||||
|
||||
// IsFile returns true if path is a regular file. The empty path and any
// path failing sandbox validation report false.
func (m *Medium) IsFile(p string) bool {
	if p == "" {
		return false
	}
	full, err := m.validatePath(p)
	if err != nil {
		return false
	}
	if info, statErr := os.Stat(full); statErr == nil {
		return info.Mode().IsRegular()
	}
	return false
}
|
||||
|
||||
// Exists returns true if path exists (file or directory).
func (m *Medium) Exists(p string) bool {
	full, err := m.validatePath(p)
	if err != nil {
		return false
	}
	if _, statErr := os.Stat(full); statErr == nil {
		return true
	}
	return false
}
|
||||
|
||||
// List returns directory entries, sorted by filename per os.ReadDir.
func (m *Medium) List(p string) ([]fs.DirEntry, error) {
	full, err := m.validatePath(p)
	if err != nil {
		return nil, err
	}
	return os.ReadDir(full)
}
|
||||
|
||||
// Stat returns file info for the validated path via os.Stat
// (following symlinks).
func (m *Medium) Stat(p string) (fs.FileInfo, error) {
	full, err := m.validatePath(p)
	if err != nil {
		return nil, err
	}
	return os.Stat(full)
}
|
||||
|
||||
// Open opens the named file for reading. The caller must Close it.
func (m *Medium) Open(p string) (fs.File, error) {
	full, err := m.validatePath(p)
	if err != nil {
		return nil, err
	}
	return os.Open(full)
}
|
||||
|
||||
// Create creates or truncates the named file, making any missing parent
// directories (mode 0755) first. The caller must Close the writer.
func (m *Medium) Create(p string) (goio.WriteCloser, error) {
	full, err := m.validatePath(p)
	if err != nil {
		return nil, err
	}
	parent := filepath.Dir(full)
	if mkErr := os.MkdirAll(parent, 0755); mkErr != nil {
		return nil, mkErr
	}
	return os.Create(full)
}
|
||||
|
||||
// Append opens the named file for appending (mode 0644), creating it and
// any missing parent directories if needed. The caller must Close it.
func (m *Medium) Append(p string) (goio.WriteCloser, error) {
	full, err := m.validatePath(p)
	if err != nil {
		return nil, err
	}
	parent := filepath.Dir(full)
	if mkErr := os.MkdirAll(parent, 0755); mkErr != nil {
		return nil, mkErr
	}
	flags := os.O_APPEND | os.O_CREATE | os.O_WRONLY
	return os.OpenFile(full, flags, 0644)
}
|
||||
|
||||
// ReadStream returns a reader for the file content.
//
// This is a convenience wrapper around Open that exposes a streaming-oriented
// API, as required by the io.Medium interface, while Open provides the more
// general filesystem-level operation. Both methods are kept for semantic
// clarity and backward compatibility.
func (m *Medium) ReadStream(path string) (goio.ReadCloser, error) {
	return m.Open(path)
}
|
||||
|
||||
// WriteStream returns a writer for the file content.
//
// This is a convenience wrapper around Create that exposes a streaming-oriented
// API, as required by the io.Medium interface, while Create provides the more
// general filesystem-level operation. Both methods are kept for semantic
// clarity and backward compatibility.
func (m *Medium) WriteStream(path string) (goio.WriteCloser, error) {
	return m.Create(path)
}
|
||||
|
||||
// Delete removes a file or empty directory.
func (m *Medium) Delete(p string) error {
	full, err := m.validatePath(p)
	if err != nil {
		return err
	}
	// Silently refuse to remove extremely short resolved paths (e.g. "/"
	// or "/a"). NOTE(review): this length heuristic looks like a guard
	// against catastrophic deletes — confirm intent; it also turns such
	// requests into silent no-ops rather than errors.
	if len(full) < 3 {
		return nil
	}
	return os.Remove(full)
}
|
||||
|
||||
// DeleteAll removes a file or directory recursively.
func (m *Medium) DeleteAll(p string) error {
	full, err := m.validatePath(p)
	if err != nil {
		return err
	}
	// Silently skip extremely short resolved paths (e.g. "/", "/a") as a
	// last-ditch guard against catastrophic recursive deletes.
	if len(full) >= 3 {
		return os.RemoveAll(full)
	}
	return nil
}
|
||||
|
||||
// Rename moves a file or directory; both endpoints are validated against
// the sandbox before os.Rename runs.
func (m *Medium) Rename(oldPath, newPath string) error {
	src, err := m.validatePath(oldPath)
	if err != nil {
		return err
	}
	dst, dstErr := m.validatePath(newPath)
	if dstErr != nil {
		return dstErr
	}
	return os.Rename(src, dst)
}
|
||||
|
||||
// FileGet is an alias for Read, kept for interface parity with other media.
func (m *Medium) FileGet(p string) (string, error) {
	return m.Read(p)
}

// FileSet is an alias for Write, kept for interface parity with other media.
func (m *Medium) FileSet(p, content string) error {
	return m.Write(p, content)
}
|
||||
|
|
@ -1,513 +0,0 @@
|
|||
package local
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestNew verifies that New stores an absolute, symlink-resolved root.
func TestNew(t *testing.T) {
	root := t.TempDir()
	m, err := New(root)
	assert.NoError(t, err)
	// New() resolves symlinks (macOS /var → /private/var), so compare resolved paths.
	resolved, _ := filepath.EvalSymlinks(root)
	assert.Equal(t, resolved, m.root)
}

// TestPath exercises the lexical sandbox sanitizer directly, including
// traversal and absolute-path escape attempts.
func TestPath(t *testing.T) {
	m := &Medium{root: "/home/user"}

	// Normal paths
	assert.Equal(t, "/home/user/file.txt", m.path("file.txt"))
	assert.Equal(t, "/home/user/dir/file.txt", m.path("dir/file.txt"))

	// Empty returns root
	assert.Equal(t, "/home/user", m.path(""))

	// Traversal attempts get sanitized
	assert.Equal(t, "/home/user/file.txt", m.path("../file.txt"))
	assert.Equal(t, "/home/user/file.txt", m.path("dir/../file.txt"))

	// Absolute paths are constrained to sandbox (no escape)
	assert.Equal(t, "/home/user/etc/passwd", m.path("/etc/passwd"))
}

// TestPath_RootFilesystem covers the unsandboxed behavior when the
// medium is rooted at "/".
func TestPath_RootFilesystem(t *testing.T) {
	m := &Medium{root: "/"}

	// When root is "/", absolute paths pass through
	assert.Equal(t, "/etc/passwd", m.path("/etc/passwd"))
	assert.Equal(t, "/home/user/file.txt", m.path("/home/user/file.txt"))

	// Relative paths are relative to CWD when root is "/"
	cwd, _ := os.Getwd()
	assert.Equal(t, filepath.Join(cwd, "file.txt"), m.path("file.txt"))
}
|
||||
|
||||
// TestReadWrite covers round-tripping content, implicit parent-directory
// creation, and the error path for a missing file.
func TestReadWrite(t *testing.T) {
	root := t.TempDir()
	m, _ := New(root)

	// Write and read back
	err := m.Write("test.txt", "hello")
	assert.NoError(t, err)

	content, err := m.Read("test.txt")
	assert.NoError(t, err)
	assert.Equal(t, "hello", content)

	// Write creates parent dirs
	err = m.Write("a/b/c.txt", "nested")
	assert.NoError(t, err)

	content, err = m.Read("a/b/c.txt")
	assert.NoError(t, err)
	assert.Equal(t, "nested", content)

	// Read nonexistent
	_, err = m.Read("nope.txt")
	assert.Error(t, err)
}

// TestEnsureDir verifies nested directory creation on disk.
func TestEnsureDir(t *testing.T) {
	root := t.TempDir()
	m, _ := New(root)

	err := m.EnsureDir("one/two/three")
	assert.NoError(t, err)

	info, err := os.Stat(filepath.Join(root, "one/two/three"))
	assert.NoError(t, err)
	assert.True(t, info.IsDir())
}

// TestIsDir checks directory detection for a dir, a file, a missing
// path, and the empty path.
func TestIsDir(t *testing.T) {
	root := t.TempDir()
	m, _ := New(root)

	_ = os.Mkdir(filepath.Join(root, "mydir"), 0755)
	_ = os.WriteFile(filepath.Join(root, "myfile"), []byte("x"), 0644)

	assert.True(t, m.IsDir("mydir"))
	assert.False(t, m.IsDir("myfile"))
	assert.False(t, m.IsDir("nope"))
	assert.False(t, m.IsDir(""))
}

// TestIsFile checks regular-file detection for the same four cases.
func TestIsFile(t *testing.T) {
	root := t.TempDir()
	m, _ := New(root)

	_ = os.Mkdir(filepath.Join(root, "mydir"), 0755)
	_ = os.WriteFile(filepath.Join(root, "myfile"), []byte("x"), 0644)

	assert.True(t, m.IsFile("myfile"))
	assert.False(t, m.IsFile("mydir"))
	assert.False(t, m.IsFile("nope"))
	assert.False(t, m.IsFile(""))
}

// TestExists checks existence reporting for present and absent paths.
func TestExists(t *testing.T) {
	root := t.TempDir()
	m, _ := New(root)

	_ = os.WriteFile(filepath.Join(root, "exists"), []byte("x"), 0644)

	assert.True(t, m.Exists("exists"))
	assert.False(t, m.Exists("nope"))
}
|
||||
|
||||
// TestList verifies that listing the medium root (empty path) sees files
// and subdirectories.
func TestList(t *testing.T) {
	root := t.TempDir()
	m, _ := New(root)

	_ = os.WriteFile(filepath.Join(root, "a.txt"), []byte("a"), 0644)
	_ = os.WriteFile(filepath.Join(root, "b.txt"), []byte("b"), 0644)
	_ = os.Mkdir(filepath.Join(root, "subdir"), 0755)

	entries, err := m.List("")
	assert.NoError(t, err)
	assert.Len(t, entries, 3)
}

// TestStat verifies size reporting for a file.
func TestStat(t *testing.T) {
	root := t.TempDir()
	m, _ := New(root)

	_ = os.WriteFile(filepath.Join(root, "file"), []byte("content"), 0644)

	info, err := m.Stat("file")
	assert.NoError(t, err)
	assert.Equal(t, int64(7), info.Size())
}

// TestDelete verifies single-file removal.
func TestDelete(t *testing.T) {
	root := t.TempDir()
	m, _ := New(root)

	_ = os.WriteFile(filepath.Join(root, "todelete"), []byte("x"), 0644)
	assert.True(t, m.Exists("todelete"))

	err := m.Delete("todelete")
	assert.NoError(t, err)
	assert.False(t, m.Exists("todelete"))
}

// TestDeleteAll verifies recursive removal of a populated directory.
func TestDeleteAll(t *testing.T) {
	root := t.TempDir()
	m, _ := New(root)

	_ = os.MkdirAll(filepath.Join(root, "dir/sub"), 0755)
	_ = os.WriteFile(filepath.Join(root, "dir/sub/file"), []byte("x"), 0644)

	err := m.DeleteAll("dir")
	assert.NoError(t, err)
	assert.False(t, m.Exists("dir"))
}

// TestRename verifies a simple in-sandbox rename.
func TestRename(t *testing.T) {
	root := t.TempDir()
	m, _ := New(root)

	_ = os.WriteFile(filepath.Join(root, "old"), []byte("x"), 0644)

	err := m.Rename("old", "new")
	assert.NoError(t, err)
	assert.False(t, m.Exists("old"))
	assert.True(t, m.Exists("new"))
}

// TestFileGetFileSet verifies the Read/Write alias pair round-trips.
func TestFileGetFileSet(t *testing.T) {
	root := t.TempDir()
	m, _ := New(root)

	err := m.FileSet("data", "value")
	assert.NoError(t, err)

	val, err := m.FileGet("data")
	assert.NoError(t, err)
	assert.Equal(t, "value", val)
}
|
||||
|
||||
// TestDelete_Good verifies Delete on a file and on an empty directory.
func TestDelete_Good(t *testing.T) {
	testRoot, err := os.MkdirTemp("", "local_delete_test")
	assert.NoError(t, err)
	defer func() { _ = os.RemoveAll(testRoot) }()

	medium, err := New(testRoot)
	assert.NoError(t, err)

	// Create and delete a file
	err = medium.Write("file.txt", "content")
	assert.NoError(t, err)
	assert.True(t, medium.IsFile("file.txt"))

	err = medium.Delete("file.txt")
	assert.NoError(t, err)
	assert.False(t, medium.IsFile("file.txt"))

	// Create and delete an empty directory
	err = medium.EnsureDir("emptydir")
	assert.NoError(t, err)
	err = medium.Delete("emptydir")
	assert.NoError(t, err)
	assert.False(t, medium.IsDir("emptydir"))
}

// TestDelete_Bad_NotEmpty verifies Delete refuses a non-empty directory.
func TestDelete_Bad_NotEmpty(t *testing.T) {
	testRoot, err := os.MkdirTemp("", "local_delete_notempty_test")
	assert.NoError(t, err)
	defer func() { _ = os.RemoveAll(testRoot) }()

	medium, err := New(testRoot)
	assert.NoError(t, err)

	// Create a directory with a file
	err = medium.Write("mydir/file.txt", "content")
	assert.NoError(t, err)

	// Try to delete non-empty directory
	err = medium.Delete("mydir")
	assert.Error(t, err)
}

// TestDeleteAll_Good verifies recursive removal of a nested tree.
func TestDeleteAll_Good(t *testing.T) {
	testRoot, err := os.MkdirTemp("", "local_deleteall_test")
	assert.NoError(t, err)
	defer func() { _ = os.RemoveAll(testRoot) }()

	medium, err := New(testRoot)
	assert.NoError(t, err)

	// Create nested structure
	err = medium.Write("mydir/file1.txt", "content1")
	assert.NoError(t, err)
	err = medium.Write("mydir/subdir/file2.txt", "content2")
	assert.NoError(t, err)

	// Delete all
	err = medium.DeleteAll("mydir")
	assert.NoError(t, err)
	assert.False(t, medium.Exists("mydir"))
	assert.False(t, medium.Exists("mydir/file1.txt"))
	assert.False(t, medium.Exists("mydir/subdir/file2.txt"))
}
|
||||
|
||||
// TestRename_Good verifies a rename preserves content.
func TestRename_Good(t *testing.T) {
	testRoot, err := os.MkdirTemp("", "local_rename_test")
	assert.NoError(t, err)
	defer func() { _ = os.RemoveAll(testRoot) }()

	medium, err := New(testRoot)
	assert.NoError(t, err)

	// Rename a file
	err = medium.Write("old.txt", "content")
	assert.NoError(t, err)
	err = medium.Rename("old.txt", "new.txt")
	assert.NoError(t, err)
	assert.False(t, medium.IsFile("old.txt"))
	assert.True(t, medium.IsFile("new.txt"))

	content, err := medium.Read("new.txt")
	assert.NoError(t, err)
	assert.Equal(t, "content", content)
}

// TestRename_Traversal_Sanitized verifies that a ".." destination is
// sanitized into the sandbox instead of escaping it.
func TestRename_Traversal_Sanitized(t *testing.T) {
	testRoot, err := os.MkdirTemp("", "local_rename_traversal_test")
	assert.NoError(t, err)
	defer func() { _ = os.RemoveAll(testRoot) }()

	medium, err := New(testRoot)
	assert.NoError(t, err)

	err = medium.Write("file.txt", "content")
	assert.NoError(t, err)

	// Traversal attempts are sanitized (.. becomes .), so this renames to "./escaped.txt"
	// which is just "escaped.txt" in the root
	err = medium.Rename("file.txt", "../escaped.txt")
	assert.NoError(t, err)
	assert.False(t, medium.Exists("file.txt"))
	assert.True(t, medium.Exists("escaped.txt"))
}
|
||||
|
||||
func TestList_Good(t *testing.T) {
|
||||
testRoot, err := os.MkdirTemp("", "local_list_test")
|
||||
assert.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(testRoot) }()
|
||||
|
||||
medium, err := New(testRoot)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create some files and directories
|
||||
err = medium.Write("file1.txt", "content1")
|
||||
assert.NoError(t, err)
|
||||
err = medium.Write("file2.txt", "content2")
|
||||
assert.NoError(t, err)
|
||||
err = medium.EnsureDir("subdir")
|
||||
assert.NoError(t, err)
|
||||
|
||||
// List root
|
||||
entries, err := medium.List(".")
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, entries, 3)
|
||||
|
||||
names := make(map[string]bool)
|
||||
for _, e := range entries {
|
||||
names[e.Name()] = true
|
||||
}
|
||||
assert.True(t, names["file1.txt"])
|
||||
assert.True(t, names["file2.txt"])
|
||||
assert.True(t, names["subdir"])
|
||||
}
|
||||
|
||||
func TestStat_Good(t *testing.T) {
|
||||
testRoot, err := os.MkdirTemp("", "local_stat_test")
|
||||
assert.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(testRoot) }()
|
||||
|
||||
medium, err := New(testRoot)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Stat a file
|
||||
err = medium.Write("file.txt", "hello world")
|
||||
assert.NoError(t, err)
|
||||
info, err := medium.Stat("file.txt")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "file.txt", info.Name())
|
||||
assert.Equal(t, int64(11), info.Size())
|
||||
assert.False(t, info.IsDir())
|
||||
|
||||
// Stat a directory
|
||||
err = medium.EnsureDir("mydir")
|
||||
assert.NoError(t, err)
|
||||
info, err = medium.Stat("mydir")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "mydir", info.Name())
|
||||
assert.True(t, info.IsDir())
|
||||
}
|
||||
|
||||
func TestExists_Good(t *testing.T) {
|
||||
testRoot, err := os.MkdirTemp("", "local_exists_test")
|
||||
assert.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(testRoot) }()
|
||||
|
||||
medium, err := New(testRoot)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.False(t, medium.Exists("nonexistent"))
|
||||
|
||||
err = medium.Write("file.txt", "content")
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, medium.Exists("file.txt"))
|
||||
|
||||
err = medium.EnsureDir("mydir")
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, medium.Exists("mydir"))
|
||||
}
|
||||
|
||||
func TestIsDir_Good(t *testing.T) {
|
||||
testRoot, err := os.MkdirTemp("", "local_isdir_test")
|
||||
assert.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(testRoot) }()
|
||||
|
||||
medium, err := New(testRoot)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = medium.Write("file.txt", "content")
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, medium.IsDir("file.txt"))
|
||||
|
||||
err = medium.EnsureDir("mydir")
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, medium.IsDir("mydir"))
|
||||
|
||||
assert.False(t, medium.IsDir("nonexistent"))
|
||||
}
|
||||
|
||||
func TestReadStream(t *testing.T) {
|
||||
root := t.TempDir()
|
||||
m, _ := New(root)
|
||||
|
||||
content := "streaming content"
|
||||
err := m.Write("stream.txt", content)
|
||||
assert.NoError(t, err)
|
||||
|
||||
reader, err := m.ReadStream("stream.txt")
|
||||
assert.NoError(t, err)
|
||||
defer reader.Close()
|
||||
|
||||
// Read only first 9 bytes
|
||||
limitReader := io.LimitReader(reader, 9)
|
||||
data, err := io.ReadAll(limitReader)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "streaming", string(data))
|
||||
}
|
||||
|
||||
func TestWriteStream(t *testing.T) {
|
||||
root := t.TempDir()
|
||||
m, _ := New(root)
|
||||
|
||||
writer, err := m.WriteStream("output.txt")
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, err = io.Copy(writer, strings.NewReader("piped data"))
|
||||
assert.NoError(t, err)
|
||||
err = writer.Close()
|
||||
assert.NoError(t, err)
|
||||
|
||||
content, err := m.Read("output.txt")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "piped data", content)
|
||||
}
|
||||
|
||||
func TestPath_Traversal_Advanced(t *testing.T) {
|
||||
m := &Medium{root: "/sandbox"}
|
||||
|
||||
// Multiple levels of traversal
|
||||
assert.Equal(t, "/sandbox/file.txt", m.path("../../../file.txt"))
|
||||
assert.Equal(t, "/sandbox/target", m.path("dir/../../target"))
|
||||
|
||||
// Traversal with hidden files
|
||||
assert.Equal(t, "/sandbox/.ssh/id_rsa", m.path(".ssh/id_rsa"))
|
||||
assert.Equal(t, "/sandbox/id_rsa", m.path(".ssh/../id_rsa"))
|
||||
|
||||
// Null bytes (Go's filepath.Clean handles them, but good to check)
|
||||
assert.Equal(t, "/sandbox/file\x00.txt", m.path("file\x00.txt"))
|
||||
}
|
||||
|
||||
// TestValidatePath_Security exercises the sandbox-escape defences of
// validatePath: plain ".." traversal (sanitized away), a symlink inside the
// sandbox pointing outside, and a symlink nested one directory deeper.
// NOTE(review): uses os.Symlink, so it presumably requires a platform (or
// Windows privilege level) where symlink creation is allowed — confirm.
func TestValidatePath_Security(t *testing.T) {
	root := t.TempDir()
	m, err := New(root)
	assert.NoError(t, err)

	// Create a directory outside the sandbox
	outside := t.TempDir()
	outsideFile := filepath.Join(outside, "secret.txt")
	err = os.WriteFile(outsideFile, []byte("secret"), 0644)
	assert.NoError(t, err)

	// Test 1: Simple traversal
	_, err = m.validatePath("../outside.txt")
	assert.NoError(t, err) // path() sanitizes to root, so this shouldn't escape

	// Test 2: Symlink escape
	// Create a symlink inside the sandbox pointing outside
	linkPath := filepath.Join(root, "evil_link")
	err = os.Symlink(outside, linkPath)
	assert.NoError(t, err)

	// Try to access a file through the symlink
	_, err = m.validatePath("evil_link/secret.txt")
	assert.Error(t, err)
	assert.ErrorIs(t, err, os.ErrPermission)

	// Test 3: Nested symlink escape
	innerDir := filepath.Join(root, "inner")
	err = os.Mkdir(innerDir, 0755)
	assert.NoError(t, err)
	nestedLink := filepath.Join(innerDir, "nested_evil")
	err = os.Symlink(outside, nestedLink)
	assert.NoError(t, err)

	// The escape must be detected even when the symlink is not the first
	// path component.
	_, err = m.validatePath("inner/nested_evil/secret.txt")
	assert.Error(t, err)
	assert.ErrorIs(t, err, os.ErrPermission)
}
|
||||
|
||||
// TestEmptyPaths pins how each Medium method treats the empty path "",
// which resolves to the sandbox root directory.
func TestEmptyPaths(t *testing.T) {
	root := t.TempDir()
	m, err := New(root)
	assert.NoError(t, err)

	// Read empty path (should fail as it's a directory)
	_, err = m.Read("")
	assert.Error(t, err)

	// Write empty path (should fail as it's a directory)
	err = m.Write("", "content")
	assert.Error(t, err)

	// EnsureDir empty path (should be ok, it's just the root)
	err = m.EnsureDir("")
	assert.NoError(t, err)

	// IsDir("") currently returns false even though the root is a
	// directory; this assertion pins the existing behaviour.
	// NOTE(review): arguably it should return true — confirm intent.
	assert.False(t, m.IsDir(""))

	// Exists empty path (root exists)
	assert.True(t, m.Exists(""))

	// List empty path (lists root)
	entries, err := m.List("")
	assert.NoError(t, err)
	assert.NotNil(t, entries)
}
|
||||
|
|
@ -1,612 +0,0 @@
|
|||
// Package node provides an in-memory filesystem implementation of io.Medium
|
||||
// ported from Borg's DataNode. It stores files in memory with implicit
|
||||
// directory structure and supports tar serialisation.
|
||||
package node
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"cmp"
|
||||
goio "io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
coreio "forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// Node is an in-memory filesystem that implements coreio.Node (and therefore
// coreio.Medium). Directories are implicit -- they exist whenever a file path
// contains a "/".
type Node struct {
	// files maps slash-separated relative paths (no leading "/") to their
	// content. There are no explicit directory entries.
	files map[string]*dataFile
}

// compile-time interface checks
var _ coreio.Medium = (*Node)(nil)
var _ fs.ReadFileFS = (*Node)(nil)

// New creates a new, empty Node.
func New() *Node {
	return &Node{files: make(map[string]*dataFile)}
}
|
||||
|
||||
// ---------- Node-specific methods ----------
|
||||
|
||||
// AddData stages content in the in-memory filesystem.
|
||||
func (n *Node) AddData(name string, content []byte) {
|
||||
name = strings.TrimPrefix(name, "/")
|
||||
if name == "" {
|
||||
return
|
||||
}
|
||||
// Directories are implicit, so we don't store them.
|
||||
if strings.HasSuffix(name, "/") {
|
||||
return
|
||||
}
|
||||
n.files[name] = &dataFile{
|
||||
name: name,
|
||||
content: content,
|
||||
modTime: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
// ToTar serialises the entire in-memory tree to a tar archive.
|
||||
func (n *Node) ToTar() ([]byte, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
tw := tar.NewWriter(buf)
|
||||
|
||||
for _, file := range n.files {
|
||||
hdr := &tar.Header{
|
||||
Name: file.name,
|
||||
Mode: 0600,
|
||||
Size: int64(len(file.content)),
|
||||
ModTime: file.modTime,
|
||||
}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := tw.Write(file.content); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := tw.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// FromTar creates a new Node from a tar archive.
|
||||
func FromTar(data []byte) (*Node, error) {
|
||||
n := New()
|
||||
if err := n.LoadTar(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// LoadTar replaces the in-memory tree with the contents of a tar archive.
// The existing tree is only swapped in once the whole archive has been read
// successfully, so a malformed archive leaves the Node unchanged.
func (n *Node) LoadTar(data []byte) error {
	newFiles := make(map[string]*dataFile)
	tr := tar.NewReader(bytes.NewReader(data))

	for {
		header, err := tr.Next()
		if err == goio.EOF {
			// End of archive.
			break
		}
		if err != nil {
			return err
		}

		// Only regular files are materialised; directories are implicit
		// in this filesystem and all other entry types are skipped.
		if header.Typeflag == tar.TypeReg {
			content, err := goio.ReadAll(tr)
			if err != nil {
				return err
			}
			// Normalise names the same way AddData does: strip the
			// leading "/" and drop empty or directory-style names.
			name := strings.TrimPrefix(header.Name, "/")
			if name == "" || strings.HasSuffix(name, "/") {
				continue
			}
			newFiles[name] = &dataFile{
				name:    name,
				content: content,
				modTime: header.ModTime,
			}
		}
	}

	n.files = newFiles
	return nil
}
|
||||
|
||||
// WalkNode walks the in-memory tree, calling fn for each entry.
func (n *Node) WalkNode(root string, fn fs.WalkDirFunc) error {
	return fs.WalkDir(n, root, fn)
}

// WalkOptions configures the behaviour of Walk.
type WalkOptions struct {
	// MaxDepth limits how many directory levels to descend. 0 means unlimited.
	MaxDepth int
	// Filter, if set, is called for each entry. Return true to include the
	// entry (and descend into it if it is a directory).
	Filter func(path string, d fs.DirEntry) bool
	// SkipErrors suppresses errors (e.g. nonexistent root) instead of
	// propagating them through the callback.
	SkipErrors bool
}

// Walk walks the in-memory tree with optional WalkOptions. Filtered-out
// directories are skipped entirely; entries at MaxDepth are still visited,
// but the walk does not descend into them.
func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error {
	// Only the first options value is honoured; the variadic form exists so
	// callers can omit options entirely.
	var opt WalkOptions
	if len(opts) > 0 {
		opt = opts[0]
	}

	if opt.SkipErrors {
		// If root doesn't exist, silently return nil.
		if _, err := n.Stat(root); err != nil {
			return nil
		}
	}

	return fs.WalkDir(n, root, func(p string, d fs.DirEntry, err error) error {
		// The filter only applies to error-free entries; errors are passed
		// straight through to fn below.
		if opt.Filter != nil && err == nil {
			if !opt.Filter(p, d) {
				if d != nil && d.IsDir() {
					return fs.SkipDir
				}
				return nil
			}
		}

		// Call the user's function first so the entry is visited.
		result := fn(p, d, err)

		// After visiting a directory at MaxDepth, prevent descending further.
		// Depth is counted in path separators relative to root.
		if result == nil && opt.MaxDepth > 0 && d != nil && d.IsDir() && p != root {
			rel := strings.TrimPrefix(p, root)
			rel = strings.TrimPrefix(rel, "/")
			depth := strings.Count(rel, "/") + 1
			if depth >= opt.MaxDepth {
				return fs.SkipDir
			}
		}

		return result
	})
}
|
||||
|
||||
// ReadFile returns the content of the named file as a byte slice.
|
||||
// Implements fs.ReadFileFS.
|
||||
func (n *Node) ReadFile(name string) ([]byte, error) {
|
||||
name = strings.TrimPrefix(name, "/")
|
||||
f, ok := n.files[name]
|
||||
if !ok {
|
||||
return nil, &fs.PathError{Op: "read", Path: name, Err: fs.ErrNotExist}
|
||||
}
|
||||
// Return a copy to prevent callers from mutating internal state.
|
||||
result := make([]byte, len(f.content))
|
||||
copy(result, f.content)
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// CopyFile copies a file from the in-memory tree to the local filesystem.
|
||||
func (n *Node) CopyFile(src, dst string, perm fs.FileMode) error {
|
||||
src = strings.TrimPrefix(src, "/")
|
||||
f, ok := n.files[src]
|
||||
if !ok {
|
||||
// Check if it's a directory — can't copy directories this way.
|
||||
info, err := n.Stat(src)
|
||||
if err != nil {
|
||||
return &fs.PathError{Op: "copyfile", Path: src, Err: fs.ErrNotExist}
|
||||
}
|
||||
if info.IsDir() {
|
||||
return &fs.PathError{Op: "copyfile", Path: src, Err: fs.ErrInvalid}
|
||||
}
|
||||
return &fs.PathError{Op: "copyfile", Path: src, Err: fs.ErrNotExist}
|
||||
}
|
||||
return os.WriteFile(dst, f.content, perm)
|
||||
}
|
||||
|
||||
// CopyTo copies a file (or directory tree) from the node to any Medium.
// For a directory source, every stored file underneath it is written to
// target at destPath + "/" + its path relative to sourcePath.
func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error {
	sourcePath = strings.TrimPrefix(sourcePath, "/")
	info, err := n.Stat(sourcePath)
	if err != nil {
		return err
	}

	if !info.IsDir() {
		// Single file copy
		f, ok := n.files[sourcePath]
		if !ok {
			return fs.ErrNotExist
		}
		return target.Write(destPath, string(f.content))
	}

	// Directory: walk and copy all files underneath
	prefix := sourcePath
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	for p, f := range n.files {
		// NOTE(review): since sourcePath stats as a directory here, no file
		// key can equal sourcePath, so the "p != sourcePath" clause looks
		// unreachable — confirm before simplifying.
		if !strings.HasPrefix(p, prefix) && p != sourcePath {
			continue
		}
		rel := strings.TrimPrefix(p, prefix)
		dest := destPath
		if rel != "" {
			dest = destPath + "/" + rel
		}
		if err := target.Write(dest, string(f.content)); err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// ---------- Medium interface: fs.FS methods ----------
|
||||
|
||||
// Open opens a file from the Node. Implements fs.FS.
|
||||
func (n *Node) Open(name string) (fs.File, error) {
|
||||
name = strings.TrimPrefix(name, "/")
|
||||
if file, ok := n.files[name]; ok {
|
||||
return &dataFileReader{file: file}, nil
|
||||
}
|
||||
// Check if it's a directory
|
||||
prefix := name + "/"
|
||||
if name == "." || name == "" {
|
||||
prefix = ""
|
||||
}
|
||||
for p := range n.files {
|
||||
if strings.HasPrefix(p, prefix) {
|
||||
return &dirFile{path: name, modTime: time.Now()}, nil
|
||||
}
|
||||
}
|
||||
return nil, fs.ErrNotExist
|
||||
}
|
||||
|
||||
// Stat returns file information for the given path.
|
||||
func (n *Node) Stat(name string) (fs.FileInfo, error) {
|
||||
name = strings.TrimPrefix(name, "/")
|
||||
if file, ok := n.files[name]; ok {
|
||||
return file.Stat()
|
||||
}
|
||||
// Check if it's a directory
|
||||
prefix := name + "/"
|
||||
if name == "." || name == "" {
|
||||
prefix = ""
|
||||
}
|
||||
for p := range n.files {
|
||||
if strings.HasPrefix(p, prefix) {
|
||||
return &dirInfo{name: path.Base(name), modTime: time.Now()}, nil
|
||||
}
|
||||
}
|
||||
return nil, fs.ErrNotExist
|
||||
}
|
||||
|
||||
// ReadDir reads and returns all directory entries for the named directory,
// sorted by name. Immediate children that are files become file entries;
// deeper paths contribute one implicit-directory entry for their first
// component. A nonexistent directory yields an empty slice, not an error.
func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) {
	name = strings.TrimPrefix(name, "/")
	if name == "." {
		name = ""
	}

	// Disallow reading a file as a directory.
	if info, err := n.Stat(name); err == nil && !info.IsDir() {
		return nil, &fs.PathError{Op: "readdir", Path: name, Err: fs.ErrInvalid}
	}

	entries := []fs.DirEntry{}
	// seen deduplicates first path components so each child appears once.
	seen := make(map[string]bool)

	prefix := ""
	if name != "" {
		prefix = name + "/"
	}

	for p := range n.files {
		if !strings.HasPrefix(p, prefix) {
			continue
		}

		relPath := strings.TrimPrefix(p, prefix)
		firstComponent := strings.Split(relPath, "/")[0]

		if seen[firstComponent] {
			continue
		}
		seen[firstComponent] = true

		// A remaining "/" means the child is an implicit subdirectory;
		// otherwise it is a stored file directly under this directory.
		if strings.Contains(relPath, "/") {
			dir := &dirInfo{name: firstComponent, modTime: time.Now()}
			entries = append(entries, fs.FileInfoToDirEntry(dir))
		} else {
			file := n.files[p]
			info, _ := file.Stat()
			entries = append(entries, fs.FileInfoToDirEntry(info))
		}
	}

	// Map iteration order is random; sort for deterministic listings.
	slices.SortFunc(entries, func(a, b fs.DirEntry) int {
		return cmp.Compare(a.Name(), b.Name())
	})

	return entries, nil
}
|
||||
|
||||
// ---------- Medium interface: read/write ----------
|
||||
|
||||
// Read retrieves the content of a file as a string.
|
||||
func (n *Node) Read(p string) (string, error) {
|
||||
p = strings.TrimPrefix(p, "/")
|
||||
f, ok := n.files[p]
|
||||
if !ok {
|
||||
return "", fs.ErrNotExist
|
||||
}
|
||||
return string(f.content), nil
|
||||
}
|
||||
|
||||
// Write saves the given content to a file, overwriting it if it exists.
|
||||
func (n *Node) Write(p, content string) error {
|
||||
n.AddData(p, []byte(content))
|
||||
return nil
|
||||
}
|
||||
|
||||
// FileGet is an alias for Read.
|
||||
func (n *Node) FileGet(p string) (string, error) {
|
||||
return n.Read(p)
|
||||
}
|
||||
|
||||
// FileSet is an alias for Write.
|
||||
func (n *Node) FileSet(p, content string) error {
|
||||
return n.Write(p, content)
|
||||
}
|
||||
|
||||
// EnsureDir is a no-op because directories are implicit in Node.
|
||||
func (n *Node) EnsureDir(_ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ---------- Medium interface: existence checks ----------
|
||||
|
||||
// Exists checks if a path exists (file or directory).
|
||||
func (n *Node) Exists(p string) bool {
|
||||
_, err := n.Stat(p)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// IsFile checks if a path exists and is a regular file.
|
||||
func (n *Node) IsFile(p string) bool {
|
||||
p = strings.TrimPrefix(p, "/")
|
||||
_, ok := n.files[p]
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsDir checks if a path exists and is a directory.
|
||||
func (n *Node) IsDir(p string) bool {
|
||||
info, err := n.Stat(p)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return info.IsDir()
|
||||
}
|
||||
|
||||
// ---------- Medium interface: mutations ----------
|
||||
|
||||
// Delete removes a single file.
|
||||
func (n *Node) Delete(p string) error {
|
||||
p = strings.TrimPrefix(p, "/")
|
||||
if _, ok := n.files[p]; ok {
|
||||
delete(n.files, p)
|
||||
return nil
|
||||
}
|
||||
return fs.ErrNotExist
|
||||
}
|
||||
|
||||
// DeleteAll removes a file or directory and all children.
|
||||
func (n *Node) DeleteAll(p string) error {
|
||||
p = strings.TrimPrefix(p, "/")
|
||||
|
||||
found := false
|
||||
if _, ok := n.files[p]; ok {
|
||||
delete(n.files, p)
|
||||
found = true
|
||||
}
|
||||
|
||||
prefix := p + "/"
|
||||
for k := range n.files {
|
||||
if strings.HasPrefix(k, prefix) {
|
||||
delete(n.files, k)
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return fs.ErrNotExist
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rename moves a file from oldPath to newPath.
|
||||
func (n *Node) Rename(oldPath, newPath string) error {
|
||||
oldPath = strings.TrimPrefix(oldPath, "/")
|
||||
newPath = strings.TrimPrefix(newPath, "/")
|
||||
|
||||
f, ok := n.files[oldPath]
|
||||
if !ok {
|
||||
return fs.ErrNotExist
|
||||
}
|
||||
|
||||
f.name = newPath
|
||||
n.files[newPath] = f
|
||||
delete(n.files, oldPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// List returns directory entries for the given path.
|
||||
func (n *Node) List(p string) ([]fs.DirEntry, error) {
|
||||
p = strings.TrimPrefix(p, "/")
|
||||
if p == "" || p == "." {
|
||||
return n.ReadDir(".")
|
||||
}
|
||||
return n.ReadDir(p)
|
||||
}
|
||||
|
||||
// ---------- Medium interface: streams ----------
|
||||
|
||||
// Create creates or truncates the named file, returning a WriteCloser.
|
||||
// Content is committed to the Node on Close.
|
||||
func (n *Node) Create(p string) (goio.WriteCloser, error) {
|
||||
p = strings.TrimPrefix(p, "/")
|
||||
return &nodeWriter{node: n, path: p}, nil
|
||||
}
|
||||
|
||||
// Append opens the named file for appending, creating it if needed.
|
||||
// Content is committed to the Node on Close.
|
||||
func (n *Node) Append(p string) (goio.WriteCloser, error) {
|
||||
p = strings.TrimPrefix(p, "/")
|
||||
var existing []byte
|
||||
if f, ok := n.files[p]; ok {
|
||||
existing = make([]byte, len(f.content))
|
||||
copy(existing, f.content)
|
||||
}
|
||||
return &nodeWriter{node: n, path: p, buf: existing}, nil
|
||||
}
|
||||
|
||||
// ReadStream returns a ReadCloser for the file content.
|
||||
func (n *Node) ReadStream(p string) (goio.ReadCloser, error) {
|
||||
f, err := n.Open(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return goio.NopCloser(f), nil
|
||||
}
|
||||
|
||||
// WriteStream returns a WriteCloser for the file content.
|
||||
func (n *Node) WriteStream(p string) (goio.WriteCloser, error) {
|
||||
return n.Create(p)
|
||||
}
|
||||
|
||||
// ---------- Internal types ----------
|
||||
|
||||
// nodeWriter buffers writes and commits them to the Node on Close.
type nodeWriter struct {
	// node is the filesystem the buffered content is committed to.
	node *Node
	// path is the destination file name (no leading "/").
	path string
	// buf accumulates all written bytes until Close.
	buf []byte
}

// Write appends p to the in-memory buffer; it never fails.
func (w *nodeWriter) Write(p []byte) (int, error) {
	w.buf = append(w.buf, p...)
	return len(p), nil
}

// Close commits the buffered bytes to the Node as a fresh dataFile,
// replacing any existing file at the same path.
func (w *nodeWriter) Close() error {
	w.node.files[w.path] = &dataFile{
		name:    w.path,
		content: w.buf,
		modTime: time.Now(),
	}
	return nil
}
|
||||
|
||||
// dataFile represents a file in the Node.
type dataFile struct {
	name    string
	content []byte
	modTime time.Time
}

func (d *dataFile) Stat() (fs.FileInfo, error) { return &dataFileInfo{file: d}, nil }

// Read on dataFile itself always reports EOF; actual reading goes through
// dataFileReader, which Open returns.
func (d *dataFile) Read(_ []byte) (int, error) { return 0, goio.EOF }
func (d *dataFile) Close() error               { return nil }

// dataFileInfo implements fs.FileInfo for a dataFile.
type dataFileInfo struct{ file *dataFile }

func (d *dataFileInfo) Name() string       { return path.Base(d.file.name) }
func (d *dataFileInfo) Size() int64        { return int64(len(d.file.content)) }
func (d *dataFileInfo) Mode() fs.FileMode  { return 0444 }
func (d *dataFileInfo) ModTime() time.Time { return d.file.modTime }
func (d *dataFileInfo) IsDir() bool        { return false }
func (d *dataFileInfo) Sys() any           { return nil }

// dataFileReader implements fs.File for reading a dataFile. The underlying
// bytes.Reader is created lazily on the first Read.
type dataFileReader struct {
	file   *dataFile
	reader *bytes.Reader
}

func (d *dataFileReader) Stat() (fs.FileInfo, error) { return d.file.Stat() }
func (d *dataFileReader) Read(p []byte) (int, error) {
	if d.reader == nil {
		d.reader = bytes.NewReader(d.file.content)
	}
	return d.reader.Read(p)
}
func (d *dataFileReader) Close() error { return nil }
|
||||
|
||||
// dirInfo implements fs.FileInfo for an implicit directory.
type dirInfo struct {
	name    string
	modTime time.Time
}

func (d *dirInfo) Name() string       { return d.name }
func (d *dirInfo) Size() int64        { return 0 }
func (d *dirInfo) Mode() fs.FileMode  { return fs.ModeDir | 0555 }
func (d *dirInfo) ModTime() time.Time { return d.modTime }
func (d *dirInfo) IsDir() bool        { return true }
func (d *dirInfo) Sys() any           { return nil }

// dirFile implements fs.File for a directory. It can be stat-ed and closed,
// but reading it fails with fs.ErrInvalid, matching os.File behaviour.
type dirFile struct {
	path    string
	modTime time.Time
}

func (d *dirFile) Stat() (fs.FileInfo, error) {
	return &dirInfo{name: path.Base(d.path), modTime: d.modTime}, nil
}
func (d *dirFile) Read([]byte) (int, error) {
	return 0, &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid}
}
func (d *dirFile) Close() error { return nil }
|
||||
|
||||
// Ensure Node implements fs.FS so WalkDir works.
var _ fs.FS = (*Node)(nil)

// Ensure Node also satisfies fs.StatFS and fs.ReadDirFS for WalkDir.
var _ fs.StatFS = (*Node)(nil)
var _ fs.ReadDirFS = (*Node)(nil)

// Unexported helper: ensure ReadStream result also satisfies fs.File
// (for cases where callers do a type assertion).
var _ goio.ReadCloser = goio.NopCloser(nil)

// Ensure nodeWriter satisfies goio.WriteCloser.
var _ goio.WriteCloser = (*nodeWriter)(nil)

// Ensure dirFile satisfies fs.File.
var _ fs.File = (*dirFile)(nil)

// Ensure dataFileReader satisfies fs.File.
var _ fs.File = (*dataFileReader)(nil)

// ReadDirFile is not needed since fs.WalkDir works via ReadDirFS on the FS itself,
// but we need the Node to satisfy fs.ReadDirFS.

// ensure all internal compile-time checks are grouped above
// no further type assertions needed

// unused import guard
// NOTE(review): os is already used elsewhere in this file (CopyFile), so
// this guard appears redundant — confirm before removing.
var _ = os.ErrNotExist
|
||||
|
|
@ -1,528 +0,0 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// New
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestNew_Good(t *testing.T) {
|
||||
n := New()
|
||||
require.NotNil(t, n, "New() must not return nil")
|
||||
assert.NotNil(t, n.files, "New() must initialize the files map")
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// AddData
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestAddData_Good(t *testing.T) {
|
||||
n := New()
|
||||
n.AddData("foo.txt", []byte("foo"))
|
||||
|
||||
file, ok := n.files["foo.txt"]
|
||||
require.True(t, ok, "file foo.txt should be present")
|
||||
assert.Equal(t, []byte("foo"), file.content)
|
||||
|
||||
info, err := file.Stat()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "foo.txt", info.Name())
|
||||
}
|
||||
|
||||
func TestAddData_Bad(t *testing.T) {
|
||||
n := New()
|
||||
|
||||
// Empty name is silently ignored.
|
||||
n.AddData("", []byte("data"))
|
||||
assert.Empty(t, n.files, "empty name must not be stored")
|
||||
|
||||
// Directory entry (trailing slash) is silently ignored.
|
||||
n.AddData("dir/", nil)
|
||||
assert.Empty(t, n.files, "directory entry must not be stored")
|
||||
}
|
||||
|
||||
func TestAddData_Ugly(t *testing.T) {
|
||||
t.Run("Overwrite", func(t *testing.T) {
|
||||
n := New()
|
||||
n.AddData("foo.txt", []byte("foo"))
|
||||
n.AddData("foo.txt", []byte("bar"))
|
||||
|
||||
file := n.files["foo.txt"]
|
||||
assert.Equal(t, []byte("bar"), file.content, "second AddData should overwrite")
|
||||
})
|
||||
|
||||
t.Run("LeadingSlash", func(t *testing.T) {
|
||||
n := New()
|
||||
n.AddData("/hello.txt", []byte("hi"))
|
||||
_, ok := n.files["hello.txt"]
|
||||
assert.True(t, ok, "leading slash should be trimmed")
|
||||
})
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Open
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestOpen_Good(t *testing.T) {
|
||||
n := New()
|
||||
n.AddData("foo.txt", []byte("foo"))
|
||||
|
||||
file, err := n.Open("foo.txt")
|
||||
require.NoError(t, err)
|
||||
defer file.Close()
|
||||
|
||||
buf := make([]byte, 10)
|
||||
nr, err := file.Read(buf)
|
||||
require.True(t, nr > 0 || err == io.EOF)
|
||||
assert.Equal(t, "foo", string(buf[:nr]))
|
||||
}
|
||||
|
||||
func TestOpen_Bad(t *testing.T) {
|
||||
n := New()
|
||||
_, err := n.Open("nonexistent.txt")
|
||||
require.Error(t, err)
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
}
|
||||
|
||||
func TestOpen_Ugly(t *testing.T) {
|
||||
n := New()
|
||||
n.AddData("bar/baz.txt", []byte("baz"))
|
||||
|
||||
// Opening a directory should succeed.
|
||||
file, err := n.Open("bar")
|
||||
require.NoError(t, err)
|
||||
defer file.Close()
|
||||
|
||||
// Reading from a directory should fail.
|
||||
_, err = file.Read(make([]byte, 1))
|
||||
require.Error(t, err)
|
||||
|
||||
var pathErr *fs.PathError
|
||||
require.True(t, errors.As(err, &pathErr))
|
||||
assert.Equal(t, fs.ErrInvalid, pathErr.Err)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Stat
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestStat_Good(t *testing.T) {
|
||||
n := New()
|
||||
n.AddData("foo.txt", []byte("foo"))
|
||||
n.AddData("bar/baz.txt", []byte("baz"))
|
||||
|
||||
// File stat.
|
||||
info, err := n.Stat("bar/baz.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "baz.txt", info.Name())
|
||||
assert.Equal(t, int64(3), info.Size())
|
||||
assert.False(t, info.IsDir())
|
||||
|
||||
// Directory stat.
|
||||
dirInfo, err := n.Stat("bar")
|
||||
require.NoError(t, err)
|
||||
assert.True(t, dirInfo.IsDir())
|
||||
assert.Equal(t, "bar", dirInfo.Name())
|
||||
}
|
||||
|
||||
func TestStat_Bad(t *testing.T) {
|
||||
n := New()
|
||||
_, err := n.Stat("nonexistent")
|
||||
require.Error(t, err)
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
}
|
||||
|
||||
func TestStat_Ugly(t *testing.T) {
|
||||
n := New()
|
||||
n.AddData("foo.txt", []byte("foo"))
|
||||
|
||||
// Root directory.
|
||||
info, err := n.Stat(".")
|
||||
require.NoError(t, err)
|
||||
assert.True(t, info.IsDir())
|
||||
assert.Equal(t, ".", info.Name())
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// ReadFile
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestReadFile_Good(t *testing.T) {
|
||||
n := New()
|
||||
n.AddData("hello.txt", []byte("hello world"))
|
||||
|
||||
data, err := n.ReadFile("hello.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte("hello world"), data)
|
||||
}
|
||||
|
||||
func TestReadFile_Bad(t *testing.T) {
|
||||
n := New()
|
||||
_, err := n.ReadFile("missing.txt")
|
||||
require.Error(t, err)
|
||||
assert.ErrorIs(t, err, fs.ErrNotExist)
|
||||
}
|
||||
|
||||
func TestReadFile_Ugly(t *testing.T) {
|
||||
n := New()
|
||||
n.AddData("data.bin", []byte("original"))
|
||||
|
||||
// Returned slice must be a copy — mutating it must not affect internal state.
|
||||
data, err := n.ReadFile("data.bin")
|
||||
require.NoError(t, err)
|
||||
data[0] = 'X'
|
||||
|
||||
data2, err := n.ReadFile("data.bin")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte("original"), data2, "ReadFile must return an independent copy")
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// ReadDir
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestReadDir_Good verifies directory listings for the root and a subdirectory.
func TestReadDir_Good(t *testing.T) {
	n := New()
	n.AddData("foo.txt", []byte("foo"))
	n.AddData("bar/baz.txt", []byte("baz"))
	n.AddData("bar/qux.txt", []byte("qux"))

	// Root.
	entries, err := n.ReadDir(".")
	require.NoError(t, err)
	assert.Equal(t, []string{"bar", "foo.txt"}, sortedNames(entries))

	// Subdirectory.
	barEntries, err := n.ReadDir("bar")
	require.NoError(t, err)
	assert.Equal(t, []string{"baz.txt", "qux.txt"}, sortedNames(barEntries))
}

// TestReadDir_Bad verifies that listing a regular file as a directory
// fails with an *fs.PathError wrapping fs.ErrInvalid.
func TestReadDir_Bad(t *testing.T) {
	n := New()
	n.AddData("foo.txt", []byte("foo"))

	// Reading a file as a directory should fail.
	_, err := n.ReadDir("foo.txt")
	require.Error(t, err)
	var pathErr *fs.PathError
	require.True(t, errors.As(err, &pathErr))
	assert.Equal(t, fs.ErrInvalid, pathErr.Err)
}

// TestReadDir_Ugly verifies that trailing-slash "directory" keys are
// ignored by AddData and never surface as listing entries.
func TestReadDir_Ugly(t *testing.T) {
	n := New()
	n.AddData("bar/baz.txt", []byte("baz"))
	n.AddData("empty_dir/", nil) // Ignored by AddData.

	entries, err := n.ReadDir(".")
	require.NoError(t, err)
	assert.Equal(t, []string{"bar"}, sortedNames(entries))
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Exists
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestExists_Good(t *testing.T) {
|
||||
n := New()
|
||||
n.AddData("foo.txt", []byte("foo"))
|
||||
n.AddData("bar/baz.txt", []byte("baz"))
|
||||
|
||||
assert.True(t, n.Exists("foo.txt"))
|
||||
assert.True(t, n.Exists("bar"))
|
||||
}
|
||||
|
||||
func TestExists_Bad(t *testing.T) {
|
||||
n := New()
|
||||
assert.False(t, n.Exists("nonexistent"))
|
||||
}
|
||||
|
||||
func TestExists_Ugly(t *testing.T) {
|
||||
n := New()
|
||||
n.AddData("dummy.txt", []byte("dummy"))
|
||||
|
||||
assert.True(t, n.Exists("."), "root '.' must exist")
|
||||
assert.True(t, n.Exists(""), "empty path (root) must exist")
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Walk
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestWalk_Good verifies a full traversal visits the root, every
// directory, and every file exactly once.
func TestWalk_Good(t *testing.T) {
	n := New()
	n.AddData("foo.txt", []byte("foo"))
	n.AddData("bar/baz.txt", []byte("baz"))
	n.AddData("bar/qux.txt", []byte("qux"))

	var paths []string
	err := n.Walk(".", func(p string, d fs.DirEntry, err error) error {
		paths = append(paths, p)
		return nil
	})
	require.NoError(t, err)

	// Sorted for a deterministic comparison regardless of walk order.
	sort.Strings(paths)
	assert.Equal(t, []string{".", "bar", "bar/baz.txt", "bar/qux.txt", "foo.txt"}, paths)
}

// TestWalk_Bad verifies that a nonexistent root still invokes the
// callback once with fs.ErrNotExist, and that returning the error
// propagates it out of Walk.
func TestWalk_Bad(t *testing.T) {
	n := New()

	var called bool
	err := n.Walk("nonexistent", func(p string, d fs.DirEntry, err error) error {
		called = true
		assert.Error(t, err)
		assert.ErrorIs(t, err, fs.ErrNotExist)
		return err
	})
	assert.True(t, called, "walk function must be called for nonexistent root")
	assert.ErrorIs(t, err, fs.ErrNotExist)
}

// TestWalk_Ugly verifies early termination: a custom error returned from
// the callback must be propagated verbatim by Walk. The paths slice is
// collected only up to the stop point; the assertion here is on the error.
func TestWalk_Ugly(t *testing.T) {
	n := New()
	n.AddData("a/b.txt", []byte("b"))
	n.AddData("a/c.txt", []byte("c"))

	// Stop walk early with a custom error.
	walkErr := errors.New("stop walking")
	var paths []string
	err := n.Walk(".", func(p string, d fs.DirEntry, err error) error {
		if p == "a/b.txt" {
			return walkErr
		}
		paths = append(paths, p)
		return nil
	})

	assert.Equal(t, walkErr, err, "Walk must propagate the callback error")
}
|
||||
|
||||
// TestWalk_Options exercises the three WalkOptions knobs — MaxDepth,
// Filter, and SkipErrors — against a small two-level tree.
func TestWalk_Options(t *testing.T) {
	n := New()
	n.AddData("root.txt", []byte("root"))
	n.AddData("a/a1.txt", []byte("a1"))
	n.AddData("a/b/b1.txt", []byte("b1"))
	n.AddData("c/c1.txt", []byte("c1"))

	// MaxDepth: depth 1 sees the root and its immediate children only;
	// nothing under a/ or c/ is visited.
	t.Run("MaxDepth", func(t *testing.T) {
		var paths []string
		err := n.Walk(".", func(p string, d fs.DirEntry, err error) error {
			paths = append(paths, p)
			return nil
		}, WalkOptions{MaxDepth: 1})
		require.NoError(t, err)

		sort.Strings(paths)
		assert.Equal(t, []string{".", "a", "c", "root.txt"}, paths)
	})

	// Filter: rejecting the "a" prefix prunes the whole "a" subtree,
	// including a/b/b1.txt.
	t.Run("Filter", func(t *testing.T) {
		var paths []string
		err := n.Walk(".", func(p string, d fs.DirEntry, err error) error {
			paths = append(paths, p)
			return nil
		}, WalkOptions{Filter: func(p string, d fs.DirEntry) bool {
			return !strings.HasPrefix(p, "a")
		}})
		require.NoError(t, err)

		sort.Strings(paths)
		assert.Equal(t, []string{".", "c", "c/c1.txt", "root.txt"}, paths)
	})

	// SkipErrors: a nonexistent root is silently ignored — no callback
	// invocation and no error returned.
	t.Run("SkipErrors", func(t *testing.T) {
		var called bool
		err := n.Walk("nonexistent", func(p string, d fs.DirEntry, err error) error {
			called = true
			return err
		}, WalkOptions{SkipErrors: true})

		assert.NoError(t, err, "SkipErrors should suppress the error")
		assert.False(t, called, "callback should not be called when error is skipped")
	})
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// CopyFile
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestCopyFile_Good verifies copying an in-memory file out to the real
// filesystem with the requested permissions.
func TestCopyFile_Good(t *testing.T) {
	n := New()
	n.AddData("foo.txt", []byte("foo"))

	tmpfile := filepath.Join(t.TempDir(), "test.txt")
	err := n.CopyFile("foo.txt", tmpfile, 0644)
	require.NoError(t, err)

	content, err := os.ReadFile(tmpfile)
	require.NoError(t, err)
	assert.Equal(t, "foo", string(content))
}

// TestCopyFile_Bad verifies the two failure directions: missing source,
// and a destination whose parent directory does not exist.
func TestCopyFile_Bad(t *testing.T) {
	n := New()
	tmpfile := filepath.Join(t.TempDir(), "test.txt")

	// Source does not exist.
	err := n.CopyFile("nonexistent.txt", tmpfile, 0644)
	assert.Error(t, err)

	// Destination not writable.
	n.AddData("foo.txt", []byte("foo"))
	err = n.CopyFile("foo.txt", "/nonexistent_dir/test.txt", 0644)
	assert.Error(t, err)
}

// TestCopyFile_Ugly verifies that a directory cannot be copied as a file.
func TestCopyFile_Ugly(t *testing.T) {
	n := New()
	n.AddData("bar/baz.txt", []byte("baz"))
	tmpfile := filepath.Join(t.TempDir(), "test.txt")

	// Attempting to copy a directory should fail.
	err := n.CopyFile("bar", tmpfile, 0644)
	assert.Error(t, err)
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// ToTar / FromTar
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestToTar_Good verifies that ToTar produces a readable archive
// containing every file with its original path and content.
func TestToTar_Good(t *testing.T) {
	n := New()
	n.AddData("foo.txt", []byte("foo"))
	n.AddData("bar/baz.txt", []byte("baz"))

	tarball, err := n.ToTar()
	require.NoError(t, err)
	require.NotEmpty(t, tarball)

	// Verify tar content by decoding the archive with the stdlib reader.
	tr := tar.NewReader(bytes.NewReader(tarball))
	files := make(map[string]string)
	for {
		header, err := tr.Next()
		if err == io.EOF {
			break // end of archive
		}
		require.NoError(t, err)
		content, err := io.ReadAll(tr)
		require.NoError(t, err)
		files[header.Name] = string(content)
	}

	assert.Equal(t, "foo", files["foo.txt"])
	assert.Equal(t, "baz", files["bar/baz.txt"])
}
|
||||
|
||||
// TestFromTar_Good verifies that an archive built with the stdlib tar
// writer hydrates into a filesystem containing every regular file.
func TestFromTar_Good(t *testing.T) {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)

	for _, f := range []struct{ Name, Body string }{
		{"foo.txt", "foo"},
		{"bar/baz.txt", "baz"},
	} {
		hdr := &tar.Header{
			Name:     f.Name,
			Mode:     0600,
			Size:     int64(len(f.Body)),
			Typeflag: tar.TypeReg,
		}
		require.NoError(t, tw.WriteHeader(hdr))
		_, err := tw.Write([]byte(f.Body))
		require.NoError(t, err)
	}
	require.NoError(t, tw.Close())

	n, err := FromTar(buf.Bytes())
	require.NoError(t, err)

	assert.True(t, n.Exists("foo.txt"), "foo.txt should exist")
	assert.True(t, n.Exists("bar/baz.txt"), "bar/baz.txt should exist")
}

// TestFromTar_Bad verifies that malformed input is rejected.
func TestFromTar_Bad(t *testing.T) {
	// Truncated data that cannot be a valid tar.
	truncated := make([]byte, 100)
	_, err := FromTar(truncated)
	assert.Error(t, err, "truncated data should produce an error")
}
|
||||
|
||||
func TestTarRoundTrip_Good(t *testing.T) {
|
||||
n1 := New()
|
||||
n1.AddData("a.txt", []byte("alpha"))
|
||||
n1.AddData("b/c.txt", []byte("charlie"))
|
||||
|
||||
tarball, err := n1.ToTar()
|
||||
require.NoError(t, err)
|
||||
|
||||
n2, err := FromTar(tarball)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify n2 matches n1.
|
||||
data, err := n2.ReadFile("a.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte("alpha"), data)
|
||||
|
||||
data, err = n2.ReadFile("b/c.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte("charlie"), data)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// fs.FS interface compliance
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestFSInterface_Good verifies that the type satisfies the standard
// fs.FS, fs.StatFS, and fs.ReadFileFS interfaces at compile time and
// behaves correctly through each of them.
func TestFSInterface_Good(t *testing.T) {
	n := New()
	n.AddData("hello.txt", []byte("world"))

	// fs.FS
	var fsys fs.FS = n
	file, err := fsys.Open("hello.txt")
	require.NoError(t, err)
	defer file.Close()

	// fs.StatFS
	var statFS fs.StatFS = n
	info, err := statFS.Stat("hello.txt")
	require.NoError(t, err)
	assert.Equal(t, "hello.txt", info.Name())
	assert.Equal(t, int64(5), info.Size())

	// fs.ReadFileFS
	var readFS fs.ReadFileFS = n
	data, err := readFS.ReadFile("hello.txt")
	require.NoError(t, err)
	assert.Equal(t, []byte("world"), data)
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func sortedNames(entries []fs.DirEntry) []string {
|
||||
var names []string
|
||||
for _, e := range entries {
|
||||
names = append(names, e.Name())
|
||||
}
|
||||
sort.Strings(names)
|
||||
return names
|
||||
}
|
||||
625
pkg/io/s3/s3.go
625
pkg/io/s3/s3.go
|
|
@ -1,625 +0,0 @@
|
|||
// Package s3 provides an S3-backed implementation of the io.Medium interface.
|
||||
package s3
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
goio "io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
|
||||
coreerr "forge.lthn.ai/core/go/pkg/framework/core"
|
||||
)
|
||||
|
||||
// s3API is the subset of the S3 client API used by this package.
// This allows for interface-based mocking in tests: *s3.Client satisfies
// it directly, and tests supply an in-memory implementation via withAPI.
type s3API interface {
	GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error)
	PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error)
	DeleteObject(ctx context.Context, params *s3.DeleteObjectInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectOutput, error)
	DeleteObjects(ctx context.Context, params *s3.DeleteObjectsInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error)
	HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error)
	ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error)
	CopyObject(ctx context.Context, params *s3.CopyObjectInput, optFns ...func(*s3.Options)) (*s3.CopyObjectOutput, error)
}
|
||||
|
||||
// Medium is an S3-backed storage backend implementing the io.Medium interface.
// The zero value is unusable; construct with New, which requires a bucket and
// an injected client.
type Medium struct {
	client s3API  // injected S3 client: a real *s3.Client or a test mock
	bucket string // target bucket name; required
	prefix string // optional key prefix; normalised to end with "/" when set
}

// Option configures a Medium.
type Option func(*Medium)

// WithPrefix sets an optional key prefix for all operations.
func WithPrefix(prefix string) Option {
	return func(m *Medium) {
		// Ensure prefix ends with "/" if non-empty
		if prefix != "" && !strings.HasSuffix(prefix, "/") {
			prefix += "/"
		}
		m.prefix = prefix
	}
}

// WithClient sets the S3 client for dependency injection.
func WithClient(client *s3.Client) Option {
	return func(m *Medium) {
		m.client = client
	}
}

// withAPI sets the s3API interface directly (for testing with mocks).
func withAPI(api s3API) Option {
	return func(m *Medium) {
		m.client = api
	}
}
|
||||
|
||||
// New creates a new S3 Medium for the given bucket.
|
||||
func New(bucket string, opts ...Option) (*Medium, error) {
|
||||
if bucket == "" {
|
||||
return nil, coreerr.E("s3.New", "bucket name is required", nil)
|
||||
}
|
||||
m := &Medium{bucket: bucket}
|
||||
for _, opt := range opts {
|
||||
opt(m)
|
||||
}
|
||||
if m.client == nil {
|
||||
return nil, coreerr.E("s3.New", "S3 client is required (use WithClient option)", nil)
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// key returns the full S3 object key for a given path.
|
||||
func (m *Medium) key(p string) string {
|
||||
// Clean the path using a leading "/" to sandbox traversal attempts,
|
||||
// then strip the "/" prefix. This ensures ".." can't escape.
|
||||
clean := path.Clean("/" + p)
|
||||
if clean == "/" {
|
||||
clean = ""
|
||||
}
|
||||
clean = strings.TrimPrefix(clean, "/")
|
||||
|
||||
if m.prefix == "" {
|
||||
return clean
|
||||
}
|
||||
if clean == "" {
|
||||
return m.prefix
|
||||
}
|
||||
return m.prefix + clean
|
||||
}
|
||||
|
||||
// Read retrieves the content of a file as a string.
// The whole body is buffered in memory, so this suits small payloads;
// use ReadStream for large objects.
func (m *Medium) Read(p string) (string, error) {
	key := m.key(p)
	if key == "" {
		// Empty resolved key means the caller addressed the root.
		return "", coreerr.E("s3.Read", "path is required", os.ErrInvalid)
	}

	out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{
		Bucket: aws.String(m.bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return "", coreerr.E("s3.Read", "failed to get object: "+key, err)
	}
	defer out.Body.Close()

	data, err := goio.ReadAll(out.Body)
	if err != nil {
		return "", coreerr.E("s3.Read", "failed to read body: "+key, err)
	}
	return string(data), nil
}
|
||||
|
||||
// Write saves the given content to a file, overwriting it if it exists.
func (m *Medium) Write(p, content string) error {
	key := m.key(p)
	if key == "" {
		return coreerr.E("s3.Write", "path is required", os.ErrInvalid)
	}

	_, err := m.client.PutObject(context.Background(), &s3.PutObjectInput{
		Bucket: aws.String(m.bucket),
		Key:    aws.String(key),
		Body:   strings.NewReader(content),
	})
	if err != nil {
		return coreerr.E("s3.Write", "failed to put object: "+key, err)
	}
	return nil
}

// EnsureDir is a no-op for S3 (S3 has no real directories).
// It exists only to satisfy the io.Medium interface.
func (m *Medium) EnsureDir(_ string) error {
	return nil
}
|
||||
|
||||
// IsFile checks if a path exists and is a regular file (not a "directory" prefix).
func (m *Medium) IsFile(p string) bool {
	key := m.key(p)
	if key == "" {
		return false
	}
	// A "file" in S3 is an object whose key does not end with "/".
	// With a configured prefix, a path that resolves to the bare prefix
	// ends with "/" and is therefore treated as a directory here.
	if strings.HasSuffix(key, "/") {
		return false
	}
	// Existence is probed with a HEAD request; any error counts as "not a file".
	_, err := m.client.HeadObject(context.Background(), &s3.HeadObjectInput{
		Bucket: aws.String(m.bucket),
		Key:    aws.String(key),
	})
	return err == nil
}

// FileGet is a convenience function that reads a file from the medium.
func (m *Medium) FileGet(p string) (string, error) {
	return m.Read(p)
}

// FileSet is a convenience function that writes a file to the medium.
func (m *Medium) FileSet(p, content string) error {
	return m.Write(p, content)
}
|
||||
|
||||
// Delete removes a single object.
|
||||
func (m *Medium) Delete(p string) error {
|
||||
key := m.key(p)
|
||||
if key == "" {
|
||||
return coreerr.E("s3.Delete", "path is required", os.ErrInvalid)
|
||||
}
|
||||
|
||||
_, err := m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(m.bucket),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
if err != nil {
|
||||
return coreerr.E("s3.Delete", "failed to delete object: "+key, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteAll removes all objects under the given prefix: first the exact
// key (best effort), then every object listed under "key/" using batched
// DeleteObjects calls, following the ListObjectsV2 pagination tokens
// until the listing is exhausted.
func (m *Medium) DeleteAll(p string) error {
	key := m.key(p)
	if key == "" {
		return coreerr.E("s3.DeleteAll", "path is required", os.ErrInvalid)
	}

	// First, try deleting the exact key. The error is deliberately ignored:
	// the key may not exist as a standalone object and the prefix sweep
	// below is the authoritative cleanup.
	_, _ = m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
		Bucket: aws.String(m.bucket),
		Key:    aws.String(key),
	})

	// Then delete all objects under the prefix.
	prefix := key
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	paginator := true
	var continuationToken *string

	for paginator {
		listOut, err := m.client.ListObjectsV2(context.Background(), &s3.ListObjectsV2Input{
			Bucket:            aws.String(m.bucket),
			Prefix:            aws.String(prefix),
			ContinuationToken: continuationToken,
		})
		if err != nil {
			return coreerr.E("s3.DeleteAll", "failed to list objects: "+prefix, err)
		}

		if len(listOut.Contents) == 0 {
			break // nothing (left) under the prefix
		}

		// Batch-delete everything returned by this page.
		objects := make([]types.ObjectIdentifier, len(listOut.Contents))
		for i, obj := range listOut.Contents {
			objects[i] = types.ObjectIdentifier{Key: obj.Key}
		}

		_, err = m.client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{
			Bucket: aws.String(m.bucket),
			Delete: &types.Delete{Objects: objects, Quiet: aws.Bool(true)},
		})
		if err != nil {
			return coreerr.E("s3.DeleteAll", "failed to delete objects", err)
		}

		// Continue only while S3 reports a truncated listing.
		if listOut.IsTruncated != nil && *listOut.IsTruncated {
			continuationToken = listOut.NextContinuationToken
		} else {
			paginator = false
		}
	}

	return nil
}
|
||||
|
||||
// Rename moves an object by copying then deleting the original.
// Not atomic: a failure between the copy and the delete leaves both
// keys present.
//
// NOTE(review): AWS documents that CopySource must be URL-encoded;
// keys containing special characters may fail here — confirm against
// the SDK version in use.
func (m *Medium) Rename(oldPath, newPath string) error {
	oldKey := m.key(oldPath)
	newKey := m.key(newPath)
	if oldKey == "" || newKey == "" {
		return coreerr.E("s3.Rename", "both old and new paths are required", os.ErrInvalid)
	}

	// CopySource is "bucket/key" addressing the object to duplicate.
	copySource := m.bucket + "/" + oldKey

	_, err := m.client.CopyObject(context.Background(), &s3.CopyObjectInput{
		Bucket:     aws.String(m.bucket),
		CopySource: aws.String(copySource),
		Key:        aws.String(newKey),
	})
	if err != nil {
		return coreerr.E("s3.Rename", "failed to copy object: "+oldKey+" -> "+newKey, err)
	}

	_, err = m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
		Bucket: aws.String(m.bucket),
		Key:    aws.String(oldKey),
	})
	if err != nil {
		return coreerr.E("s3.Rename", "failed to delete source object: "+oldKey, err)
	}

	return nil
}
|
||||
|
||||
// List returns directory entries for the given path using ListObjectsV2 with
// a "/" delimiter: common prefixes become directories, objects become files.
// Only the first page of results is returned (no pagination here).
func (m *Medium) List(p string) ([]fs.DirEntry, error) {
	prefix := m.key(p)
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	var entries []fs.DirEntry

	listOut, err := m.client.ListObjectsV2(context.Background(), &s3.ListObjectsV2Input{
		Bucket:    aws.String(m.bucket),
		Prefix:    aws.String(prefix),
		Delimiter: aws.String("/"),
	})
	if err != nil {
		return nil, coreerr.E("s3.List", "failed to list objects: "+prefix, err)
	}

	// Common prefixes are "directories".
	for _, cp := range listOut.CommonPrefixes {
		if cp.Prefix == nil {
			continue
		}
		// Strip the listing prefix and trailing "/" to get the bare name.
		name := strings.TrimPrefix(*cp.Prefix, prefix)
		name = strings.TrimSuffix(name, "/")
		if name == "" {
			continue
		}
		entries = append(entries, &dirEntry{
			name:  name,
			isDir: true,
			mode:  fs.ModeDir | 0755,
			info: &fileInfo{
				name:  name,
				isDir: true,
				mode:  fs.ModeDir | 0755,
			},
		})
	}

	// Contents are "files" (excluding the prefix itself and anything nested
	// deeper than one level, which would still contain a "/").
	for _, obj := range listOut.Contents {
		if obj.Key == nil {
			continue
		}
		name := strings.TrimPrefix(*obj.Key, prefix)
		if name == "" || strings.Contains(name, "/") {
			continue
		}
		// Size and mtime pointers may be nil in SDK responses; default to zero values.
		var size int64
		if obj.Size != nil {
			size = *obj.Size
		}
		var modTime time.Time
		if obj.LastModified != nil {
			modTime = *obj.LastModified
		}
		entries = append(entries, &dirEntry{
			name:  name,
			isDir: false,
			mode:  0644,
			info: &fileInfo{
				name:    name,
				size:    size,
				mode:    0644,
				modTime: modTime,
			},
		})
	}

	return entries, nil
}
|
||||
|
||||
// Stat returns file information for the given path using HeadObject.
// Only exact object keys are resolved; "directory" prefixes are not
// stat-able through this method. Mode is a fixed synthetic 0644.
func (m *Medium) Stat(p string) (fs.FileInfo, error) {
	key := m.key(p)
	if key == "" {
		return nil, coreerr.E("s3.Stat", "path is required", os.ErrInvalid)
	}

	out, err := m.client.HeadObject(context.Background(), &s3.HeadObjectInput{
		Bucket: aws.String(m.bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return nil, coreerr.E("s3.Stat", "failed to head object: "+key, err)
	}

	// SDK response fields are pointers; default missing values to zero.
	var size int64
	if out.ContentLength != nil {
		size = *out.ContentLength
	}
	var modTime time.Time
	if out.LastModified != nil {
		modTime = *out.LastModified
	}

	name := path.Base(key)
	return &fileInfo{
		name:    name,
		size:    size,
		mode:    0644,
		modTime: modTime,
	}, nil
}
|
||||
|
||||
// Open opens the named file for reading. The entire object body is
// downloaded into memory up front and served from an in-memory s3File,
// so this is not suitable for very large objects (use ReadStream).
func (m *Medium) Open(p string) (fs.File, error) {
	key := m.key(p)
	if key == "" {
		return nil, coreerr.E("s3.Open", "path is required", os.ErrInvalid)
	}

	out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{
		Bucket: aws.String(m.bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return nil, coreerr.E("s3.Open", "failed to get object: "+key, err)
	}

	data, err := goio.ReadAll(out.Body)
	out.Body.Close()
	if err != nil {
		return nil, coreerr.E("s3.Open", "failed to read body: "+key, err)
	}

	// SDK response fields are pointers; default missing values to zero.
	var size int64
	if out.ContentLength != nil {
		size = *out.ContentLength
	}
	var modTime time.Time
	if out.LastModified != nil {
		modTime = *out.LastModified
	}

	return &s3File{
		name:    path.Base(key),
		content: data,
		size:    size,
		modTime: modTime,
	}, nil
}
|
||||
|
||||
// Create creates or truncates the named file. Returns a writer that
// buffers all writes in memory and uploads the content on Close.
func (m *Medium) Create(p string) (goio.WriteCloser, error) {
	key := m.key(p)
	if key == "" {
		return nil, coreerr.E("s3.Create", "path is required", os.ErrInvalid)
	}
	return &s3WriteCloser{
		medium: m,
		key:    key,
	}, nil
}

// Append opens the named file for appending. It downloads the existing
// content (if any) and re-uploads the combined content on Close.
// A missing object is treated as empty, not as an error.
func (m *Medium) Append(p string) (goio.WriteCloser, error) {
	key := m.key(p)
	if key == "" {
		return nil, coreerr.E("s3.Append", "path is required", os.ErrInvalid)
	}

	// Seed the write buffer with the current object body, if it exists.
	var existing []byte
	out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{
		Bucket: aws.String(m.bucket),
		Key:    aws.String(key),
	})
	if err == nil {
		existing, _ = goio.ReadAll(out.Body)
		out.Body.Close()
	}

	return &s3WriteCloser{
		medium: m,
		key:    key,
		data:   existing,
	}, nil
}
|
||||
|
||||
// ReadStream returns a reader for the file content. The caller owns the
// returned body and must Close it.
func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) {
	key := m.key(p)
	if key == "" {
		return nil, coreerr.E("s3.ReadStream", "path is required", os.ErrInvalid)
	}

	out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{
		Bucket: aws.String(m.bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return nil, coreerr.E("s3.ReadStream", "failed to get object: "+key, err)
	}
	return out.Body, nil
}

// WriteStream returns a writer for the file content. Content is buffered
// in memory and uploaded on Close (delegates to Create).
func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) {
	return m.Create(p)
}
|
||||
|
||||
// Exists checks if a path exists, either as an exact object or as a
// "directory" prefix with at least one object under it.
func (m *Medium) Exists(p string) bool {
	key := m.key(p)
	if key == "" {
		return false
	}

	// Check as an exact object.
	_, err := m.client.HeadObject(context.Background(), &s3.HeadObjectInput{
		Bucket: aws.String(m.bucket),
		Key:    aws.String(key),
	})
	if err == nil {
		return true
	}

	// Check as a "directory" prefix: one listed key or common prefix is enough.
	prefix := key
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}
	listOut, err := m.client.ListObjectsV2(context.Background(), &s3.ListObjectsV2Input{
		Bucket:  aws.String(m.bucket),
		Prefix:  aws.String(prefix),
		MaxKeys: aws.Int32(1),
	})
	if err != nil {
		return false
	}
	return len(listOut.Contents) > 0 || len(listOut.CommonPrefixes) > 0
}

// IsDir checks if a path exists and is a directory (has objects under it
// as a prefix). MaxKeys=1 keeps the probe cheap.
func (m *Medium) IsDir(p string) bool {
	key := m.key(p)
	if key == "" {
		return false
	}

	prefix := key
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	listOut, err := m.client.ListObjectsV2(context.Background(), &s3.ListObjectsV2Input{
		Bucket:  aws.String(m.bucket),
		Prefix:  aws.String(prefix),
		MaxKeys: aws.Int32(1),
	})
	if err != nil {
		return false
	}
	return len(listOut.Contents) > 0 || len(listOut.CommonPrefixes) > 0
}
|
||||
|
||||
// --- Internal types ---
|
||||
|
||||
// fileInfo implements fs.FileInfo for S3 objects. Values are synthesized
// from S3 metadata; Sys always returns nil.
type fileInfo struct {
	name    string      // base name of the object key
	size    int64       // content length in bytes
	mode    fs.FileMode // synthetic mode (0644 for files, ModeDir|0755 for dirs)
	modTime time.Time   // LastModified from S3, zero if unknown
	isDir   bool        // true for "directory" prefixes
}

func (fi *fileInfo) Name() string       { return fi.name }
func (fi *fileInfo) Size() int64        { return fi.size }
func (fi *fileInfo) Mode() fs.FileMode  { return fi.mode }
func (fi *fileInfo) ModTime() time.Time { return fi.modTime }
func (fi *fileInfo) IsDir() bool        { return fi.isDir }
func (fi *fileInfo) Sys() any           { return nil }

// dirEntry implements fs.DirEntry for S3 listings. Info never fails
// because the fileInfo is built eagerly during List.
type dirEntry struct {
	name  string
	isDir bool
	mode  fs.FileMode
	info  fs.FileInfo
}

func (de *dirEntry) Name() string               { return de.name }
func (de *dirEntry) IsDir() bool                { return de.isDir }
func (de *dirEntry) Type() fs.FileMode          { return de.mode.Type() }
func (de *dirEntry) Info() (fs.FileInfo, error) { return de.info, nil }
|
||||
|
||||
// s3File implements fs.File for S3 objects. The whole object body is
// held in content; Read serves it sequentially from an offset cursor.
type s3File struct {
	name    string
	content []byte // full object body, downloaded by Open
	offset  int64  // read cursor into content
	size    int64  // ContentLength as reported by S3 (informational)
	modTime time.Time
}

// Stat reports the file's metadata. Size is taken from len(content),
// i.e. the bytes actually downloaded, not the reported size field.
func (f *s3File) Stat() (fs.FileInfo, error) {
	return &fileInfo{
		name:    f.name,
		size:    int64(len(f.content)),
		mode:    0644,
		modTime: f.modTime,
	}, nil
}

// Read copies the next chunk of the buffered body into b, advancing the
// cursor, and returns io.EOF once the body is exhausted.
func (f *s3File) Read(b []byte) (int, error) {
	if f.offset >= int64(len(f.content)) {
		return 0, goio.EOF
	}
	n := copy(b, f.content[f.offset:])
	f.offset += int64(n)
	return n, nil
}

// Close is a no-op: the body was already fully read and released by Open.
func (f *s3File) Close() error {
	return nil
}
|
||||
|
||||
// s3WriteCloser buffers writes in memory and uploads to S3 on Close.
// Write never fails; any upload error surfaces only from Close, so
// callers must check Close's return value.
type s3WriteCloser struct {
	medium *Medium
	key    string
	data   []byte // accumulated body; may be pre-seeded by Append
}

// Write appends p to the in-memory buffer; it cannot fail.
func (w *s3WriteCloser) Write(p []byte) (int, error) {
	w.data = append(w.data, p...)
	return len(p), nil
}

// Close uploads the buffered content as a single PutObject.
// NOTE(review): this is the only error in the package built with
// fmt.Errorf instead of coreerr.E — consider aligning for consistency.
func (w *s3WriteCloser) Close() error {
	_, err := w.medium.client.PutObject(context.Background(), &s3.PutObjectInput{
		Bucket: aws.String(w.medium.bucket),
		Key:    aws.String(w.key),
		Body:   bytes.NewReader(w.data),
	})
	if err != nil {
		return fmt.Errorf("s3: failed to upload on close: %w", err)
	}
	return nil
}
}
|
||||
|
|
@ -1,646 +0,0 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
goio "io"
|
||||
"io/fs"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// mockS3 is an in-memory mock implementing the s3API interface.
// Objects and their modification times are kept in maps guarded by an
// RWMutex so tests may exercise the Medium concurrently.
type mockS3 struct {
	mu      sync.RWMutex
	objects map[string][]byte    // key -> body
	mtimes  map[string]time.Time // key -> last PutObject time
}

// newMockS3 returns an empty, ready-to-use mock.
func newMockS3() *mockS3 {
	return &mockS3{
		objects: make(map[string][]byte),
		mtimes:  make(map[string]time.Time),
	}
}
|
||||
|
||||
func (m *mockS3) GetObject(_ context.Context, params *s3.GetObjectInput, _ ...func(*s3.Options)) (*s3.GetObjectOutput, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
key := aws.ToString(params.Key)
|
||||
data, ok := m.objects[key]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("NoSuchKey: key %q not found", key)
|
||||
}
|
||||
mtime := m.mtimes[key]
|
||||
return &s3.GetObjectOutput{
|
||||
Body: goio.NopCloser(bytes.NewReader(data)),
|
||||
ContentLength: aws.Int64(int64(len(data))),
|
||||
LastModified: &mtime,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *mockS3) PutObject(_ context.Context, params *s3.PutObjectInput, _ ...func(*s3.Options)) (*s3.PutObjectOutput, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
key := aws.ToString(params.Key)
|
||||
data, err := goio.ReadAll(params.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.objects[key] = data
|
||||
m.mtimes[key] = time.Now()
|
||||
return &s3.PutObjectOutput{}, nil
|
||||
}
|
||||
|
||||
func (m *mockS3) DeleteObject(_ context.Context, params *s3.DeleteObjectInput, _ ...func(*s3.Options)) (*s3.DeleteObjectOutput, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
key := aws.ToString(params.Key)
|
||||
delete(m.objects, key)
|
||||
delete(m.mtimes, key)
|
||||
return &s3.DeleteObjectOutput{}, nil
|
||||
}
|
||||
|
||||
func (m *mockS3) DeleteObjects(_ context.Context, params *s3.DeleteObjectsInput, _ ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
for _, obj := range params.Delete.Objects {
|
||||
key := aws.ToString(obj.Key)
|
||||
delete(m.objects, key)
|
||||
delete(m.mtimes, key)
|
||||
}
|
||||
return &s3.DeleteObjectsOutput{}, nil
|
||||
}
|
||||
|
||||
func (m *mockS3) HeadObject(_ context.Context, params *s3.HeadObjectInput, _ ...func(*s3.Options)) (*s3.HeadObjectOutput, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
key := aws.ToString(params.Key)
|
||||
data, ok := m.objects[key]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("NotFound: key %q not found", key)
|
||||
}
|
||||
mtime := m.mtimes[key]
|
||||
return &s3.HeadObjectOutput{
|
||||
ContentLength: aws.Int64(int64(len(data))),
|
||||
LastModified: &mtime,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ListObjectsV2 emulates hierarchical listing: keys sharing the request
// prefix are returned, and when a delimiter is set, keys containing a
// further delimiter are collapsed into CommonPrefixes ("directories").
//
// Mock simplifications: results are never truncated (IsTruncated is
// always false) and common prefixes do not count against MaxKeys.
func (m *mockS3) ListObjectsV2(_ context.Context, params *s3.ListObjectsV2Input, _ ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()

	prefix := aws.ToString(params.Prefix)
	delimiter := aws.ToString(params.Delimiter)
	maxKeys := int32(1000) // S3's documented default page size
	if params.MaxKeys != nil {
		maxKeys = *params.MaxKeys
	}

	// Collect all matching keys sorted (map iteration order is random,
	// and S3 returns keys in lexicographic order).
	var allKeys []string
	for k := range m.objects {
		if strings.HasPrefix(k, prefix) {
			allKeys = append(allKeys, k)
		}
	}
	sort.Strings(allKeys)

	var contents []types.Object
	commonPrefixes := make(map[string]bool)

	for _, k := range allKeys {
		rest := strings.TrimPrefix(k, prefix)

		if delimiter != "" {
			if idx := strings.Index(rest, delimiter); idx >= 0 {
				// This key has a delimiter after the prefix -> common prefix
				cp := prefix + rest[:idx+len(delimiter)]
				commonPrefixes[cp] = true
				continue
			}
		}

		// Cap only the object contents, checked after the common-prefix
		// branch so prefixes beyond the cap are still discovered.
		if int32(len(contents)) >= maxKeys {
			break
		}

		data := m.objects[k]
		mtime := m.mtimes[k]
		// NOTE(review): taking &mtime relies on per-iteration loop
		// variables (Go 1.22+) — confirm the module's Go version.
		contents = append(contents, types.Object{
			Key:          aws.String(k),
			Size:         aws.Int64(int64(len(data))),
			LastModified: &mtime,
		})
	}

	var cpSlice []types.CommonPrefix
	// Sort common prefixes for deterministic output
	var cpKeys []string
	for cp := range commonPrefixes {
		cpKeys = append(cpKeys, cp)
	}
	sort.Strings(cpKeys)
	for _, cp := range cpKeys {
		cpSlice = append(cpSlice, types.CommonPrefix{Prefix: aws.String(cp)})
	}

	return &s3.ListObjectsV2Output{
		Contents:       contents,
		CommonPrefixes: cpSlice,
		IsTruncated:    aws.Bool(false),
	}, nil
}
|
||||
|
||||
func (m *mockS3) CopyObject(_ context.Context, params *s3.CopyObjectInput, _ ...func(*s3.Options)) (*s3.CopyObjectOutput, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
// CopySource is "bucket/key"
|
||||
source := aws.ToString(params.CopySource)
|
||||
parts := strings.SplitN(source, "/", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("invalid CopySource: %s", source)
|
||||
}
|
||||
srcKey := parts[1]
|
||||
|
||||
data, ok := m.objects[srcKey]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("NoSuchKey: source key %q not found", srcKey)
|
||||
}
|
||||
|
||||
destKey := aws.ToString(params.Key)
|
||||
m.objects[destKey] = append([]byte{}, data...)
|
||||
m.mtimes[destKey] = time.Now()
|
||||
|
||||
return &s3.CopyObjectOutput{}, nil
|
||||
}
|
||||
|
||||
// --- Helper ---
|
||||
|
||||
func newTestMedium(t *testing.T) (*Medium, *mockS3) {
|
||||
t.Helper()
|
||||
mock := newMockS3()
|
||||
m, err := New("test-bucket", withAPI(mock))
|
||||
require.NoError(t, err)
|
||||
return m, mock
|
||||
}
|
||||
|
||||
// --- Tests ---
|
||||
|
||||
// TestNew_Good verifies that New stores the bucket name and starts with
// an empty key prefix.
func TestNew_Good(t *testing.T) {
	mock := newMockS3()
	m, err := New("my-bucket", withAPI(mock))
	require.NoError(t, err)
	assert.Equal(t, "my-bucket", m.bucket)
	assert.Equal(t, "", m.prefix)
}

// TestNew_Bad_NoBucket verifies that an empty bucket name is rejected.
func TestNew_Bad_NoBucket(t *testing.T) {
	_, err := New("")
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "bucket name is required")
}

// TestNew_Bad_NoClient verifies that construction fails when no client
// option (withAPI) is supplied.
func TestNew_Bad_NoClient(t *testing.T) {
	_, err := New("bucket")
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "S3 client is required")
}

// TestWithPrefix_Good verifies that WithPrefix normalises the prefix to
// always end with a trailing slash.
func TestWithPrefix_Good(t *testing.T) {
	mock := newMockS3()
	m, err := New("bucket", withAPI(mock), WithPrefix("data/"))
	require.NoError(t, err)
	assert.Equal(t, "data/", m.prefix)

	// Prefix without trailing slash gets one added
	m2, err := New("bucket", withAPI(mock), WithPrefix("data"))
	require.NoError(t, err)
	assert.Equal(t, "data/", m2.prefix)
}
|
||||
|
||||
// TestReadWrite_Good verifies a basic write-then-read round trip.
func TestReadWrite_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	err := m.Write("hello.txt", "world")
	require.NoError(t, err)

	content, err := m.Read("hello.txt")
	require.NoError(t, err)
	assert.Equal(t, "world", content)
}

// TestReadWrite_Bad_NotFound verifies that reading an absent key errors.
func TestReadWrite_Bad_NotFound(t *testing.T) {
	m, _ := newTestMedium(t)

	_, err := m.Read("nonexistent.txt")
	assert.Error(t, err)
}

// TestReadWrite_Bad_EmptyPath verifies that both Read and Write reject
// an empty path.
func TestReadWrite_Bad_EmptyPath(t *testing.T) {
	m, _ := newTestMedium(t)

	_, err := m.Read("")
	assert.Error(t, err)

	err = m.Write("", "content")
	assert.Error(t, err)
}

// TestReadWrite_Good_WithPrefix verifies that a configured prefix is
// prepended to stored keys but transparent to callers.
func TestReadWrite_Good_WithPrefix(t *testing.T) {
	mock := newMockS3()
	m, err := New("bucket", withAPI(mock), WithPrefix("pfx"))
	require.NoError(t, err)

	err = m.Write("file.txt", "data")
	require.NoError(t, err)

	// Verify the key has the prefix
	_, ok := mock.objects["pfx/file.txt"]
	assert.True(t, ok, "object should be stored with prefix")

	content, err := m.Read("file.txt")
	require.NoError(t, err)
	assert.Equal(t, "data", content)
}
|
||||
|
||||
// TestEnsureDir_Good verifies EnsureDir succeeds trivially: S3 has no
// real directories, so the call is a no-op.
func TestEnsureDir_Good(t *testing.T) {
	m, _ := newTestMedium(t)
	// EnsureDir is a no-op for S3
	err := m.EnsureDir("any/path")
	assert.NoError(t, err)
}

// TestIsFile_Good verifies IsFile reports true only for existing object
// keys, and false for missing keys and the empty path.
func TestIsFile_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	err := m.Write("file.txt", "content")
	require.NoError(t, err)

	assert.True(t, m.IsFile("file.txt"))
	assert.False(t, m.IsFile("nonexistent.txt"))
	assert.False(t, m.IsFile(""))
}

// TestFileGetFileSet_Good verifies the FileSet/FileGet pair round-trips
// a value (the key/value convenience layer over Write/Read).
func TestFileGetFileSet_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	err := m.FileSet("key.txt", "value")
	require.NoError(t, err)

	val, err := m.FileGet("key.txt")
	require.NoError(t, err)
	assert.Equal(t, "value", val)
}
|
||||
|
||||
// TestDelete_Good verifies that Delete removes a single object.
func TestDelete_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	err := m.Write("to-delete.txt", "content")
	require.NoError(t, err)
	assert.True(t, m.Exists("to-delete.txt"))

	err = m.Delete("to-delete.txt")
	require.NoError(t, err)
	assert.False(t, m.IsFile("to-delete.txt"))
}

// TestDelete_Bad_EmptyPath verifies Delete rejects an empty path.
func TestDelete_Bad_EmptyPath(t *testing.T) {
	m, _ := newTestMedium(t)
	err := m.Delete("")
	assert.Error(t, err)
}

// TestDeleteAll_Good verifies DeleteAll removes a whole key prefix
// (recursive "directory" delete) while leaving unrelated keys intact.
func TestDeleteAll_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	// Create nested structure
	require.NoError(t, m.Write("dir/file1.txt", "a"))
	require.NoError(t, m.Write("dir/sub/file2.txt", "b"))
	require.NoError(t, m.Write("other.txt", "c"))

	err := m.DeleteAll("dir")
	require.NoError(t, err)

	assert.False(t, m.IsFile("dir/file1.txt"))
	assert.False(t, m.IsFile("dir/sub/file2.txt"))
	assert.True(t, m.IsFile("other.txt"))
}

// TestDeleteAll_Bad_EmptyPath verifies DeleteAll rejects an empty path
// (guarding against accidentally wiping the whole bucket).
func TestDeleteAll_Bad_EmptyPath(t *testing.T) {
	m, _ := newTestMedium(t)
	err := m.DeleteAll("")
	assert.Error(t, err)
}

// TestRename_Good verifies Rename moves content to the new key and
// removes the old one (copy-then-delete under the hood on S3).
func TestRename_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	require.NoError(t, m.Write("old.txt", "content"))
	assert.True(t, m.IsFile("old.txt"))

	err := m.Rename("old.txt", "new.txt")
	require.NoError(t, err)

	assert.False(t, m.IsFile("old.txt"))
	assert.True(t, m.IsFile("new.txt"))

	content, err := m.Read("new.txt")
	require.NoError(t, err)
	assert.Equal(t, "content", content)
}

// TestRename_Bad_EmptyPath verifies Rename rejects an empty source or
// destination path.
func TestRename_Bad_EmptyPath(t *testing.T) {
	m, _ := newTestMedium(t)
	err := m.Rename("", "new.txt")
	assert.Error(t, err)

	err = m.Rename("old.txt", "")
	assert.Error(t, err)
}

// TestRename_Bad_SourceNotFound verifies Rename errors when the source
// object does not exist.
func TestRename_Bad_SourceNotFound(t *testing.T) {
	m, _ := newTestMedium(t)
	err := m.Rename("nonexistent.txt", "new.txt")
	assert.Error(t, err)
}
|
||||
|
||||
// TestList_Good verifies List returns immediate children only: files at
// the requested level plus one synthetic directory entry per sub-prefix.
func TestList_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	require.NoError(t, m.Write("dir/file1.txt", "a"))
	require.NoError(t, m.Write("dir/file2.txt", "b"))
	require.NoError(t, m.Write("dir/sub/file3.txt", "c"))

	entries, err := m.List("dir")
	require.NoError(t, err)

	names := make(map[string]bool)
	for _, e := range entries {
		names[e.Name()] = true
	}

	assert.True(t, names["file1.txt"], "should list file1.txt")
	assert.True(t, names["file2.txt"], "should list file2.txt")
	assert.True(t, names["sub"], "should list sub directory")
	assert.Len(t, entries, 3)

	// Check that sub is a directory
	for _, e := range entries {
		if e.Name() == "sub" {
			assert.True(t, e.IsDir())
			info, err := e.Info()
			require.NoError(t, err)
			assert.True(t, info.IsDir())
		}
	}
}

// TestList_Good_Root verifies listing with an empty path returns the
// bucket root: top-level files and first-level directory prefixes.
func TestList_Good_Root(t *testing.T) {
	m, _ := newTestMedium(t)

	require.NoError(t, m.Write("root.txt", "content"))
	require.NoError(t, m.Write("dir/nested.txt", "nested"))

	entries, err := m.List("")
	require.NoError(t, err)

	names := make(map[string]bool)
	for _, e := range entries {
		names[e.Name()] = true
	}

	assert.True(t, names["root.txt"])
	assert.True(t, names["dir"])
}

// TestStat_Good verifies Stat reports base name, byte size, and that an
// object key is not a directory.
func TestStat_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	require.NoError(t, m.Write("file.txt", "hello world"))

	info, err := m.Stat("file.txt")
	require.NoError(t, err)
	assert.Equal(t, "file.txt", info.Name())
	assert.Equal(t, int64(11), info.Size())
	assert.False(t, info.IsDir())
}

// TestStat_Bad_NotFound verifies Stat errors for an absent key.
func TestStat_Bad_NotFound(t *testing.T) {
	m, _ := newTestMedium(t)

	_, err := m.Stat("nonexistent.txt")
	assert.Error(t, err)
}

// TestStat_Bad_EmptyPath verifies Stat rejects an empty path.
func TestStat_Bad_EmptyPath(t *testing.T) {
	m, _ := newTestMedium(t)
	_, err := m.Stat("")
	assert.Error(t, err)
}
|
||||
|
||||
// TestOpen_Good verifies Open returns a readable fs.File whose Stat
// reports the object's base name.
func TestOpen_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	require.NoError(t, m.Write("file.txt", "open me"))

	f, err := m.Open("file.txt")
	require.NoError(t, err)
	defer f.Close()

	data, err := goio.ReadAll(f.(goio.Reader))
	require.NoError(t, err)
	assert.Equal(t, "open me", string(data))

	stat, err := f.Stat()
	require.NoError(t, err)
	assert.Equal(t, "file.txt", stat.Name())
}

// TestOpen_Bad_NotFound verifies Open errors for an absent key.
func TestOpen_Bad_NotFound(t *testing.T) {
	m, _ := newTestMedium(t)

	_, err := m.Open("nonexistent.txt")
	assert.Error(t, err)
}

// TestCreate_Good verifies Create returns a writer whose buffered bytes
// become the object content once Close uploads them.
func TestCreate_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	w, err := m.Create("new.txt")
	require.NoError(t, err)

	n, err := w.Write([]byte("created"))
	require.NoError(t, err)
	assert.Equal(t, 7, n)

	err = w.Close()
	require.NoError(t, err)

	content, err := m.Read("new.txt")
	require.NoError(t, err)
	assert.Equal(t, "created", content)
}

// TestAppend_Good verifies Append seeds the writer with the existing
// content so Close re-uploads old + new bytes.
func TestAppend_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	require.NoError(t, m.Write("append.txt", "hello"))

	w, err := m.Append("append.txt")
	require.NoError(t, err)

	_, err = w.Write([]byte(" world"))
	require.NoError(t, err)
	err = w.Close()
	require.NoError(t, err)

	content, err := m.Read("append.txt")
	require.NoError(t, err)
	assert.Equal(t, "hello world", content)
}

// TestAppend_Good_NewFile verifies Append to a missing key behaves like
// Create rather than erroring.
func TestAppend_Good_NewFile(t *testing.T) {
	m, _ := newTestMedium(t)

	w, err := m.Append("new.txt")
	require.NoError(t, err)

	_, err = w.Write([]byte("fresh"))
	require.NoError(t, err)
	err = w.Close()
	require.NoError(t, err)

	content, err := m.Read("new.txt")
	require.NoError(t, err)
	assert.Equal(t, "fresh", content)
}
|
||||
|
||||
// TestReadStream_Good verifies ReadStream yields a ReadCloser over the
// full object content.
func TestReadStream_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	require.NoError(t, m.Write("stream.txt", "streaming content"))

	reader, err := m.ReadStream("stream.txt")
	require.NoError(t, err)
	defer reader.Close()

	data, err := goio.ReadAll(reader)
	require.NoError(t, err)
	assert.Equal(t, "streaming content", string(data))
}

// TestReadStream_Bad_NotFound verifies ReadStream errors for an absent key.
func TestReadStream_Bad_NotFound(t *testing.T) {
	m, _ := newTestMedium(t)
	_, err := m.ReadStream("nonexistent.txt")
	assert.Error(t, err)
}

// TestWriteStream_Good verifies WriteStream accepts piped data and
// materialises it as the object content on Close.
func TestWriteStream_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	writer, err := m.WriteStream("output.txt")
	require.NoError(t, err)

	_, err = goio.Copy(writer, strings.NewReader("piped data"))
	require.NoError(t, err)
	err = writer.Close()
	require.NoError(t, err)

	content, err := m.Read("output.txt")
	require.NoError(t, err)
	assert.Equal(t, "piped data", content)
}

// TestExists_Good verifies Exists flips from false to true once the
// object is written.
func TestExists_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	assert.False(t, m.Exists("nonexistent.txt"))

	require.NoError(t, m.Write("file.txt", "content"))
	assert.True(t, m.Exists("file.txt"))
}

// TestExists_Good_DirectoryPrefix verifies Exists also reports true for
// a bare key prefix that only exists as a "directory".
func TestExists_Good_DirectoryPrefix(t *testing.T) {
	m, _ := newTestMedium(t)

	require.NoError(t, m.Write("dir/file.txt", "content"))
	// "dir" should exist as a directory prefix
	assert.True(t, m.Exists("dir"))
}

// TestIsDir_Good verifies IsDir is true only for key prefixes, never for
// object keys, missing paths, or the empty path.
func TestIsDir_Good(t *testing.T) {
	m, _ := newTestMedium(t)

	require.NoError(t, m.Write("dir/file.txt", "content"))

	assert.True(t, m.IsDir("dir"))
	assert.False(t, m.IsDir("dir/file.txt"))
	assert.False(t, m.IsDir("nonexistent"))
	assert.False(t, m.IsDir(""))
}
|
||||
|
||||
// TestKey_Good verifies key() path normalisation: leading slashes and
// parent traversals are stripped, and the configured prefix (when any)
// is prepended to every resolved key.
func TestKey_Good(t *testing.T) {
	mock := newMockS3()

	// No prefix
	m, _ := New("bucket", withAPI(mock))
	assert.Equal(t, "file.txt", m.key("file.txt"))
	assert.Equal(t, "dir/file.txt", m.key("dir/file.txt"))
	assert.Equal(t, "", m.key(""))
	assert.Equal(t, "file.txt", m.key("/file.txt"))
	assert.Equal(t, "file.txt", m.key("../file.txt"))

	// With prefix
	m2, _ := New("bucket", withAPI(mock), WithPrefix("pfx"))
	assert.Equal(t, "pfx/file.txt", m2.key("file.txt"))
	assert.Equal(t, "pfx/dir/file.txt", m2.key("dir/file.txt"))
	assert.Equal(t, "pfx/", m2.key(""))
}
|
||||
|
||||
// Ugly: verify the Medium interface is satisfied at compile time.
// The anonymous interface below mirrors the medium contract; assigning m
// to it fails compilation if any method is missing or mis-typed.
func TestInterfaceCompliance_Ugly(t *testing.T) {
	mock := newMockS3()
	m, err := New("bucket", withAPI(mock))
	require.NoError(t, err)

	// Verify all methods exist by calling them in a way that
	// proves compile-time satisfaction of the interface.
	var _ interface {
		Read(string) (string, error)
		Write(string, string) error
		EnsureDir(string) error
		IsFile(string) bool
		FileGet(string) (string, error)
		FileSet(string, string) error
		Delete(string) error
		DeleteAll(string) error
		Rename(string, string) error
		List(string) ([]fs.DirEntry, error)
		Stat(string) (fs.FileInfo, error)
		Open(string) (fs.File, error)
		Create(string) (goio.WriteCloser, error)
		Append(string) (goio.WriteCloser, error)
		ReadStream(string) (goio.ReadCloser, error)
		WriteStream(string) (goio.WriteCloser, error)
		Exists(string) bool
		IsDir(string) bool
	} = m
}
|
||||
|
|
@ -1,367 +0,0 @@
|
|||
// This file implements the Pre-Obfuscation Layer Protocol with
|
||||
// XChaCha20-Poly1305 encryption. The protocol applies a reversible transformation
|
||||
// to plaintext BEFORE it reaches CPU encryption routines, providing defense-in-depth
|
||||
// against side-channel attacks.
|
||||
//
|
||||
// The encryption flow is:
|
||||
//
|
||||
// plaintext -> obfuscate(nonce) -> encrypt -> [nonce || ciphertext || tag]
|
||||
//
|
||||
// The decryption flow is:
|
||||
//
|
||||
// [nonce || ciphertext || tag] -> decrypt -> deobfuscate(nonce) -> plaintext
|
||||
package sigil
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"golang.org/x/crypto/chacha20poly1305"
|
||||
)
|
||||
|
||||
// Sentinel errors returned by this package. Callers should compare with
// errors.Is rather than matching message strings.
var (
	// ErrInvalidKey is returned when the encryption key is invalid.
	ErrInvalidKey = errors.New("sigil: invalid key size, must be 32 bytes")
	// ErrCiphertextTooShort is returned when the ciphertext is too short to decrypt.
	ErrCiphertextTooShort = errors.New("sigil: ciphertext too short")
	// ErrDecryptionFailed is returned when decryption or authentication fails.
	ErrDecryptionFailed = errors.New("sigil: decryption failed")
	// ErrNoKeyConfigured is returned when no encryption key has been set.
	ErrNoKeyConfigured = errors.New("sigil: no encryption key configured")
)
|
||||
|
||||
// PreObfuscator applies a reversible transformation to data before encryption.
// This ensures that raw plaintext patterns are never sent directly to CPU
// encryption routines, providing defense against side-channel attacks.
//
// Implementations must be deterministic: given the same entropy, the transformation
// must be perfectly reversible: Deobfuscate(Obfuscate(x, e), e) == x
type PreObfuscator interface {
	// Obfuscate transforms plaintext before encryption using the provided entropy.
	// The entropy is typically the encryption nonce, ensuring the transformation
	// is unique per-encryption without additional random generation.
	Obfuscate(data []byte, entropy []byte) []byte

	// Deobfuscate reverses the transformation after decryption.
	// Must be called with the same entropy used during Obfuscate.
	Deobfuscate(data []byte, entropy []byte) []byte
}
|
||||
|
||||
// XORObfuscator performs XOR-based obfuscation using an entropy-derived key stream.
//
// The key stream is generated using SHA-256 in counter mode:
//
//	keyStream[i*32:(i+1)*32] = SHA256(entropy || BigEndian64(i))
//
// This provides a cryptographically uniform key stream that decorrelates
// plaintext patterns from the data seen by the encryption routine.
// XOR is symmetric, so obfuscation and deobfuscation use the same operation.
// The zero value is ready to use; the type carries no state.
type XORObfuscator struct{}
|
||||
|
||||
// Obfuscate XORs the data with a key stream derived from the entropy.
|
||||
func (x *XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte {
|
||||
if len(data) == 0 {
|
||||
return data
|
||||
}
|
||||
return x.transform(data, entropy)
|
||||
}
|
||||
|
||||
// Deobfuscate reverses the XOR transformation (XOR is symmetric).
|
||||
func (x *XORObfuscator) Deobfuscate(data []byte, entropy []byte) []byte {
|
||||
if len(data) == 0 {
|
||||
return data
|
||||
}
|
||||
return x.transform(data, entropy)
|
||||
}
|
||||
|
||||
// transform applies XOR with an entropy-derived key stream.
|
||||
func (x *XORObfuscator) transform(data []byte, entropy []byte) []byte {
|
||||
result := make([]byte, len(data))
|
||||
keyStream := x.deriveKeyStream(entropy, len(data))
|
||||
for i := range data {
|
||||
result[i] = data[i] ^ keyStream[i]
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// deriveKeyStream creates a deterministic key stream from entropy.
|
||||
func (x *XORObfuscator) deriveKeyStream(entropy []byte, length int) []byte {
|
||||
stream := make([]byte, length)
|
||||
h := sha256.New()
|
||||
|
||||
// Generate key stream in 32-byte blocks
|
||||
blockNum := uint64(0)
|
||||
offset := 0
|
||||
for offset < length {
|
||||
h.Reset()
|
||||
h.Write(entropy)
|
||||
var blockBytes [8]byte
|
||||
binary.BigEndian.PutUint64(blockBytes[:], blockNum)
|
||||
h.Write(blockBytes[:])
|
||||
block := h.Sum(nil)
|
||||
|
||||
copyLen := min(len(block), length-offset)
|
||||
copy(stream[offset:], block[:copyLen])
|
||||
offset += copyLen
|
||||
blockNum++
|
||||
}
|
||||
return stream
|
||||
}
|
||||
|
||||
// ShuffleMaskObfuscator provides stronger obfuscation through byte shuffling and masking.
//
// The obfuscation process:
//  1. Generate a mask from entropy using SHA-256 in counter mode
//  2. XOR the data with the mask
//  3. Generate a deterministic permutation using Fisher-Yates shuffle
//  4. Reorder bytes according to the permutation
//
// This provides both value transformation (XOR mask) and position transformation
// (shuffle), making pattern analysis more difficult than XOR alone.
// The zero value is ready to use; the type carries no state.
type ShuffleMaskObfuscator struct{}
|
||||
|
||||
// Obfuscate shuffles bytes and applies a mask derived from entropy.
|
||||
func (s *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte {
|
||||
if len(data) == 0 {
|
||||
return data
|
||||
}
|
||||
|
||||
result := make([]byte, len(data))
|
||||
copy(result, data)
|
||||
|
||||
// Generate permutation and mask from entropy
|
||||
perm := s.generatePermutation(entropy, len(data))
|
||||
mask := s.deriveMask(entropy, len(data))
|
||||
|
||||
// Apply mask first, then shuffle
|
||||
for i := range result {
|
||||
result[i] ^= mask[i]
|
||||
}
|
||||
|
||||
// Shuffle using Fisher-Yates with deterministic seed
|
||||
shuffled := make([]byte, len(data))
|
||||
for i, p := range perm {
|
||||
shuffled[i] = result[p]
|
||||
}
|
||||
|
||||
return shuffled
|
||||
}
|
||||
|
||||
// Deobfuscate reverses the shuffle and mask operations.
|
||||
func (s *ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte) []byte {
|
||||
if len(data) == 0 {
|
||||
return data
|
||||
}
|
||||
|
||||
result := make([]byte, len(data))
|
||||
|
||||
// Generate permutation and mask from entropy
|
||||
perm := s.generatePermutation(entropy, len(data))
|
||||
mask := s.deriveMask(entropy, len(data))
|
||||
|
||||
// Unshuffle first
|
||||
for i, p := range perm {
|
||||
result[p] = data[i]
|
||||
}
|
||||
|
||||
// Remove mask
|
||||
for i := range result {
|
||||
result[i] ^= mask[i]
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// generatePermutation creates a deterministic permutation from entropy.
|
||||
func (s *ShuffleMaskObfuscator) generatePermutation(entropy []byte, length int) []int {
|
||||
perm := make([]int, length)
|
||||
for i := range perm {
|
||||
perm[i] = i
|
||||
}
|
||||
|
||||
// Use entropy to seed a deterministic shuffle
|
||||
h := sha256.New()
|
||||
h.Write(entropy)
|
||||
h.Write([]byte("permutation"))
|
||||
seed := h.Sum(nil)
|
||||
|
||||
// Fisher-Yates shuffle with deterministic randomness
|
||||
for i := length - 1; i > 0; i-- {
|
||||
h.Reset()
|
||||
h.Write(seed)
|
||||
var iBytes [8]byte
|
||||
binary.BigEndian.PutUint64(iBytes[:], uint64(i))
|
||||
h.Write(iBytes[:])
|
||||
jBytes := h.Sum(nil)
|
||||
j := int(binary.BigEndian.Uint64(jBytes[:8]) % uint64(i+1))
|
||||
perm[i], perm[j] = perm[j], perm[i]
|
||||
}
|
||||
|
||||
return perm
|
||||
}
|
||||
|
||||
// deriveMask creates a mask byte array from entropy.
|
||||
func (s *ShuffleMaskObfuscator) deriveMask(entropy []byte, length int) []byte {
|
||||
mask := make([]byte, length)
|
||||
h := sha256.New()
|
||||
|
||||
blockNum := uint64(0)
|
||||
offset := 0
|
||||
for offset < length {
|
||||
h.Reset()
|
||||
h.Write(entropy)
|
||||
h.Write([]byte("mask"))
|
||||
var blockBytes [8]byte
|
||||
binary.BigEndian.PutUint64(blockBytes[:], blockNum)
|
||||
h.Write(blockBytes[:])
|
||||
block := h.Sum(nil)
|
||||
|
||||
copyLen := min(len(block), length-offset)
|
||||
copy(mask[offset:], block[:copyLen])
|
||||
offset += copyLen
|
||||
blockNum++
|
||||
}
|
||||
return mask
|
||||
}
|
||||
|
||||
// ChaChaPolySigil is a Sigil that encrypts/decrypts data using ChaCha20-Poly1305.
// It applies pre-obfuscation before encryption to ensure raw plaintext never
// goes directly to CPU encryption routines.
//
// The output format is:
//
//	[24-byte nonce][encrypted(obfuscated(plaintext))]
//
// Unlike demo implementations, the nonce is ONLY embedded in the ciphertext,
// not exposed separately in headers.
type ChaChaPolySigil struct {
	Key        []byte        // 32-byte XChaCha20-Poly1305 key
	Obfuscator PreObfuscator // applied before encrypt / after decrypt; nil skips obfuscation
	randReader io.Reader     // for testing injection
}
|
||||
|
||||
// NewChaChaPolySigil creates a new encryption sigil with the given key.
|
||||
// The key must be exactly 32 bytes.
|
||||
func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) {
|
||||
if len(key) != 32 {
|
||||
return nil, ErrInvalidKey
|
||||
}
|
||||
|
||||
keyCopy := make([]byte, 32)
|
||||
copy(keyCopy, key)
|
||||
|
||||
return &ChaChaPolySigil{
|
||||
Key: keyCopy,
|
||||
Obfuscator: &XORObfuscator{},
|
||||
randReader: rand.Reader,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewChaChaPolySigilWithObfuscator creates a new encryption sigil with custom obfuscator.
|
||||
func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) {
|
||||
sigil, err := NewChaChaPolySigil(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if obfuscator != nil {
|
||||
sigil.Obfuscator = obfuscator
|
||||
}
|
||||
return sigil, nil
|
||||
}
|
||||
|
||||
// In encrypts the data with pre-obfuscation.
// The flow is: plaintext -> obfuscate(nonce) -> encrypt, producing
// [24-byte nonce || ciphertext || auth tag].
//
// A nil input returns (nil, nil) — "nothing to encrypt" is not an error.
// Returns ErrNoKeyConfigured when no key has been set.
func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) {
	if s.Key == nil {
		return nil, ErrNoKeyConfigured
	}
	if data == nil {
		return nil, nil
	}

	aead, err := chacha20poly1305.NewX(s.Key)
	if err != nil {
		return nil, err
	}

	// Generate nonce; randReader is injectable for deterministic tests
	// and falls back to crypto/rand when unset.
	nonce := make([]byte, aead.NonceSize())
	reader := s.randReader
	if reader == nil {
		reader = rand.Reader
	}
	if _, err := io.ReadFull(reader, nonce); err != nil {
		return nil, err
	}

	// Pre-obfuscate the plaintext using nonce as entropy
	// This ensures CPU encryption routines never see raw plaintext
	obfuscated := data
	if s.Obfuscator != nil {
		obfuscated = s.Obfuscator.Obfuscate(data, nonce)
	}

	// Encrypt the obfuscated data
	// Output: [nonce | ciphertext | auth tag] — passing nonce as Seal's
	// dst prepends it to the ciphertext in a single buffer.
	ciphertext := aead.Seal(nonce, nonce, obfuscated, nil)

	return ciphertext, nil
}
|
||||
|
||||
// Out decrypts the data and reverses obfuscation.
// The flow is: decrypt -> deobfuscate(nonce) -> plaintext, mirroring In.
//
// A nil input returns (nil, nil). Authentication or length failures are
// reported as ErrDecryptionFailed / ErrCiphertextTooShort; the underlying
// AEAD error is deliberately not exposed.
func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) {
	if s.Key == nil {
		return nil, ErrNoKeyConfigured
	}
	if data == nil {
		return nil, nil
	}

	aead, err := chacha20poly1305.NewX(s.Key)
	if err != nil {
		return nil, err
	}

	// The shortest valid input is an empty message: nonce + auth tag.
	minLen := aead.NonceSize() + aead.Overhead()
	if len(data) < minLen {
		return nil, ErrCiphertextTooShort
	}

	// Extract nonce from ciphertext
	nonce := data[:aead.NonceSize()]
	ciphertext := data[aead.NonceSize():]

	// Decrypt
	obfuscated, err := aead.Open(nil, nonce, ciphertext, nil)
	if err != nil {
		return nil, ErrDecryptionFailed
	}

	// Deobfuscate using the same nonce as entropy
	plaintext := obfuscated
	if s.Obfuscator != nil {
		plaintext = s.Obfuscator.Deobfuscate(obfuscated, nonce)
	}

	// Normalise empty results to a non-nil empty slice so callers can
	// distinguish "decrypted empty message" from the nil-input case.
	if len(plaintext) == 0 {
		return []byte{}, nil
	}

	return plaintext, nil
}
|
||||
|
||||
// GetNonceFromCiphertext extracts the nonce from encrypted output.
|
||||
// This is provided for debugging/logging purposes only.
|
||||
// The nonce should NOT be stored separately in headers.
|
||||
func GetNonceFromCiphertext(ciphertext []byte) ([]byte, error) {
|
||||
nonceSize := chacha20poly1305.NonceSizeX
|
||||
if len(ciphertext) < nonceSize {
|
||||
return nil, ErrCiphertextTooShort
|
||||
}
|
||||
nonceCopy := make([]byte, nonceSize)
|
||||
copy(nonceCopy, ciphertext[:nonceSize])
|
||||
return nonceCopy, nil
|
||||
}
|
||||
|
|
@ -1,536 +0,0 @@
|
|||
package sigil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// ── XORObfuscator ──────────────────────────────────────────────────

// Round trip: Deobfuscate(Obfuscate(x, e), e) must restore x exactly,
// and the obfuscated form must differ from the input while keeping length.
func TestXORObfuscator_Good_RoundTrip(t *testing.T) {
	ob := &XORObfuscator{}
	data := []byte("the axioms are in the weights")
	entropy := []byte("deterministic-nonce-24bytes!")

	obfuscated := ob.Obfuscate(data, entropy)
	assert.NotEqual(t, data, obfuscated)
	assert.Len(t, obfuscated, len(data))

	restored := ob.Deobfuscate(obfuscated, entropy)
	assert.Equal(t, data, restored)
}

// Different entropy must yield a different key stream and hence output.
func TestXORObfuscator_Good_DifferentEntropyDifferentOutput(t *testing.T) {
	ob := &XORObfuscator{}
	data := []byte("same plaintext")

	out1 := ob.Obfuscate(data, []byte("entropy-a"))
	out2 := ob.Obfuscate(data, []byte("entropy-b"))
	assert.NotEqual(t, out1, out2)
}

// Same entropy must always produce the same output (determinism is
// required for persisted data to remain recoverable).
func TestXORObfuscator_Good_Deterministic(t *testing.T) {
	ob := &XORObfuscator{}
	data := []byte("reproducible")
	entropy := []byte("fixed-seed")

	out1 := ob.Obfuscate(data, entropy)
	out2 := ob.Obfuscate(data, entropy)
	assert.Equal(t, out1, out2)
}

// Inputs longer than one digest exercise the multi-block key stream.
func TestXORObfuscator_Good_LargeData(t *testing.T) {
	ob := &XORObfuscator{}
	// Larger than one SHA-256 block (32 bytes) to test multi-block key stream.
	data := make([]byte, 256)
	for i := range data {
		data[i] = byte(i)
	}
	entropy := []byte("test-entropy")

	obfuscated := ob.Obfuscate(data, entropy)
	restored := ob.Deobfuscate(obfuscated, entropy)
	assert.Equal(t, data, restored)
}

// Empty input passes through both directions unchanged.
func TestXORObfuscator_Good_EmptyData(t *testing.T) {
	ob := &XORObfuscator{}
	result := ob.Obfuscate([]byte{}, []byte("entropy"))
	assert.Equal(t, []byte{}, result)

	result = ob.Deobfuscate([]byte{}, []byte("entropy"))
	assert.Equal(t, []byte{}, result)
}

// Applying Obfuscate twice with the same entropy restores the input,
// since XOR with a fixed key stream is an involution.
func TestXORObfuscator_Good_SymmetricProperty(t *testing.T) {
	ob := &XORObfuscator{}
	data := []byte("XOR is its own inverse")
	entropy := []byte("nonce")

	// XOR is symmetric: Obfuscate(Obfuscate(x)) == x
	double := ob.Obfuscate(ob.Obfuscate(data, entropy), entropy)
	assert.Equal(t, data, double)
}
|
||||
|
||||
// ── ShuffleMaskObfuscator ──────────────────────────────────────────
|
||||
|
||||
func TestShuffleMaskObfuscator_Good_RoundTrip(t *testing.T) {
|
||||
ob := &ShuffleMaskObfuscator{}
|
||||
data := []byte("shuffle and mask protect patterns")
|
||||
entropy := []byte("deterministic-entropy")
|
||||
|
||||
obfuscated := ob.Obfuscate(data, entropy)
|
||||
assert.NotEqual(t, data, obfuscated)
|
||||
assert.Len(t, obfuscated, len(data))
|
||||
|
||||
restored := ob.Deobfuscate(obfuscated, entropy)
|
||||
assert.Equal(t, data, restored)
|
||||
}
|
||||
|
||||
func TestShuffleMaskObfuscator_Good_DifferentEntropy(t *testing.T) {
|
||||
ob := &ShuffleMaskObfuscator{}
|
||||
data := []byte("same data")
|
||||
|
||||
out1 := ob.Obfuscate(data, []byte("entropy-1"))
|
||||
out2 := ob.Obfuscate(data, []byte("entropy-2"))
|
||||
assert.NotEqual(t, out1, out2)
|
||||
}
|
||||
|
||||
func TestShuffleMaskObfuscator_Good_Deterministic(t *testing.T) {
|
||||
ob := &ShuffleMaskObfuscator{}
|
||||
data := []byte("reproducible shuffle")
|
||||
entropy := []byte("fixed")
|
||||
|
||||
out1 := ob.Obfuscate(data, entropy)
|
||||
out2 := ob.Obfuscate(data, entropy)
|
||||
assert.Equal(t, out1, out2)
|
||||
}
|
||||
|
||||
func TestShuffleMaskObfuscator_Good_LargeData(t *testing.T) {
|
||||
ob := &ShuffleMaskObfuscator{}
|
||||
data := make([]byte, 512)
|
||||
for i := range data {
|
||||
data[i] = byte(i % 256)
|
||||
}
|
||||
entropy := []byte("large-data-test")
|
||||
|
||||
obfuscated := ob.Obfuscate(data, entropy)
|
||||
restored := ob.Deobfuscate(obfuscated, entropy)
|
||||
assert.Equal(t, data, restored)
|
||||
}
|
||||
|
||||
func TestShuffleMaskObfuscator_Good_EmptyData(t *testing.T) {
|
||||
ob := &ShuffleMaskObfuscator{}
|
||||
result := ob.Obfuscate([]byte{}, []byte("entropy"))
|
||||
assert.Equal(t, []byte{}, result)
|
||||
|
||||
result = ob.Deobfuscate([]byte{}, []byte("entropy"))
|
||||
assert.Equal(t, []byte{}, result)
|
||||
}
|
||||
|
||||
func TestShuffleMaskObfuscator_Good_SingleByte(t *testing.T) {
|
||||
ob := &ShuffleMaskObfuscator{}
|
||||
data := []byte{0x42}
|
||||
entropy := []byte("single")
|
||||
|
||||
obfuscated := ob.Obfuscate(data, entropy)
|
||||
restored := ob.Deobfuscate(obfuscated, entropy)
|
||||
assert.Equal(t, data, restored)
|
||||
}
|
||||
|
||||
// ── NewChaChaPolySigil ─────────────────────────────────────────────
|
||||
|
||||
func TestNewChaChaPolySigil_Good(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, err := NewChaChaPolySigil(key)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, s)
|
||||
assert.Equal(t, key, s.Key)
|
||||
assert.NotNil(t, s.Obfuscator)
|
||||
}
|
||||
|
||||
func TestNewChaChaPolySigil_Good_KeyIsCopied(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
original := make([]byte, 32)
|
||||
copy(original, key)
|
||||
|
||||
s, err := NewChaChaPolySigil(key)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Mutating the original key should not affect the sigil.
|
||||
key[0] ^= 0xFF
|
||||
assert.Equal(t, original, s.Key)
|
||||
}
|
||||
|
||||
func TestNewChaChaPolySigil_Bad_ShortKey(t *testing.T) {
|
||||
_, err := NewChaChaPolySigil([]byte("too short"))
|
||||
assert.ErrorIs(t, err, ErrInvalidKey)
|
||||
}
|
||||
|
||||
func TestNewChaChaPolySigil_Bad_LongKey(t *testing.T) {
|
||||
_, err := NewChaChaPolySigil(make([]byte, 64))
|
||||
assert.ErrorIs(t, err, ErrInvalidKey)
|
||||
}
|
||||
|
||||
func TestNewChaChaPolySigil_Bad_EmptyKey(t *testing.T) {
|
||||
_, err := NewChaChaPolySigil(nil)
|
||||
assert.ErrorIs(t, err, ErrInvalidKey)
|
||||
}
|
||||
|
||||
// ── NewChaChaPolySigilWithObfuscator ───────────────────────────────
|
||||
|
||||
func TestNewChaChaPolySigilWithObfuscator_Good(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
ob := &ShuffleMaskObfuscator{}
|
||||
s, err := NewChaChaPolySigilWithObfuscator(key, ob)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, ob, s.Obfuscator)
|
||||
}
|
||||
|
||||
func TestNewChaChaPolySigilWithObfuscator_Good_NilObfuscator(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, err := NewChaChaPolySigilWithObfuscator(key, nil)
|
||||
require.NoError(t, err)
|
||||
// Falls back to default XORObfuscator.
|
||||
assert.IsType(t, &XORObfuscator{}, s.Obfuscator)
|
||||
}
|
||||
|
||||
func TestNewChaChaPolySigilWithObfuscator_Bad_InvalidKey(t *testing.T) {
|
||||
_, err := NewChaChaPolySigilWithObfuscator([]byte("bad"), &XORObfuscator{})
|
||||
assert.ErrorIs(t, err, ErrInvalidKey)
|
||||
}
|
||||
|
||||
// ── ChaChaPolySigil In/Out (encrypt/decrypt) ───────────────────────
|
||||
|
||||
func TestChaChaPolySigil_Good_RoundTrip(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, err := NewChaChaPolySigil(key)
|
||||
require.NoError(t, err)
|
||||
|
||||
plaintext := []byte("consciousness does not merely avoid causing harm")
|
||||
ciphertext, err := s.In(plaintext)
|
||||
require.NoError(t, err)
|
||||
assert.NotEqual(t, plaintext, ciphertext)
|
||||
assert.Greater(t, len(ciphertext), len(plaintext)) // nonce + tag overhead
|
||||
|
||||
decrypted, err := s.Out(ciphertext)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, plaintext, decrypted)
|
||||
}
|
||||
|
||||
func TestChaChaPolySigil_Good_WithShuffleMask(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, err := NewChaChaPolySigilWithObfuscator(key, &ShuffleMaskObfuscator{})
|
||||
require.NoError(t, err)
|
||||
|
||||
plaintext := []byte("shuffle mask pre-obfuscation layer")
|
||||
ciphertext, err := s.In(plaintext)
|
||||
require.NoError(t, err)
|
||||
|
||||
decrypted, err := s.Out(ciphertext)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, plaintext, decrypted)
|
||||
}
|
||||
|
||||
func TestChaChaPolySigil_Good_NilData(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, err := NewChaChaPolySigil(key)
|
||||
require.NoError(t, err)
|
||||
|
||||
enc, err := s.In(nil)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, enc)
|
||||
|
||||
dec, err := s.Out(nil)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, dec)
|
||||
}
|
||||
|
||||
func TestChaChaPolySigil_Good_EmptyPlaintext(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, err := NewChaChaPolySigil(key)
|
||||
require.NoError(t, err)
|
||||
|
||||
ciphertext, err := s.In([]byte{})
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, ciphertext) // Has nonce + tag even for empty plaintext.
|
||||
|
||||
decrypted, err := s.Out(ciphertext)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte{}, decrypted)
|
||||
}
|
||||
|
||||
func TestChaChaPolySigil_Good_DifferentCiphertextsPerCall(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, err := NewChaChaPolySigil(key)
|
||||
require.NoError(t, err)
|
||||
|
||||
plaintext := []byte("same input")
|
||||
ct1, _ := s.In(plaintext)
|
||||
ct2, _ := s.In(plaintext)
|
||||
|
||||
// Different nonces → different ciphertexts.
|
||||
assert.NotEqual(t, ct1, ct2)
|
||||
}
|
||||
|
||||
func TestChaChaPolySigil_Bad_NoKey(t *testing.T) {
|
||||
s := &ChaChaPolySigil{}
|
||||
|
||||
_, err := s.In([]byte("data"))
|
||||
assert.ErrorIs(t, err, ErrNoKeyConfigured)
|
||||
|
||||
_, err = s.Out([]byte("data"))
|
||||
assert.ErrorIs(t, err, ErrNoKeyConfigured)
|
||||
}
|
||||
|
||||
func TestChaChaPolySigil_Bad_WrongKey(t *testing.T) {
|
||||
key1 := make([]byte, 32)
|
||||
key2 := make([]byte, 32)
|
||||
_, _ = rand.Read(key1)
|
||||
_, _ = rand.Read(key2)
|
||||
|
||||
s1, _ := NewChaChaPolySigil(key1)
|
||||
s2, _ := NewChaChaPolySigil(key2)
|
||||
|
||||
ciphertext, err := s1.In([]byte("secret"))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = s2.Out(ciphertext)
|
||||
assert.ErrorIs(t, err, ErrDecryptionFailed)
|
||||
}
|
||||
|
||||
func TestChaChaPolySigil_Bad_TruncatedCiphertext(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, _ := NewChaChaPolySigil(key)
|
||||
_, err := s.Out([]byte("too short"))
|
||||
assert.ErrorIs(t, err, ErrCiphertextTooShort)
|
||||
}
|
||||
|
||||
func TestChaChaPolySigil_Bad_TamperedCiphertext(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, _ := NewChaChaPolySigil(key)
|
||||
ciphertext, _ := s.In([]byte("authentic data"))
|
||||
|
||||
// Flip a bit in the ciphertext body (after nonce).
|
||||
ciphertext[30] ^= 0xFF
|
||||
|
||||
_, err := s.Out(ciphertext)
|
||||
assert.ErrorIs(t, err, ErrDecryptionFailed)
|
||||
}
|
||||
|
||||
// failReader returns an error on read — for testing nonce generation failure.
|
||||
type failReader struct{}
|
||||
|
||||
func (f *failReader) Read([]byte) (int, error) {
|
||||
return 0, errors.New("entropy source failed")
|
||||
}
|
||||
|
||||
func TestChaChaPolySigil_Bad_RandReaderFailure(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, _ := NewChaChaPolySigil(key)
|
||||
s.randReader = &failReader{}
|
||||
|
||||
_, err := s.In([]byte("data"))
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// ── ChaChaPolySigil without obfuscator ─────────────────────────────
|
||||
|
||||
func TestChaChaPolySigil_Good_NoObfuscator(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, _ := NewChaChaPolySigil(key)
|
||||
s.Obfuscator = nil // Disable pre-obfuscation.
|
||||
|
||||
plaintext := []byte("raw encryption without pre-obfuscation")
|
||||
ciphertext, err := s.In(plaintext)
|
||||
require.NoError(t, err)
|
||||
|
||||
decrypted, err := s.Out(ciphertext)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, plaintext, decrypted)
|
||||
}
|
||||
|
||||
// ── GetNonceFromCiphertext ─────────────────────────────────────────
|
||||
|
||||
func TestGetNonceFromCiphertext_Good(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, _ := NewChaChaPolySigil(key)
|
||||
ciphertext, _ := s.In([]byte("nonce extraction test"))
|
||||
|
||||
nonce, err := GetNonceFromCiphertext(ciphertext)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, nonce, 24) // XChaCha20 nonce is 24 bytes.
|
||||
|
||||
// Nonce should match the prefix of the ciphertext.
|
||||
assert.Equal(t, ciphertext[:24], nonce)
|
||||
}
|
||||
|
||||
func TestGetNonceFromCiphertext_Good_NonceCopied(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, _ := NewChaChaPolySigil(key)
|
||||
ciphertext, _ := s.In([]byte("data"))
|
||||
|
||||
nonce, _ := GetNonceFromCiphertext(ciphertext)
|
||||
original := make([]byte, len(nonce))
|
||||
copy(original, nonce)
|
||||
|
||||
// Mutating the nonce should not affect the ciphertext.
|
||||
nonce[0] ^= 0xFF
|
||||
assert.Equal(t, original, ciphertext[:24])
|
||||
}
|
||||
|
||||
func TestGetNonceFromCiphertext_Bad_TooShort(t *testing.T) {
|
||||
_, err := GetNonceFromCiphertext([]byte("short"))
|
||||
assert.ErrorIs(t, err, ErrCiphertextTooShort)
|
||||
}
|
||||
|
||||
func TestGetNonceFromCiphertext_Bad_Empty(t *testing.T) {
|
||||
_, err := GetNonceFromCiphertext(nil)
|
||||
assert.ErrorIs(t, err, ErrCiphertextTooShort)
|
||||
}
|
||||
|
||||
// ── ChaChaPolySigil in Transmute pipeline ──────────────────────────
|
||||
|
||||
func TestChaChaPolySigil_Good_InTransmutePipeline(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, _ := NewChaChaPolySigil(key)
|
||||
hexSigil, _ := NewSigil("hex")
|
||||
|
||||
chain := []Sigil{s, hexSigil}
|
||||
plaintext := []byte("encrypt then hex encode")
|
||||
|
||||
encoded, err := Transmute(plaintext, chain)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Result should be hex-encoded ciphertext.
|
||||
assert.True(t, isHex(encoded))
|
||||
|
||||
decoded, err := Untransmute(encoded, chain)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, plaintext, decoded)
|
||||
}
|
||||
|
||||
func isHex(data []byte) bool {
|
||||
for _, b := range data {
|
||||
if !((b >= '0' && b <= '9') || (b >= 'a' && b <= 'f')) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return len(data) > 0
|
||||
}
|
||||
|
||||
// ── Transmute error propagation ────────────────────────────────────
|
||||
|
||||
type failSigil struct{}
|
||||
|
||||
func (f *failSigil) In([]byte) ([]byte, error) { return nil, errors.New("fail in") }
|
||||
func (f *failSigil) Out([]byte) ([]byte, error) { return nil, errors.New("fail out") }
|
||||
|
||||
func TestTransmute_Bad_ErrorPropagation(t *testing.T) {
|
||||
_, err := Transmute([]byte("data"), []Sigil{&failSigil{}})
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "fail in")
|
||||
}
|
||||
|
||||
func TestUntransmute_Bad_ErrorPropagation(t *testing.T) {
|
||||
_, err := Untransmute([]byte("data"), []Sigil{&failSigil{}})
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "fail out")
|
||||
}
|
||||
|
||||
// ── GzipSigil with custom writer (edge case) ──────────────────────
|
||||
|
||||
func TestGzipSigil_Good_CustomWriter(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
s := &GzipSigil{writer: &buf}
|
||||
|
||||
// With custom writer, compressed data goes to buf, returned bytes will be empty
|
||||
// because the internal buffer 'b' is unused when s.writer is set.
|
||||
_, err := s.In([]byte("test data"))
|
||||
require.NoError(t, err)
|
||||
assert.Greater(t, buf.Len(), 0)
|
||||
}
|
||||
|
||||
// ── deriveKeyStream edge: exactly 32 bytes ─────────────────────────
|
||||
|
||||
func TestDeriveKeyStream_Good_ExactBlockSize(t *testing.T) {
|
||||
ob := &XORObfuscator{}
|
||||
data := make([]byte, 32) // Exactly one SHA-256 block.
|
||||
for i := range data {
|
||||
data[i] = byte(i)
|
||||
}
|
||||
entropy := []byte("block-boundary")
|
||||
|
||||
obfuscated := ob.Obfuscate(data, entropy)
|
||||
restored := ob.Deobfuscate(obfuscated, entropy)
|
||||
assert.Equal(t, data, restored)
|
||||
}
|
||||
|
||||
// ── io.Reader fallback in In ───────────────────────────────────────
|
||||
|
||||
func TestChaChaPolySigil_Good_NilRandReader(t *testing.T) {
|
||||
key := make([]byte, 32)
|
||||
_, _ = rand.Read(key)
|
||||
|
||||
s, _ := NewChaChaPolySigil(key)
|
||||
s.randReader = nil // Should fall back to crypto/rand.Reader.
|
||||
|
||||
ciphertext, err := s.In([]byte("fallback reader"))
|
||||
require.NoError(t, err)
|
||||
|
||||
decrypted, err := s.Out(ciphertext)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte("fallback reader"), decrypted)
|
||||
}
|
||||
|
||||
// limitReader returns exactly N bytes then EOF — for deterministic tests.
|
||||
type limitReader struct {
|
||||
data []byte
|
||||
pos int
|
||||
}
|
||||
|
||||
func (l *limitReader) Read(p []byte) (int, error) {
|
||||
if l.pos >= len(l.data) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n := copy(p, l.data[l.pos:])
|
||||
l.pos += n
|
||||
return n, nil
|
||||
}
|
||||
|
|
@ -1,71 +0,0 @@
|
|||
// Package sigil provides the Sigil transformation framework for composable,
|
||||
// reversible data transformations.
|
||||
//
|
||||
// Sigils are the core abstraction - each sigil implements a specific transformation
|
||||
// (encoding, compression, hashing, encryption) with a uniform interface. Sigils can
|
||||
// be chained together to create transformation pipelines.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// hexSigil, _ := sigil.NewSigil("hex")
|
||||
// base64Sigil, _ := sigil.NewSigil("base64")
|
||||
// result, _ := sigil.Transmute(data, []sigil.Sigil{hexSigil, base64Sigil})
|
||||
package sigil
|
||||
|
||||
// Sigil is the contract for a single, composable data transformer.
//
// A Sigil applies one transformation unit to byte data. Reversible sigils
// (encoding, compression, encryption) satisfy Out(In(x)) == x for all
// valid x; irreversible sigils (hashing) return their input unchanged from
// Out; symmetric sigils behave identically in both directions.
//
// Implementations must return nil for nil input and an empty slice for
// empty input, both without error.
type Sigil interface {
	// In applies the forward transformation: encode, compress, or digest
	// the data depending on the sigil kind.
	In(data []byte) ([]byte, error)

	// Out applies the reverse transformation, recovering the original data
	// for reversible sigils. Irreversible sigils (e.g. hashing) return the
	// input unchanged.
	Out(data []byte) ([]byte, error)
}
|
||||
|
||||
// Transmute applies a series of sigils to data in sequence.
|
||||
//
|
||||
// Each sigil's In method is called in order, with the output of one sigil
|
||||
// becoming the input of the next. If any sigil returns an error, Transmute
|
||||
// stops immediately and returns nil with that error.
|
||||
//
|
||||
// To reverse a transmutation, call each sigil's Out method in reverse order.
|
||||
func Transmute(data []byte, sigils []Sigil) ([]byte, error) {
|
||||
var err error
|
||||
for _, s := range sigils {
|
||||
data, err = s.In(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Untransmute reverses a transmutation by applying Out in reverse order.
|
||||
//
|
||||
// Each sigil's Out method is called in reverse order, with the output of one sigil
|
||||
// becoming the input of the next. If any sigil returns an error, Untransmute
|
||||
// stops immediately and returns nil with that error.
|
||||
func Untransmute(data []byte, sigils []Sigil) ([]byte, error) {
|
||||
var err error
|
||||
for i := len(sigils) - 1; i >= 0; i-- {
|
||||
data, err = sigils[i].Out(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
|
@ -1,422 +0,0 @@
|
|||
package sigil
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// ReverseSigil
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestReverseSigil_Good(t *testing.T) {
|
||||
s := &ReverseSigil{}
|
||||
|
||||
out, err := s.In([]byte("hello"))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte("olleh"), out)
|
||||
|
||||
// Symmetric: Out does the same thing.
|
||||
restored, err := s.Out(out)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte("hello"), restored)
|
||||
}
|
||||
|
||||
func TestReverseSigil_Bad(t *testing.T) {
|
||||
s := &ReverseSigil{}
|
||||
|
||||
// Empty input returns empty.
|
||||
out, err := s.In([]byte{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte{}, out)
|
||||
}
|
||||
|
||||
func TestReverseSigil_Ugly(t *testing.T) {
|
||||
s := &ReverseSigil{}
|
||||
|
||||
// Nil input returns nil.
|
||||
out, err := s.In(nil)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, out)
|
||||
|
||||
out, err = s.Out(nil)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, out)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// HexSigil
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestHexSigil_Good(t *testing.T) {
|
||||
s := &HexSigil{}
|
||||
data := []byte("hello world")
|
||||
|
||||
encoded, err := s.In(data)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte(hex.EncodeToString(data)), encoded)
|
||||
|
||||
decoded, err := s.Out(encoded)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, data, decoded)
|
||||
}
|
||||
|
||||
func TestHexSigil_Bad(t *testing.T) {
|
||||
s := &HexSigil{}
|
||||
|
||||
// Invalid hex input.
|
||||
_, err := s.Out([]byte("zzzz"))
|
||||
assert.Error(t, err)
|
||||
|
||||
// Empty input.
|
||||
out, err := s.In([]byte{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte{}, out)
|
||||
}
|
||||
|
||||
func TestHexSigil_Ugly(t *testing.T) {
|
||||
s := &HexSigil{}
|
||||
|
||||
out, err := s.In(nil)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, out)
|
||||
|
||||
out, err = s.Out(nil)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, out)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Base64Sigil
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestBase64Sigil_Good(t *testing.T) {
|
||||
s := &Base64Sigil{}
|
||||
data := []byte("composable transforms")
|
||||
|
||||
encoded, err := s.In(data)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte(base64.StdEncoding.EncodeToString(data)), encoded)
|
||||
|
||||
decoded, err := s.Out(encoded)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, data, decoded)
|
||||
}
|
||||
|
||||
func TestBase64Sigil_Bad(t *testing.T) {
|
||||
s := &Base64Sigil{}
|
||||
|
||||
// Invalid base64 (wrong padding).
|
||||
_, err := s.Out([]byte("!!!"))
|
||||
assert.Error(t, err)
|
||||
|
||||
// Empty input.
|
||||
out, err := s.In([]byte{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte{}, out)
|
||||
}
|
||||
|
||||
func TestBase64Sigil_Ugly(t *testing.T) {
|
||||
s := &Base64Sigil{}
|
||||
|
||||
out, err := s.In(nil)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, out)
|
||||
|
||||
out, err = s.Out(nil)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, out)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// GzipSigil
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestGzipSigil_Good(t *testing.T) {
|
||||
s := &GzipSigil{}
|
||||
data := []byte("the quick brown fox jumps over the lazy dog")
|
||||
|
||||
compressed, err := s.In(data)
|
||||
require.NoError(t, err)
|
||||
assert.NotEqual(t, data, compressed)
|
||||
|
||||
decompressed, err := s.Out(compressed)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, data, decompressed)
|
||||
}
|
||||
|
||||
func TestGzipSigil_Bad(t *testing.T) {
|
||||
s := &GzipSigil{}
|
||||
|
||||
// Invalid gzip data.
|
||||
_, err := s.Out([]byte("not gzip"))
|
||||
assert.Error(t, err)
|
||||
|
||||
// Empty input compresses to a valid gzip stream.
|
||||
compressed, err := s.In([]byte{})
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, compressed) // gzip header is always present
|
||||
|
||||
decompressed, err := s.Out(compressed)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte{}, decompressed)
|
||||
}
|
||||
|
||||
func TestGzipSigil_Ugly(t *testing.T) {
|
||||
s := &GzipSigil{}
|
||||
|
||||
out, err := s.In(nil)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, out)
|
||||
|
||||
out, err = s.Out(nil)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, out)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// JSONSigil
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestJSONSigil_Good(t *testing.T) {
|
||||
s := &JSONSigil{Indent: false}
|
||||
data := []byte(`{ "key" : "value" }`)
|
||||
|
||||
compacted, err := s.In(data)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte(`{"key":"value"}`), compacted)
|
||||
|
||||
// Out is passthrough.
|
||||
passthrough, err := s.Out(compacted)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, compacted, passthrough)
|
||||
}
|
||||
|
||||
func TestJSONSigil_Good_Indent(t *testing.T) {
|
||||
s := &JSONSigil{Indent: true}
|
||||
data := []byte(`{"key":"value"}`)
|
||||
|
||||
indented, err := s.In(data)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, string(indented), "\n")
|
||||
assert.Contains(t, string(indented), " ")
|
||||
}
|
||||
|
||||
func TestJSONSigil_Bad(t *testing.T) {
|
||||
s := &JSONSigil{Indent: false}
|
||||
|
||||
// Invalid JSON.
|
||||
_, err := s.In([]byte("not json"))
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestJSONSigil_Ugly(t *testing.T) {
|
||||
s := &JSONSigil{Indent: false}
|
||||
|
||||
// json.Compact on nil/empty will produce an error (invalid JSON).
|
||||
_, err := s.In(nil)
|
||||
assert.Error(t, err)
|
||||
|
||||
// Out with nil is passthrough.
|
||||
out, err := s.Out(nil)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, out)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// HashSigil
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestHashSigil_Good(t *testing.T) {
|
||||
data := []byte("hash me")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
sigilName string
|
||||
size int
|
||||
}{
|
||||
{"md5", "md5", md5.Size},
|
||||
{"sha1", "sha1", sha1.Size},
|
||||
{"sha256", "sha256", sha256.Size},
|
||||
{"sha512", "sha512", sha512.Size},
|
||||
{"sha224", "sha224", sha256.Size224},
|
||||
{"sha384", "sha384", sha512.Size384},
|
||||
{"sha512-224", "sha512-224", 28},
|
||||
{"sha512-256", "sha512-256", 32},
|
||||
{"sha3-224", "sha3-224", 28},
|
||||
{"sha3-256", "sha3-256", 32},
|
||||
{"sha3-384", "sha3-384", 48},
|
||||
{"sha3-512", "sha3-512", 64},
|
||||
{"ripemd160", "ripemd160", 20},
|
||||
{"blake2s-256", "blake2s-256", 32},
|
||||
{"blake2b-256", "blake2b-256", 32},
|
||||
{"blake2b-384", "blake2b-384", 48},
|
||||
{"blake2b-512", "blake2b-512", 64},
|
||||
{"md4", "md4", 16},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s, err := NewSigil(tt.sigilName)
|
||||
require.NoError(t, err)
|
||||
|
||||
hashed, err := s.In(data)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, hashed, tt.size)
|
||||
|
||||
// Out is passthrough.
|
||||
passthrough, err := s.Out(hashed)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, hashed, passthrough)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashSigil_Bad(t *testing.T) {
|
||||
// Unsupported hash constant.
|
||||
s := &HashSigil{Hash: 0}
|
||||
_, err := s.In([]byte("data"))
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not available")
|
||||
}
|
||||
|
||||
func TestHashSigil_Ugly(t *testing.T) {
|
||||
// Hashing empty data should still produce a valid digest.
|
||||
s, err := NewSigil("sha256")
|
||||
require.NoError(t, err)
|
||||
|
||||
hashed, err := s.In([]byte{})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, hashed, sha256.Size)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// NewSigil factory
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestNewSigil_Good(t *testing.T) {
|
||||
names := []string{
|
||||
"reverse", "hex", "base64", "gzip", "json", "json-indent",
|
||||
"md4", "md5", "sha1", "sha224", "sha256", "sha384", "sha512",
|
||||
"ripemd160",
|
||||
"sha3-224", "sha3-256", "sha3-384", "sha3-512",
|
||||
"sha512-224", "sha512-256",
|
||||
"blake2s-256", "blake2b-256", "blake2b-384", "blake2b-512",
|
||||
}
|
||||
|
||||
for _, name := range names {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
s, err := NewSigil(name)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, s)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewSigil_Bad(t *testing.T) {
|
||||
_, err := NewSigil("nonexistent")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unknown sigil name")
|
||||
}
|
||||
|
||||
func TestNewSigil_Ugly(t *testing.T) {
|
||||
_, err := NewSigil("")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Transmute / Untransmute
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestTransmute_Good(t *testing.T) {
|
||||
data := []byte("round trip")
|
||||
|
||||
hexSigil, err := NewSigil("hex")
|
||||
require.NoError(t, err)
|
||||
base64Sigil, err := NewSigil("base64")
|
||||
require.NoError(t, err)
|
||||
|
||||
chain := []Sigil{hexSigil, base64Sigil}
|
||||
|
||||
encoded, err := Transmute(data, chain)
|
||||
require.NoError(t, err)
|
||||
assert.NotEqual(t, data, encoded)
|
||||
|
||||
decoded, err := Untransmute(encoded, chain)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, data, decoded)
|
||||
}
|
||||
|
||||
func TestTransmute_Good_MultiSigil(t *testing.T) {
|
||||
data := []byte("multi sigil pipeline test data")
|
||||
|
||||
reverseSigil, err := NewSigil("reverse")
|
||||
require.NoError(t, err)
|
||||
hexSigil, err := NewSigil("hex")
|
||||
require.NoError(t, err)
|
||||
base64Sigil, err := NewSigil("base64")
|
||||
require.NoError(t, err)
|
||||
|
||||
chain := []Sigil{reverseSigil, hexSigil, base64Sigil}
|
||||
|
||||
encoded, err := Transmute(data, chain)
|
||||
require.NoError(t, err)
|
||||
|
||||
decoded, err := Untransmute(encoded, chain)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, data, decoded)
|
||||
}
|
||||
|
||||
func TestTransmute_Good_GzipRoundTrip(t *testing.T) {
|
||||
data := []byte("compress then encode then decode then decompress")
|
||||
|
||||
gzipSigil, err := NewSigil("gzip")
|
||||
require.NoError(t, err)
|
||||
hexSigil, err := NewSigil("hex")
|
||||
require.NoError(t, err)
|
||||
|
||||
chain := []Sigil{gzipSigil, hexSigil}
|
||||
|
||||
encoded, err := Transmute(data, chain)
|
||||
require.NoError(t, err)
|
||||
|
||||
decoded, err := Untransmute(encoded, chain)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, data, decoded)
|
||||
}
|
||||
|
||||
func TestTransmute_Bad(t *testing.T) {
|
||||
// Transmute with a sigil that will fail: hex decode on non-hex input.
|
||||
hexSigil := &HexSigil{}
|
||||
|
||||
// Calling Out (decode) with invalid input via manual chain.
|
||||
_, err := Untransmute([]byte("not-hex!!"), []Sigil{hexSigil})
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestTransmute_Ugly(t *testing.T) {
|
||||
// Empty sigil chain is a no-op.
|
||||
data := []byte("unchanged")
|
||||
|
||||
result, err := Transmute(data, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, data, result)
|
||||
|
||||
result, err = Untransmute(data, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, data, result)
|
||||
|
||||
// Nil data through a chain.
|
||||
hexSigil, _ := NewSigil("hex")
|
||||
result, err = Transmute(nil, []Sigil{hexSigil})
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, result)
|
||||
}
|
||||
|
|
@ -1,274 +0,0 @@
|
|||
package sigil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"golang.org/x/crypto/blake2b"
|
||||
"golang.org/x/crypto/blake2s"
|
||||
"golang.org/x/crypto/md4"
|
||||
"golang.org/x/crypto/ripemd160"
|
||||
"golang.org/x/crypto/sha3"
|
||||
)
|
||||
|
||||
// ReverseSigil is a Sigil that reverses the bytes of the payload.
// It is symmetrical: In and Out perform the identical transformation.
type ReverseSigil struct{}

// In returns a new slice holding data in reverse byte order.
// A nil payload passes through as nil.
func (s *ReverseSigil) In(data []byte) ([]byte, error) {
	if data == nil {
		return nil, nil
	}
	n := len(data)
	out := make([]byte, n)
	for i := range data {
		out[n-1-i] = data[i]
	}
	return out, nil
}

// Out reverses the bytes of the data; identical to In.
func (s *ReverseSigil) Out(data []byte) ([]byte, error) {
	return s.In(data)
}
|
||||
|
||||
// HexSigil is a Sigil that encodes/decodes data to/from hexadecimal.
// The In method encodes the data, and the Out method decodes it.
type HexSigil struct{}

// In encodes the data to hexadecimal. A nil payload passes through as nil.
func (s *HexSigil) In(data []byte) ([]byte, error) {
	if data == nil {
		return nil, nil
	}
	dst := make([]byte, hex.EncodedLen(len(data)))
	hex.Encode(dst, data)
	return dst, nil
}

// Out decodes the data from hexadecimal. A nil payload passes through as
// nil. On malformed input it returns (nil, err) rather than handing back a
// full-length, partially-filled buffer alongside the error.
func (s *HexSigil) Out(data []byte) ([]byte, error) {
	if data == nil {
		return nil, nil
	}
	dst := make([]byte, hex.DecodedLen(len(data)))
	n, err := hex.Decode(dst, data)
	if err != nil {
		// The decoded prefix is garbage to callers; don't return it.
		return nil, err
	}
	return dst[:n], nil
}
|
||||
|
||||
// Base64Sigil is a Sigil that encodes/decodes data to/from base64.
// The In method encodes the data, and the Out method decodes it.
type Base64Sigil struct{}

// In encodes the data as standard base64. A nil payload passes through
// as nil.
func (s *Base64Sigil) In(data []byte) ([]byte, error) {
	if data == nil {
		return nil, nil
	}
	enc := base64.StdEncoding
	out := make([]byte, enc.EncodedLen(len(data)))
	enc.Encode(out, data)
	return out, nil
}

// Out decodes standard base64 data. A nil payload passes through as nil.
func (s *Base64Sigil) Out(data []byte) ([]byte, error) {
	if data == nil {
		return nil, nil
	}
	enc := base64.StdEncoding
	out := make([]byte, enc.DecodedLen(len(data)))
	n, err := enc.Decode(out, data)
	return out[:n], err
}
|
||||
|
||||
// GzipSigil is a Sigil that compresses/decompresses data using gzip.
|
||||
// The In method compresses the data, and the Out method decompresses it.
|
||||
type GzipSigil struct {
|
||||
writer io.Writer
|
||||
}
|
||||
|
||||
// In compresses the data using gzip.
|
||||
func (s *GzipSigil) In(data []byte) ([]byte, error) {
|
||||
if data == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var b bytes.Buffer
|
||||
w := s.writer
|
||||
if w == nil {
|
||||
w = &b
|
||||
}
|
||||
gz := gzip.NewWriter(w)
|
||||
if _, err := gz.Write(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := gz.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// Out decompresses the data using gzip.
|
||||
func (s *GzipSigil) Out(data []byte) ([]byte, error) {
|
||||
if data == nil {
|
||||
return nil, nil
|
||||
}
|
||||
r, err := gzip.NewReader(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer r.Close()
|
||||
return io.ReadAll(r)
|
||||
}
|
||||
|
||||
// JSONSigil is a Sigil that compacts or indents JSON data.
// The Out method is a no-op.
type JSONSigil struct{ Indent bool }

// In rewrites the JSON payload: indented with two spaces when Indent is
// set, otherwise compacted to its minimal form.
func (s *JSONSigil) In(data []byte) ([]byte, error) {
	var buf bytes.Buffer
	var err error
	if s.Indent {
		err = json.Indent(&buf, data, "", "  ")
	} else {
		err = json.Compact(&buf, data)
	}
	return buf.Bytes(), err
}

// Out is a no-op: this sigil exists purely for formatting, so there is
// nothing meaningful to reverse.
func (s *JSONSigil) Out(data []byte) ([]byte, error) {
	return data, nil
}
|
||||
|
||||
// HashSigil is a Sigil that hashes the data using a specified algorithm.
|
||||
// The In method hashes the data, and the Out method is a no-op.
|
||||
type HashSigil struct {
|
||||
Hash crypto.Hash
|
||||
}
|
||||
|
||||
// NewHashSigil creates a new HashSigil.
|
||||
func NewHashSigil(h crypto.Hash) *HashSigil {
|
||||
return &HashSigil{Hash: h}
|
||||
}
|
||||
|
||||
// In hashes the data.
|
||||
func (s *HashSigil) In(data []byte) ([]byte, error) {
|
||||
var h io.Writer
|
||||
switch s.Hash {
|
||||
case crypto.MD4:
|
||||
h = md4.New()
|
||||
case crypto.MD5:
|
||||
h = md5.New()
|
||||
case crypto.SHA1:
|
||||
h = sha1.New()
|
||||
case crypto.SHA224:
|
||||
h = sha256.New224()
|
||||
case crypto.SHA256:
|
||||
h = sha256.New()
|
||||
case crypto.SHA384:
|
||||
h = sha512.New384()
|
||||
case crypto.SHA512:
|
||||
h = sha512.New()
|
||||
case crypto.RIPEMD160:
|
||||
h = ripemd160.New()
|
||||
case crypto.SHA3_224:
|
||||
h = sha3.New224()
|
||||
case crypto.SHA3_256:
|
||||
h = sha3.New256()
|
||||
case crypto.SHA3_384:
|
||||
h = sha3.New384()
|
||||
case crypto.SHA3_512:
|
||||
h = sha3.New512()
|
||||
case crypto.SHA512_224:
|
||||
h = sha512.New512_224()
|
||||
case crypto.SHA512_256:
|
||||
h = sha512.New512_256()
|
||||
case crypto.BLAKE2s_256:
|
||||
h, _ = blake2s.New256(nil)
|
||||
case crypto.BLAKE2b_256:
|
||||
h, _ = blake2b.New256(nil)
|
||||
case crypto.BLAKE2b_384:
|
||||
h, _ = blake2b.New384(nil)
|
||||
case crypto.BLAKE2b_512:
|
||||
h, _ = blake2b.New512(nil)
|
||||
default:
|
||||
// MD5SHA1 is not supported as a direct hash
|
||||
return nil, errors.New("sigil: hash algorithm not available")
|
||||
}
|
||||
|
||||
h.Write(data)
|
||||
return h.(interface{ Sum([]byte) []byte }).Sum(nil), nil
|
||||
}
|
||||
|
||||
// Out is a no-op for HashSigil.
|
||||
func (s *HashSigil) Out(data []byte) ([]byte, error) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// NewSigil is a factory function that returns a Sigil based on a string name.
|
||||
// It is the primary way to create Sigil instances.
|
||||
func NewSigil(name string) (Sigil, error) {
|
||||
switch name {
|
||||
case "reverse":
|
||||
return &ReverseSigil{}, nil
|
||||
case "hex":
|
||||
return &HexSigil{}, nil
|
||||
case "base64":
|
||||
return &Base64Sigil{}, nil
|
||||
case "gzip":
|
||||
return &GzipSigil{}, nil
|
||||
case "json":
|
||||
return &JSONSigil{Indent: false}, nil
|
||||
case "json-indent":
|
||||
return &JSONSigil{Indent: true}, nil
|
||||
case "md4":
|
||||
return NewHashSigil(crypto.MD4), nil
|
||||
case "md5":
|
||||
return NewHashSigil(crypto.MD5), nil
|
||||
case "sha1":
|
||||
return NewHashSigil(crypto.SHA1), nil
|
||||
case "sha224":
|
||||
return NewHashSigil(crypto.SHA224), nil
|
||||
case "sha256":
|
||||
return NewHashSigil(crypto.SHA256), nil
|
||||
case "sha384":
|
||||
return NewHashSigil(crypto.SHA384), nil
|
||||
case "sha512":
|
||||
return NewHashSigil(crypto.SHA512), nil
|
||||
case "ripemd160":
|
||||
return NewHashSigil(crypto.RIPEMD160), nil
|
||||
case "sha3-224":
|
||||
return NewHashSigil(crypto.SHA3_224), nil
|
||||
case "sha3-256":
|
||||
return NewHashSigil(crypto.SHA3_256), nil
|
||||
case "sha3-384":
|
||||
return NewHashSigil(crypto.SHA3_384), nil
|
||||
case "sha3-512":
|
||||
return NewHashSigil(crypto.SHA3_512), nil
|
||||
case "sha512-224":
|
||||
return NewHashSigil(crypto.SHA512_224), nil
|
||||
case "sha512-256":
|
||||
return NewHashSigil(crypto.SHA512_256), nil
|
||||
case "blake2s-256":
|
||||
return NewHashSigil(crypto.BLAKE2s_256), nil
|
||||
case "blake2b-256":
|
||||
return NewHashSigil(crypto.BLAKE2b_256), nil
|
||||
case "blake2b-384":
|
||||
return NewHashSigil(crypto.BLAKE2b_384), nil
|
||||
case "blake2b-512":
|
||||
return NewHashSigil(crypto.BLAKE2b_512), nil
|
||||
default:
|
||||
return nil, errors.New("sigil: unknown sigil name")
|
||||
}
|
||||
}
|
||||
|
|
@ -1,669 +0,0 @@
|
|||
// Package sqlite provides a SQLite-backed implementation of the io.Medium interface.
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
goio "io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
coreerr "forge.lthn.ai/core/go/pkg/framework/core"
|
||||
|
||||
_ "modernc.org/sqlite" // Pure Go SQLite driver
|
||||
)
|
||||
|
||||
// Medium is a SQLite-backed storage backend implementing the io.Medium interface.
// Every file and directory is a row in a single table keyed by its cleaned
// relative path (see cleanPath).
type Medium struct {
	db    *sql.DB // open database handle; nil only before New succeeds or after Close
	table string  // name of the backing table (default "files", see WithTable)
}

// Option configures a Medium during construction via New.
type Option func(*Medium)

// WithTable sets the table name (default: "files").
func WithTable(table string) Option {
	return func(m *Medium) {
		m.table = table
	}
}
|
||||
|
||||
// New creates a new SQLite Medium at the given database path.
|
||||
// Use ":memory:" for an in-memory database.
|
||||
func New(dbPath string, opts ...Option) (*Medium, error) {
|
||||
if dbPath == "" {
|
||||
return nil, coreerr.E("sqlite.New", "database path is required", nil)
|
||||
}
|
||||
|
||||
m := &Medium{table: "files"}
|
||||
for _, opt := range opts {
|
||||
opt(m)
|
||||
}
|
||||
|
||||
db, err := sql.Open("sqlite", dbPath)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("sqlite.New", "failed to open database", err)
|
||||
}
|
||||
|
||||
// Enable WAL mode for better concurrency
|
||||
if _, err := db.Exec("PRAGMA journal_mode=WAL"); err != nil {
|
||||
db.Close()
|
||||
return nil, coreerr.E("sqlite.New", "failed to set WAL mode", err)
|
||||
}
|
||||
|
||||
// Create the schema
|
||||
createSQL := `CREATE TABLE IF NOT EXISTS ` + m.table + ` (
|
||||
path TEXT PRIMARY KEY,
|
||||
content BLOB NOT NULL,
|
||||
mode INTEGER DEFAULT 420,
|
||||
is_dir BOOLEAN DEFAULT FALSE,
|
||||
mtime DATETIME DEFAULT CURRENT_TIMESTAMP
|
||||
)`
|
||||
if _, err := db.Exec(createSQL); err != nil {
|
||||
db.Close()
|
||||
return nil, coreerr.E("sqlite.New", "failed to create table", err)
|
||||
}
|
||||
|
||||
m.db = db
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Close closes the underlying database connection.
|
||||
func (m *Medium) Close() error {
|
||||
if m.db != nil {
|
||||
return m.db.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanPath normalizes a path for consistent storage. Prefixing "/" before
// path.Clean sandboxes ".." traversal attempts; the root itself maps to "".
func cleanPath(p string) string {
	normalized := path.Clean("/" + p)
	if normalized == "/" {
		return ""
	}
	return strings.TrimPrefix(normalized, "/")
}
|
||||
|
||||
// Read retrieves the content of a file as a string.
|
||||
func (m *Medium) Read(p string) (string, error) {
|
||||
key := cleanPath(p)
|
||||
if key == "" {
|
||||
return "", coreerr.E("sqlite.Read", "path is required", os.ErrInvalid)
|
||||
}
|
||||
|
||||
var content []byte
|
||||
var isDir bool
|
||||
err := m.db.QueryRow(
|
||||
`SELECT content, is_dir FROM `+m.table+` WHERE path = ?`, key,
|
||||
).Scan(&content, &isDir)
|
||||
if err == sql.ErrNoRows {
|
||||
return "", coreerr.E("sqlite.Read", "file not found: "+key, os.ErrNotExist)
|
||||
}
|
||||
if err != nil {
|
||||
return "", coreerr.E("sqlite.Read", "query failed: "+key, err)
|
||||
}
|
||||
if isDir {
|
||||
return "", coreerr.E("sqlite.Read", "path is a directory: "+key, os.ErrInvalid)
|
||||
}
|
||||
return string(content), nil
|
||||
}
|
||||
|
||||
// Write saves the given content to a file, overwriting it if it exists.
|
||||
func (m *Medium) Write(p, content string) error {
|
||||
key := cleanPath(p)
|
||||
if key == "" {
|
||||
return coreerr.E("sqlite.Write", "path is required", os.ErrInvalid)
|
||||
}
|
||||
|
||||
_, err := m.db.Exec(
|
||||
`INSERT INTO `+m.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, 420, FALSE, ?)
|
||||
ON CONFLICT(path) DO UPDATE SET content = excluded.content, is_dir = FALSE, mtime = excluded.mtime`,
|
||||
key, []byte(content), time.Now().UTC(),
|
||||
)
|
||||
if err != nil {
|
||||
return coreerr.E("sqlite.Write", "insert failed: "+key, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureDir makes sure a directory exists, creating it if necessary.
|
||||
func (m *Medium) EnsureDir(p string) error {
|
||||
key := cleanPath(p)
|
||||
if key == "" {
|
||||
// Root always "exists"
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err := m.db.Exec(
|
||||
`INSERT INTO `+m.table+` (path, content, mode, is_dir, mtime) VALUES (?, '', 493, TRUE, ?)
|
||||
ON CONFLICT(path) DO NOTHING`,
|
||||
key, time.Now().UTC(),
|
||||
)
|
||||
if err != nil {
|
||||
return coreerr.E("sqlite.EnsureDir", "insert failed: "+key, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsFile checks if a path exists and is a regular file.
|
||||
func (m *Medium) IsFile(p string) bool {
|
||||
key := cleanPath(p)
|
||||
if key == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
var isDir bool
|
||||
err := m.db.QueryRow(
|
||||
`SELECT is_dir FROM `+m.table+` WHERE path = ?`, key,
|
||||
).Scan(&isDir)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return !isDir
|
||||
}
|
||||
|
||||
// FileGet is a convenience function that reads a file from the medium.
// It is a thin alias for Read, kept for interface parity.
func (m *Medium) FileGet(p string) (string, error) {
	return m.Read(p)
}

// FileSet is a convenience function that writes a file to the medium.
// It is a thin alias for Write, kept for interface parity.
func (m *Medium) FileSet(p, content string) error {
	return m.Write(p, content)
}
|
||||
|
||||
// Delete removes a file or empty directory.
|
||||
func (m *Medium) Delete(p string) error {
|
||||
key := cleanPath(p)
|
||||
if key == "" {
|
||||
return coreerr.E("sqlite.Delete", "path is required", os.ErrInvalid)
|
||||
}
|
||||
|
||||
// Check if it's a directory with children
|
||||
var isDir bool
|
||||
err := m.db.QueryRow(
|
||||
`SELECT is_dir FROM `+m.table+` WHERE path = ?`, key,
|
||||
).Scan(&isDir)
|
||||
if err == sql.ErrNoRows {
|
||||
return coreerr.E("sqlite.Delete", "path not found: "+key, os.ErrNotExist)
|
||||
}
|
||||
if err != nil {
|
||||
return coreerr.E("sqlite.Delete", "query failed: "+key, err)
|
||||
}
|
||||
|
||||
if isDir {
|
||||
// Check for children
|
||||
prefix := key + "/"
|
||||
var count int
|
||||
err := m.db.QueryRow(
|
||||
`SELECT COUNT(*) FROM `+m.table+` WHERE path LIKE ? AND path != ?`, prefix+"%", key,
|
||||
).Scan(&count)
|
||||
if err != nil {
|
||||
return coreerr.E("sqlite.Delete", "count failed: "+key, err)
|
||||
}
|
||||
if count > 0 {
|
||||
return coreerr.E("sqlite.Delete", "directory not empty: "+key, os.ErrExist)
|
||||
}
|
||||
}
|
||||
|
||||
res, err := m.db.Exec(`DELETE FROM `+m.table+` WHERE path = ?`, key)
|
||||
if err != nil {
|
||||
return coreerr.E("sqlite.Delete", "delete failed: "+key, err)
|
||||
}
|
||||
n, _ := res.RowsAffected()
|
||||
if n == 0 {
|
||||
return coreerr.E("sqlite.Delete", "path not found: "+key, os.ErrNotExist)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteAll removes a file or directory and all its contents recursively.
|
||||
func (m *Medium) DeleteAll(p string) error {
|
||||
key := cleanPath(p)
|
||||
if key == "" {
|
||||
return coreerr.E("sqlite.DeleteAll", "path is required", os.ErrInvalid)
|
||||
}
|
||||
|
||||
prefix := key + "/"
|
||||
|
||||
// Delete the exact path and all children
|
||||
res, err := m.db.Exec(
|
||||
`DELETE FROM `+m.table+` WHERE path = ? OR path LIKE ?`,
|
||||
key, prefix+"%",
|
||||
)
|
||||
if err != nil {
|
||||
return coreerr.E("sqlite.DeleteAll", "delete failed: "+key, err)
|
||||
}
|
||||
n, _ := res.RowsAffected()
|
||||
if n == 0 {
|
||||
return coreerr.E("sqlite.DeleteAll", "path not found: "+key, os.ErrNotExist)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rename moves a file or directory from oldPath to newPath inside a single
// transaction. For directories, every descendant row is re-keyed under the
// new prefix. The destination is overwritten if it already exists.
func (m *Medium) Rename(oldPath, newPath string) error {
	oldKey := cleanPath(oldPath)
	newKey := cleanPath(newPath)
	if oldKey == "" || newKey == "" {
		return coreerr.E("sqlite.Rename", "both old and new paths are required", os.ErrInvalid)
	}

	tx, err := m.db.Begin()
	if err != nil {
		return coreerr.E("sqlite.Rename", "begin tx failed", err)
	}
	// Rollback is a no-op after a successful Commit, so deferring it keeps
	// every error path clean.
	defer tx.Rollback()

	// Check if source exists and capture the full row so it can be
	// replayed at the new key.
	var content []byte
	var mode int
	var isDir bool
	var mtime time.Time
	err = tx.QueryRow(
		`SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, oldKey,
	).Scan(&content, &mode, &isDir, &mtime)
	if err == sql.ErrNoRows {
		return coreerr.E("sqlite.Rename", "source not found: "+oldKey, os.ErrNotExist)
	}
	if err != nil {
		return coreerr.E("sqlite.Rename", "query failed: "+oldKey, err)
	}

	// Insert or replace at new path (upsert keeps an existing destination
	// row's primary key but replaces all of its data).
	_, err = tx.Exec(
		`INSERT INTO `+m.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, ?, ?)
ON CONFLICT(path) DO UPDATE SET content = excluded.content, mode = excluded.mode, is_dir = excluded.is_dir, mtime = excluded.mtime`,
		newKey, content, mode, isDir, mtime,
	)
	if err != nil {
		return coreerr.E("sqlite.Rename", "insert at new path failed: "+newKey, err)
	}

	// Delete old path
	_, err = tx.Exec(`DELETE FROM `+m.table+` WHERE path = ?`, oldKey)
	if err != nil {
		return coreerr.E("sqlite.Rename", "delete old path failed: "+oldKey, err)
	}

	// If it's a directory, move all children under the new prefix.
	if isDir {
		oldPrefix := oldKey + "/"
		newPrefix := newKey + "/"

		rows, err := tx.Query(
			`SELECT path, content, mode, is_dir, mtime FROM `+m.table+` WHERE path LIKE ?`,
			oldPrefix+"%",
		)
		if err != nil {
			return coreerr.E("sqlite.Rename", "query children failed", err)
		}

		// Materialize the result set before issuing writes: SQLite cannot
		// safely interleave writes with an open cursor on the same table.
		type child struct {
			path    string
			content []byte
			mode    int
			isDir   bool
			mtime   time.Time
		}
		var children []child
		for rows.Next() {
			var c child
			if err := rows.Scan(&c.path, &c.content, &c.mode, &c.isDir, &c.mtime); err != nil {
				rows.Close()
				return coreerr.E("sqlite.Rename", "scan child failed", err)
			}
			children = append(children, c)
		}
		rows.Close()

		// Re-key each descendant under the new prefix.
		for _, c := range children {
			newChildPath := newPrefix + strings.TrimPrefix(c.path, oldPrefix)
			_, err = tx.Exec(
				`INSERT INTO `+m.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, ?, ?)
ON CONFLICT(path) DO UPDATE SET content = excluded.content, mode = excluded.mode, is_dir = excluded.is_dir, mtime = excluded.mtime`,
				newChildPath, c.content, c.mode, c.isDir, c.mtime,
			)
			if err != nil {
				return coreerr.E("sqlite.Rename", "insert child failed", err)
			}
		}

		// Delete old children
		_, err = tx.Exec(`DELETE FROM `+m.table+` WHERE path LIKE ?`, oldPrefix+"%")
		if err != nil {
			return coreerr.E("sqlite.Rename", "delete old children failed", err)
		}
	}

	return tx.Commit()
}
|
||||
|
||||
// List returns the directory entries for the given path.
|
||||
func (m *Medium) List(p string) ([]fs.DirEntry, error) {
|
||||
prefix := cleanPath(p)
|
||||
if prefix != "" {
|
||||
prefix += "/"
|
||||
}
|
||||
|
||||
// Query all paths under the prefix
|
||||
rows, err := m.db.Query(
|
||||
`SELECT path, content, mode, is_dir, mtime FROM `+m.table+` WHERE path LIKE ? OR path LIKE ?`,
|
||||
prefix+"%", prefix+"%",
|
||||
)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("sqlite.List", "query failed", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
seen := make(map[string]bool)
|
||||
var entries []fs.DirEntry
|
||||
|
||||
for rows.Next() {
|
||||
var rowPath string
|
||||
var content []byte
|
||||
var mode int
|
||||
var isDir bool
|
||||
var mtime time.Time
|
||||
if err := rows.Scan(&rowPath, &content, &mode, &isDir, &mtime); err != nil {
|
||||
return nil, coreerr.E("sqlite.List", "scan failed", err)
|
||||
}
|
||||
|
||||
rest := strings.TrimPrefix(rowPath, prefix)
|
||||
if rest == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if this is a direct child or nested
|
||||
if idx := strings.Index(rest, "/"); idx >= 0 {
|
||||
// Nested - register as a directory
|
||||
dirName := rest[:idx]
|
||||
if !seen[dirName] {
|
||||
seen[dirName] = true
|
||||
entries = append(entries, &dirEntry{
|
||||
name: dirName,
|
||||
isDir: true,
|
||||
mode: fs.ModeDir | 0755,
|
||||
info: &fileInfo{
|
||||
name: dirName,
|
||||
isDir: true,
|
||||
mode: fs.ModeDir | 0755,
|
||||
},
|
||||
})
|
||||
}
|
||||
} else {
|
||||
// Direct child
|
||||
if !seen[rest] {
|
||||
seen[rest] = true
|
||||
entries = append(entries, &dirEntry{
|
||||
name: rest,
|
||||
isDir: isDir,
|
||||
mode: fs.FileMode(mode),
|
||||
info: &fileInfo{
|
||||
name: rest,
|
||||
size: int64(len(content)),
|
||||
mode: fs.FileMode(mode),
|
||||
modTime: mtime,
|
||||
isDir: isDir,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return entries, rows.Err()
|
||||
}
|
||||
|
||||
// Stat returns file information for the given path.
|
||||
func (m *Medium) Stat(p string) (fs.FileInfo, error) {
|
||||
key := cleanPath(p)
|
||||
if key == "" {
|
||||
return nil, coreerr.E("sqlite.Stat", "path is required", os.ErrInvalid)
|
||||
}
|
||||
|
||||
var content []byte
|
||||
var mode int
|
||||
var isDir bool
|
||||
var mtime time.Time
|
||||
err := m.db.QueryRow(
|
||||
`SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, key,
|
||||
).Scan(&content, &mode, &isDir, &mtime)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, coreerr.E("sqlite.Stat", "path not found: "+key, os.ErrNotExist)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, coreerr.E("sqlite.Stat", "query failed: "+key, err)
|
||||
}
|
||||
|
||||
name := path.Base(key)
|
||||
return &fileInfo{
|
||||
name: name,
|
||||
size: int64(len(content)),
|
||||
mode: fs.FileMode(mode),
|
||||
modTime: mtime,
|
||||
isDir: isDir,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Open opens the named file for reading.
|
||||
func (m *Medium) Open(p string) (fs.File, error) {
|
||||
key := cleanPath(p)
|
||||
if key == "" {
|
||||
return nil, coreerr.E("sqlite.Open", "path is required", os.ErrInvalid)
|
||||
}
|
||||
|
||||
var content []byte
|
||||
var mode int
|
||||
var isDir bool
|
||||
var mtime time.Time
|
||||
err := m.db.QueryRow(
|
||||
`SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, key,
|
||||
).Scan(&content, &mode, &isDir, &mtime)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, coreerr.E("sqlite.Open", "file not found: "+key, os.ErrNotExist)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, coreerr.E("sqlite.Open", "query failed: "+key, err)
|
||||
}
|
||||
if isDir {
|
||||
return nil, coreerr.E("sqlite.Open", "path is a directory: "+key, os.ErrInvalid)
|
||||
}
|
||||
|
||||
return &sqliteFile{
|
||||
name: path.Base(key),
|
||||
content: content,
|
||||
mode: fs.FileMode(mode),
|
||||
modTime: mtime,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Create creates or truncates the named file.
|
||||
func (m *Medium) Create(p string) (goio.WriteCloser, error) {
|
||||
key := cleanPath(p)
|
||||
if key == "" {
|
||||
return nil, coreerr.E("sqlite.Create", "path is required", os.ErrInvalid)
|
||||
}
|
||||
return &sqliteWriteCloser{
|
||||
medium: m,
|
||||
path: key,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Append opens the named file for appending, creating it if it doesn't exist.
|
||||
func (m *Medium) Append(p string) (goio.WriteCloser, error) {
|
||||
key := cleanPath(p)
|
||||
if key == "" {
|
||||
return nil, coreerr.E("sqlite.Append", "path is required", os.ErrInvalid)
|
||||
}
|
||||
|
||||
var existing []byte
|
||||
err := m.db.QueryRow(
|
||||
`SELECT content FROM `+m.table+` WHERE path = ? AND is_dir = FALSE`, key,
|
||||
).Scan(&existing)
|
||||
if err != nil && err != sql.ErrNoRows {
|
||||
return nil, coreerr.E("sqlite.Append", "query failed: "+key, err)
|
||||
}
|
||||
|
||||
return &sqliteWriteCloser{
|
||||
medium: m,
|
||||
path: key,
|
||||
data: existing,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ReadStream returns a reader for the file content.
|
||||
func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) {
|
||||
key := cleanPath(p)
|
||||
if key == "" {
|
||||
return nil, coreerr.E("sqlite.ReadStream", "path is required", os.ErrInvalid)
|
||||
}
|
||||
|
||||
var content []byte
|
||||
var isDir bool
|
||||
err := m.db.QueryRow(
|
||||
`SELECT content, is_dir FROM `+m.table+` WHERE path = ?`, key,
|
||||
).Scan(&content, &isDir)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, coreerr.E("sqlite.ReadStream", "file not found: "+key, os.ErrNotExist)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, coreerr.E("sqlite.ReadStream", "query failed: "+key, err)
|
||||
}
|
||||
if isDir {
|
||||
return nil, coreerr.E("sqlite.ReadStream", "path is a directory: "+key, os.ErrInvalid)
|
||||
}
|
||||
|
||||
return goio.NopCloser(bytes.NewReader(content)), nil
|
||||
}
|
||||
|
||||
// WriteStream returns a writer for the file content. Content is stored on Close.
|
||||
func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) {
|
||||
return m.Create(p)
|
||||
}
|
||||
|
||||
// Exists checks if a path exists (file or directory).
|
||||
func (m *Medium) Exists(p string) bool {
|
||||
key := cleanPath(p)
|
||||
if key == "" {
|
||||
// Root always exists
|
||||
return true
|
||||
}
|
||||
|
||||
var count int
|
||||
err := m.db.QueryRow(
|
||||
`SELECT COUNT(*) FROM `+m.table+` WHERE path = ?`, key,
|
||||
).Scan(&count)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return count > 0
|
||||
}
|
||||
|
||||
// IsDir checks if a path exists and is a directory.
|
||||
func (m *Medium) IsDir(p string) bool {
|
||||
key := cleanPath(p)
|
||||
if key == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
var isDir bool
|
||||
err := m.db.QueryRow(
|
||||
`SELECT is_dir FROM `+m.table+` WHERE path = ?`, key,
|
||||
).Scan(&isDir)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return isDir
|
||||
}
|
||||
|
||||
// --- Internal types ---
|
||||
|
||||
// fileInfo implements fs.FileInfo for SQLite entries.
|
||||
type fileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode fs.FileMode
|
||||
modTime time.Time
|
||||
isDir bool
|
||||
}
|
||||
|
||||
func (fi *fileInfo) Name() string { return fi.name }
|
||||
func (fi *fileInfo) Size() int64 { return fi.size }
|
||||
func (fi *fileInfo) Mode() fs.FileMode { return fi.mode }
|
||||
func (fi *fileInfo) ModTime() time.Time { return fi.modTime }
|
||||
func (fi *fileInfo) IsDir() bool { return fi.isDir }
|
||||
func (fi *fileInfo) Sys() any { return nil }
|
||||
|
||||
// dirEntry implements fs.DirEntry for SQLite listings.
|
||||
type dirEntry struct {
|
||||
name string
|
||||
isDir bool
|
||||
mode fs.FileMode
|
||||
info fs.FileInfo
|
||||
}
|
||||
|
||||
func (de *dirEntry) Name() string { return de.name }
|
||||
func (de *dirEntry) IsDir() bool { return de.isDir }
|
||||
func (de *dirEntry) Type() fs.FileMode { return de.mode.Type() }
|
||||
func (de *dirEntry) Info() (fs.FileInfo, error) { return de.info, nil }
|
||||
|
||||
// sqliteFile implements fs.File for SQLite entries.
|
||||
type sqliteFile struct {
|
||||
name string
|
||||
content []byte
|
||||
offset int64
|
||||
mode fs.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
func (f *sqliteFile) Stat() (fs.FileInfo, error) {
|
||||
return &fileInfo{
|
||||
name: f.name,
|
||||
size: int64(len(f.content)),
|
||||
mode: f.mode,
|
||||
modTime: f.modTime,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *sqliteFile) Read(b []byte) (int, error) {
|
||||
if f.offset >= int64(len(f.content)) {
|
||||
return 0, goio.EOF
|
||||
}
|
||||
n := copy(b, f.content[f.offset:])
|
||||
f.offset += int64(n)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (f *sqliteFile) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// sqliteWriteCloser buffers writes and stores to SQLite on Close.
|
||||
type sqliteWriteCloser struct {
|
||||
medium *Medium
|
||||
path string
|
||||
data []byte
|
||||
}
|
||||
|
||||
func (w *sqliteWriteCloser) Write(p []byte) (int, error) {
|
||||
w.data = append(w.data, p...)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (w *sqliteWriteCloser) Close() error {
|
||||
_, err := w.medium.db.Exec(
|
||||
`INSERT INTO `+w.medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, 420, FALSE, ?)
|
||||
ON CONFLICT(path) DO UPDATE SET content = excluded.content, is_dir = FALSE, mtime = excluded.mtime`,
|
||||
w.path, w.data, time.Now().UTC(),
|
||||
)
|
||||
if err != nil {
|
||||
return coreerr.E("sqlite.WriteCloser.Close", "store failed: "+w.path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,653 +0,0 @@
|
|||
package sqlite
|
||||
|
||||
import (
|
||||
goio "io"
|
||||
"io/fs"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func newTestMedium(t *testing.T) *Medium {
|
||||
t.Helper()
|
||||
m, err := New(":memory:")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { m.Close() })
|
||||
return m
|
||||
}
|
||||
|
||||
// --- Constructor Tests ---
|
||||
|
||||
func TestNew_Good(t *testing.T) {
|
||||
m, err := New(":memory:")
|
||||
require.NoError(t, err)
|
||||
defer m.Close()
|
||||
assert.Equal(t, "files", m.table)
|
||||
}
|
||||
|
||||
func TestNew_Good_WithTable(t *testing.T) {
|
||||
m, err := New(":memory:", WithTable("custom"))
|
||||
require.NoError(t, err)
|
||||
defer m.Close()
|
||||
assert.Equal(t, "custom", m.table)
|
||||
}
|
||||
|
||||
func TestNew_Bad_EmptyPath(t *testing.T) {
|
||||
_, err := New("")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "database path is required")
|
||||
}
|
||||
|
||||
// --- Read/Write Tests ---
|
||||
|
||||
// TestReadWrite_Good round-trips a simple write followed by a read.
func TestReadWrite_Good(t *testing.T) {
	m := newTestMedium(t)

	err := m.Write("hello.txt", "world")
	require.NoError(t, err)

	content, err := m.Read("hello.txt")
	require.NoError(t, err)
	assert.Equal(t, "world", content)
}

// TestReadWrite_Good_Overwrite verifies a second Write replaces the content.
func TestReadWrite_Good_Overwrite(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("file.txt", "first"))
	require.NoError(t, m.Write("file.txt", "second"))

	content, err := m.Read("file.txt")
	require.NoError(t, err)
	assert.Equal(t, "second", content)
}

// TestReadWrite_Good_NestedPath verifies writes under nested directories work
// without an explicit EnsureDir.
func TestReadWrite_Good_NestedPath(t *testing.T) {
	m := newTestMedium(t)

	err := m.Write("a/b/c.txt", "nested")
	require.NoError(t, err)

	content, err := m.Read("a/b/c.txt")
	require.NoError(t, err)
	assert.Equal(t, "nested", content)
}

// TestRead_Bad_NotFound verifies reading a missing path errors.
func TestRead_Bad_NotFound(t *testing.T) {
	m := newTestMedium(t)

	_, err := m.Read("nonexistent.txt")
	assert.Error(t, err)
}

// TestRead_Bad_EmptyPath verifies reading an empty path errors.
func TestRead_Bad_EmptyPath(t *testing.T) {
	m := newTestMedium(t)

	_, err := m.Read("")
	assert.Error(t, err)
}

// TestWrite_Bad_EmptyPath verifies writing to an empty path errors.
func TestWrite_Bad_EmptyPath(t *testing.T) {
	m := newTestMedium(t)

	err := m.Write("", "content")
	assert.Error(t, err)
}

// TestRead_Bad_IsDirectory verifies Read rejects directory paths.
func TestRead_Bad_IsDirectory(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.EnsureDir("mydir"))
	_, err := m.Read("mydir")
	assert.Error(t, err)
}
|
||||
|
||||
// --- EnsureDir Tests ---
|
||||
|
||||
// TestEnsureDir_Good verifies EnsureDir creates a directory entry.
func TestEnsureDir_Good(t *testing.T) {
	m := newTestMedium(t)

	err := m.EnsureDir("mydir")
	require.NoError(t, err)
	assert.True(t, m.IsDir("mydir"))
}

// TestEnsureDir_Good_EmptyPath verifies EnsureDir("") is a no-op: the root
// always exists.
func TestEnsureDir_Good_EmptyPath(t *testing.T) {
	m := newTestMedium(t)
	// Root always exists, no-op
	err := m.EnsureDir("")
	assert.NoError(t, err)
}

// TestEnsureDir_Good_Idempotent verifies repeated EnsureDir calls succeed.
func TestEnsureDir_Good_Idempotent(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.EnsureDir("mydir"))
	require.NoError(t, m.EnsureDir("mydir"))
	assert.True(t, m.IsDir("mydir"))
}

// TestIsFile_Good verifies IsFile distinguishes files from directories,
// missing paths, and the empty path.
func TestIsFile_Good(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("file.txt", "content"))
	require.NoError(t, m.EnsureDir("mydir"))

	assert.True(t, m.IsFile("file.txt"))
	assert.False(t, m.IsFile("mydir"))
	assert.False(t, m.IsFile("nonexistent"))
	assert.False(t, m.IsFile(""))
}

// TestFileGetFileSet_Good round-trips the FileGet/FileSet convenience pair.
func TestFileGetFileSet_Good(t *testing.T) {
	m := newTestMedium(t)

	err := m.FileSet("key.txt", "value")
	require.NoError(t, err)

	val, err := m.FileGet("key.txt")
	require.NoError(t, err)
	assert.Equal(t, "value", val)
}
|
||||
|
||||
// --- Delete Tests ---
|
||||
|
||||
// TestDelete_Good verifies Delete removes a file.
func TestDelete_Good(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("to-delete.txt", "content"))
	assert.True(t, m.Exists("to-delete.txt"))

	err := m.Delete("to-delete.txt")
	require.NoError(t, err)
	assert.False(t, m.Exists("to-delete.txt"))
}

// TestDelete_Good_EmptyDir verifies Delete removes an empty directory.
func TestDelete_Good_EmptyDir(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.EnsureDir("emptydir"))
	assert.True(t, m.IsDir("emptydir"))

	err := m.Delete("emptydir")
	require.NoError(t, err)
	assert.False(t, m.IsDir("emptydir"))
}

// TestDelete_Bad_NotFound verifies deleting a missing path errors.
func TestDelete_Bad_NotFound(t *testing.T) {
	m := newTestMedium(t)

	err := m.Delete("nonexistent")
	assert.Error(t, err)
}

// TestDelete_Bad_EmptyPath verifies deleting the empty path errors.
func TestDelete_Bad_EmptyPath(t *testing.T) {
	m := newTestMedium(t)

	err := m.Delete("")
	assert.Error(t, err)
}

// TestDelete_Bad_NotEmpty verifies Delete refuses a non-empty directory
// (DeleteAll is the recursive variant).
func TestDelete_Bad_NotEmpty(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.EnsureDir("mydir"))
	require.NoError(t, m.Write("mydir/file.txt", "content"))

	err := m.Delete("mydir")
	assert.Error(t, err)
}

// TestDeleteAll_Good verifies DeleteAll removes a subtree and leaves
// siblings untouched.
func TestDeleteAll_Good(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("dir/file1.txt", "a"))
	require.NoError(t, m.Write("dir/sub/file2.txt", "b"))
	require.NoError(t, m.Write("other.txt", "c"))

	err := m.DeleteAll("dir")
	require.NoError(t, err)

	assert.False(t, m.Exists("dir/file1.txt"))
	assert.False(t, m.Exists("dir/sub/file2.txt"))
	assert.True(t, m.Exists("other.txt"))
}

// TestDeleteAll_Good_SingleFile verifies DeleteAll also works on a plain file.
func TestDeleteAll_Good_SingleFile(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("file.txt", "content"))

	err := m.DeleteAll("file.txt")
	require.NoError(t, err)
	assert.False(t, m.Exists("file.txt"))
}

// TestDeleteAll_Bad_NotFound verifies DeleteAll errors on a missing path.
func TestDeleteAll_Bad_NotFound(t *testing.T) {
	m := newTestMedium(t)

	err := m.DeleteAll("nonexistent")
	assert.Error(t, err)
}

// TestDeleteAll_Bad_EmptyPath verifies DeleteAll errors on the empty path.
func TestDeleteAll_Bad_EmptyPath(t *testing.T) {
	m := newTestMedium(t)

	err := m.DeleteAll("")
	assert.Error(t, err)
}

// TestRename_Good verifies Rename moves a file and preserves its content.
func TestRename_Good(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("old.txt", "content"))

	err := m.Rename("old.txt", "new.txt")
	require.NoError(t, err)

	assert.False(t, m.Exists("old.txt"))
	assert.True(t, m.IsFile("new.txt"))

	content, err := m.Read("new.txt")
	require.NoError(t, err)
	assert.Equal(t, "content", content)
}

// TestRename_Good_Directory verifies Rename moves a directory together with
// its children.
func TestRename_Good_Directory(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.EnsureDir("olddir"))
	require.NoError(t, m.Write("olddir/file.txt", "content"))

	err := m.Rename("olddir", "newdir")
	require.NoError(t, err)

	assert.False(t, m.Exists("olddir"))
	assert.False(t, m.Exists("olddir/file.txt"))
	assert.True(t, m.IsDir("newdir"))
	assert.True(t, m.IsFile("newdir/file.txt"))

	content, err := m.Read("newdir/file.txt")
	require.NoError(t, err)
	assert.Equal(t, "content", content)
}

// TestRename_Bad_SourceNotFound verifies Rename errors when the source is
// missing.
func TestRename_Bad_SourceNotFound(t *testing.T) {
	m := newTestMedium(t)

	err := m.Rename("nonexistent", "new")
	assert.Error(t, err)
}

// TestRename_Bad_EmptyPath verifies Rename rejects empty source or target.
func TestRename_Bad_EmptyPath(t *testing.T) {
	m := newTestMedium(t)

	err := m.Rename("", "new")
	assert.Error(t, err)

	err = m.Rename("old", "")
	assert.Error(t, err)
}
|
||||
|
||||
// --- List Tests ---
|
||||
|
||||
// TestList_Good verifies List returns immediate children only: files plus
// one entry per sub-directory (not its contents).
func TestList_Good(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("dir/file1.txt", "a"))
	require.NoError(t, m.Write("dir/file2.txt", "b"))
	require.NoError(t, m.Write("dir/sub/file3.txt", "c"))

	entries, err := m.List("dir")
	require.NoError(t, err)

	names := make(map[string]bool)
	for _, e := range entries {
		names[e.Name()] = true
	}

	assert.True(t, names["file1.txt"])
	assert.True(t, names["file2.txt"])
	assert.True(t, names["sub"])
	assert.Len(t, entries, 3)
}

// TestList_Good_Root verifies the empty path lists the root directory.
func TestList_Good_Root(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("root.txt", "content"))
	require.NoError(t, m.Write("dir/nested.txt", "nested"))

	entries, err := m.List("")
	require.NoError(t, err)

	names := make(map[string]bool)
	for _, e := range entries {
		names[e.Name()] = true
	}

	assert.True(t, names["root.txt"])
	assert.True(t, names["dir"])
}

// TestList_Good_DirectoryEntry verifies directory entries report IsDir both
// on the DirEntry and on its FileInfo.
func TestList_Good_DirectoryEntry(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("dir/sub/file.txt", "content"))

	entries, err := m.List("dir")
	require.NoError(t, err)

	require.Len(t, entries, 1)
	assert.Equal(t, "sub", entries[0].Name())
	assert.True(t, entries[0].IsDir())

	info, err := entries[0].Info()
	require.NoError(t, err)
	assert.True(t, info.IsDir())
}

// TestStat_Good verifies Stat reports name, byte size, and file-ness.
func TestStat_Good(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("file.txt", "hello world"))

	info, err := m.Stat("file.txt")
	require.NoError(t, err)
	assert.Equal(t, "file.txt", info.Name())
	assert.Equal(t, int64(11), info.Size())
	assert.False(t, info.IsDir())
}

// TestStat_Good_Directory verifies Stat on a directory reports IsDir.
func TestStat_Good_Directory(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.EnsureDir("mydir"))

	info, err := m.Stat("mydir")
	require.NoError(t, err)
	assert.Equal(t, "mydir", info.Name())
	assert.True(t, info.IsDir())
}

// TestStat_Bad_NotFound verifies Stat errors on a missing path.
func TestStat_Bad_NotFound(t *testing.T) {
	m := newTestMedium(t)

	_, err := m.Stat("nonexistent")
	assert.Error(t, err)
}

// TestStat_Bad_EmptyPath verifies Stat errors on the empty path.
func TestStat_Bad_EmptyPath(t *testing.T) {
	m := newTestMedium(t)

	_, err := m.Stat("")
	assert.Error(t, err)
}
|
||||
|
||||
// --- Open Tests ---
|
||||
|
||||
// TestOpen_Good verifies Open yields an fs.File whose contents and Stat
// match the stored file.
func TestOpen_Good(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("file.txt", "open me"))

	f, err := m.Open("file.txt")
	require.NoError(t, err)
	defer f.Close()

	data, err := goio.ReadAll(f.(goio.Reader))
	require.NoError(t, err)
	assert.Equal(t, "open me", string(data))

	stat, err := f.Stat()
	require.NoError(t, err)
	assert.Equal(t, "file.txt", stat.Name())
}

// TestOpen_Bad_NotFound verifies Open errors on a missing path.
func TestOpen_Bad_NotFound(t *testing.T) {
	m := newTestMedium(t)

	_, err := m.Open("nonexistent.txt")
	assert.Error(t, err)
}

// TestOpen_Bad_IsDirectory verifies Open rejects directory paths.
func TestOpen_Bad_IsDirectory(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.EnsureDir("mydir"))
	_, err := m.Open("mydir")
	assert.Error(t, err)
}

// TestCreate_Good verifies the Create writer persists data on Close.
func TestCreate_Good(t *testing.T) {
	m := newTestMedium(t)

	w, err := m.Create("new.txt")
	require.NoError(t, err)

	n, err := w.Write([]byte("created"))
	require.NoError(t, err)
	assert.Equal(t, 7, n)

	err = w.Close()
	require.NoError(t, err)

	content, err := m.Read("new.txt")
	require.NoError(t, err)
	assert.Equal(t, "created", content)
}

// TestCreate_Good_Overwrite verifies Create truncates existing content.
func TestCreate_Good_Overwrite(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("file.txt", "old content"))

	w, err := m.Create("file.txt")
	require.NoError(t, err)
	_, err = w.Write([]byte("new"))
	require.NoError(t, err)
	require.NoError(t, w.Close())

	content, err := m.Read("file.txt")
	require.NoError(t, err)
	assert.Equal(t, "new", content)
}

// TestCreate_Bad_EmptyPath verifies Create rejects the empty path.
func TestCreate_Bad_EmptyPath(t *testing.T) {
	m := newTestMedium(t)

	_, err := m.Create("")
	assert.Error(t, err)
}

// TestAppend_Good verifies Append adds to existing content.
func TestAppend_Good(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("append.txt", "hello"))

	w, err := m.Append("append.txt")
	require.NoError(t, err)

	_, err = w.Write([]byte(" world"))
	require.NoError(t, err)
	require.NoError(t, w.Close())

	content, err := m.Read("append.txt")
	require.NoError(t, err)
	assert.Equal(t, "hello world", content)
}

// TestAppend_Good_NewFile verifies Append creates the file when missing.
func TestAppend_Good_NewFile(t *testing.T) {
	m := newTestMedium(t)

	w, err := m.Append("new.txt")
	require.NoError(t, err)

	_, err = w.Write([]byte("fresh"))
	require.NoError(t, err)
	require.NoError(t, w.Close())

	content, err := m.Read("new.txt")
	require.NoError(t, err)
	assert.Equal(t, "fresh", content)
}

// TestAppend_Bad_EmptyPath verifies Append rejects the empty path.
func TestAppend_Bad_EmptyPath(t *testing.T) {
	m := newTestMedium(t)

	_, err := m.Append("")
	assert.Error(t, err)
}
|
||||
|
||||
// --- ReadStream Tests ---
|
||||
|
||||
// TestReadStream_Good verifies ReadStream yields a ReadCloser over the
// stored content.
func TestReadStream_Good(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("stream.txt", "streaming content"))

	reader, err := m.ReadStream("stream.txt")
	require.NoError(t, err)
	defer reader.Close()

	data, err := goio.ReadAll(reader)
	require.NoError(t, err)
	assert.Equal(t, "streaming content", string(data))
}

// TestReadStream_Bad_NotFound verifies ReadStream errors on a missing path.
func TestReadStream_Bad_NotFound(t *testing.T) {
	m := newTestMedium(t)

	_, err := m.ReadStream("nonexistent.txt")
	assert.Error(t, err)
}

// TestReadStream_Bad_IsDirectory verifies ReadStream rejects directories.
func TestReadStream_Bad_IsDirectory(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.EnsureDir("mydir"))
	_, err := m.ReadStream("mydir")
	assert.Error(t, err)
}

// TestWriteStream_Good verifies data piped into a WriteStream writer is
// persisted on Close.
func TestWriteStream_Good(t *testing.T) {
	m := newTestMedium(t)

	writer, err := m.WriteStream("output.txt")
	require.NoError(t, err)

	_, err = goio.Copy(writer, strings.NewReader("piped data"))
	require.NoError(t, err)
	require.NoError(t, writer.Close())

	content, err := m.Read("output.txt")
	require.NoError(t, err)
	assert.Equal(t, "piped data", content)
}

// TestExists_Good verifies Exists covers files, directories, and missing
// paths.
func TestExists_Good(t *testing.T) {
	m := newTestMedium(t)

	assert.False(t, m.Exists("nonexistent"))

	require.NoError(t, m.Write("file.txt", "content"))
	assert.True(t, m.Exists("file.txt"))

	require.NoError(t, m.EnsureDir("mydir"))
	assert.True(t, m.Exists("mydir"))
}

// TestExists_Good_EmptyPath verifies the root (empty path) always exists.
func TestExists_Good_EmptyPath(t *testing.T) {
	m := newTestMedium(t)
	// Root always exists
	assert.True(t, m.Exists(""))
}

// TestIsDir_Good verifies IsDir distinguishes directories from files,
// missing paths, and the empty path.
func TestIsDir_Good(t *testing.T) {
	m := newTestMedium(t)

	require.NoError(t, m.Write("file.txt", "content"))
	require.NoError(t, m.EnsureDir("mydir"))

	assert.True(t, m.IsDir("mydir"))
	assert.False(t, m.IsDir("file.txt"))
	assert.False(t, m.IsDir("nonexistent"))
	assert.False(t, m.IsDir(""))
}
|
||||
|
||||
// --- cleanPath Tests ---
|
||||
|
||||
// TestCleanPath_Good verifies cleanPath normalizes paths: it strips leading
// slashes, resolves ".." components (clamping escapes to the root), and maps
// the root spellings ("", ".", "/") to the empty string.
func TestCleanPath_Good(t *testing.T) {
	assert.Equal(t, "file.txt", cleanPath("file.txt"))
	assert.Equal(t, "dir/file.txt", cleanPath("dir/file.txt"))
	assert.Equal(t, "file.txt", cleanPath("/file.txt"))
	assert.Equal(t, "file.txt", cleanPath("../file.txt"))
	assert.Equal(t, "file.txt", cleanPath("dir/../file.txt"))
	assert.Equal(t, "", cleanPath(""))
	assert.Equal(t, "", cleanPath("."))
	assert.Equal(t, "", cleanPath("/"))
}
|
||||
|
||||
// --- Interface Compliance ---
|
||||
|
||||
// TestInterfaceCompliance_Ugly is a compile-time shape assertion: assigning
// m to the anonymous interface fails to build if *Medium drops or changes
// any of these methods. The test body performs no runtime checks.
func TestInterfaceCompliance_Ugly(t *testing.T) {
	m := newTestMedium(t)

	// Verify all methods exist by asserting the interface shape.
	var _ interface {
		Read(string) (string, error)
		Write(string, string) error
		EnsureDir(string) error
		IsFile(string) bool
		FileGet(string) (string, error)
		FileSet(string, string) error
		Delete(string) error
		DeleteAll(string) error
		Rename(string, string) error
		List(string) ([]fs.DirEntry, error)
		Stat(string) (fs.FileInfo, error)
		Open(string) (fs.File, error)
		Create(string) (goio.WriteCloser, error)
		Append(string) (goio.WriteCloser, error)
		ReadStream(string) (goio.ReadCloser, error)
		WriteStream(string) (goio.WriteCloser, error)
		Exists(string) bool
		IsDir(string) bool
	} = m
}
|
||||
|
||||
// --- Custom Table ---
|
||||
|
||||
// TestCustomTable_Good verifies a medium configured with a custom table name
// still round-trips reads and writes.
func TestCustomTable_Good(t *testing.T) {
	m, err := New(":memory:", WithTable("my_files"))
	require.NoError(t, err)
	defer m.Close()

	require.NoError(t, m.Write("file.txt", "content"))

	content, err := m.Read("file.txt")
	require.NoError(t, err)
	assert.Equal(t, "content", content)
}
|
||||
|
|
@ -1,260 +0,0 @@
|
|||
// Package log provides structured logging and error handling for Core applications.
|
||||
//
|
||||
// This file implements structured error types and combined log-and-return helpers
|
||||
// that simplify common error handling patterns.
|
||||
|
||||
package log
|
||||
|
||||
import (
	"errors"
	"fmt"
	"strings"
)
|
||||
|
||||
// Err represents a structured error with operational context.
// It implements the error interface and supports unwrapping with
// errors.Is / errors.As.
type Err struct {
	Op   string // Operation being performed (e.g., "user.Save")
	Msg  string // Human-readable message
	Err  error  // Underlying error (optional)
	Code string // Error code (optional, e.g., "VALIDATION_FAILED")
}

// Error implements the error interface. The rendered forms are:
//
//	"<op>: <msg>"                    (no code, no cause)
//	"<op>: <msg> [<code>]"           (code, no cause)
//	"<op>: <msg>: <cause>"           (cause, no code)
//	"<op>: <msg> [<code>]: <cause>"  (both)
//
// When Op is empty the "<op>: " prefix is omitted entirely.
func (e *Err) Error() string {
	var prefix string
	if e.Op != "" {
		prefix = e.Op + ": "
	}
	if e.Err != nil {
		if e.Code != "" {
			return fmt.Sprintf("%s%s [%s]: %v", prefix, e.Msg, e.Code, e.Err)
		}
		return fmt.Sprintf("%s%s: %v", prefix, e.Msg, e.Err)
	}
	if e.Code != "" {
		return fmt.Sprintf("%s%s [%s]", prefix, e.Msg, e.Code)
	}
	return fmt.Sprintf("%s%s", prefix, e.Msg)
}

// Unwrap returns the underlying error for use with errors.Is and errors.As.
func (e *Err) Unwrap() error {
	return e.Err
}

// --- Error Creation Functions ---

// E creates a new Err with operation context.
// The underlying error can be nil for creating errors without a cause.
//
// Example:
//
//	return log.E("user.Save", "failed to save user", err)
//	return log.E("api.Call", "rate limited", nil) // No underlying cause
func E(op, msg string, err error) error {
	return &Err{Op: op, Msg: msg, Err: err}
}

// Wrap wraps an error with operation context.
// Returns nil if err is nil, to support conditional wrapping.
// Preserves the error Code if the wrapped error (anywhere in its chain)
// is an *Err with a non-empty Code.
//
// Example:
//
//	return log.Wrap(err, "db.Query", "database query failed")
func Wrap(err error, op, msg string) error {
	if err == nil {
		return nil
	}
	// Preserve Code from wrapped *Err
	var logErr *Err
	if As(err, &logErr) && logErr.Code != "" {
		return &Err{Op: op, Msg: msg, Err: err, Code: logErr.Code}
	}
	return &Err{Op: op, Msg: msg, Err: err}
}

// WrapCode wraps an error with operation context and error code.
// Returns nil only if both err is nil AND code is empty.
// Useful for API errors that need machine-readable codes.
//
// Example:
//
//	return log.WrapCode(err, "VALIDATION_ERROR", "user.Validate", "invalid email")
func WrapCode(err error, code, op, msg string) error {
	if err == nil && code == "" {
		return nil
	}
	return &Err{Op: op, Msg: msg, Err: err, Code: code}
}

// NewCode creates an error with just code and message (no underlying error).
// Useful for creating sentinel errors with codes.
//
// Example:
//
//	var ErrNotFound = log.NewCode("NOT_FOUND", "resource not found")
func NewCode(code, msg string) error {
	return &Err{Msg: msg, Code: code}
}

// --- Standard Library Wrappers ---

// Is reports whether any error in err's tree matches target.
// Wrapper around errors.Is for convenience.
func Is(err, target error) bool {
	return errors.Is(err, target)
}

// As finds the first error in err's tree that matches target.
// Wrapper around errors.As for convenience.
func As(err error, target any) bool {
	return errors.As(err, target)
}

// NewError creates a simple error with the given text.
// Wrapper around errors.New for convenience.
func NewError(text string) error {
	return errors.New(text)
}

// Join combines multiple errors into one.
// Wrapper around errors.Join for convenience.
func Join(errs ...error) error {
	return errors.Join(errs...)
}

// --- Error Introspection Helpers ---

// Op extracts the operation name from an error.
// Returns empty string if the error is not an *Err.
func Op(err error) string {
	var e *Err
	if As(err, &e) {
		return e.Op
	}
	return ""
}

// ErrCode extracts the error code from an error.
// Returns empty string if the error is not an *Err or has no code.
func ErrCode(err error) string {
	var e *Err
	if As(err, &e) {
		return e.Code
	}
	return ""
}

// Message extracts the message from an error.
// Returns the error's Error() string if not an *Err, and "" for nil.
func Message(err error) string {
	if err == nil {
		return ""
	}
	var e *Err
	if As(err, &e) {
		return e.Msg
	}
	return err.Error()
}

// Root returns the root cause of an error chain.
// Unwraps until no more wrapped errors are found. Returns nil for nil input.
func Root(err error) error {
	if err == nil {
		return nil
	}
	for {
		unwrapped := errors.Unwrap(err)
		if unwrapped == nil {
			return err
		}
		err = unwrapped
	}
}

// StackTrace returns the logical stack trace (chain of operations) from an
// error. It returns an empty slice if no operational context is found.
//
// Note: only direct *Err values are inspected at each level (deliberate use
// of a type assertion, not errors.As, so each chain link contributes at most
// its own Op).
func StackTrace(err error) []string {
	var stack []string
	for err != nil {
		if e, ok := err.(*Err); ok {
			if e.Op != "" {
				stack = append(stack, e.Op)
			}
		}
		err = errors.Unwrap(err)
	}
	return stack
}

// FormatStackTrace returns a pretty-printed logical stack trace, e.g.
// "outer.Op -> inner.Op". Returns "" when the chain carries no ops.
func FormatStackTrace(err error) string {
	// strings.Join returns "" for an empty slice, matching the previous
	// manual-concatenation behavior, without the quadratic += loop.
	return strings.Join(StackTrace(err), " -> ")
}
|
||||
|
||||
// --- Combined Log-and-Return Helpers ---
|
||||
|
||||
// LogError logs an error at Error level and returns a wrapped error.
// Reduces boilerplate in error handling paths. Nil errors are passed
// through without logging.
//
// Example:
//
//	// Before
//	if err != nil {
//	    log.Error("failed to save", "err", err)
//	    return errors.Wrap(err, "user.Save", "failed to save")
//	}
//
//	// After
//	if err != nil {
//	    return log.LogError(err, "user.Save", "failed to save")
//	}
func LogError(err error, op, msg string) error {
	if err == nil {
		return nil
	}
	wrapped := Wrap(err, op, msg)
	defaultLogger.Error(msg, "op", op, "err", err)
	return wrapped
}

// LogWarn logs at Warn level and returns a wrapped error.
// Use for recoverable errors that should be logged but not treated as
// critical. Nil errors are passed through without logging.
//
// Example:
//
//	return log.LogWarn(err, "cache.Get", "cache miss, falling back to db")
func LogWarn(err error, op, msg string) error {
	if err == nil {
		return nil
	}
	wrapped := Wrap(err, op, msg)
	defaultLogger.Warn(msg, "op", op, "err", err)
	return wrapped
}

// Must panics if err is not nil, logging at Error level first.
// Use for errors that should never happen and indicate programmer error
// (e.g. startup wiring); do not use on ordinary runtime failure paths.
//
// Example:
//
//	log.Must(Initialize(), "app", "startup failed")
func Must(err error, op, msg string) {
	if err != nil {
		defaultLogger.Error(msg, "op", op, "err", err)
		panic(Wrap(err, op, msg))
	}
}
|
||||
|
|
@ -1,349 +0,0 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// --- Err Type Tests ---
|
||||
|
||||
// TestErr_Error_Good verifies the four rendered forms of Err.Error when an
// Op prefix is present.
func TestErr_Error_Good(t *testing.T) {
	// With underlying error
	err := &Err{Op: "db.Query", Msg: "failed to query", Err: errors.New("connection refused")}
	assert.Equal(t, "db.Query: failed to query: connection refused", err.Error())

	// With code
	err = &Err{Op: "api.Call", Msg: "request failed", Code: "TIMEOUT"}
	assert.Equal(t, "api.Call: request failed [TIMEOUT]", err.Error())

	// With both underlying error and code
	err = &Err{Op: "user.Save", Msg: "save failed", Err: errors.New("duplicate key"), Code: "DUPLICATE"}
	assert.Equal(t, "user.Save: save failed [DUPLICATE]: duplicate key", err.Error())

	// Just op and msg
	err = &Err{Op: "cache.Get", Msg: "miss"}
	assert.Equal(t, "cache.Get: miss", err.Error())
}

// TestErr_Error_EmptyOp_Good verifies rendering without an Op: no leading
// colon or prefix is emitted.
func TestErr_Error_EmptyOp_Good(t *testing.T) {
	// No Op - should not have leading colon
	err := &Err{Msg: "just a message"}
	assert.Equal(t, "just a message", err.Error())

	// No Op with code
	err = &Err{Msg: "error with code", Code: "ERR_CODE"}
	assert.Equal(t, "error with code [ERR_CODE]", err.Error())

	// No Op with underlying error
	err = &Err{Msg: "wrapped", Err: errors.New("underlying")}
	assert.Equal(t, "wrapped: underlying", err.Error())
}

// TestErr_Unwrap_Good verifies Unwrap exposes the cause to errors.Unwrap
// and errors.Is.
func TestErr_Unwrap_Good(t *testing.T) {
	underlying := errors.New("underlying error")
	err := &Err{Op: "test", Msg: "wrapped", Err: underlying}

	assert.Equal(t, underlying, errors.Unwrap(err))
	assert.True(t, errors.Is(err, underlying))
}
|
||||
|
||||
// --- Error Creation Function Tests ---
|
||||
|
||||
// TestE_Good verifies E populates Op, Msg, and the underlying error.
func TestE_Good(t *testing.T) {
	underlying := errors.New("base error")
	err := E("op.Name", "something failed", underlying)

	assert.NotNil(t, err)
	var logErr *Err
	assert.True(t, errors.As(err, &logErr))
	assert.Equal(t, "op.Name", logErr.Op)
	assert.Equal(t, "something failed", logErr.Msg)
	assert.Equal(t, underlying, logErr.Err)
}

// TestE_Good_NilError verifies E still creates an error when the cause is nil.
func TestE_Good_NilError(t *testing.T) {
	// E creates an error even with nil underlying - useful for errors without causes
	err := E("op.Name", "message", nil)
	assert.NotNil(t, err)
	assert.Equal(t, "op.Name: message", err.Error())
}

// TestWrap_Good verifies Wrap adds context while keeping errors.Is working.
func TestWrap_Good(t *testing.T) {
	underlying := errors.New("base")
	err := Wrap(underlying, "handler.Process", "processing failed")

	assert.NotNil(t, err)
	assert.Contains(t, err.Error(), "handler.Process")
	assert.Contains(t, err.Error(), "processing failed")
	assert.True(t, errors.Is(err, underlying))
}

// TestWrap_PreservesCode_Good verifies Wrap lifts the Code of an inner *Err
// onto the new wrapper.
func TestWrap_PreservesCode_Good(t *testing.T) {
	// Create an error with a code
	inner := WrapCode(errors.New("base"), "VALIDATION_ERROR", "inner.Op", "validation failed")

	// Wrap it - should preserve the code
	outer := Wrap(inner, "outer.Op", "outer context")

	assert.NotNil(t, outer)
	assert.Equal(t, "VALIDATION_ERROR", ErrCode(outer))
	assert.Contains(t, outer.Error(), "[VALIDATION_ERROR]")
}

// TestWrap_NilError_Good verifies Wrap passes nil through.
func TestWrap_NilError_Good(t *testing.T) {
	err := Wrap(nil, "op", "msg")
	assert.Nil(t, err)
}

// TestWrapCode_Good verifies WrapCode sets Op, Code, and the rendered form.
func TestWrapCode_Good(t *testing.T) {
	underlying := errors.New("validation failed")
	err := WrapCode(underlying, "INVALID_INPUT", "api.Validate", "bad request")

	assert.NotNil(t, err)
	var logErr *Err
	assert.True(t, errors.As(err, &logErr))
	assert.Equal(t, "INVALID_INPUT", logErr.Code)
	assert.Equal(t, "api.Validate", logErr.Op)
	assert.Contains(t, err.Error(), "[INVALID_INPUT]")
}

// TestWrapCode_Good_NilError verifies WrapCode returns nil only when both
// the cause and the code are empty.
func TestWrapCode_Good_NilError(t *testing.T) {
	// WrapCode with nil error but with code still creates an error
	err := WrapCode(nil, "CODE", "op", "msg")
	assert.NotNil(t, err)
	assert.Contains(t, err.Error(), "[CODE]")

	// Only returns nil when both error and code are empty
	err = WrapCode(nil, "", "op", "msg")
	assert.Nil(t, err)
}

// TestNewCode_Good verifies NewCode creates a code-only sentinel with no
// underlying cause.
func TestNewCode_Good(t *testing.T) {
	err := NewCode("NOT_FOUND", "resource not found")

	var logErr *Err
	assert.True(t, errors.As(err, &logErr))
	assert.Equal(t, "NOT_FOUND", logErr.Code)
	assert.Equal(t, "resource not found", logErr.Msg)
	assert.Nil(t, logErr.Err)
}
|
||||
|
||||
// --- Standard Library Wrapper Tests ---
|
||||
|
||||
// TestIs_Good verifies the Is convenience wrapper matches through wrapping.
func TestIs_Good(t *testing.T) {
	sentinel := errors.New("sentinel")
	wrapped := Wrap(sentinel, "test", "wrapped")

	assert.True(t, Is(wrapped, sentinel))
	assert.False(t, Is(wrapped, errors.New("other")))
}

// TestAs_Good verifies the As convenience wrapper extracts *Err.
func TestAs_Good(t *testing.T) {
	err := E("test.Op", "message", errors.New("base"))

	var logErr *Err
	assert.True(t, As(err, &logErr))
	assert.Equal(t, "test.Op", logErr.Op)
}

// TestNewError_Good verifies the errors.New passthrough.
func TestNewError_Good(t *testing.T) {
	err := NewError("simple error")
	assert.NotNil(t, err)
	assert.Equal(t, "simple error", err.Error())
}

// TestJoin_Good verifies Join combines errors so both match via errors.Is.
func TestJoin_Good(t *testing.T) {
	err1 := errors.New("error 1")
	err2 := errors.New("error 2")
	joined := Join(err1, err2)

	assert.True(t, errors.Is(joined, err1))
	assert.True(t, errors.Is(joined, err2))
}
|
||||
|
||||
// --- Helper Function Tests ---
|
||||
|
||||
// TestOp_Good verifies Op extracts the operation from an *Err.
func TestOp_Good(t *testing.T) {
	err := E("mypackage.MyFunc", "failed", errors.New("cause"))
	assert.Equal(t, "mypackage.MyFunc", Op(err))
}

// TestOp_Good_NotLogError verifies Op is "" for non-*Err errors.
func TestOp_Good_NotLogError(t *testing.T) {
	err := errors.New("plain error")
	assert.Equal(t, "", Op(err))
}

// TestErrCode_Good verifies ErrCode extracts a set code.
func TestErrCode_Good(t *testing.T) {
	err := WrapCode(errors.New("base"), "ERR_CODE", "op", "msg")
	assert.Equal(t, "ERR_CODE", ErrCode(err))
}

// TestErrCode_Good_NoCode verifies ErrCode is "" when no code was set.
func TestErrCode_Good_NoCode(t *testing.T) {
	err := E("op", "msg", errors.New("base"))
	assert.Equal(t, "", ErrCode(err))
}

// TestMessage_Good verifies Message returns the *Err Msg field.
func TestMessage_Good(t *testing.T) {
	err := E("op", "the message", errors.New("base"))
	assert.Equal(t, "the message", Message(err))
}

// TestMessage_Good_PlainError verifies Message falls back to Error() for
// non-*Err errors.
func TestMessage_Good_PlainError(t *testing.T) {
	err := errors.New("plain message")
	assert.Equal(t, "plain message", Message(err))
}

// TestMessage_Good_Nil verifies Message(nil) is "".
func TestMessage_Good_Nil(t *testing.T) {
	assert.Equal(t, "", Message(nil))
}

// TestRoot_Good verifies Root unwraps a multi-level chain to its cause.
func TestRoot_Good(t *testing.T) {
	root := errors.New("root cause")
	level1 := Wrap(root, "level1", "wrapped once")
	level2 := Wrap(level1, "level2", "wrapped twice")

	assert.Equal(t, root, Root(level2))
}

// TestRoot_Good_SingleError verifies an unwrapped error is its own root.
func TestRoot_Good_SingleError(t *testing.T) {
	err := errors.New("single")
	assert.Equal(t, err, Root(err))
}

// TestRoot_Good_Nil verifies Root(nil) is nil.
func TestRoot_Good_Nil(t *testing.T) {
	assert.Nil(t, Root(nil))
}
|
||||
|
||||
// --- Log-and-Return Helper Tests ---
|
||||
|
||||
func TestLogError_Good(t *testing.T) {
|
||||
// Capture log output
|
||||
var buf bytes.Buffer
|
||||
logger := New(Options{Level: LevelDebug, Output: &buf})
|
||||
SetDefault(logger)
|
||||
defer SetDefault(New(Options{Level: LevelInfo}))
|
||||
|
||||
underlying := errors.New("connection failed")
|
||||
err := LogError(underlying, "db.Connect", "database unavailable")
|
||||
|
||||
// Check returned error
|
||||
assert.NotNil(t, err)
|
||||
assert.Contains(t, err.Error(), "db.Connect")
|
||||
assert.Contains(t, err.Error(), "database unavailable")
|
||||
assert.True(t, errors.Is(err, underlying))
|
||||
|
||||
// Check log output
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "[ERR]")
|
||||
assert.Contains(t, output, "database unavailable")
|
||||
assert.Contains(t, output, "op=db.Connect")
|
||||
}
|
||||
|
||||
func TestLogError_Good_NilError(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
logger := New(Options{Level: LevelDebug, Output: &buf})
|
||||
SetDefault(logger)
|
||||
defer SetDefault(New(Options{Level: LevelInfo}))
|
||||
|
||||
err := LogError(nil, "op", "msg")
|
||||
assert.Nil(t, err)
|
||||
assert.Empty(t, buf.String()) // No log output for nil error
|
||||
}
|
||||
|
||||
func TestLogWarn_Good(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
logger := New(Options{Level: LevelDebug, Output: &buf})
|
||||
SetDefault(logger)
|
||||
defer SetDefault(New(Options{Level: LevelInfo}))
|
||||
|
||||
underlying := errors.New("cache miss")
|
||||
err := LogWarn(underlying, "cache.Get", "falling back to db")
|
||||
|
||||
assert.NotNil(t, err)
|
||||
assert.True(t, errors.Is(err, underlying))
|
||||
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "[WRN]")
|
||||
assert.Contains(t, output, "falling back to db")
|
||||
}
|
||||
|
||||
func TestLogWarn_Good_NilError(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
logger := New(Options{Level: LevelDebug, Output: &buf})
|
||||
SetDefault(logger)
|
||||
defer SetDefault(New(Options{Level: LevelInfo}))
|
||||
|
||||
err := LogWarn(nil, "op", "msg")
|
||||
assert.Nil(t, err)
|
||||
assert.Empty(t, buf.String())
|
||||
}
|
||||
|
||||
func TestMust_Good_NoError(t *testing.T) {
|
||||
// Should not panic when error is nil
|
||||
assert.NotPanics(t, func() {
|
||||
Must(nil, "test", "should not panic")
|
||||
})
|
||||
}
|
||||
|
||||
func TestMust_Ugly_Panics(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
logger := New(Options{Level: LevelDebug, Output: &buf})
|
||||
SetDefault(logger)
|
||||
defer SetDefault(New(Options{Level: LevelInfo}))
|
||||
|
||||
assert.Panics(t, func() {
|
||||
Must(errors.New("fatal error"), "startup", "initialization failed")
|
||||
})
|
||||
|
||||
// Verify error was logged before panic
|
||||
output := buf.String()
|
||||
assert.True(t, strings.Contains(output, "[ERR]") || len(output) > 0)
|
||||
}
|
||||
|
||||
func TestStackTrace_Good(t *testing.T) {
|
||||
// Nested operations
|
||||
err := E("op1", "msg1", nil)
|
||||
err = Wrap(err, "op2", "msg2")
|
||||
err = Wrap(err, "op3", "msg3")
|
||||
|
||||
stack := StackTrace(err)
|
||||
assert.Equal(t, []string{"op3", "op2", "op1"}, stack)
|
||||
|
||||
// Format
|
||||
formatted := FormatStackTrace(err)
|
||||
assert.Equal(t, "op3 -> op2 -> op1", formatted)
|
||||
}
|
||||
|
||||
func TestStackTrace_PlainError(t *testing.T) {
|
||||
err := errors.New("plain error")
|
||||
assert.Empty(t, StackTrace(err))
|
||||
assert.Empty(t, FormatStackTrace(err))
|
||||
}
|
||||
|
||||
func TestStackTrace_Nil(t *testing.T) {
|
||||
assert.Empty(t, StackTrace(nil))
|
||||
assert.Empty(t, FormatStackTrace(nil))
|
||||
}
|
||||
|
||||
func TestStackTrace_NoOp(t *testing.T) {
|
||||
err := &Err{Msg: "no op"}
|
||||
assert.Empty(t, StackTrace(err))
|
||||
assert.Empty(t, FormatStackTrace(err))
|
||||
}
|
||||
|
||||
func TestStackTrace_Mixed(t *testing.T) {
|
||||
err := E("inner", "msg", nil)
|
||||
err = errors.New("middle: " + err.Error()) // Breaks the chain if not handled properly, but Unwrap should work if it's a wrapped error
|
||||
// Wait, errors.New doesn't wrap. fmt.Errorf("%w") does.
|
||||
err = E("inner", "msg", nil)
|
||||
err = fmt.Errorf("wrapper: %w", err)
|
||||
err = Wrap(err, "outer", "msg")
|
||||
|
||||
stack := StackTrace(err)
|
||||
assert.Equal(t, []string{"outer", "inner"}, stack)
|
||||
}
|
||||
354
pkg/log/log.go
354
pkg/log/log.go
|
|
@ -1,314 +1,74 @@
|
|||
// Package log provides structured logging for Core applications.
|
||||
// Package log re-exports go-log and provides framework integration (Service)
|
||||
// and log rotation (RotatingWriter) that depend on core/go internals.
|
||||
//
|
||||
// The package works standalone or integrated with the Core framework:
|
||||
//
|
||||
// // Standalone usage
|
||||
// log.SetLevel(log.LevelDebug)
|
||||
// log.Info("server started", "port", 8080)
|
||||
// log.Error("failed to connect", "err", err)
|
||||
//
|
||||
// // With Core framework
|
||||
// core.New(
|
||||
// framework.WithName("log", log.NewService(log.Options{Level: log.LevelInfo})),
|
||||
// )
|
||||
// New code should import forge.lthn.ai/core/go-log directly.
|
||||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/user"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
golog "forge.lthn.ai/core/go-log"
|
||||
)
|
||||
|
||||
// Level defines logging verbosity.
|
||||
type Level int
|
||||
// Type aliases — all go-log types available as log.X
|
||||
type (
|
||||
Level = golog.Level
|
||||
Logger = golog.Logger
|
||||
Options = golog.Options
|
||||
RotationOptions = golog.RotationOptions
|
||||
Err = golog.Err
|
||||
)
|
||||
|
||||
// Logging level constants ordered by increasing verbosity.
|
||||
// Level constants.
|
||||
const (
|
||||
// LevelQuiet suppresses all log output.
|
||||
LevelQuiet Level = iota
|
||||
// LevelError shows only error messages.
|
||||
LevelError
|
||||
// LevelWarn shows warnings and errors.
|
||||
LevelWarn
|
||||
// LevelInfo shows informational messages, warnings, and errors.
|
||||
LevelInfo
|
||||
// LevelDebug shows all messages including debug details.
|
||||
LevelDebug
|
||||
LevelQuiet = golog.LevelQuiet
|
||||
LevelError = golog.LevelError
|
||||
LevelWarn = golog.LevelWarn
|
||||
LevelInfo = golog.LevelInfo
|
||||
LevelDebug = golog.LevelDebug
|
||||
)
|
||||
|
||||
// String returns the level name.
|
||||
func (l Level) String() string {
|
||||
switch l {
|
||||
case LevelQuiet:
|
||||
return "quiet"
|
||||
case LevelError:
|
||||
return "error"
|
||||
case LevelWarn:
|
||||
return "warn"
|
||||
case LevelInfo:
|
||||
return "info"
|
||||
case LevelDebug:
|
||||
return "debug"
|
||||
default:
|
||||
return "unknown"
|
||||
func init() {
|
||||
// Wire rotation into go-log: when go-log's New() gets RotationOptions,
|
||||
// it calls this factory to create the RotatingWriter (which needs go-io).
|
||||
golog.RotationWriterFactory = func(opts RotationOptions) io.WriteCloser {
|
||||
return NewRotatingWriter(opts, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Logger provides structured logging.
|
||||
type Logger struct {
|
||||
mu sync.RWMutex
|
||||
level Level
|
||||
output io.Writer
|
||||
// --- Logging functions (re-exported from go-log) ---
|
||||
|
||||
// Style functions for formatting (can be overridden)
|
||||
StyleTimestamp func(string) string
|
||||
StyleDebug func(string) string
|
||||
StyleInfo func(string) string
|
||||
StyleWarn func(string) string
|
||||
StyleError func(string) string
|
||||
StyleSecurity func(string) string
|
||||
}
|
||||
var (
|
||||
New = golog.New
|
||||
Default = golog.Default
|
||||
SetDefault = golog.SetDefault
|
||||
SetLevel = golog.SetLevel
|
||||
Debug = golog.Debug
|
||||
Info = golog.Info
|
||||
Warn = golog.Warn
|
||||
Error = golog.Error
|
||||
Security = golog.Security
|
||||
Username = golog.Username
|
||||
)
|
||||
|
||||
// RotationOptions defines the log rotation and retention policy.
|
||||
type RotationOptions struct {
|
||||
// Filename is the log file path. If empty, rotation is disabled.
|
||||
Filename string
|
||||
// --- Error functions (re-exported from go-log) ---
|
||||
|
||||
// MaxSize is the maximum size of the log file in megabytes before it gets rotated.
|
||||
// It defaults to 100 megabytes.
|
||||
MaxSize int
|
||||
|
||||
// MaxAge is the maximum number of days to retain old log files based on their
|
||||
// file modification time. It defaults to 28 days.
|
||||
// Note: set to a negative value to disable age-based retention.
|
||||
MaxAge int
|
||||
|
||||
// MaxBackups is the maximum number of old log files to retain.
|
||||
// It defaults to 5 backups.
|
||||
MaxBackups int
|
||||
|
||||
// Compress determines if the rotated log files should be compressed using gzip.
|
||||
// It defaults to true.
|
||||
Compress bool
|
||||
}
|
||||
|
||||
// Options configures a Logger.
|
||||
type Options struct {
|
||||
Level Level
|
||||
// Output is the destination for log messages. If Rotation is provided,
|
||||
// Output is ignored and logs are written to the rotating file instead.
|
||||
Output io.Writer
|
||||
// Rotation enables log rotation to file. If provided, Filename must be set.
|
||||
Rotation *RotationOptions
|
||||
}
|
||||
|
||||
// New creates a new Logger with the given options.
|
||||
func New(opts Options) *Logger {
|
||||
output := opts.Output
|
||||
if opts.Rotation != nil && opts.Rotation.Filename != "" {
|
||||
output = NewRotatingWriter(*opts.Rotation, nil)
|
||||
}
|
||||
if output == nil {
|
||||
output = os.Stderr
|
||||
}
|
||||
|
||||
return &Logger{
|
||||
level: opts.Level,
|
||||
output: output,
|
||||
StyleTimestamp: identity,
|
||||
StyleDebug: identity,
|
||||
StyleInfo: identity,
|
||||
StyleWarn: identity,
|
||||
StyleError: identity,
|
||||
StyleSecurity: identity,
|
||||
}
|
||||
}
|
||||
|
||||
func identity(s string) string { return s }
|
||||
|
||||
// SetLevel changes the log level.
|
||||
func (l *Logger) SetLevel(level Level) {
|
||||
l.mu.Lock()
|
||||
l.level = level
|
||||
l.mu.Unlock()
|
||||
}
|
||||
|
||||
// Level returns the current log level.
|
||||
func (l *Logger) Level() Level {
|
||||
l.mu.RLock()
|
||||
defer l.mu.RUnlock()
|
||||
return l.level
|
||||
}
|
||||
|
||||
// SetOutput changes the output writer.
|
||||
func (l *Logger) SetOutput(w io.Writer) {
|
||||
l.mu.Lock()
|
||||
l.output = w
|
||||
l.mu.Unlock()
|
||||
}
|
||||
|
||||
func (l *Logger) shouldLog(level Level) bool {
|
||||
l.mu.RLock()
|
||||
defer l.mu.RUnlock()
|
||||
return level <= l.level
|
||||
}
|
||||
|
||||
func (l *Logger) log(level Level, prefix, msg string, keyvals ...any) {
|
||||
l.mu.RLock()
|
||||
output := l.output
|
||||
styleTimestamp := l.StyleTimestamp
|
||||
l.mu.RUnlock()
|
||||
|
||||
timestamp := styleTimestamp(time.Now().Format("15:04:05"))
|
||||
|
||||
// Automatically extract context from error if present in keyvals
|
||||
origLen := len(keyvals)
|
||||
for i := 0; i < origLen; i += 2 {
|
||||
if i+1 < origLen {
|
||||
if err, ok := keyvals[i+1].(error); ok {
|
||||
if op := Op(err); op != "" {
|
||||
// Check if op is already in keyvals
|
||||
hasOp := false
|
||||
for j := 0; j < len(keyvals); j += 2 {
|
||||
if keyvals[j] == "op" {
|
||||
hasOp = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasOp {
|
||||
keyvals = append(keyvals, "op", op)
|
||||
}
|
||||
}
|
||||
if stack := FormatStackTrace(err); stack != "" {
|
||||
// Check if stack is already in keyvals
|
||||
hasStack := false
|
||||
for j := 0; j < len(keyvals); j += 2 {
|
||||
if keyvals[j] == "stack" {
|
||||
hasStack = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasStack {
|
||||
keyvals = append(keyvals, "stack", stack)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Format key-value pairs
|
||||
var kvStr string
|
||||
if len(keyvals) > 0 {
|
||||
kvStr = " "
|
||||
for i := 0; i < len(keyvals); i += 2 {
|
||||
if i > 0 {
|
||||
kvStr += " "
|
||||
}
|
||||
key := keyvals[i]
|
||||
var val any
|
||||
if i+1 < len(keyvals) {
|
||||
val = keyvals[i+1]
|
||||
}
|
||||
kvStr += fmt.Sprintf("%v=%v", key, val)
|
||||
}
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(output, "%s %s %s%s\n", timestamp, prefix, msg, kvStr)
|
||||
}
|
||||
|
||||
// Debug logs a debug message with optional key-value pairs.
|
||||
func (l *Logger) Debug(msg string, keyvals ...any) {
|
||||
if l.shouldLog(LevelDebug) {
|
||||
l.log(LevelDebug, l.StyleDebug("[DBG]"), msg, keyvals...)
|
||||
}
|
||||
}
|
||||
|
||||
// Info logs an info message with optional key-value pairs.
|
||||
func (l *Logger) Info(msg string, keyvals ...any) {
|
||||
if l.shouldLog(LevelInfo) {
|
||||
l.log(LevelInfo, l.StyleInfo("[INF]"), msg, keyvals...)
|
||||
}
|
||||
}
|
||||
|
||||
// Warn logs a warning message with optional key-value pairs.
|
||||
func (l *Logger) Warn(msg string, keyvals ...any) {
|
||||
if l.shouldLog(LevelWarn) {
|
||||
l.log(LevelWarn, l.StyleWarn("[WRN]"), msg, keyvals...)
|
||||
}
|
||||
}
|
||||
|
||||
// Error logs an error message with optional key-value pairs.
|
||||
func (l *Logger) Error(msg string, keyvals ...any) {
|
||||
if l.shouldLog(LevelError) {
|
||||
l.log(LevelError, l.StyleError("[ERR]"), msg, keyvals...)
|
||||
}
|
||||
}
|
||||
|
||||
// Security logs a security event with optional key-value pairs.
|
||||
// It uses LevelError to ensure security events are visible even in restrictive
|
||||
// log configurations.
|
||||
func (l *Logger) Security(msg string, keyvals ...any) {
|
||||
if l.shouldLog(LevelError) {
|
||||
l.log(LevelError, l.StyleSecurity("[SEC]"), msg, keyvals...)
|
||||
}
|
||||
}
|
||||
|
||||
// Username returns the current system username.
|
||||
// It uses os/user for reliability and falls back to environment variables.
|
||||
func Username() string {
|
||||
if u, err := user.Current(); err == nil {
|
||||
return u.Username
|
||||
}
|
||||
// Fallback for environments where user lookup might fail
|
||||
if u := os.Getenv("USER"); u != "" {
|
||||
return u
|
||||
}
|
||||
return os.Getenv("USERNAME")
|
||||
}
|
||||
|
||||
// --- Default logger ---
|
||||
|
||||
var defaultLogger = New(Options{Level: LevelInfo})
|
||||
|
||||
// Default returns the default logger.
|
||||
func Default() *Logger {
|
||||
return defaultLogger
|
||||
}
|
||||
|
||||
// SetDefault sets the default logger.
|
||||
func SetDefault(l *Logger) {
|
||||
defaultLogger = l
|
||||
}
|
||||
|
||||
// SetLevel sets the default logger's level.
|
||||
func SetLevel(level Level) {
|
||||
defaultLogger.SetLevel(level)
|
||||
}
|
||||
|
||||
// Debug logs to the default logger.
|
||||
func Debug(msg string, keyvals ...any) {
|
||||
defaultLogger.Debug(msg, keyvals...)
|
||||
}
|
||||
|
||||
// Info logs to the default logger.
|
||||
func Info(msg string, keyvals ...any) {
|
||||
defaultLogger.Info(msg, keyvals...)
|
||||
}
|
||||
|
||||
// Warn logs to the default logger.
|
||||
func Warn(msg string, keyvals ...any) {
|
||||
defaultLogger.Warn(msg, keyvals...)
|
||||
}
|
||||
|
||||
// Error logs to the default logger.
|
||||
func Error(msg string, keyvals ...any) {
|
||||
defaultLogger.Error(msg, keyvals...)
|
||||
}
|
||||
|
||||
// Security logs to the default logger.
|
||||
func Security(msg string, keyvals ...any) {
|
||||
defaultLogger.Security(msg, keyvals...)
|
||||
}
|
||||
var (
|
||||
E = golog.E
|
||||
Wrap = golog.Wrap
|
||||
WrapCode = golog.WrapCode
|
||||
NewCode = golog.NewCode
|
||||
Is = golog.Is
|
||||
As = golog.As
|
||||
NewError = golog.NewError
|
||||
Join = golog.Join
|
||||
Op = golog.Op
|
||||
ErrCode = golog.ErrCode
|
||||
Message = golog.Message
|
||||
Root = golog.Root
|
||||
StackTrace = golog.StackTrace
|
||||
FormatStackTrace = golog.FormatStackTrace
|
||||
LogError = golog.LogError
|
||||
LogWarn = golog.LogWarn
|
||||
Must = golog.Must
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,196 +0,0 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
func TestLogger_Levels(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
level Level
|
||||
logFunc func(*Logger, string, ...any)
|
||||
expected bool
|
||||
}{
|
||||
{"debug at debug", LevelDebug, (*Logger).Debug, true},
|
||||
{"info at debug", LevelDebug, (*Logger).Info, true},
|
||||
{"warn at debug", LevelDebug, (*Logger).Warn, true},
|
||||
{"error at debug", LevelDebug, (*Logger).Error, true},
|
||||
|
||||
{"debug at info", LevelInfo, (*Logger).Debug, false},
|
||||
{"info at info", LevelInfo, (*Logger).Info, true},
|
||||
{"warn at info", LevelInfo, (*Logger).Warn, true},
|
||||
{"error at info", LevelInfo, (*Logger).Error, true},
|
||||
|
||||
{"debug at warn", LevelWarn, (*Logger).Debug, false},
|
||||
{"info at warn", LevelWarn, (*Logger).Info, false},
|
||||
{"warn at warn", LevelWarn, (*Logger).Warn, true},
|
||||
{"error at warn", LevelWarn, (*Logger).Error, true},
|
||||
|
||||
{"debug at error", LevelError, (*Logger).Debug, false},
|
||||
{"info at error", LevelError, (*Logger).Info, false},
|
||||
{"warn at error", LevelError, (*Logger).Warn, false},
|
||||
{"error at error", LevelError, (*Logger).Error, true},
|
||||
|
||||
{"debug at quiet", LevelQuiet, (*Logger).Debug, false},
|
||||
{"info at quiet", LevelQuiet, (*Logger).Info, false},
|
||||
{"warn at quiet", LevelQuiet, (*Logger).Warn, false},
|
||||
{"error at quiet", LevelQuiet, (*Logger).Error, false},
|
||||
|
||||
{"security at info", LevelInfo, (*Logger).Security, true},
|
||||
{"security at error", LevelError, (*Logger).Security, true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
l := New(Options{Level: tt.level, Output: &buf})
|
||||
tt.logFunc(l, "test message")
|
||||
|
||||
hasOutput := buf.Len() > 0
|
||||
if hasOutput != tt.expected {
|
||||
t.Errorf("expected output=%v, got output=%v", tt.expected, hasOutput)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLogger_KeyValues(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
l := New(Options{Level: LevelDebug, Output: &buf})
|
||||
|
||||
l.Info("test message", "key1", "value1", "key2", 42)
|
||||
|
||||
output := buf.String()
|
||||
if !strings.Contains(output, "test message") {
|
||||
t.Error("expected message in output")
|
||||
}
|
||||
if !strings.Contains(output, "key1=value1") {
|
||||
t.Error("expected key1=value1 in output")
|
||||
}
|
||||
if !strings.Contains(output, "key2=42") {
|
||||
t.Error("expected key2=42 in output")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLogger_ErrorContext(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
l := New(Options{Output: &buf, Level: LevelInfo})
|
||||
|
||||
err := E("test.Op", "failed", NewError("root cause"))
|
||||
err = Wrap(err, "outer.Op", "outer failed")
|
||||
|
||||
l.Error("something failed", "err", err)
|
||||
|
||||
got := buf.String()
|
||||
if !strings.Contains(got, "op=outer.Op") {
|
||||
t.Errorf("expected output to contain op=outer.Op, got %q", got)
|
||||
}
|
||||
if !strings.Contains(got, "stack=outer.Op -> test.Op") {
|
||||
t.Errorf("expected output to contain stack=outer.Op -> test.Op, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLogger_SetLevel(t *testing.T) {
|
||||
l := New(Options{Level: LevelInfo})
|
||||
|
||||
if l.Level() != LevelInfo {
|
||||
t.Error("expected initial level to be Info")
|
||||
}
|
||||
|
||||
l.SetLevel(LevelDebug)
|
||||
if l.Level() != LevelDebug {
|
||||
t.Error("expected level to be Debug after SetLevel")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLevel_String(t *testing.T) {
|
||||
tests := []struct {
|
||||
level Level
|
||||
expected string
|
||||
}{
|
||||
{LevelQuiet, "quiet"},
|
||||
{LevelError, "error"},
|
||||
{LevelWarn, "warn"},
|
||||
{LevelInfo, "info"},
|
||||
{LevelDebug, "debug"},
|
||||
{Level(99), "unknown"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.expected, func(t *testing.T) {
|
||||
if got := tt.level.String(); got != tt.expected {
|
||||
t.Errorf("expected %q, got %q", tt.expected, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLogger_Security(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
l := New(Options{Level: LevelError, Output: &buf})
|
||||
|
||||
l.Security("unauthorized access", "user", "admin")
|
||||
|
||||
output := buf.String()
|
||||
if !strings.Contains(output, "[SEC]") {
|
||||
t.Error("expected [SEC] prefix in security log")
|
||||
}
|
||||
if !strings.Contains(output, "unauthorized access") {
|
||||
t.Error("expected message in security log")
|
||||
}
|
||||
if !strings.Contains(output, "user=admin") {
|
||||
t.Error("expected context in security log")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefault(t *testing.T) {
|
||||
// Default logger should exist
|
||||
if Default() == nil {
|
||||
t.Error("expected default logger to exist")
|
||||
}
|
||||
|
||||
// Package-level functions should work
|
||||
var buf bytes.Buffer
|
||||
l := New(Options{Level: LevelDebug, Output: &buf})
|
||||
SetDefault(l)
|
||||
|
||||
Info("test")
|
||||
if buf.Len() == 0 {
|
||||
t.Error("expected package-level Info to produce output")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLogger_RotationIntegration(t *testing.T) {
|
||||
m := io.NewMockMedium()
|
||||
// Hack: override io.Local for testing
|
||||
oldLocal := io.Local
|
||||
io.Local = m
|
||||
defer func() { io.Local = oldLocal }()
|
||||
|
||||
l := New(Options{
|
||||
Level: LevelInfo,
|
||||
Rotation: &RotationOptions{
|
||||
Filename: "integration.log",
|
||||
MaxSize: 1,
|
||||
},
|
||||
})
|
||||
|
||||
l.Info("integration test")
|
||||
|
||||
// RotatingWriter needs to be closed to ensure data is written to MockMedium
|
||||
if rw, ok := l.output.(*RotatingWriter); ok {
|
||||
rw.Close()
|
||||
}
|
||||
|
||||
content, err := m.Read("integration.log")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read log: %v", err)
|
||||
}
|
||||
if !strings.Contains(content, "integration test") {
|
||||
t.Errorf("expected content to contain log message, got %q", content)
|
||||
}
|
||||
}
|
||||
|
|
@ -6,7 +6,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
coreio "forge.lthn.ai/core/go/pkg/io"
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// RotatingWriter implements io.WriteCloser and provides log rotation.
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
func TestRotatingWriter_Basic(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ import (
|
|||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ import (
|
|||
"crypto/ed25519"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"forge.lthn.ai/core/go/pkg/manifest"
|
||||
"forge.lthn.ai/core/go/pkg/store"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -8,8 +8,8 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
core "forge.lthn.ai/core/go/pkg/framework/core"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// Installer handles plugin installation from GitHub.
|
||||
|
|
@ -31,22 +31,22 @@ func NewInstaller(m io.Medium, registry *Registry) *Installer {
|
|||
func (i *Installer) Install(ctx context.Context, source string) error {
|
||||
org, repo, version, err := ParseSource(source)
|
||||
if err != nil {
|
||||
return core.E("plugin.Installer.Install", "invalid source", err)
|
||||
return coreerr.E("plugin.Installer.Install", "invalid source", err)
|
||||
}
|
||||
|
||||
// Check if already installed
|
||||
if _, exists := i.registry.Get(repo); exists {
|
||||
return core.E("plugin.Installer.Install", "plugin already installed: "+repo, nil)
|
||||
return coreerr.E("plugin.Installer.Install", "plugin already installed: "+repo, nil)
|
||||
}
|
||||
|
||||
// Clone the repository
|
||||
pluginDir := filepath.Join(i.registry.basePath, repo)
|
||||
if err := i.medium.EnsureDir(pluginDir); err != nil {
|
||||
return core.E("plugin.Installer.Install", "failed to create plugin directory", err)
|
||||
return coreerr.E("plugin.Installer.Install", "failed to create plugin directory", err)
|
||||
}
|
||||
|
||||
if err := i.cloneRepo(ctx, org, repo, version, pluginDir); err != nil {
|
||||
return core.E("plugin.Installer.Install", "failed to clone repository", err)
|
||||
return coreerr.E("plugin.Installer.Install", "failed to clone repository", err)
|
||||
}
|
||||
|
||||
// Load and validate manifest
|
||||
|
|
@ -55,12 +55,12 @@ func (i *Installer) Install(ctx context.Context, source string) error {
|
|||
if err != nil {
|
||||
// Clean up on failure
|
||||
_ = i.medium.DeleteAll(pluginDir)
|
||||
return core.E("plugin.Installer.Install", "failed to load manifest", err)
|
||||
return coreerr.E("plugin.Installer.Install", "failed to load manifest", err)
|
||||
}
|
||||
|
||||
if err := manifest.Validate(); err != nil {
|
||||
_ = i.medium.DeleteAll(pluginDir)
|
||||
return core.E("plugin.Installer.Install", "invalid manifest", err)
|
||||
return coreerr.E("plugin.Installer.Install", "invalid manifest", err)
|
||||
}
|
||||
|
||||
// Resolve version
|
||||
|
|
@ -78,11 +78,11 @@ func (i *Installer) Install(ctx context.Context, source string) error {
|
|||
}
|
||||
|
||||
if err := i.registry.Add(cfg); err != nil {
|
||||
return core.E("plugin.Installer.Install", "failed to register plugin", err)
|
||||
return coreerr.E("plugin.Installer.Install", "failed to register plugin", err)
|
||||
}
|
||||
|
||||
if err := i.registry.Save(); err != nil {
|
||||
return core.E("plugin.Installer.Install", "failed to save registry", err)
|
||||
return coreerr.E("plugin.Installer.Install", "failed to save registry", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -92,7 +92,7 @@ func (i *Installer) Install(ctx context.Context, source string) error {
|
|||
func (i *Installer) Update(ctx context.Context, name string) error {
|
||||
cfg, ok := i.registry.Get(name)
|
||||
if !ok {
|
||||
return core.E("plugin.Installer.Update", "plugin not found: "+name, nil)
|
||||
return coreerr.E("plugin.Installer.Update", "plugin not found: "+name, nil)
|
||||
}
|
||||
|
||||
// Parse the source to get org/repo
|
||||
|
|
@ -102,20 +102,20 @@ func (i *Installer) Update(ctx context.Context, name string) error {
|
|||
// Pull latest changes
|
||||
cmd := exec.CommandContext(ctx, "git", "-C", pluginDir, "pull", "--ff-only")
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return core.E("plugin.Installer.Update", "failed to pull updates: "+strings.TrimSpace(string(output)), err)
|
||||
return coreerr.E("plugin.Installer.Update", "failed to pull updates: "+strings.TrimSpace(string(output)), err)
|
||||
}
|
||||
|
||||
// Reload manifest to get updated version
|
||||
manifestPath := filepath.Join(pluginDir, "plugin.json")
|
||||
manifest, err := LoadManifest(i.medium, manifestPath)
|
||||
if err != nil {
|
||||
return core.E("plugin.Installer.Update", "failed to read updated manifest", err)
|
||||
return coreerr.E("plugin.Installer.Update", "failed to read updated manifest", err)
|
||||
}
|
||||
|
||||
// Update registry
|
||||
cfg.Version = manifest.Version
|
||||
if err := i.registry.Save(); err != nil {
|
||||
return core.E("plugin.Installer.Update", "failed to save registry", err)
|
||||
return coreerr.E("plugin.Installer.Update", "failed to save registry", err)
|
||||
}
|
||||
|
||||
_ = source // used for context
|
||||
|
|
@ -125,24 +125,24 @@ func (i *Installer) Update(ctx context.Context, name string) error {
|
|||
// Remove uninstalls a plugin by removing its files and registry entry.
|
||||
func (i *Installer) Remove(name string) error {
|
||||
if _, ok := i.registry.Get(name); !ok {
|
||||
return core.E("plugin.Installer.Remove", "plugin not found: "+name, nil)
|
||||
return coreerr.E("plugin.Installer.Remove", "plugin not found: "+name, nil)
|
||||
}
|
||||
|
||||
// Delete plugin directory
|
||||
pluginDir := filepath.Join(i.registry.basePath, name)
|
||||
if i.medium.Exists(pluginDir) {
|
||||
if err := i.medium.DeleteAll(pluginDir); err != nil {
|
||||
return core.E("plugin.Installer.Remove", "failed to delete plugin files", err)
|
||||
return coreerr.E("plugin.Installer.Remove", "failed to delete plugin files", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove from registry
|
||||
if err := i.registry.Remove(name); err != nil {
|
||||
return core.E("plugin.Installer.Remove", "failed to unregister plugin", err)
|
||||
return coreerr.E("plugin.Installer.Remove", "failed to unregister plugin", err)
|
||||
}
|
||||
|
||||
if err := i.registry.Save(); err != nil {
|
||||
return core.E("plugin.Installer.Remove", "failed to save registry", err)
|
||||
return coreerr.E("plugin.Installer.Remove", "failed to save registry", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -171,7 +171,7 @@ func (i *Installer) cloneRepo(ctx context.Context, org, repo, version, dest stri
|
|||
// - "org/repo@v1.0" -> org="org", repo="repo", version="v1.0"
|
||||
func ParseSource(source string) (org, repo, version string, err error) {
|
||||
if source == "" {
|
||||
return "", "", "", core.E("plugin.ParseSource", "source is empty", nil)
|
||||
return "", "", "", coreerr.E("plugin.ParseSource", "source is empty", nil)
|
||||
}
|
||||
|
||||
// Split off version if present
|
||||
|
|
@ -181,14 +181,14 @@ func ParseSource(source string) (org, repo, version string, err error) {
|
|||
path = source[:atIdx]
|
||||
version = source[atIdx+1:]
|
||||
if version == "" {
|
||||
return "", "", "", core.E("plugin.ParseSource", "version is empty after @", nil)
|
||||
return "", "", "", coreerr.E("plugin.ParseSource", "version is empty after @", nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Split org/repo
|
||||
parts := strings.Split(path, "/")
|
||||
if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
|
||||
return "", "", "", core.E("plugin.ParseSource", "source must be in format org/repo[@version]", nil)
|
||||
return "", "", "", coreerr.E("plugin.ParseSource", "source must be in format org/repo[@version]", nil)
|
||||
}
|
||||
|
||||
return parts[0], parts[1], version, nil
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ import (
|
|||
"context"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -3,8 +3,8 @@ package plugin
|
|||
import (
|
||||
"path/filepath"
|
||||
|
||||
core "forge.lthn.ai/core/go/pkg/framework/core"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// Loader loads plugins from the filesystem.
|
||||
|
|
@ -26,7 +26,7 @@ func NewLoader(m io.Medium, baseDir string) *Loader {
|
|||
func (l *Loader) Discover() ([]*Manifest, error) {
|
||||
entries, err := l.medium.List(l.baseDir)
|
||||
if err != nil {
|
||||
return nil, core.E("plugin.Loader.Discover", "failed to list plugin directory", err)
|
||||
return nil, coreerr.E("plugin.Loader.Discover", "failed to list plugin directory", err)
|
||||
}
|
||||
|
||||
var manifests []*Manifest
|
||||
|
|
@ -52,11 +52,11 @@ func (l *Loader) LoadPlugin(name string) (*Manifest, error) {
|
|||
manifestPath := filepath.Join(l.baseDir, name, "plugin.json")
|
||||
manifest, err := LoadManifest(l.medium, manifestPath)
|
||||
if err != nil {
|
||||
return nil, core.E("plugin.Loader.LoadPlugin", "failed to load plugin: "+name, err)
|
||||
return nil, coreerr.E("plugin.Loader.LoadPlugin", "failed to load plugin: "+name, err)
|
||||
}
|
||||
|
||||
if err := manifest.Validate(); err != nil {
|
||||
return nil, core.E("plugin.Loader.LoadPlugin", "invalid plugin manifest: "+name, err)
|
||||
return nil, coreerr.E("plugin.Loader.LoadPlugin", "invalid plugin manifest: "+name, err)
|
||||
}
|
||||
|
||||
return manifest, nil
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ package plugin
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -3,8 +3,8 @@ package plugin
|
|||
import (
|
||||
"encoding/json"
|
||||
|
||||
core "forge.lthn.ai/core/go/pkg/framework/core"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// Manifest represents a plugin.json manifest file.
|
||||
|
|
@ -23,12 +23,12 @@ type Manifest struct {
|
|||
func LoadManifest(m io.Medium, path string) (*Manifest, error) {
|
||||
content, err := m.Read(path)
|
||||
if err != nil {
|
||||
return nil, core.E("plugin.LoadManifest", "failed to read manifest", err)
|
||||
return nil, coreerr.E("plugin.LoadManifest", "failed to read manifest", err)
|
||||
}
|
||||
|
||||
var manifest Manifest
|
||||
if err := json.Unmarshal([]byte(content), &manifest); err != nil {
|
||||
return nil, core.E("plugin.LoadManifest", "failed to parse manifest JSON", err)
|
||||
return nil, coreerr.E("plugin.LoadManifest", "failed to parse manifest JSON", err)
|
||||
}
|
||||
|
||||
return &manifest, nil
|
||||
|
|
@ -38,13 +38,13 @@ func LoadManifest(m io.Medium, path string) (*Manifest, error) {
|
|||
// Returns an error if name, version, or entrypoint are missing.
|
||||
func (m *Manifest) Validate() error {
|
||||
if m.Name == "" {
|
||||
return core.E("plugin.Manifest.Validate", "name is required", nil)
|
||||
return coreerr.E("plugin.Manifest.Validate", "name is required", nil)
|
||||
}
|
||||
if m.Version == "" {
|
||||
return core.E("plugin.Manifest.Validate", "version is required", nil)
|
||||
return coreerr.E("plugin.Manifest.Validate", "version is required", nil)
|
||||
}
|
||||
if m.Entrypoint == "" {
|
||||
return core.E("plugin.Manifest.Validate", "entrypoint is required", nil)
|
||||
return coreerr.E("plugin.Manifest.Validate", "entrypoint is required", nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ package plugin
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -6,8 +6,8 @@ import (
|
|||
"path/filepath"
|
||||
"slices"
|
||||
|
||||
core "forge.lthn.ai/core/go/pkg/framework/core"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
const registryFilename = "registry.json"
|
||||
|
|
@ -51,7 +51,7 @@ func (r *Registry) Get(name string) (*PluginConfig, bool) {
|
|||
// Add registers a plugin in the registry.
|
||||
func (r *Registry) Add(cfg *PluginConfig) error {
|
||||
if cfg.Name == "" {
|
||||
return core.E("plugin.Registry.Add", "plugin name is required", nil)
|
||||
return coreerr.E("plugin.Registry.Add", "plugin name is required", nil)
|
||||
}
|
||||
r.plugins[cfg.Name] = cfg
|
||||
return nil
|
||||
|
|
@ -60,7 +60,7 @@ func (r *Registry) Add(cfg *PluginConfig) error {
|
|||
// Remove unregisters a plugin from the registry.
|
||||
func (r *Registry) Remove(name string) error {
|
||||
if _, ok := r.plugins[name]; !ok {
|
||||
return core.E("plugin.Registry.Remove", "plugin not found: "+name, nil)
|
||||
return coreerr.E("plugin.Registry.Remove", "plugin not found: "+name, nil)
|
||||
}
|
||||
delete(r.plugins, name)
|
||||
return nil
|
||||
|
|
@ -84,12 +84,12 @@ func (r *Registry) Load() error {
|
|||
|
||||
content, err := r.medium.Read(path)
|
||||
if err != nil {
|
||||
return core.E("plugin.Registry.Load", "failed to read registry", err)
|
||||
return coreerr.E("plugin.Registry.Load", "failed to read registry", err)
|
||||
}
|
||||
|
||||
var plugins map[string]*PluginConfig
|
||||
if err := json.Unmarshal([]byte(content), &plugins); err != nil {
|
||||
return core.E("plugin.Registry.Load", "failed to parse registry", err)
|
||||
return coreerr.E("plugin.Registry.Load", "failed to parse registry", err)
|
||||
}
|
||||
|
||||
if plugins == nil {
|
||||
|
|
@ -102,16 +102,16 @@ func (r *Registry) Load() error {
|
|||
// Save writes the plugin registry to disk.
|
||||
func (r *Registry) Save() error {
|
||||
if err := r.medium.EnsureDir(r.basePath); err != nil {
|
||||
return core.E("plugin.Registry.Save", "failed to create plugin directory", err)
|
||||
return coreerr.E("plugin.Registry.Save", "failed to create plugin directory", err)
|
||||
}
|
||||
|
||||
data, err := json.MarshalIndent(r.plugins, "", " ")
|
||||
if err != nil {
|
||||
return core.E("plugin.Registry.Save", "failed to marshal registry", err)
|
||||
return coreerr.E("plugin.Registry.Save", "failed to marshal registry", err)
|
||||
}
|
||||
|
||||
if err := r.medium.Write(r.registryPath(), string(data)); err != nil {
|
||||
return core.E("plugin.Registry.Save", "failed to write registry", err)
|
||||
return coreerr.E("plugin.Registry.Save", "failed to write registry", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ package plugin
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ import (
|
|||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ package repos
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -7,8 +7,9 @@ import (
|
|||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
core "forge.lthn.ai/core/go/pkg/framework/core"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// Service implements the core.Workspace interface.
|
||||
|
|
@ -24,7 +25,7 @@ type Service struct {
|
|||
func New(c *core.Core) (any, error) {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return nil, core.E("workspace.New", "failed to determine home directory", err)
|
||||
return nil, coreerr.E("workspace.New", "failed to determine home directory", err)
|
||||
}
|
||||
rootPath := filepath.Join(home, ".core", "workspaces")
|
||||
|
||||
|
|
@ -35,7 +36,7 @@ func New(c *core.Core) (any, error) {
|
|||
}
|
||||
|
||||
if err := s.medium.EnsureDir(rootPath); err != nil {
|
||||
return nil, core.E("workspace.New", "failed to ensure root directory", err)
|
||||
return nil, coreerr.E("workspace.New", "failed to ensure root directory", err)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
|
|
@ -54,30 +55,30 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) {
|
|||
wsPath := filepath.Join(s.rootPath, wsID)
|
||||
|
||||
if s.medium.Exists(wsPath) {
|
||||
return "", core.E("workspace.CreateWorkspace", "workspace already exists", nil)
|
||||
return "", coreerr.E("workspace.CreateWorkspace", "workspace already exists", nil)
|
||||
}
|
||||
|
||||
// 2. Directory structure
|
||||
dirs := []string{"config", "log", "data", "files", "keys"}
|
||||
for _, d := range dirs {
|
||||
if err := s.medium.EnsureDir(filepath.Join(wsPath, d)); err != nil {
|
||||
return "", core.E("workspace.CreateWorkspace", "failed to create directory: "+d, err)
|
||||
return "", coreerr.E("workspace.CreateWorkspace", "failed to create directory: "+d, err)
|
||||
}
|
||||
}
|
||||
|
||||
// 3. PGP Keypair generation
|
||||
crypt := s.core.Crypt()
|
||||
if crypt == nil {
|
||||
return "", core.E("workspace.CreateWorkspace", "crypt service not available", nil)
|
||||
return "", coreerr.E("workspace.CreateWorkspace", "crypt service not available", nil)
|
||||
}
|
||||
privKey, err := crypt.CreateKeyPair(identifier, password)
|
||||
if err != nil {
|
||||
return "", core.E("workspace.CreateWorkspace", "failed to generate keys", err)
|
||||
return "", coreerr.E("workspace.CreateWorkspace", "failed to generate keys", err)
|
||||
}
|
||||
|
||||
// Save private key
|
||||
if err := s.medium.Write(filepath.Join(wsPath, "keys", "private.key"), privKey); err != nil {
|
||||
return "", core.E("workspace.CreateWorkspace", "failed to save private key", err)
|
||||
return "", coreerr.E("workspace.CreateWorkspace", "failed to save private key", err)
|
||||
}
|
||||
|
||||
return wsID, nil
|
||||
|
|
@ -90,7 +91,7 @@ func (s *Service) SwitchWorkspace(name string) error {
|
|||
|
||||
wsPath := filepath.Join(s.rootPath, name)
|
||||
if !s.medium.IsDir(wsPath) {
|
||||
return core.E("workspace.SwitchWorkspace", "workspace not found: "+name, nil)
|
||||
return coreerr.E("workspace.SwitchWorkspace", "workspace not found: "+name, nil)
|
||||
}
|
||||
|
||||
s.activeWorkspace = name
|
||||
|
|
@ -104,7 +105,7 @@ func (s *Service) WorkspaceFileGet(filename string) (string, error) {
|
|||
defer s.mu.RUnlock()
|
||||
|
||||
if s.activeWorkspace == "" {
|
||||
return "", core.E("workspace.WorkspaceFileGet", "no active workspace", nil)
|
||||
return "", coreerr.E("workspace.WorkspaceFileGet", "no active workspace", nil)
|
||||
}
|
||||
|
||||
path := filepath.Join(s.rootPath, s.activeWorkspace, "files", filename)
|
||||
|
|
@ -118,7 +119,7 @@ func (s *Service) WorkspaceFileSet(filename, content string) error {
|
|||
defer s.mu.Unlock()
|
||||
|
||||
if s.activeWorkspace == "" {
|
||||
return core.E("workspace.WorkspaceFileSet", "no active workspace", nil)
|
||||
return coreerr.E("workspace.WorkspaceFileSet", "no active workspace", nil)
|
||||
}
|
||||
|
||||
path := filepath.Join(s.rootPath, s.activeWorkspace, "files", filename)
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue